Aaron Weiss <aaronweiss74@gmail.com>
Abhishek Chanda <abhishek.becs@gmail.com>
Adam Bozanich <adam.boz@gmail.com>
+Adam Heins <mail@adamheins.com>
Adam Jacob <adam@opscode.com>
Adam Roben <adam@roben.org>
Adam Szkoda <adaszko@gmail.com>
Adolfo Ochagavía <aochagavia92@gmail.com>
Adrien Brault <adrien.brault@gmail.com>
Adrien Tétar <adri-from-59@hotmail.fr>
+Agoston Szepessy <agszepp@gmail.com>
Ahmed Charles <ahmedcharles@gmail.com>
Aidan Cully <github@aidan.users.panix.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
Alex Gaynor <alex.gaynor@gmail.com>
Alexis Beingessner <a.beingessner@gmail.com>
Alex Lyon <arcterus@mail.com>
+Alex Newman <posix4e@gmail.com>
Alex Quach <alex@clinkle.com>
Alex Rønne Petersen <alex@lycus.org>
Alex Stokes <r.alex.stokes@gmail.com>
Andrew Gallant <jamslam@gmail.com>
Andrew Hobden <andrew@hoverbear.org>
Andrew Kensler <andrew@eastfarthing.com>
+Andrew Kuchev <0coming.soon@gmail.com>
Andrew Paseltiner <apaseltiner@gmail.com>
Andrew Poelstra <asp11@sfu.ca>
Andrew Seidl <dev@aas.io>
Andrew Straw <strawman@astraw.com>
Andrew Wagner <drewm1980@gmail.com>
Andrzej Janik <vosen@vosen.pl>
+Andy Caldwell <andrew.caldwell@metaswitch.com>
+Andy Grover <agrover@redhat.com>
Angus Lees <gus@inodes.org>
Anthony Juckel <ajuckel@gmail.com>
Anton Löfgren <anton.lofgren@gmail.com>
+Antti Keränen <detegr@gmail.com>
Aram Visser <aramvisser@gmail.com>
Arcterus <Arcterus@mail.com>
Areski Belaid <areski@gmail.com>
Arpad Borsos <arpad.borsos@googlemail.com>
Artem <artemciy@gmail.com>
Arthur Liao <arthurtw8@gmail.com>
+arthurprs <arthurprs@gmail.com>
arturo <arturo@openframeworks.cc>
Ashok Gautham <ScriptDevil@gmail.com>
Augusto Hack <hack.augusto@gmail.com>
Birunthan Mohanathas <birunthan@mohanathas.com>
Björn Steinbrink <bsteinbr@gmail.com>
blake2-ppc <ulrik.sverdrup@gmail.com>
+Blake Loring <Blake.Loring@ig.com>
bluss <bluss>
bluss <bluss@users.noreply.github.com>
Boris Egorov <egorov@linux.com>
Brian Koropoff <bkoropoff@gmail.com>
Brian Leibig <brian@brianleibig.com>
Brian Quinlan <brian@sweetapp.com>
+Brody Holden <brody.holden.r@gmail.com>
Bruno de Oliveira Abinader <bruno.d@partner.samsung.com>
Bryan Dunsmore <dunsmoreb@gmail.com>
Byron Williams <byron@112percent.com>
Chris Sainty <csainty@hotmail.com>
Chris Shea <cmshea@gmail.com>
Chris Thorn <chris@thorn.co>
+Christian Persson <saser@live.se>
Christian Stadelmann <dev@genodeftest.de>
+Christian Weinz <christian@madez.de>
Christoph Burgdorf <christoph.burgdorf@bvsn.org>
Christopher Bergqvist <spambox0@digitalpoetry.se>
Christopher Chambers <chris.chambers@peanutcode.com>
Dan Burkert <dan@danburkert.com>
Dan Callahan <dan.callahan@gmail.com>
Dan Connolly <dckc@madmode.com>
+Daniel Albert <albert_daniel@t-online.de>
Daniel Brooks <db48x@db48x.net>
Daniel Fagnan <dnfagnan@gmail.com>
Daniel Farina <daniel@fdr.io>
Derek Guenther <dguenther9@gmail.com>
Derek Harland <derek.harland@finq.co.nz>
dgoon <dgoon@dgoon.net>
+diaphore <diaphore@gmail.com>
Diego Giagio <diego@giagio.com>
Diego Ongaro <ongaro@cs.stanford.edu>
Diggory Blake <diggsey@googlemail.com>
Dimitri Krassovski <labria@startika.com>
Dirk Gadsden <dirk@esherido.com>
Dirkjan Bussink <d.bussink@gmail.com>
+Dirkjan Ochtman <dirkjan@ochtman.nl>
Dirk Leifeld <leifeld@posteo.de>
Div Shekhar <div@pagerduty.com>
diwic <diwic@users.noreply.github.com>
Elantsev Serj <elantsev@yandex-team.ru>
Eli Friedman <eli.friedman@gmail.com>
eliovir <eliovir@gmail.com>
+Eljay <lee@leejeffery.co.uk>
Elliott Slaughter <elliottslaughter@gmail.com>
Elly Fong-Jones <elly@leptoquark.net>
elszben <notgonna@tellyou>
Erik Price <erik.price16@gmail.com>
Erik Rose <erik@mozilla.com>
Erwan <erwan.ricq@gmail.com>
+Esption <esption@gmail.com>
+eternaleye <eternaleye@gmail.com>
Etienne Millon <me@emillon.org>
Eunchong Yu <kroisse@gmail.com>
Eunji Jeong <eun-ji.jeong@samsung.com>
Francisco Souza <f@souza.cc>
frankamp <frankamp@gmail.com>
Franklin Chen <franklinchen@franklinchen.com>
+Frank McSherry <fmcsherry@me.com>
Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
free-Runner <aali07@students.poly.edu>
FuGangqiang <fu_gangqiang@163.com>
Geoffrey Thomas <geofft@ldpreload.com>
Geoffroy Couprie <geo.couprie@gmail.com>
Geoffry Song <goffrie@gmail.com>
+Georg Brandl <georg@python.org>
George Papanikolaou <g3orge.app@gmail.com>
Georges Dubus <georges.dubus@gmail.com>
Germano Gabbianelli <tyrion@users.noreply.github.com>
Jan Bujak <j@exia.io>
Jan-Erik Rediger <janerik@fnordig.de>
Jan Kobler <eng1@koblersystems.de>
+Jan Likar <likar.jan@gmail.com>
Jan Niklas Hasse <jhasse@gmail.com>
Jannis Harder <jix@jixco.de>
Jannis Redmann <mail@jannisr.de>
Jashank Jeremy <jashank@rulingia.com>
Jason Fager <jfager@gmail.com>
Jason Orendorff <jorendorff@mozilla.com>
+Jason Schein <Jasonschein@gmail.com>
Jason Thompson <jason@jthompson.ca>
Jason Toffaletti <toffaletti@gmail.com>
Jason Yeo <jasonyeo88@gmail.com>
Jeaye <jeaye@arrownext.com>
Jed Davis <jld@panix.com>
Jed Estep <aje@jhu.edu>
+Jeehoon Kang <jeehoon.kang@sf.snu.ac.kr>
Jeff Balogh <jbalogh@mozilla.com>
Jeff Belgum <jeffbelgum@gmail.com>
Jeff Muizelaar <jmuizelaar@mozilla.com>
Jesse Ray <jesse@localhost.localdomain>
Jesse Ruderman <jruderman@gmail.com>
Jessy Diamond Exum <jessy.diamondman@gmail.com>
+Jesús Espino <jespinog@gmail.com>
+jethrogb <github@jbeekman.nl>
Jexell <Jexell@users.noreply.github.com>
Jihyeok Seo <me@limeburst.net>
Jihyun Yu <j.yu@navercorp.com>
Jonas Hietala <tradet.h@gmail.com>
Jonathan Bailey <jbailey@mozilla.com>
Jonathan Boyett <jonathan@failingservers.com>
+Jonathan Hansford <dangthrimble@hansfords.net>
Jonathan Reem <jonathan.reem@gmail.com>
Jonathan S <gereeter@gmail.com>
Jonathan Sternberg <jonathansternberg@gmail.com>
Jorge Israel Peña <jorge.israel.p@gmail.com>
Joris Rehm <joris.rehm@wakusei.fr>
Jormundir <Chaseph@gmail.com>
+Jose Narvaez <jnarvaez@zendesk.com>
Joseph Crail <jbcrail@gmail.com>
Joseph Martin <pythoner6@gmail.com>
Joseph Rushton Wakeling <joe@webdrake.net>
juxiliary <juxiliary@gmail.com>
jxv <joevargas@hush.com>
Jyun-Yan You <jyyou.tw@gmail.com>
+Kagami Sascha Rosylight <saschanaz@outlook.com>
Kang Seonghoon <kang.seonghoon@mearie.org>
Kasey Carrothers <kaseyc.808@gmail.com>
Keegan McAllister <mcallister.keegan@gmail.com>
Kevin Walter <kevin.walter.private@googlemail.com>
Kevin Yap <me@kevinyap.ca>
kgv <mail@kgv.name>
+Kieran Hunt <kieran.hunt92@gmail.com>
Kiet Tran <ktt3ja@gmail.com>
Kim Røen <kim@pam.no>
kjpgit <kjpgit@users.noreply.github.com>
klutzy <klutzytheklutzy@gmail.com>
KokaKiwi <kokakiwi+rust@kokakiwi.net>
korenchkin <korenchkin2@gmail.com>
+Kornel Lesiński <kornel@geekhood.net>
Kostas Karachalios <vrinek@me.com>
+Kristof Söderström <soderstroff@users.noreply.github.com>
+krumelmonster <krumelmonster@zoho.com>
Krzysztof Drewniak <krzysdrewniak@gmail.com>
Kubilay Kocak <koobs@users.noreply.github.com>
kulakowski <george.kulakowski@gmail.com>
Lee Aronson <lee@libertad.ucsd.edu>
Lee Jeffery <leejeffery@gmail.com>
Lee Wondong <wdlee91@gmail.com>
+Leif Arne Storset <leifarne@storset.net>
LemmingAvalanche <haugsbakk@yahoo.no>
Lennart Kudling <github@kudling.de>
Leo Correa <lcorr005@gmail.com>
Magnus Auvinen <magnus.auvinen@gmail.com>
Mahmut Bulut <mahmutbulut0@gmail.com>
maikklein <maikklein@googlemail.com>
+Makoto Kato <m_kato@ga2.so-net.ne.jp>
Makoto Nakashima <makoto.nksm+github@gmail.com>
Manish Goregaokar <manishsmail@gmail.com>
Manuel Hoffmann <manuel@polythematik.de>
Marijn Haverbeke <marijnh@gmail.com>
Marin Atanasov Nikolov <dnaeon@gmail.com>
Mário Feroldi <thelost-t@live.com>
+Mark Buer <mark.buer@booktrack.com>
Mark Lacey <641@rudkx.com>
Mark Mossberg <mark.mossberg@gmail.com>
Mark Rowe <mrowe@bdash.net.nz>
Matt Carberry <carberry.matt@gmail.com>
Matt Coffin <mcoffin13@gmail.com>
Matt Cox <mattcoxpdx@gmail.com>
+Matt Friedman <matthew.friedman@mu.edu>
Matthew Astley <mca@sanger.ac.uk>
Matthew Auld <matthew.auld@intel.com>
Matthew Iselin <matthew@theiselins.net>
Mickaël Raybaud-Roig <raybaudroigm@gmail.com>
Mickaël Salaün <mic@digikod.net>
Mick Koch <kchmck@gmail.com>
+midinastasurazz <mpavlovsky@gmail.com>
Mihnea Dobrescu-Balaur <mihnea@linux.com>
Mike Boutin <mike.boutin@gmail.com>
Mike Dilger <mike@efx.co.nz>
Mike Sampson <mike@sambodata.com>
Mikhail Zabaluev <mikhail.zabaluev@gmail.com>
Mikko Perttunen <cyndis@kapsi.fi>
+mitaa <mitaa.ceb@gmail.com>
mitchmindtree <mitchell.nordine@gmail.com>
Mohammed Attia <skeuomorf@gmail.com>
moonglum <moonglum@moonbeamlabs.com>
Nick Platt <platt.nicholas@gmail.com>
Nick Sarten <gen.battle@gmail.com>
Nicolas Silva <nical.silva@gmail.com>
+Nicolette Verlinden <nicole@nicole.moe>
Niels Egberts <git@nielsegberts.nl>
Niels langager Ellegaard <niels.ellegaard@gmail.com>
Nif Ward <nif.ward@gmail.com>
OGINO Masanori <masanori.ogino@gmail.com>
OlegTsyba <idethrone1@gmail.com>
Oliver Schneider <git1984941651981@oli-obk.de>
-Oliver Schneider <github6541940@oli-obk.de>
Olivier Saut <osaut@airpost.net>
olivren <o.renaud@gmx.fr>
Olle Jonsson <olle.jonsson@gmail.com>
Paul Stansifer <paul.stansifer@gmail.com>
Paul Woolcock <pwoolcoc+github@gmail.com>
Pavel Panchekha <me@pavpanchekha.com>
+Pavel Pravosud <pavel@pravosud.com>
Pawel Olzacki <p.olzacki2@samsung.com>
Pedro Larroy <pedro.larroy@here.com>
Peer Aramillo Irizar <peer.aramillo.irizar@gmail.com>
qwitwa <qwitwa@gmail.com>
Rafael Ávila de Espíndola <respindola@mozilla.com>
Rahul Horé <hore.rahul@gmail.com>
+Ralf Jung <post@ralfj.de>
Ralph Bodenner <rkbodenner+github@gmail.com>
Ralph Giles <giles@thaumas.net>
Ramkumar Ramachandra <artagnon@gmail.com>
Ruud van Asseldonk <dev@veniogames.com>
Ryan Levick <ryan@6wunderkinder.com>
Ryan Mulligan <ryan@ryantm.com>
+Ryan Pendleton <me@ryanp.me>
Ryan Prichard <ryan.prichard@gmail.com>
Ryan Riginding <marc.riginding@gmail.com>
Ryan Scheel <ryan.havvy@gmail.com>
Saurabh Anand <saurabhanandiit@gmail.com>
Scott Jenkins <scottdjwales@gmail.com>
Scott Lawrence <bytbox@gmail.com>
-Scott Olson <scott@scott-olson.org>
+Scott Olson <scott@solson.me>
Sean Bowe <ewillbefull@gmail.com>
Sean Chalmers <sclhiannan@gmail.com>
Sean Collins <sean@cllns.com>
sumito3478 <sumito3478@gmail.com>
Swaroop C H <swaroop@swaroopch.com>
Sylvestre Ledru <sylvestre@debian.org>
+Taliesin Beynon <taliesinb@wolfram.com>
Tamir Duberstein <tamird@gmail.com>
Tamir Duberstein <tamird@squareup.com>
Taras Shpot <mrshpot@gmail.com>
Thomas Karpiniec <tk@1.21jiggawatts.net>
Tiago Nobrega <tigarmo@gmail.com>
Tibor Benke <ihrwein@gmail.com>
+Ticki <Ticki@users.noreply.github.com>
Till Hoeppner <till@hoeppner.ws>
Tim Brooks <brooks@cern.ch>
Tim Chevalier <chevalier@alum.wellesley.edu>
Tyler Thrailkill <tylerbthrailkill@gmail.com>
tynopex <tynopex@users.noreply.github.com>
Ty Overby <ty@pre-alpha.com>
-Ulrik Sverdrup <root@localhost>
+Ulrik Sverdrup <bluss@users.noreply.github.com>
Ulysse Carion <ulysse@ulysse.io>
User Jyyou <jyyou@plaslab.cs.nctu.edu.tw>
Utkarsh Kukreti <utkarshkukreti@gmail.com>
Viktor Dahl <pazaconyoman@gmail.com>
ville-h <ville3.14159@gmail.com>
Vincent Belliard <vincent@famillebelliard.fr>
+Vincent Bernat <vincent@bernat.im>
Vinzent Steinberg <Vinzent.Steinberg@gmail.com>
Virgile Andreani <virgile.andreani@anbuco.fr>
visualfc <visualfc@gmail.com>
Vivek Galatage <vivekgalatage@gmail.com>
Vladimir Matveev <vladimir.matweev@gmail.com>
Vladimir Pouzanov <farcaller@gmail.com>
+Vladimir Rutsky <rutsky@users.noreply.github.com>
Vladimir Smola <smola.vladimir@gmail.com>
Vojtech Kral <vojtech@kral.hk>
Volker Mische <volker.mische@gmail.com>
Will Andrews <will@firepipe.net>
Will Engler <engler.will@gmail.com>
Will Hipschman <whipsch@gmail.com>
+William Throwe <wtt6@cornell.edu>
William Ting <io@williamting.com>
Willson Mock <willson.mock@gmail.com>
Will <will@glozer.net>
Please make pull requests against the `master` branch.
+Compiling all of `make check` can take a while. When testing your pull request,
+consider using one of the more specialized `make` targets to cut down on the
+amount of time you have to wait. You need to have built the compiler at least
+once before these targets will work, but that’s only one full build rather than
+one each time.
+
+ $ make -j8 rustc-stage1 && make check-stage1
+
+is one such example, which builds just `rustc`, and then runs the tests. If
+you’re adding something to the standard library, try
+
+ $ make -j8 check-stage1-std NO_REBUILD=1
+
+This will not rebuild the compiler, but will run the tests.
+
All pull requests are reviewed by another person. We have a bot,
@rust-highfive, that will automatically assign a random person to review your
request.
[merge-queue]: http://buildbot.rust-lang.org/homu/queue/rust
+Speaking of tests, Rust has a comprehensive test suite. More information about
+it can be found
+[here](https://github.com/rust-lang/rust-wiki-backup/blob/master/Note-testsuite.md).
+
## Writing Documentation
Documentation improvements are very welcome. The source of `doc.rust-lang.org`
is located in `src/doc` in the tree, and standard API documentation is generated
from the source code itself.
-Documentation pull requests function in the same as other pull requests, though
-you may see a slightly different form of `r+`:
+Documentation pull requests function in the same way as other pull requests,
+though you may see a slightly different form of `r+`:
@bors: r+ 38fe8d2 rollup
# * tidy-basic - show file / line stats
# * tidy-errors - show the highest rustc error code
# * tidy-features - show the status of language and lib features
+# * rustc-stage$(stage) - Only build up to a specific stage
#
# Then mix in some of these environment variables to harness the
# ultimate power of The Rust Build System.
#
# # Rust recipes for build system success
#
-# // Modifying libstd? Use this comment to run unit tests just on your change
+# // Modifying libstd? Use this command to run unit tests just on your change
# make check-stage1-std NO_REBUILD=1 NO_BENCH=1
#
# // Added a run-pass test? Use this to test running your test
memory safety and offers painless concurrency ([no data races]).
It does not employ a garbage collector and has minimal runtime overhead.
-This repo contains the code for `rustc`, the Rust compiler, as well
+This repo contains the code for the compiler (`rustc`), as well
as standard libraries, tools and documentation for Rust.
[no data races]: http://blog.rust-lang.org/2015/04/10/Fearless-Concurrency.html
```
3. Run `mingw32_shell.bat` or `mingw64_shell.bat` from wherever you installed
- MYSY2 (i.e. `C:\msys`), depending on whether you want 32-bit or 64-bit Rust.
+ MSYS2 (i.e. `C:\msys`), depending on whether you want 32-bit or 64-bit Rust.
4. Navigate to Rust's source code, configure and build it:
+Version 1.3.0 (September 2015)
+==============================
+
+* ~900 changes, numerous bugfixes
+
+Highlights
+----------
+
+* The [new object lifetime defaults][nold] have been [turned
+ on][nold2] after a cycle of warnings about the change. Now types
+ like `&'a Box<Trait>` (or `&'a Rc<Trait>`, etc) will change from
+ being interpreted as `&'a Box<Trait+'a>` to `&'a
+ Box<Trait+'static>`.
+* [The Rustonomicon][nom] is a new book in the official documentation
+ that dives into writing unsafe Rust.
+* The [`Duration`] API, [has been stabilized][ds]. This basic unit of
+ timekeeping is employed by other std APIs, as well as out-of-tree
+ time crates.
+
+Breaking Changes
+----------------
+
+* The [new object lifetime defaults][nold] have been [turned
+ on][nold2] after a cycle of warnings about the change.
+* There is a known [regression][lr] in how object lifetime elision is
+ interpreted, the proper solution for which is undetermined.
+* The `#[prelude_import]` attribute, an internal implementation
+ detail, was accidentally stabilized previously. [It has been put
+ behind the `prelude_import` feature gate][pi]. This change is
+ believed to break no existing code.
+* The behavior of [`size_of_val`][dst1] and [`align_of_val`][dst2] is
+ [more sane for dynamically sized types][dst3]. Code that relied on
+ the previous behavior is thought to be broken.
+* The `dropck` rules, which check that destructors can't access
+ destroyed values, [have been updated][dropck] to match the
+ [RFC][dropckrfc]. This fixes some soundness holes, and as such will
+ cause some previously-compiling code to no longer build.
+
+Language
+--------
+
+* The [new object lifetime defaults][nold] have been [turned
+ on][nold2] after a cycle of warnings about the change.
+* Semicolons may [now follow types and paths in
+ macros](https://github.com/rust-lang/rust/pull/27000).
+* The behavior of [`size_of_val`][dst1] and [`align_of_val`][dst2] is
+ [more sane for dynamically sized types][dst3]. Code that relied on
+ the previous behavior is not known to exist, and suspected to be
+ broken.
+* `'static` variables [may now be recursive][st].
+* `ref` bindings choose between [`Deref`] and [`DerefMut`]
+ implementations correctly.
+* The `dropck` rules, which check that destructors can't access
+ destroyed values, [have been updated][dropck] to match the
+ [RFC][dropckrfc].
+
+Libraries
+---------
+
+* The [`Duration`] API, [has been stabilized][ds], as well as the
+ `std::time` module, which presently contains only `Duration`.
+* `Box<str>` and `Box<[T]>` both implement `Clone`.
+* The owned C string, [`CString`], implements [`Borrow`] and the
+ borrowed C string, [`CStr`], implements [`ToOwned`]. The two of
+ these allow C strings to be borrowed and cloned in generic code.
+* [`CStr`] implements [`Debug`].
+* [`AtomicPtr`] implements [`Debug`].
+* [`Error`] trait objects [can be downcast to their concrete types][e]
+ in many common configurations, using the [`is`], [`downcast`],
+ [`downcast_ref`] and [`downcast_mut`] methods, similarly to the
+ [`Any`] trait.
+* Searching for substrings now [employs the two-way algorithm][search]
+ instead of doing a naive search. This gives major speedups to a
+ number of methods, including [`contains`][sc], [`find`][sf],
+ [`rfind`][srf], [`split`][ss]. [`starts_with`][ssw] and
+ [`ends_with`][sew] are also faster.
+* The performance of `PartialEq` for slices is [much faster][ps].
+* The [`Hash`] trait offers the default method, [`hash_slice`], which
+ is overridden and optimized by the implementations for scalars.
+* The [`Hasher`] trait now has a number of specialized `write_*`
+ methods for primitive types, for efficiency.
+* The I/O-specific error type, [`std::io::Error`][ie], gained a set of
+ methods for accessing the 'inner error', if any: [`get_ref`][iegr],
+ [`get_mut`][iegm], [`into_inner`][ieii]. As well, the implementation
+ of [`std::error::Error::cause`][iec] also delegates to the inner
+ error.
+* [`process::Child`][pc] gained the [`id`] method, which returns a
+ `u32` representing the platform-specific process identifier.
+* The [`connect`] method on slices is deprecated, replaced by the new
+ [`join`] method (note that both of these are on the *unstable*
+ [`SliceConcatExt`] trait, but through the magic of the prelude are
+ available to stable code anyway).
+* The [`Div`] operator is implemented for [`Wrapping`] types.
+* [`DerefMut` is implemented for `String`][dms].
+* Performance of SipHash (the default hasher for `HashMap`) is
+ [better for long data][sh].
+* [`AtomicPtr`] implements [`Send`].
+* The [`read_to_end`] implementations for [`Stdin`] and [`File`]
+  are now [specialized to use uninitialized buffers for increased
+ performance][rte].
+* Lifetime parameters of foreign functions [are now resolved
+ properly][f].
+
+Misc
+----
+
+* Rust can now, with some coercion, [produce programs that run on
+ Windows XP][xp], though XP is not considered a supported platform.
+* Porting Rust on Windows from the GNU toolchain to MSVC continues
+ ([1][win1], [2][win2], [3][win3], [4][win4]). It is still not
+ recommended for use in 1.3, though should be fully-functional
+ in the [64-bit 1.4 beta][b14].
+* On Fedora-based systems installation will [properly configure the
+ dynamic linker][fl].
+* The compiler gained many new extended error descriptions, which can
+ be accessed with the `--explain` flag.
+* The `dropck` pass, which checks that destructors can't access
+ destroyed values, [has been rewritten][dropck]. This fixes some
+ soundness holes, and as such will cause some previously-compiling
+ code to no longer build.
+* `rustc` now uses [LLVM to write archive files where possible][ar].
+ Eventually this will eliminate the compiler's dependency on the ar
+ utility.
+* Rust has [preliminary support for i686 FreeBSD][fb] (it has long
+ supported FreeBSD on x86_64).
+* The [`unused_mut`][lum], [`unconditional_recursion`][lur],
+ [`improper_ctypes`][lic], and [`negate_unsigned`][lnu] lints are
+ more strict.
+* If landing pads are disabled (with `-Z no-landing-pads`), [`panic!`
+ will kill the process instead of leaking][nlp].
+
+[`Any`]: http://doc.rust-lang.org/nightly/std/any/trait.Any.html
+[`AtomicPtr`]: http://doc.rust-lang.org/nightly/std/sync/atomic/struct.AtomicPtr.html
+[`Borrow`]: http://doc.rust-lang.org/nightly/std/borrow/trait.Borrow.html
+[`CStr`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CStr.html
+[`CString`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html
+[`Debug`]: http://doc.rust-lang.org/nightly/std/fmt/trait.Debug.html
+[`DerefMut`]: http://doc.rust-lang.org/nightly/std/ops/trait.DerefMut.html
+[`Deref`]: http://doc.rust-lang.org/nightly/std/ops/trait.Deref.html
+[`Div`]: http://doc.rust-lang.org/nightly/std/ops/trait.Div.html
+[`Duration`]: http://doc.rust-lang.org/nightly/std/time/struct.Duration.html
+[`Error`]: http://doc.rust-lang.org/nightly/std/error/trait.Error.html
+[`File`]: http://doc.rust-lang.org/nightly/std/fs/struct.File.html
+[`Hash`]: http://doc.rust-lang.org/nightly/std/hash/trait.Hash.html
+[`Hasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.Hasher.html
+[`Send`]: http://doc.rust-lang.org/nightly/std/marker/trait.Send.html
+[`SliceConcatExt`]: http://doc.rust-lang.org/nightly/std/slice/trait.SliceConcatExt.html
+[`Stdin`]: http://doc.rust-lang.org/nightly/std/io/struct.Stdin.html
+[`ToOwned`]: http://doc.rust-lang.org/nightly/std/borrow/trait.ToOwned.html
+[`Wrapping`]: http://doc.rust-lang.org/nightly/std/num/struct.Wrapping.html
+[`connect`]: http://doc.rust-lang.org/nightly/std/slice/trait.SliceConcatExt.html#method.connect
+[`downcast_mut`]: http://doc.rust-lang.org/nightly/std/error/trait.Error.html#method.downcast_mut
+[`downcast_ref`]: http://doc.rust-lang.org/nightly/std/error/trait.Error.html#method.downcast_ref
+[`downcast`]: http://doc.rust-lang.org/nightly/std/error/trait.Error.html#method.downcast
+[`hash_slice`]: http://doc.rust-lang.org/nightly/std/hash/trait.Hash.html#method.hash_slice
+[`id`]: http://doc.rust-lang.org/nightly/std/process/struct.Child.html#method.id
+[`is`]: http://doc.rust-lang.org/nightly/std/error/trait.Error.html#method.is
+[`join`]: http://doc.rust-lang.org/nightly/std/slice/trait.SliceConcatExt.html#method.join
+[`read_to_end`]: http://doc.rust-lang.org/nightly/std/io/trait.Read.html#method.read_to_end
+[ar]: https://github.com/rust-lang/rust/pull/26926
+[b14]: https://static.rust-lang.org/dist/rust-beta-x86_64-pc-windows-msvc.msi
+[dms]: https://github.com/rust-lang/rust/pull/26241
+[dropck]: https://github.com/rust-lang/rust/pull/27261
+[dropckrfc]: https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md
+[ds]: https://github.com/rust-lang/rust/pull/26818
+[dst1]: http://doc.rust-lang.org/nightly/std/mem/fn.size_of_val.html
+[dst2]: http://doc.rust-lang.org/nightly/std/mem/fn.align_of_val.html
+[dst3]: https://github.com/rust-lang/rust/pull/27351
+[e]: https://github.com/rust-lang/rust/pull/24793
+[f]: https://github.com/rust-lang/rust/pull/26588
+[fb]: https://github.com/rust-lang/rust/pull/26959
+[fl]: https://github.com/rust-lang/rust-installer/pull/41
+[hs]: http://doc.rust-lang.org/nightly/std/hash/trait.Hash.html#method.hash_slice
+[ie]: http://doc.rust-lang.org/nightly/std/io/struct.Error.html
+[iec]: http://doc.rust-lang.org/nightly/std/io/struct.Error.html#method.cause
+[iegm]: http://doc.rust-lang.org/nightly/std/io/struct.Error.html#method.get_mut
+[iegr]: http://doc.rust-lang.org/nightly/std/io/struct.Error.html#method.get_ref
+[ieii]: http://doc.rust-lang.org/nightly/std/io/struct.Error.html#method.into_inner
+[lic]: https://github.com/rust-lang/rust/pull/26583
+[lnu]: https://github.com/rust-lang/rust/pull/27026
+[lr]: https://github.com/rust-lang/rust/issues/27248
+[lum]: https://github.com/rust-lang/rust/pull/26378
+[lur]: https://github.com/rust-lang/rust/pull/26783
+[nlp]: https://github.com/rust-lang/rust/pull/27176
+[nold2]: https://github.com/rust-lang/rust/pull/27045
+[nold]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md
+[nom]: http://doc.rust-lang.org/nightly/nomicon/
+[pc]: http://doc.rust-lang.org/nightly/std/process/struct.Child.html
+[pi]: https://github.com/rust-lang/rust/pull/26699
+[ps]: https://github.com/rust-lang/rust/pull/26884
+[rte]: https://github.com/rust-lang/rust/pull/26950
+[sc]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.contains
+[search]: https://github.com/rust-lang/rust/pull/26327
+[sew]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.ends_with
+[sf]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.find
+[sh]: https://github.com/rust-lang/rust/pull/27280
+[srf]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.rfind
+[ss]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.split
+[ssw]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.starts_with
+[st]: https://github.com/rust-lang/rust/pull/26630
+[win1]: https://github.com/rust-lang/rust/pull/26569
+[win2]: https://github.com/rust-lang/rust/pull/26741
+[win3]: https://github.com/rust-lang/rust/pull/26741
+[win4]: https://github.com/rust-lang/rust/pull/27210
+[xp]: https://github.com/rust-lang/rust/pull/26569
+
Version 1.2.0 (August 2015)
===========================
Highlights
----------
-* The [`std::fs` module has been expanded][fs-expand] to expand the set of
+* The [`std::fs` module has been expanded][fs] to expand the set of
functionality exposed:
* `DirEntry` now supports optimizations like `file_type` and `metadata` which
don't incur a syscall on some platforms.
* A `symlink_metadata` function has been added.
* The `fs::Metadata` structure now lowers to its OS counterpart, providing
access to all underlying information.
-* The compiler contains extended explanations of many errors. When it
- emits such an error it also suggests using the `--explain` flag to
- read the extended explanations, which are also [cataloged on the web
- site][err].
+* The compiler now contains extended explanations of many errors. When an error
+ with an explanation occurs the compiler suggests using the `--explain` flag
+ to read the explanation. Error explanations are also [available online][err-index].
* Thanks to multiple [improvements][sk] to [type checking][pre], as
well as other work, the time to bootstrap the compiler decreased by
32%.
Libraries
---------
-* The `str::split_whitespace` method splits a string on unicode
+* The [`str::split_whitespace`] method splits a string on unicode
whitespace boundaries.
* On both Windows and Unix, new extension traits provide conversion of
I/O types to and from the underlying system handles. On Unix, these
- traits are [`FrowRawFd`] and [`AsRawFd`], on Windows `FromRawHandle`
+ traits are [`FromRawFd`] and [`AsRawFd`], on Windows `FromRawHandle`
and `AsRawHandle`. These are implemented for `File`, `TcpStream`,
`TcpListener`, and `UpdSocket`. Further implementations for
`std::process` will be stabilized later.
* [The `drop_with_repr_extern` lint warns about mixing `repr(C)`
with `Drop`][drop].
-[`split_whitespace`]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_whitespace
-[`Iterator::cloned`]: http://doc.rust-lang.org/nightly/core/iter/trait.Iterator.html#method.cloned
+[`str::split_whitespace`]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_whitespace
[`FromRawFd`]: http://doc.rust-lang.org/nightly/std/os/unix/io/trait.FromRawFd.html
[`AsRawFd`]: http://doc.rust-lang.org/nightly/std/os/unix/io/trait.AsRawFd.html
[`std::os::unix::symlink`]: http://doc.rust-lang.org/nightly/std/os/unix/fs/fn.symlink.html
[`IntoIterator`]: http://doc.rust-lang.org/nightly/std/iter/trait.IntoIterator.html
[`From`]: http://doc.rust-lang.org/nightly/std/convert/trait.From.html
[rf]: https://github.com/rust-lang/rust/pull/24491
-[err]: http://doc.rust-lang.org/error-index.html
+[err-index]: http://doc.rust-lang.org/error-index.html
[sk]: https://github.com/rust-lang/rust/pull/24615
[pre]: https://github.com/rust-lang/rust/pull/25323
[file]: https://github.com/rust-lang/rust/pull/24598
[sw]: https://github.com/rust-lang/rfcs/blob/master/text/1054-str-words.md
[th]: https://github.com/rust-lang/rfcs/blob/master/text/0909-move-thread-local-to-std-thread.md
[send-rfc]: https://github.com/rust-lang/rfcs/blob/master/text/0458-send-improvements.md
-[scoped]: http://static.rust-lang.org/doc/master/std/thread/fn.scoped.html
[moar-ufcs]: https://github.com/rust-lang/rust/pull/22172
[prim-inherent]: https://github.com/rust-lang/rust/pull/23104
[overflow]: https://github.com/rust-lang/rfcs/blob/master/text/0560-integer-overflow.md
[string-pattern]: https://github.com/rust-lang/rust/pull/22466
[oibit-final]: https://github.com/rust-lang/rust/pull/21689
[reflect]: https://github.com/rust-lang/rust/pull/23712
-[debug-builder]: https://github.com/rust-lang/rfcs/blob/master/text/0640-debug-improvements.md
[conversion]: https://github.com/rust-lang/rfcs/pull/529
[num-traits]: https://github.com/rust-lang/rust/pull/23549
[index-value]: https://github.com/rust-lang/rust/pull/23601
[dropck]: https://github.com/rust-lang/rfcs/pull/769
-[fundamental]: https://github.com/rust-lang/rfcs/pull/1023
[ci-compare]: https://gist.github.com/brson/a30a77836fbec057cbee
[fn-inherit]: https://github.com/rust-lang/rust/pull/23282
[fn-blanket]: https://github.com/rust-lang/rust/pull/23895
[osstr]: https://github.com/rust-lang/rust/pull/21488
[osstr-rfc]: https://github.com/rust-lang/rfcs/blob/master/text/0517-io-os-reform.md
[Self]: https://github.com/rust-lang/rust/pull/22158
-[ufcs]: https://github.com/rust-lang/rust/pull/21077
[ufcs-rfc]: https://github.com/rust-lang/rfcs/blob/master/text/0132-ufcs.md
[un]: https://github.com/rust-lang/rust/pull/22256
fi
done
else
- if [ ! -z "$META" ]
+ if [ -n "$META" ]
then
OP="$OP=<$META>"
fi
fi
# If script or environment provided a value, save it.
- if [ ! -z "$VV" ]
+ if [ -n "$VV" ]
then
putvar $V
fi
}
+# Set CFG_ENABLE_<OPT>=1 unless the user explicitly passed --disable-<opt>
+# (recorded elsewhere as CFG_DISABLE_<OPT>_PROVIDED). Takes the option name
+# in lowercase-with-dashes form, e.g. `enable_if_not_disabled debug-assertions`.
+enable_if_not_disabled() {
+    local OP=$1
+    # Derive the variable-name suffix: uppercase, dashes mapped to underscores.
+    local UOP=$(echo $OP | tr '[:lower:]' '[:upper:]' | tr '\-' '\_')
+    local ENAB_V="CFG_ENABLE_$UOP"
+    local EXPLICITLY_DISABLED="CFG_DISABLE_${UOP}_PROVIDED"
+    # Indirect read: VV gets the current value of CFG_DISABLE_<OPT>_PROVIDED, if any.
+    eval VV=\$$EXPLICITLY_DISABLED
+    if [ -z "$VV" ]; then
+        # No explicit --disable flag was provided, so default the feature to enabled.
+        eval $ENAB_V=1
+    fi
+}
+
to_llvm_triple() {
case $1 in
i686-w64-mingw32) echo i686-pc-windows-gnu ;;
CFG_OSTYPE=unknown-openbsd
;;
+ NetBSD)
+ CFG_OSTYPE=unknown-netbsd
+ ;;
+
Darwin)
CFG_OSTYPE=apple-darwin
;;
valopt python "" "set path to python"
valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located"
valopt build "${DEFAULT_BUILD}" "GNUs ./configure syntax LLVM build triple"
-valopt android-cross-path "/opt/ndk_standalone" "Android NDK standalone path"
+valopt android-cross-path "/opt/ndk_standalone" "Android NDK standalone path (deprecated)"
+valopt arm-linux-androideabi-ndk "" "arm-linux-androideabi NDK standalone path"
+valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path"
valopt release-channel "dev" "the name of the release channel to build"
valopt musl-root "/usr/local" "MUSL root installation directory"
CFG_DISABLE_OPTIMIZE=1
CFG_DISABLE_OPTIMIZE_CXX=1
fi
- CFG_ENABLE_DEBUG_ASSERTIONS=1
- CFG_ENABLE_DEBUG_JEMALLOC=1
- CFG_ENABLE_DEBUGINFO=1
- CFG_ENABLE_LLVM_ASSERTIONS=1
+
+ # Set following variables to 1 unless setting already provided
+ enable_if_not_disabled debug-assertions
+ enable_if_not_disabled debug-jemalloc
+ enable_if_not_disabled debuginfo
+ enable_if_not_disabled llvm-assertions
fi
# OK, now write the debugging options
# On MacOS X, invoking `javac` pops up a dialog if the JDK is not
# installed. Since `javac` is only used if `antlr4` is available,
# probe for it only in this case.
-if [ ! -z "$CFG_ANTLR4" ]
+if [ -n "$CFG_ANTLR4" ]
then
probe CFG_JAVAC javac
fi
fi
fi
-if [ ! -z "$CFG_GDB" ]
+if [ -n "$CFG_GDB" ]
then
# Store GDB's version
CFG_GDB_VERSION=$($CFG_GDB --version 2>/dev/null | head -1)
putvar CFG_GDB_VERSION
fi
-if [ ! -z "$CFG_LLDB" ]
+if [ -n "$CFG_LLDB" ]
then
# Store LLDB's version
CFG_LLDB_VERSION=$($CFG_LLDB --version 2>/dev/null | head -1)
probe CFG_ADB adb
-if [ ! -z "$CFG_PANDOC" ]
+if [ -n "$CFG_PANDOC" ]
then
# Extract "MAJOR MINOR" from Pandoc's version number
PV_MAJOR_MINOR=$(pandoc --version | grep '^pandoc' |
BIN_SUF=.exe
fi
-if [ ! -z "$CFG_ENABLE_LOCAL_RUST" ]
+if [ -n "$CFG_ENABLE_LOCAL_RUST" ]
then
system_rustc=$(which rustc)
if [ -f ${CFG_LOCAL_RUST_ROOT}/bin/rustc${BIN_SUF} ]
fi
fi
+# If the clang isn't already enabled, check for GCC, and if it is missing, turn
+# on clang as a backup.
+if [ -z "$CFG_ENABLE_CLANG" ]
+then
+ CFG_GCC_VERSION=$("$CFG_GCC" --version 2>&1)
+ if [ $? -ne 0 ]
+ then
+ step_msg "GCC not installed, will try using Clang"
+ CFG_ENABLE_CLANG=1
+ fi
+fi
+
# Okay, at this point, we have made up our minds about whether we are
# going to force CFG_ENABLE_CLANG or not; save the setting if so.
-if [ ! -z "$CFG_ENABLE_CLANG" ]
+if [ -n "$CFG_ENABLE_CLANG" ]
then
putvar CFG_ENABLE_CLANG
fi
# Same with jemalloc. save the setting here.
-if [ ! -z "$CFG_DISABLE_JEMALLOC" ]
+if [ -n "$CFG_DISABLE_JEMALLOC" ]
then
putvar CFG_DISABLE_JEMALLOC
fi
-if [ ! -z "$CFG_LLVM_ROOT" -a -z "$CFG_DISABLE_LLVM_VERSION_CHECK" -a -e "$CFG_LLVM_ROOT/bin/llvm-config" ]
+if [ -n "$CFG_LLVM_ROOT" -a -z "$CFG_DISABLE_LLVM_VERSION_CHECK" -a -e "$CFG_LLVM_ROOT/bin/llvm-config" ]
then
step_msg "using custom LLVM at $CFG_LLVM_ROOT"
LLVM_VERSION=$($LLVM_CONFIG --version)
case $LLVM_VERSION in
- (3.[5-6]*)
+ (3.[5-7]*)
msg "found ok version of LLVM: $LLVM_VERSION"
;;
(*)
# CFG_ENABLE_CLANG is set, that indicates that we are opting into
# running such safeguards.
-if [ ! -z "$CC" ]
+if [ -n "$CC" ]
then
msg "skipping compiler inference steps; using provided CC=$CC"
CFG_CC="$CC"
putvar CFG_USING_CLANG
fi
else
- if [ ! -z "$CFG_ENABLE_CLANG" ]
+ if [ -n "$CFG_ENABLE_CLANG" ]
then
if [ -z "$CFG_CLANG" ]
then
fi
fi
-if [ ! -z "$CFG_ENABLE_CLANG" ]
+if [ -n "$CFG_ENABLE_CLANG" ]
then
case "$CC" in
(''|*clang)
CFG_CLANG_REPORTED_VERSION=$($CFG_CC --version | grep version)
- if [[ $CFG_CLANG_REPORTED_VERSION == *"(based on LLVM "* ]]
- then
+ if echo $CFG_CLANG_REPORTED_VERSION | grep -q "(based on LLVM "; then
CFG_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*(based on LLVM \(.*\))/\1/')
- elif [[ $CFG_CLANG_REPORTED_VERSION == "Apple LLVM"* ]]
- then
+ elif echo $CFG_CLANG_REPORTED_VERSION | grep -q "Apple LLVM"; then
CFG_OSX_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*version \(.*\) .*/\1/')
else
CFG_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*version \(.*\) .*/\1/')
fi
- if [ ! -z "$CFG_OSX_CLANG_VERSION" ]
+ if [ -n "$CFG_OSX_CLANG_VERSION" ]
then
case $CFG_OSX_CLANG_VERSION in
(7.0*)
esac
fi
-if [ ! -z "$CFG_ENABLE_CCACHE" ]
+if [ -n "$CFG_ENABLE_CCACHE" ]
then
- if [ -z "$CC" ]
+ if [ -z "$CFG_CCACHE" ]
then
- if [ -z "$CFG_CCACHE" ]
- then
- err "ccache requested but not found"
- fi
-
- CFG_CC="ccache $CFG_CC"
+ err "ccache requested but not found"
fi
+
+ CFG_CC="ccache $CFG_CC"
fi
if [ -z "$CC" -a -z "$CFG_ENABLE_CLANG" -a -z "$CFG_GCC" ]
CFG_SUPPORTED_TARGET="${CFG_SUPPORTED_TARGET} $(basename "$target_file" .mk)"
done
+# copy build-triples to host-triples so that builds are a subset of hosts
+V_TEMP=""
+for i in $CFG_BUILD $CFG_HOST;
+do
+ echo "$V_TEMP" | grep -qF $i || V_TEMP="$V_TEMP${V_TEMP:+ }$i"
+done
+CFG_HOST=$V_TEMP
+
# copy host-triples to target-triples so that hosts are a subset of targets
V_TEMP=""
for i in $CFG_HOST $CFG_TARGET;
fi
case $i in
- arm-linux-androideabi)
-
- if [ ! -f $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-gcc ]
+ *android*)
+ upper_snake_target=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr '\-' '\_')
+ eval ndk=\$"CFG_${upper_snake_target}_NDK"
+ if [ -z "$ndk" ]
then
- err "NDK $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-gcc not found"
- fi
- if [ ! -f $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-g++ ]
- then
- err "NDK $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-g++ not found"
- fi
- if [ ! -f $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-ar ]
- then
- err "NDK $CFG_ANDROID_CROSS_PATH/bin/arm-linux-androideabi-ar not found"
+ ndk=$CFG_ANDROID_CROSS_PATH
+ eval "CFG_${upper_snake_target}_NDK"=$CFG_ANDROID_CROSS_PATH
+ warn "generic/default Android NDK option is deprecated (use --$i-ndk option instead)"
fi
+
+ # Perform a basic sanity check of the NDK
+ for android_ndk_tool in "$ndk/bin/$i-gcc" "$ndk/bin/$i-g++" "$ndk/bin/$i-ar"
+ do
+ if [ ! -f $android_ndk_tool ]
+ then
+ err "NDK tool $android_ndk_tool not found (bad or missing --$i-ndk option?)"
+ fi
+ done
;;
arm-apple-darwin)
fi
;;
- x86_64-*-msvc)
+ *-msvc)
# Currently the build system is not configured to build jemalloc
# with MSVC, so we omit this optional dependency.
step_msg "targeting MSVC, disabling jemalloc"
# INCLUDE and LIB variables for MSVC so we can set those in the
# build system as well.
install=$(reg QUERY \
- 'HKLM\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\12.0' \
+ 'HKLM\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\14.0' \
-v InstallDir)
+ if [ -z "$install" ]; then
+ install=$(reg QUERY \
+ 'HKLM\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\12.0' \
+ -v InstallDir)
+ fi
need_ok "couldn't find visual studio install root"
CFG_MSVC_ROOT=$(echo "$install" | grep InstallDir | sed 's/.*REG_SZ[ ]*//')
CFG_MSVC_ROOT=$(dirname "$CFG_MSVC_ROOT")
CFG_MSVC_ROOT=$(dirname "$CFG_MSVC_ROOT")
- CFG_MSVC_CL="${CFG_MSVC_ROOT}/VC/bin/amd64/cl.exe"
- CFG_MSVC_LIB="${CFG_MSVC_ROOT}/VC/bin/amd64/lib.exe"
- CFG_MSVC_LINK="${CFG_MSVC_ROOT}/VC/bin/amd64/link.exe"
+ putvar CFG_MSVC_ROOT
+
+ case $i in
+ x86_64-*)
+ bits=x86_64
+ msvc_part=amd64
+ ;;
+ i686-*)
+ bits=i386
+ msvc_part=
+ ;;
+ *)
+ err "can only target x86 targets for MSVC"
+ ;;
+ esac
+ bindir="${CFG_MSVC_ROOT}/VC/bin"
+ if [ -n "$msvc_part" ]; then
+ bindir="$bindir/$msvc_part"
+ fi
+ eval CFG_MSVC_BINDIR_$bits="\"$bindir\""
+ eval CFG_MSVC_CL_$bits="\"$bindir/cl.exe\""
+ eval CFG_MSVC_LIB_$bits="\"$bindir/lib.exe\""
+ eval CFG_MSVC_LINK_$bits="\"$bindir/link.exe\""
vcvarsall="${CFG_MSVC_ROOT}/VC/vcvarsall.bat"
- CFG_MSVC_INCLUDE_PATH=$(cmd /c "\"$vcvarsall\" amd64 && cmd /c echo %INCLUDE%")
+ include_path=$(cmd /c "\"$vcvarsall\" $msvc_part && cmd /c echo %INCLUDE%")
need_ok "failed to learn about MSVC's INCLUDE"
- CFG_MSVC_LIB_PATH=$(cmd /c "\"$vcvarsall\" amd64 && cmd /c echo %LIB%")
+ lib_path=$(cmd /c "\"$vcvarsall\" $msvc_part && cmd /c echo %LIB%")
need_ok "failed to learn about MSVC's LIB"
- putvar CFG_MSVC_ROOT
- putvar CFG_MSVC_CL
- putvar CFG_MSVC_LIB
- putvar CFG_MSVC_LINK
- putvar CFG_MSVC_INCLUDE_PATH
- putvar CFG_MSVC_LIB_PATH
+ eval CFG_MSVC_INCLUDE_PATH_${bits}="\"$include_path\""
+ eval CFG_MSVC_LIB_PATH_${bits}="\"$lib_path\""
+
+ putvar CFG_MSVC_BINDIR_${bits}
+ putvar CFG_MSVC_CL_${bits}
+ putvar CFG_MSVC_LIB_${bits}
+ putvar CFG_MSVC_LINK_${bits}
+ putvar CFG_MSVC_INCLUDE_PATH_${bits}
+ putvar CFG_MSVC_LIB_PATH_${bits}
;;
*)
esac
done
-if [ ! -z "$CFG_PERF" ]
+if [ -n "$CFG_PERF" ]
then
HAVE_PERF_LOGFD=`$CFG_PERF stat --log-fd 2>&1 | grep 'unknown option'`
if [ -z "$HAVE_PERF_LOGFD" ];
"${CFG_GIT}" submodule init
# Disable submodules that we're not using
- if [ ! -z "${CFG_LLVM_ROOT}" ]; then
+ if [ -n "${CFG_LLVM_ROOT}" ]; then
msg "git: submodule deinit src/llvm"
"${CFG_GIT}" submodule deinit src/llvm
fi
- if [ ! -z "${CFG_JEMALLOC_ROOT}" ]; then
+ if [ -n "${CFG_JEMALLOC_ROOT}" ]; then
msg "git: submodule deinit src/jemalloc"
"${CFG_GIT}" submodule deinit src/jemalloc
fi
if [ -z $CFG_LLVM_ROOT ]
then
LLVM_BUILD_DIR=${CFG_BUILD_DIR}$t/llvm
- if [ ! -z "$CFG_DISABLE_OPTIMIZE_LLVM" ]
+ if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]
then
LLVM_DBG_OPTS="--enable-debug-symbols --disable-optimized"
# Just use LLVM straight from its build directory to
msg "configuring LLVM for $t with cmake"
CMAKE_ARGS="-DLLVM_INCLUDE_TESTS=OFF"
- if [ ! -z "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then
+ if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then
CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Debug"
else
CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release"
msg "configuring LLVM with:"
msg "$CMAKE_ARGS"
+ case "$CFG_MSVC_ROOT" in
+ *14.0*)
+ generator="Visual Studio 14 2015"
+ ;;
+ *12.0*)
+ generator="Visual Studio 12 2013"
+ ;;
+ *)
+ err "can't determine generator for LLVM cmake"
+ ;;
+ esac
+ case "$t" in
+ x86_64-*)
+ generator="$generator Win64"
+ ;;
+ i686-*)
+ ;;
+ *)
+ err "can only build LLVM for x86 platforms"
+ ;;
+ esac
(cd $LLVM_BUILD_DIR && "$CFG_CMAKE" $CFG_LLVM_SRC_DIR \
- -G "Visual Studio 12 2013 Win64" \
+ -G "$generator" \
$CMAKE_ARGS)
need_ok "LLVM cmake configure failed"
fi
(*)
msg "inferring LLVM_CXX/CC from CXX/CC = $CXX/$CC"
- LLVM_CXX_32="$CXX"
- LLVM_CC_32="$CC"
+ if [ -n "$CFG_ENABLE_CCACHE" ]
+ then
+ if [ -z "$CFG_CCACHE" ]
+ then
+ err "ccache requested but not found"
+ fi
+
+ LLVM_CXX_32="ccache $CXX"
+ LLVM_CC_32="ccache $CC"
+
+ LLVM_CXX_64="ccache $CXX"
+ LLVM_CC_64="ccache $CC"
+ else
+ LLVM_CXX_32="$CXX"
+ LLVM_CC_32="$CC"
+
+ LLVM_CXX_64="$CXX"
+ LLVM_CC_64="$CC"
+ fi
- LLVM_CXX_64="$CXX"
- LLVM_CC_64="$CC"
;;
esac
putvar CFG_TARGET
putvar CFG_LIBDIR_RELATIVE
putvar CFG_DISABLE_MANAGE_SUBMODULES
-putvar CFG_ANDROID_CROSS_PATH
+putvar CFG_AARCH64_LINUX_ANDROID_NDK
+putvar CFG_ARM_LINUX_ANDROIDEABI_NDK
putvar CFG_MANDIR
# Avoid spurious warnings from clang by feeding it original source on
# ccache-miss rather than preprocessed input.
-if [ ! -z "$CFG_ENABLE_CCACHE" ] && [ ! -z "$CFG_USING_CLANG" ]
+if [ -n "$CFG_ENABLE_CCACHE" ] && [ -n "$CFG_USING_CLANG" ]
then
CFG_CCACHE_CPP2=1
putvar CFG_CCACHE_CPP2
fi
-if [ ! -z "$CFG_ENABLE_CCACHE" ]
+if [ -n "$CFG_ENABLE_CCACHE" ]
then
CFG_CCACHE_BASEDIR=${CFG_SRC_DIR}
putvar CFG_CCACHE_BASEDIR
fi
-if [ ! -z $BAD_PANDOC ]
+if [ -n "$BAD_PANDOC" ]
then
CFG_PANDOC=
putvar CFG_PANDOC
# aarch64-linux-android configuration
# CROSS_PREFIX_aarch64-linux-android-
-CC_aarch64-linux-android=$(CFG_ANDROID_CROSS_PATH)/bin/aarch64-linux-android-gcc
-CXX_aarch64-linux-android=$(CFG_ANDROID_CROSS_PATH)/bin/aarch64-linux-android-g++
-CPP_aarch64-linux-android=$(CFG_ANDROID_CROSS_PATH)/bin/aarch64-linux-android-gcc -E
-AR_aarch64-linux-android=$(CFG_ANDROID_CROSS_PATH)/bin/aarch64-linux-android-ar
+CC_aarch64-linux-android=$(CFG_AARCH64_LINUX_ANDROID_NDK)/bin/aarch64-linux-android-gcc
+CXX_aarch64-linux-android=$(CFG_AARCH64_LINUX_ANDROID_NDK)/bin/aarch64-linux-android-g++
+CPP_aarch64-linux-android=$(CFG_AARCH64_LINUX_ANDROID_NDK)/bin/aarch64-linux-android-gcc -E
+AR_aarch64-linux-android=$(CFG_AARCH64_LINUX_ANDROID_NDK)/bin/aarch64-linux-android-ar
CFG_LIB_NAME_aarch64-linux-android=lib$(1).so
CFG_STATIC_LIB_NAME_aarch64-linux-android=lib$(1).a
CFG_LIB_GLOB_aarch64-linux-android=lib$(1)-*.so
# arm-linux-androideabi configuration
-CC_arm-linux-androideabi=$(CFG_ANDROID_CROSS_PATH)/bin/arm-linux-androideabi-gcc
-CXX_arm-linux-androideabi=$(CFG_ANDROID_CROSS_PATH)/bin/arm-linux-androideabi-g++
-CPP_arm-linux-androideabi=$(CFG_ANDROID_CROSS_PATH)/bin/arm-linux-androideabi-gcc -E
-AR_arm-linux-androideabi=$(CFG_ANDROID_CROSS_PATH)/bin/arm-linux-androideabi-ar
+CC_arm-linux-androideabi=$(CFG_ARM_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-gcc
+CXX_arm-linux-androideabi=$(CFG_ARM_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-g++
+CPP_arm-linux-androideabi=$(CFG_ARM_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-gcc -E
+AR_arm-linux-androideabi=$(CFG_ARM_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-ar
CFG_LIB_NAME_arm-linux-androideabi=lib$(1).so
CFG_STATIC_LIB_NAME_arm-linux-androideabi=lib$(1).a
CFG_LIB_GLOB_arm-linux-androideabi=lib$(1)-*.so
--- /dev/null
+# i686-pc-windows-msvc configuration
+CC_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+LINK_i686-pc-windows-msvc="$(CFG_MSVC_LINK_i386)" -nologo
+CXX_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+CPP_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo
+AR_i686-pc-windows-msvc="$(CFG_MSVC_LIB_i386)" -nologo
+CFG_LIB_NAME_i686-pc-windows-msvc=$(1).dll
+CFG_STATIC_LIB_NAME_i686-pc-windows-msvc=$(1).lib
+CFG_LIB_GLOB_i686-pc-windows-msvc=$(1)-*.{dll,lib}
+CFG_LIB_DSYM_GLOB_i686-pc-windows-msvc=$(1)-*.dylib.dSYM
+CFG_JEMALLOC_CFLAGS_i686-pc-windows-msvc :=
+CFG_GCCISH_CFLAGS_i686-pc-windows-msvc := -MD
+CFG_GCCISH_CXXFLAGS_i686-pc-windows-msvc := -MD
+CFG_GCCISH_LINK_FLAGS_i686-pc-windows-msvc :=
+CFG_GCCISH_DEF_FLAG_i686-pc-windows-msvc :=
+CFG_LLC_FLAGS_i686-pc-windows-msvc :=
+CFG_INSTALL_NAME_i686-pc-windows-msvc =
+CFG_EXE_SUFFIX_i686-pc-windows-msvc := .exe
+CFG_WINDOWSY_i686-pc-windows-msvc := 1
+CFG_UNIXY_i686-pc-windows-msvc :=
+CFG_LDPATH_i686-pc-windows-msvc :=
+CFG_RUN_i686-pc-windows-msvc=$(2)
+CFG_RUN_TARG_i686-pc-windows-msvc=$(call CFG_RUN_i686-pc-windows-msvc,,$(2))
+CFG_GNU_TRIPLE_i686-pc-windows-msvc := i686-pc-win32
+
+# All windows nightlies are currently a GNU triple, so this MSVC triple is not
+# bootstrapping from itself. This is relevant during stage0, and other parts of
+# the build system take this into account.
+BOOTSTRAP_FROM_i686-pc-windows-msvc := i686-pc-windows-gnu
--- /dev/null
+# i686-unknown-freebsd configuration
+CC_i686-unknown-freebsd=$(CC)
+CXX_i686-unknown-freebsd=$(CXX)
+CPP_i686-unknown-freebsd=$(CPP)
+AR_i686-unknown-freebsd=$(AR)
+CFG_LIB_NAME_i686-unknown-freebsd=lib$(1).so
+CFG_STATIC_LIB_NAME_i686-unknown-freebsd=lib$(1).a
+CFG_LIB_GLOB_i686-unknown-freebsd=lib$(1)-*.so
+CFG_LIB_DSYM_GLOB_i686-unknown-freebsd=$(1)-*.dylib.dSYM
+CFG_JEMALLOC_CFLAGS_i686-unknown-freebsd := -m32 -arch i386 -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_CFLAGS_i686-unknown-freebsd := -Wall -Werror -g -fPIC -m32 -arch i386 -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_LINK_FLAGS_i686-unknown-freebsd := -m32 -shared -fPIC -g -pthread -lrt
+CFG_GCCISH_DEF_FLAG_i686-unknown-freebsd := -Wl,--export-dynamic,--dynamic-list=
+CFG_LLC_FLAGS_i686-unknown-freebsd :=
+CFG_INSTALL_NAME_i686-unknown-freebsd =
+CFG_EXE_SUFFIX_i686-unknown-freebsd :=
+CFG_WINDOWSY_i686-unknown-freebsd :=
+CFG_UNIXY_i686-unknown-freebsd := 1
+CFG_LDPATH_i686-unknown-freebsd :=
+CFG_RUN_i686-unknown-freebsd=$(2)
+CFG_RUN_TARG_i686-unknown-freebsd=$(call CFG_RUN_i686-unknown-freebsd,,$(2))
+CFG_GNU_TRIPLE_i686-unknown-freebsd := i686-unknown-freebsd
# x86_64-pc-windows-msvc configuration
-CC_x86_64-pc-windows-msvc="$(CFG_MSVC_CL)" -nologo
-LINK_x86_64-pc-windows-msvc="$(CFG_MSVC_LINK)" -nologo
-CXX_x86_64-pc-windows-msvc="$(CFG_MSVC_CL)" -nologo
-CPP_x86_64-pc-windows-msvc="$(CFG_MSVC_CL)" -nologo
-AR_x86_64-pc-windows-msvc="$(CFG_MSVC_LIB)" -nologo
+CC_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo
+LINK_x86_64-pc-windows-msvc="$(CFG_MSVC_LINK_x86_64)" -nologo
+CXX_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo
+CPP_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo
+AR_x86_64-pc-windows-msvc="$(CFG_MSVC_LIB_x86_64)" -nologo
CFG_LIB_NAME_x86_64-pc-windows-msvc=$(1).dll
CFG_STATIC_LIB_NAME_x86_64-pc-windows-msvc=$(1).lib
CFG_LIB_GLOB_x86_64-pc-windows-msvc=$(1)-*.{dll,lib}
CFG_RUN_TARG_x86_64-pc-windows-msvc=$(call CFG_RUN_x86_64-pc-windows-msvc,,$(2))
CFG_GNU_TRIPLE_x86_64-pc-windows-msvc := x86_64-pc-win32
-# These two environment variables are scraped by the `./configure` script and
-# are necessary for `cl.exe` to find standard headers (the INCLUDE variable) and
-# for `link.exe` to find standard libraries (the LIB variable).
-ifdef CFG_MSVC_INCLUDE_PATH
-export INCLUDE := $(CFG_MSVC_INCLUDE_PATH)
-endif
-ifdef CFG_MSVC_LIB_PATH
-export LIB := $(CFG_MSVC_LIB_PATH)
-endif
-
-# Unfortunately `link.exe` is also a program in `/usr/bin` on MinGW installs,
-# but it's not the one that we want. As a result we make sure that our detected
-# `link.exe` shows up in PATH first.
-ifdef CFG_MSVC_LINK
-export PATH := $(CFG_MSVC_ROOT)/VC/bin/amd64:$(PATH)
-endif
-
-# There are more comments about this available in the target specification for
-# Windows MSVC in the compiler, but the gist of it is that we use `llvm-ar.exe`
-# instead of `lib.exe` for assembling archives, so we need to inject this custom
-# dependency here.
-NATIVE_TOOL_DEPS_core_T_x86_64-pc-windows-msvc += llvm-ar.exe
-INSTALLED_BINS_x86_64-pc-windows-msvc += llvm-ar.exe
-
-# When working with MSVC on windows, each DLL needs to explicitly declare its
-# interface to the outside world through some means. The options for doing so
-# include:
-#
-# 1. A custom attribute on each function itself
-# 2. A linker argument saying what to export
-# 3. A file which lists all symbols that need to be exported
-#
-# The Rust compiler takes care (1) for us for all Rust code by annotating all
-# public-facing functions with dllexport, but we have a few native dependencies
-# which need to cross the DLL boundary. The most important of these dependencies
-# is LLVM which is linked into `rustc_llvm.dll` but primarily used from
-# `rustc_trans.dll`. This means that many of LLVM's C API functions need to be
-# exposed from `rustc_llvm.dll` to be forwarded over the boundary.
-#
-# Unfortunately, at this time, LLVM does not handle this sort of exportation on
-# Windows for us, so we're forced to do it ourselves if we want it (which seems
-# like the path of least resistance right now). To do this we generate a `.DEF`
-# file [1] which we then custom-pass to the linker when building the rustc_llvm
-# crate. This DEF file list all symbols that are exported from
-# `src/librustc_llvm/lib.rs` and is generated by a small python script.
-#
-# Fun times!
-#
-# [1]: https://msdn.microsoft.com/en-us/library/28d6s79h.aspx
-RUSTFLAGS_rustc_llvm_T_x86_64-pc-windows-msvc += \
- -C link-args="-DEF:x86_64-pc-windows-msvc/rt/rustc_llvm.def"
-CUSTOM_DEPS_rustc_llvm_T_x86_64-pc-windows-msvc += \
- x86_64-pc-windows-msvc/rt/rustc_llvm.def
-
-x86_64-pc-windows-msvc/rt/rustc_llvm.def: $(S)src/etc/mklldef.py \
- $(S)src/librustc_llvm/lib.rs
- $(CFG_PYTHON) $^ $@ rustc_llvm-$(CFG_FILENAME_EXTRA)
-
# All windows nightiles are currently a GNU triple, so this MSVC triple is not
# bootstrapping from itself. This is relevant during stage0, and other parts of
# the build system take this into account.
--- /dev/null
+# x86_64-unknown-netbsd configuration
+CC_x86_64-unknown-netbsd=$(CC)
+CXX_x86_64-unknown-netbsd=$(CXX)
+CPP_x86_64-unknown-netbsd=$(CPP)
+AR_x86_64-unknown-netbsd=$(AR)
+CFG_LIB_NAME_x86_64-unknown-netbsd=lib$(1).so
+CFG_STATIC_LIB_NAME_x86_64-unknown-netbsd=lib$(1).a
+CFG_LIB_GLOB_x86_64-unknown-netbsd=lib$(1)-*.so
+CFG_LIB_DSYM_GLOB_x86_64-unknown-netbsd=$(1)-*.dylib.dSYM
+CFG_JEMALLOC_CFLAGS_x86_64-unknown-netbsd := -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_CFLAGS_x86_64-unknown-netbsd := -Wall -Werror -g -fPIC -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_LINK_FLAGS_x86_64-unknown-netbsd := -shared -fPIC -g -pthread -lrt
+CFG_GCCISH_DEF_FLAG_x86_64-unknown-netbsd := -Wl,--export-dynamic,--dynamic-list=
+CFG_LLC_FLAGS_x86_64-unknown-netbsd :=
+CFG_INSTALL_NAME_x86_64-unknown-netbsd =
+CFG_EXE_SUFFIX_x86_64-unknown-netbsd :=
+CFG_WINDOWSY_x86_64-unknown-netbsd :=
+CFG_UNIXY_x86_64-unknown-netbsd := 1
+CFG_LDPATH_x86_64-unknown-netbsd :=
+CFG_RUN_x86_64-unknown-netbsd=$(2)
+CFG_RUN_TARG_x86_64-unknown-netbsd=$(call CFG_RUN_x86_64-unknown-netbsd,,$(2))
+CFG_GNU_TRIPLE_x86_64-unknown-netbsd := x86_64-unknown-netbsd
.PHONY: TAGS.emacs TAGS.vi
-# This is using a blacklist approach, probably more durable than a whitelist.
-# We exclude: external dependencies (llvm, rt/{msvc,vg}),
-# tests (compiletest, test) and a couple of other things (rt/arch, etc)
-CTAGS_LOCATIONS=$(patsubst ${CFG_SRC_DIR}src/llvm,, \
- $(patsubst ${CFG_SRC_DIR}src/compiletest,, \
- $(patsubst ${CFG_SRC_DIR}src/test,, \
- $(patsubst ${CFG_SRC_DIR}src/etc,, \
- $(patsubst ${CFG_SRC_DIR}src/rt,, \
- $(patsubst ${CFG_SRC_DIR}src/rt/arch,, \
- $(patsubst ${CFG_SRC_DIR}src/rt/msvc,, \
- $(patsubst ${CFG_SRC_DIR}src/rt/vg,, \
- $(wildcard ${CFG_SRC_DIR}src/*) $(wildcard ${CFG_SRC_DIR}src/rt/*) \
- ))))))))
-CTAGS_OPTS=--options="${CFG_SRC_DIR}src/etc/ctags.rust" --languages=-javascript --recurse ${CTAGS_LOCATIONS}
-# We could use `--languages=Rust`, but there is value in producing tags for the
-# C++ parts of the code base too (at the time of writing, those are .h and .cpp
-# files in src/rt, src/rt/sync and src/rustllvm); we mainly just want to
-# exclude the external dependencies.
+CTAGS_LOCATIONS=$(wildcard ${CFG_SRC_DIR}src/lib*)
+CTAGS_LOCATIONS=$(patsubst ${CFG_SRC_DIR}src/librust%,, \
+ $(patsubst ${CFG_SRC_DIR}src/lib%test,, \
+ $(wildcard ${CFG_SRC_DIR}src/lib*))) ${CFG_SRC_DIR}src/libtest
+CTAGS_OPTS=--options="${CFG_SRC_DIR}src/etc/ctags.rust" --languages=Rust --recurse ${CTAGS_LOCATIONS}
TAGS.emacs:
ctags -e -f $@ ${CTAGS_OPTS}
D := $(S)src/doc
-DOC_TARGETS := trpl style error-index
+DOC_TARGETS := trpl nomicon style error-index
COMPILER_DOC_TARGETS :=
DOC_L10N_TARGETS :=
$(Q)rm -rf doc/book
$(Q)$(RUSTBOOK) build $(S)src/doc/trpl doc/book
+nomicon: doc/nomicon/index.html
+
+doc/nomicon/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/nomicon/*.md) | doc/
+ @$(call E, rustbook: $@)
+ $(Q)rm -rf doc/nomicon
+ $(Q)$(RUSTBOOK) build $(S)src/doc/nomicon doc/nomicon
+
style: doc/style/index.html
doc/style/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/style/*.md) | doc/
######################################################################
# The version number
-CFG_RELEASE_NUM=1.2.0
+CFG_RELEASE_NUM=1.3.0
# An optional number to put after the label, e.g. '.2' -> '-beta.2'
# NB Make sure it starts with a dot to conform to semver pre-release
# versions (section 9)
-CFG_PRERELEASE_VERSION=.6
+CFG_PRERELEASE_VERSION=.3
# Append a version-dependent hash to each library, so we can install different
# versions in the same place
LLVM_INCDIR_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --includedir)
LLVM_LIBDIR_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --libdir)
LLVM_LIBDIR_RUSTFLAGS_$(1)=-L "$$(LLVM_LIBDIR_$(1))"
-LLVM_LIBS_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --libs $$(LLVM_COMPONENTS))
LLVM_LDFLAGS_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --ldflags)
ifeq ($$(findstring freebsd,$(1)),freebsd)
# On FreeBSD, it may search wrong headers (that are for pre-installed LLVM),
$(foreach target,$(CFG_TARGET), \
$(eval $(call CFG_MAKE_TOOLCHAIN,$(target))))
+
+# There are more comments about this available in the target specification for
+# Windows MSVC in the compiler, but the gist of it is that we use `llvm-ar.exe`
+# instead of `lib.exe` for assembling archives, so we need to inject this custom
+# dependency here.
+define ADD_LLVM_AR_TO_MSVC_DEPS
+ifeq ($$(findstring msvc,$(1)),msvc)
+NATIVE_TOOL_DEPS_core_T_$(1) += llvm-ar.exe
+INSTALLED_BINS_$(1) += llvm-ar.exe
+endif
+endef
+
+$(foreach target,$(CFG_TARGET), \
+ $(eval $(call ADD_LLVM_AR_TO_MSVC_DEPS,$(target))))
+
+# When working with MSVC on windows, each DLL needs to explicitly declare its
+# interface to the outside world through some means. The options for doing so
+# include:
+#
+# 1. A custom attribute on each function itself
+# 2. A linker argument saying what to export
+# 3. A file which lists all symbols that need to be exported
+#
+# The Rust compiler takes care (1) for us for all Rust code by annotating all
+# public-facing functions with dllexport, but we have a few native dependencies
+# which need to cross the DLL boundary. The most important of these dependencies
+# is LLVM which is linked into `rustc_llvm.dll` but primarily used from
+# `rustc_trans.dll`. This means that many of LLVM's C API functions need to be
+# exposed from `rustc_llvm.dll` to be forwarded over the boundary.
+#
+# Unfortunately, at this time, LLVM does not handle this sort of exportation on
+# Windows for us, so we're forced to do it ourselves if we want it (which seems
+# like the path of least resistance right now). To do this we generate a `.DEF`
+# file [1] which we then custom-pass to the linker when building the rustc_llvm
+# crate. This DEF file list all symbols that are exported from
+# `src/librustc_llvm/lib.rs` and is generated by a small python script.
+#
+# Fun times!
+#
+# [1]: https://msdn.microsoft.com/en-us/library/28d6s79h.aspx
+define ADD_RUSTC_LLVM_DEF_TO_MSVC
+ifeq ($$(findstring msvc,$(1)),msvc)
+RUSTFLAGS_rustc_llvm_T_$(1) += -C link-args="-DEF:$(1)/rt/rustc_llvm.def"
+CUSTOM_DEPS_rustc_llvm_T_$(1) += $(1)/rt/rustc_llvm.def
+
+$(1)/rt/rustc_llvm.def: $$(S)src/etc/mklldef.py $$(S)src/librustc_llvm/lib.rs
+ $$(CFG_PYTHON) $$^ $$@ rustc_llvm-$$(CFG_FILENAME_EXTRA)
+endif
+endef
+
+$(foreach target,$(CFG_TARGET), \
+ $(eval $(call ADD_RUSTC_LLVM_DEF_TO_MSVC,$(target))))
+
NATIVE_DEPS_miniz_$(1) = miniz.c
NATIVE_DEPS_rust_builtin_$(1) := rust_builtin.c \
rust_android_dummy.c
-NATIVE_DEPS_rustrt_native_$(1) := \
- rust_try.ll \
- arch/$$(HOST_$(1))/record_sp.S
+NATIVE_DEPS_rustrt_native_$(1) := arch/$$(HOST_$(1))/record_sp.S
NATIVE_DEPS_rust_test_helpers_$(1) := rust_test_helpers.c
NATIVE_DEPS_morestack_$(1) := arch/$$(HOST_$(1))/morestack.S
RT_OUTPUT_DIR_$(1) := $(1)/rt
-$$(RT_OUTPUT_DIR_$(1))/%.o: $(S)src/rt/%.ll $$(MKFILE_DEPS) \
- $$(LLVM_CONFIG_$$(CFG_BUILD))
- @mkdir -p $$(@D)
- @$$(call E, compile: $$@)
- $$(Q)$$(LLC_$$(CFG_BUILD)) $$(CFG_LLC_FLAGS_$(1)) \
- -filetype=obj -mtriple=$$(CFG_LLVM_TARGET_$(1)) \
- -relocation-model=pic -o $$@ $$<
-
$$(RT_OUTPUT_DIR_$(1))/%.o: $(S)src/rt/%.c $$(MKFILE_DEPS)
@mkdir -p $$(@D)
@$$(call E, compile: $$@)
@mkdir -p $$(@D)
@$$(call E, compile: $$@)
$$(Q)$$(call CFG_ASSEMBLE_$(1),$$@,$$<)
+
+# On MSVC targets the compiler's default include path (e.g. where to find system
+# headers) is specified by the INCLUDE environment variable. This may not be set
+# so the ./configure script scraped the relevant values and this is the location
+# that we put them into cl.exe's environment.
+ifeq ($$(findstring msvc,$(1)),msvc)
+$$(RT_OUTPUT_DIR_$(1))/%.o: \
+ export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(1)))
+$(1)/rustllvm/%.o: \
+ export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(1)))
+endif
endef
$(foreach target,$(CFG_TARGET),$(eval $(call NATIVE_LIBRARIES,$(target))))
OBJS_$(2)_$(1) := $$(NATIVE_DEPS_$(2)_$(1):%=$$(RT_OUTPUT_DIR_$(1))/%)
OBJS_$(2)_$(1) := $$(OBJS_$(2)_$(1):.c=.o)
OBJS_$(2)_$(1) := $$(OBJS_$(2)_$(1):.cpp=.o)
-OBJS_$(2)_$(1) := $$(OBJS_$(2)_$(1):.ll=.o)
OBJS_$(2)_$(1) := $$(OBJS_$(2)_$(1):.S=.o)
NATIVE_$(2)_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),$(2))
$$(RT_OUTPUT_DIR_$(1))/$$(NATIVE_$(2)_$(1)): $$(OBJS_$(2)_$(1))
ifeq ($$(findstring msvc,$(1)),msvc)
COMPRT_CC_$(1) := gcc
COMPRT_AR_$(1) := ar
+ifeq ($$(findstring i686,$(1)),i686)
+COMPRT_CFLAGS_$(1) := $$(CFG_GCCISH_CFLAGS_$(1)) -m32
+else
COMPRT_CFLAGS_$(1) := $$(CFG_GCCISH_CFLAGS_$(1)) -m64
endif
+endif
$$(COMPRT_LIB_$(1)): $$(COMPRT_DEPS) $$(MKFILE_DEPS)
@$$(call E, make: compiler-rt)
endif
RUSTLLVM_OBJS_CS_$(1) := $$(addprefix rustllvm/, \
- ExecutionEngineWrapper.cpp RustWrapper.cpp PassWrapper.cpp)
+ ExecutionEngineWrapper.cpp RustWrapper.cpp PassWrapper.cpp \
+ ArchiveWrapper.cpp)
RUSTLLVM_INCS_$(1) = $$(LLVM_EXTRA_INCDIRS_$(1)) \
$$(call CFG_CC_INCLUDE_$(1),$$(LLVM_INCDIR_$(1))) \
$(foreach crate,$(CRATES), \
$(foreach tool,$(NATIVE_TOOL_DEPS_$(crate)_T_$(target)), \
$(eval $(call MOVE_TOOLS_TO_SNAPSHOT_HOST_DIR,0,$(target),$(BOOTSTRAP_FROM_$(target)),$(crate),$(tool))))))
+
+# For MSVC targets we need to set up some environment variables for the linker
+# to work correctly when building Rust crates. These two variables are:
+#
+# - LIB tells the linker the default search path for finding system libraries,
+# for example kernel32.dll
+# - PATH needs to be modified to ensure that MSVC's link.exe is first in the
+# path instead of MinGW's /usr/bin/link.exe (entirely unrelated)
+#
+# The values for these variables are detected by the configure script.
+define SETUP_LIB_MSVC_ENV_VARS
+ifeq ($$(findstring msvc,$(2)),msvc)
+$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \
+ export LIB := $$(CFG_MSVC_LIB_PATH_$$(HOST_$(2)))
+$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \
+ export PATH := $$(CFG_MSVC_BINDIR_$$(HOST_$(2))):$$(PATH)
+endif
+endef
+define SETUP_TOOL_MSVC_ENV_VARS
+ifeq ($$(findstring msvc,$(2)),msvc)
+$$(TBIN$(1)_T_$(2)_H_$(3))/$(4)$$(X_$(2)): \
+ export LIB := $$(CFG_MSVC_LIB_PATH_$$(HOST_$(2)))
+$$(TBIN$(1)_T_$(2)_H_$(3))/$(4)$$(X_$(2)): \
+ export PATH := $$(CFG_MSVC_BINDIR_$$(HOST_$(2))):$$(PATH)
+endif
+endef
+
+$(foreach host,$(CFG_HOST), \
+ $(foreach target,$(CFG_TARGET), \
+ $(foreach crate,$(CRATES), \
+ $(eval $(call SETUP_LIB_MSVC_ENV_VARS,0,$(target),$(host),$(crate))))))
+$(foreach host,$(CFG_HOST), \
+ $(foreach target,$(CFG_TARGET), \
+ $(foreach tool,$(TOOLS), \
+ $(eval $(call SETUP_TOOL_MSVC_ENV_VARS,0,$(target),$(host),$(tool))))))
$(eval $(call DOCTEST,md-$(doc),$(S)src/doc/$(doc).md)))
$(foreach file,$(wildcard $(S)src/doc/trpl/*.md), \
$(eval $(call DOCTEST,$(file:$(S)src/doc/trpl/%.md=trpl-%),$(file))))
-
+$(foreach file,$(wildcard $(S)src/doc/nomicon/*.md), \
+ $(eval $(call DOCTEST,$(file:$(S)src/doc/nomicon/%.md=nomicon-%),$(file))))
######################################################################
# Main test targets
######################################################################
#![feature(libc)]
#![feature(path_ext)]
#![feature(rustc_private)]
-#![feature(slice_extras)]
+#![feature(slice_splits)]
#![feature(str_char)]
#![feature(test)]
#![feature(vec_push_all)]
optopt("", "lldb-python-dir", "directory containing LLDB's python module", "PATH"),
optflag("h", "help", "show this message"));
- assert!(!args.is_empty());
- let argv0 = args[0].clone();
- let args_ = args.tail();
+ let (argv0, args_) = args.split_first().unwrap();
if args[1] == "-h" || args[1] == "--help" {
let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
println!("{}", getopts::usage(&message, &groups));
check_lines,
breakpoint_lines
} = parse_debugger_commands(testfile, "gdb");
- let mut cmds = commands.connect("\n");
+ let mut cmds = commands.join("\n");
// compile test file (it should have 'compile-flags:-g' in the header)
let compiler_run_result = compile_test(config, props, testfile);
split_maybe_args(options).into_iter()
.filter(|x| !options_to_remove.contains(x))
.collect::<Vec<String>>()
- .connect(" ");
+ .join(" ");
Some(new_options)
}
fn compile_test(config: &Config, props: &TestProps,
testfile: &Path) -> ProcRes {
- compile_test_(config, props, testfile, &[])
-}
-
-fn compile_test_(config: &Config, props: &TestProps,
- testfile: &Path, extra_args: &[String]) -> ProcRes {
let aux_dir = aux_output_dir_name(config, testfile);
// FIXME (#9639): This needs to handle non-utf8 paths
- let mut link_args = vec!("-L".to_string(),
- aux_dir.to_str().unwrap().to_string());
- link_args.extend(extra_args.iter().cloned());
+ let link_args = vec!("-L".to_string(),
+ aux_dir.to_str().unwrap().to_string());
let args = make_compile_args(config,
props,
link_args,
}
fn document(config: &Config, props: &TestProps,
- testfile: &Path, extra_args: &[String]) -> (ProcRes, PathBuf) {
+ testfile: &Path) -> (ProcRes, PathBuf) {
let aux_dir = aux_output_dir_name(config, testfile);
let out_dir = output_base_name(config, testfile);
let _ = fs::remove_dir_all(&out_dir);
"-o".to_string(),
out_dir.to_str().unwrap().to_string(),
testfile.to_str().unwrap().to_string()];
- args.extend(extra_args.iter().cloned());
args.extend(split_maybe_args(&props.compile_flags));
let args = ProcArgs {
prog: config.rustdoc_path.to_str().unwrap().to_string(),
// Linux and mac don't require adjusting the library search path
if cfg!(unix) {
- format!("{} {}", prog, args.connect(" "))
+ format!("{} {}", prog, args.join(" "))
} else {
// Build the LD_LIBRARY_PATH variable as it would be seen on the command line
// for diagnostic purposes
format!("{}=\"{}\"", util::lib_path_env_var(), util::make_new_path(path))
}
- format!("{} {} {}", lib_path_cmd_prefix(libpath), prog, args.connect(" "))
+ format!("{} {} {}", lib_path_cmd_prefix(libpath), prog, args.join(" "))
}
}
}
fn charset() -> &'static str {
- if cfg!(any(target_os = "bitrig", target_os = "freebsd")) {
+ // FreeBSD 10.1 defaults to GDB 6.1.1 which doesn't support "auto" charset
+ if cfg!(target_os = "bitrig") {
"auto"
+ } else if cfg!(target_os = "freebsd") {
+ "ISO-8859-1"
} else {
"UTF-8"
}
}
fn run_rustdoc_test(config: &Config, props: &TestProps, testfile: &Path) {
- let (proc_res, out_dir) = document(config, props, testfile, &[]);
+ let (proc_res, out_dir) = document(config, props, testfile);
if !proc_res.status.success() {
fatal_proc_rec("rustdoc failed!", &proc_res);
}
("ios", "ios"),
("linux", "linux"),
("mingw32", "windows"),
+ ("netbsd", "netbsd"),
("openbsd", "openbsd"),
("win32", "windows"),
("windows", "windows"),
non-exhaustive match would be to panic the thread if nothing is matched, though
it could fall through if the type of the `match` expression is `()`. This sort
of hidden cost and special casing is against the language's philosophy. It's
-easy to ignore certain cases by using the `_` wildcard:
+easy to ignore all unspecified cases by using the `_` wildcard:
```rust,ignore
match val.do_something() {
-% The (old) Rust Pointer Guide
+% The Rust Pointer Guide
-This content has moved into
-[the Rust Programming Language book](book/pointers.html).
+This content has been removed, with no direct replacement. Rust only
+has two built-in pointer types now,
+[references](book/references-and-borrowing.html) and [raw
+pointers](book/raw-pointers.html). Older Rusts had many more pointer
+types, but they’re gone now.
[rbe]: http://rustbyexample.com/
+# The Standard Library
+
+We have [API documentation for the entire standard
+library](std/index.html). There's a list of crates on the left with more
+specific sections, or you can use the search bar at the top to search for
+something if you know its name.
+
# Community & Getting Help
If you need help with something, or just want to talk about Rust with others,
* [Project FAQ](complement-project-faq.html)
* [How to submit a bug report](https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md#bug-reports)
-# The Standard Library
-
-We have [API documentation for the entire standard
-library](std/index.html). There's a list of crates on the left with more
-specific sections, or you can use the search bar at the top to search for
-something if you know its name.
-
# The Error Index
If you encounter an error while compiling your code you may be able to look it
--- /dev/null
+% The Rustonomicon
+
+#### The Dark Arts of Advanced and Unsafe Rust Programming
+
+# NOTE: This is a draft document, and may contain serious errors
+
+> Instead of the programs I had hoped for, there came only a shuddering blackness
+and ineffable loneliness; and I saw at last a fearful truth which no one had
+ever dared to breathe before — the unwhisperable secret of secrets — The fact
+that this language of stone and stridor is not a sentient perpetuation of Rust
+as London is of Old London and Paris of Old Paris, but that it is in fact
+quite unsafe, its sprawling body imperfectly embalmed and infested with queer
+animate things which have nothing to do with it as it was in compilation.
+
+This book digs into all the awful details that are necessary to understand in
+order to write correct Unsafe Rust programs. Due to the nature of this problem,
+it may lead to unleashing untold horrors that shatter your psyche into a billion
+infinitesimal fragments of despair.
+
+Should you wish a long and happy career of writing Rust programs, you should
+turn back now and forget you ever saw this book. It is not necessary. However
+if you intend to write unsafe code -- or just want to dig into the guts of the
+language -- this book contains invaluable information.
+
+Unlike [The Book][trpl] we will be assuming considerable prior knowledge. In
+particular, you should be comfortable with basic systems programming and Rust.
+If you don't feel comfortable with these topics, you should consider [reading
+The Book][trpl] first. Though we will not be assuming that you have, and will
+take care to occasionally give a refresher on the basics where appropriate. You
+can skip straight to this book if you want; just know that we won't be
+explaining everything from the ground up.
+
+To be clear, this book goes into deep detail. We're going to dig into
+exception-safety, pointer aliasing, memory models, and even some type-theory.
+We will also be spending a lot of time talking about the different kinds
+of safety and guarantees.
+
+[trpl]: ../book/
--- /dev/null
+# Summary
+
+* [Meet Safe and Unsafe](meet-safe-and-unsafe.md)
+ * [How Safe and Unsafe Interact](safe-unsafe-meaning.md)
+ * [Working with Unsafe](working-with-unsafe.md)
+* [Data Layout](data.md)
+ * [repr(Rust)](repr-rust.md)
+ * [Exotically Sized Types](exotic-sizes.md)
+ * [Other reprs](other-reprs.md)
+* [Ownership](ownership.md)
+ * [References](references.md)
+ * [Lifetimes](lifetimes.md)
+ * [Limits of Lifetimes](lifetime-mismatch.md)
+ * [Lifetime Elision](lifetime-elision.md)
+ * [Unbounded Lifetimes](unbounded-lifetimes.md)
+ * [Higher-Rank Trait Bounds](hrtb.md)
+ * [Subtyping and Variance](subtyping.md)
+ * [Drop Check](dropck.md)
+ * [PhantomData](phantom-data.md)
+ * [Splitting Borrows](borrow-splitting.md)
+* [Type Conversions](conversions.md)
+ * [Coercions](coercions.md)
+ * [The Dot Operator](dot-operator.md)
+ * [Casts](casts.md)
+ * [Transmutes](transmutes.md)
+* [Uninitialized Memory](uninitialized.md)
+ * [Checked](checked-uninit.md)
+ * [Drop Flags](drop-flags.md)
+ * [Unchecked](unchecked-uninit.md)
+* [Ownership Based Resource Management](obrm.md)
+ * [Constructors](constructors.md)
+ * [Destructors](destructors.md)
+ * [Leaking](leaking.md)
+* [Unwinding](unwinding.md)
+ * [Exception Safety](exception-safety.md)
+ * [Poisoning](poisoning.md)
+* [Concurrency](concurrency.md)
+ * [Races](races.md)
+ * [Send and Sync](send-and-sync.md)
+ * [Atomics](atomics.md)
+* [Implementing Vec](vec.md)
+ * [Layout](vec-layout.md)
+ * [Allocating](vec-alloc.md)
+ * [Push and Pop](vec-push-pop.md)
+ * [Deallocating](vec-dealloc.md)
+ * [Deref](vec-deref.md)
+ * [Insert and Remove](vec-insert-remove.md)
+ * [IntoIter](vec-into-iter.md)
+ * [RawVec](vec-raw.md)
+ * [Drain](vec-drain.md)
+ * [Handling Zero-Sized Types](vec-zsts.md)
+ * [Final Code](vec-final.md)
+* [Implementing Arc and Mutex](arc-and-mutex.md)
--- /dev/null
+% Implementing Arc and Mutex
+
+Knowing the theory is all fine and good, but the *best* way to understand
+something is to use it. To better understand atomics and interior mutability,
+we'll be implementing versions of the standard library's Arc and Mutex types.
+
+TODO: ALL OF THIS OMG
--- /dev/null
+% Atomics
+
+Rust pretty blatantly just inherits C11's memory model for atomics. This is not
+due to this model being particularly excellent or easy to understand. Indeed, this
+model is quite complex and known to have [several flaws][C11-busted]. Rather, it
+is a pragmatic concession to the fact that *everyone* is pretty bad at modeling
+atomics. At the very least, we can benefit from existing tooling and research around
+C.
+
+Trying to fully explain the model in this book is fairly hopeless. It's defined
+in terms of madness-inducing causality graphs that require a full book to
+properly understand in a practical way. If you want all the nitty-gritty
+details, you should check out [C's specification (Section 7.17)][C11-model].
+Still, we'll try to cover the basics and some of the problems Rust developers
+face.
+
+The C11 memory model is fundamentally about trying to bridge the gap between the
+semantics we want, the optimizations compilers want, and the inconsistent chaos
+our hardware wants. *We* would like to just write programs and have them do
+exactly what we said but, you know, fast. Wouldn't that be great?
+
+
+
+
+# Compiler Reordering
+
+Compilers fundamentally want to be able to do all sorts of crazy transformations
+to reduce data dependencies and eliminate dead code. In particular, they may
+radically change the actual order of events, or make events never occur! If we
+write something like
+
+```rust,ignore
+x = 1;
+y = 3;
+x = 2;
+```
+
+The compiler may conclude that it would be best if your program did
+
+```rust,ignore
+x = 2;
+y = 3;
+```
+
+This has inverted the order of events and completely eliminated one event.
+From a single-threaded perspective this is completely unobservable: after all
+the statements have executed we are in exactly the same state. But if our
+program is multi-threaded, we may have been relying on `x` to actually be
+assigned to 1 before `y` was assigned. We would like the compiler to be
+able to make these kinds of optimizations, because they can seriously improve
+performance. On the other hand, we'd also like to be able to depend on our
+program *doing the thing we said*.
+
+
+
+
+# Hardware Reordering
+
+On the other hand, even if the compiler totally understood what we wanted and
+respected our wishes, our hardware might instead get us in trouble. Trouble
+comes from CPUs in the form of memory hierarchies. There is indeed a global
+shared memory space somewhere in your hardware, but from the perspective of each
+CPU core it is *so very far away* and *so very slow*. Each CPU would rather work
+with its local cache of the data and only go through all the anguish of
+talking to shared memory only when it doesn't actually have that memory in
+cache.
+
+After all, that's the whole point of the cache, right? If every read from the
+cache had to run back to shared memory to double check that it hadn't changed,
+what would the point be? The end result is that the hardware doesn't guarantee
+that events that occur in the same order on *one* thread, occur in the same
+order on *another* thread. To guarantee this, we must issue special instructions
+to the CPU telling it to be a bit less smart.
+
+For instance, say we convince the compiler to emit this logic:
+
+```text
+initial state: x = 0, y = 1
+
+THREAD 1 THREAD2
+y = 3; if x == 1 {
+x = 1; y *= 2;
+ }
+```
+
+Ideally this program has 2 possible final states:
+
+* `y = 3`: (thread 2 did the check before thread 1 completed)
+* `y = 6`: (thread 2 did the check after thread 1 completed)
+
+However there's a third potential state that the hardware enables:
+
+* `y = 2`: (thread 2 saw `x = 1`, but not `y = 3`, and then overwrote `y = 3`)
+
+It's worth noting that different kinds of CPU provide different guarantees. It
+is common to separate hardware into two categories: strongly-ordered and weakly-
+ordered. Most notably x86/64 provides strong ordering guarantees, while ARM
+provides weak ordering guarantees. This has two consequences for concurrent
+programming:
+
+* Asking for stronger guarantees on strongly-ordered hardware may be cheap or
+ even free because they already provide strong guarantees unconditionally.
+ Weaker guarantees may only yield performance wins on weakly-ordered hardware.
+
+* Asking for guarantees that are too weak on strongly-ordered hardware is
+ more likely to *happen* to work, even though your program is strictly
+ incorrect. If possible, concurrent algorithms should be tested on
+ weakly-ordered hardware.
+
+
+
+
+
+# Data Accesses
+
+The C11 memory model attempts to bridge the gap by allowing us to talk about the
+*causality* of our program. Generally, this is by establishing a *happens
+before* relationship between parts of the program and the threads that are
+running them. This gives the hardware and compiler room to optimize the program
+more aggressively where a strict happens-before relationship isn't established,
+but forces them to be more careful where one is established. The way we
+communicate these relationships are through *data accesses* and *atomic
+accesses*.
+
+Data accesses are the bread-and-butter of the programming world. They are
+fundamentally unsynchronized and compilers are free to aggressively optimize
+them. In particular, data accesses are free to be reordered by the compiler on
+the assumption that the program is single-threaded. The hardware is also free to
+propagate the changes made in data accesses to other threads as lazily and
+inconsistently as it wants. Most critically, data accesses are how data races
+happen. Data accesses are very friendly to the hardware and compiler, but as
+we've seen they offer *awful* semantics to try to write synchronized code with.
+Actually, that's too weak.
+
+**It is literally impossible to write correct synchronized code using only data
+accesses.**
+
+Atomic accesses are how we tell the hardware and compiler that our program is
+multi-threaded. Each atomic access can be marked with an *ordering* that
+specifies what kind of relationship it establishes with other accesses. In
+practice, this boils down to telling the compiler and hardware certain things
+they *can't* do. For the compiler, this largely revolves around re-ordering of
+instructions. For the hardware, this largely revolves around how writes are
+propagated to other threads. The set of orderings Rust exposes are:
+
+* Sequentially Consistent (SeqCst)
+* Release
+* Acquire
+* Relaxed
+
+(Note: We explicitly do not expose the C11 *consume* ordering)
+
+TODO: negative reasoning vs positive reasoning? TODO: "can't forget to
+synchronize"
+
+
+
+# Sequentially Consistent
+
+Sequentially Consistent is the most powerful of all, implying the restrictions
+of all other orderings. Intuitively, a sequentially consistent operation
+cannot be reordered: all accesses on one thread that happen before and after a
+SeqCst access stay before and after it. A data-race-free program that uses
+only sequentially consistent atomics and data accesses has the very nice
+property that there is a single global execution of the program's instructions
+that all threads agree on. This execution is also particularly nice to reason
+about: it's just an interleaving of each thread's individual executions. This
+does not hold if you start using the weaker atomic orderings.
+
+The relative developer-friendliness of sequential consistency doesn't come for
+free. Even on strongly-ordered platforms sequential consistency involves
+emitting memory fences.
+
+In practice, sequential consistency is rarely necessary for program correctness.
+However sequential consistency is definitely the right choice if you're not
+confident about the other memory orders. Having your program run a bit slower
+than it needs to is certainly better than it running incorrectly! It's also
+mechanically trivial to downgrade atomic operations to have a weaker
+consistency later on. Just change `SeqCst` to `Relaxed` and you're done! Of
+course, proving that this transformation is *correct* is a whole other matter.
+
+
+
+
+# Acquire-Release
+
+Acquire and Release are largely intended to be paired. Their names hint at their
+use case: they're perfectly suited for acquiring and releasing locks, and
+ensuring that critical sections don't overlap.
+
+Intuitively, an acquire access ensures that every access after it stays after
+it. However operations that occur before an acquire are free to be reordered to
+occur after it. Similarly, a release access ensures that every access before it
+stays before it. However operations that occur after a release are free to be
+reordered to occur before it.
+
+When thread A releases a location in memory and then thread B subsequently
+acquires *the same* location in memory, causality is established. Every write
+that happened before A's release will be observed by B after its acquire.
+However no causality is established with any other threads. Similarly, no
+causality is established if A and B access *different* locations in memory.
+
+Basic use of release-acquire is therefore simple: you acquire a location of
+memory to begin the critical section, and then release that location to end it.
+For instance, a simple spinlock might look like:
+
+```rust
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::thread;
+
+fn main() {
+ let lock = Arc::new(AtomicBool::new(true)); // value answers "am I locked?"
+
+ // ... distribute lock to threads somehow ...
+
+ // Try to acquire the lock by setting it to false
+ while !lock.compare_and_swap(true, false, Ordering::Acquire) { }
+ // broke out of the loop, so we successfully acquired the lock!
+
+ // ... scary data accesses ...
+
+ // ok we're done, release the lock
+ lock.store(true, Ordering::Release);
+}
+```
+
+On strongly-ordered platforms most accesses have release or acquire semantics,
+making release and acquire often totally free. This is not the case on
+weakly-ordered platforms.
+
+
+
+
+# Relaxed
+
+Relaxed accesses are the absolute weakest. They can be freely re-ordered and
+provide no happens-before relationship. However, relaxed operations are still
+atomic. That is, they don't count as data accesses and any read-modify-write
+operations done to them occur atomically. Relaxed operations are appropriate for
+things that you definitely want to happen, but don't particularly otherwise care
+about. For instance, incrementing a counter can be safely done by multiple
+threads using a relaxed `fetch_add` if you're not using the counter to
+synchronize any other accesses.
+
+There's rarely a benefit in making an operation relaxed on strongly-ordered
+platforms, since they usually provide release-acquire semantics anyway. However
+relaxed operations can be cheaper on weakly-ordered platforms.
+
+
+
+
+
+[C11-busted]: http://plv.mpi-sws.org/c11comp/popl15.pdf
+[C11-model]: http://www.open-std.org/jtc1/sc22/wg14/www/standards.html#9899
--- /dev/null
+% Splitting Borrows
+
+The mutual exclusion property of mutable references can be very limiting when
+working with a composite structure. The borrow checker understands some basic
+stuff, but will fall over pretty easily. It does understand structs
+sufficiently to know that it's possible to borrow disjoint fields of a struct
+simultaneously. So this works today:
+
+```rust
+struct Foo {
+ a: i32,
+ b: i32,
+ c: i32,
+}
+
+let mut x = Foo {a: 0, b: 0, c: 0};
+let a = &mut x.a;
+let b = &mut x.b;
+let c = &x.c;
+*b += 1;
+let c2 = &x.c;
+*a += 10;
+println!("{} {} {} {}", a, b, c, c2);
+```
+
+However borrowck doesn't understand arrays or slices in any way, so this doesn't
+work:
+
+```rust,ignore
+let mut x = [1, 2, 3];
+let a = &mut x[0];
+let b = &mut x[1];
+println!("{} {}", a, b);
+```
+
+```text
+<anon>:4:14: 4:18 error: cannot borrow `x[..]` as mutable more than once at a time
+<anon>:4 let b = &mut x[1];
+ ^~~~
+<anon>:3:14: 3:18 note: previous borrow of `x[..]` occurs here; the mutable borrow prevents subsequent moves, borrows, or modification of `x[..]` until the borrow ends
+<anon>:3 let a = &mut x[0];
+ ^~~~
+<anon>:6:2: 6:2 note: previous borrow ends here
+<anon>:1 fn main() {
+<anon>:2 let mut x = [1, 2, 3];
+<anon>:3 let a = &mut x[0];
+<anon>:4 let b = &mut x[1];
+<anon>:5 println!("{} {}", a, b);
+<anon>:6 }
+ ^
+error: aborting due to 2 previous errors
+```
+
+While it was plausible that borrowck could understand this simple case, it's
+pretty clearly hopeless for borrowck to understand disjointness in general
+container types like a tree, especially if distinct keys actually *do* map
+to the same value.
+
+In order to "teach" borrowck that what we're doing is ok, we need to drop down
+to unsafe code. For instance, mutable slices expose a `split_at_mut` function
+that consumes the slice and returns two mutable slices. One for everything to
+the left of the index, and one for everything to the right. Intuitively we know
+this is safe because the slices don't overlap, and therefore can't alias. However
+the implementation requires some unsafety:
+
+```rust,ignore
+fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+ assert!(mid <= len);
+ unsafe {
+ (from_raw_parts_mut(ptr, mid),
+ from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
+ }
+}
+```
+
+This is actually a bit subtle. So as to avoid ever making two `&mut`'s to the
+same value, we explicitly construct brand-new slices through raw pointers.
+
+However more subtle is how iterators that yield mutable references work.
+The iterator trait is defined as follows:
+
+```rust
+trait Iterator {
+ type Item;
+
+ fn next(&mut self) -> Option<Self::Item>;
+}
+```
+
+Given this definition, Self::Item has *no* connection to `self`. This means that
+we can call `next` several times in a row, and hold onto all the results
+*concurrently*. This is perfectly fine for by-value iterators, which have
+exactly these semantics. It's also actually fine for shared references, as they
+admit arbitrarily many references to the same thing (although the iterator needs
+to be a separate object from the thing being shared).
+
+But mutable references make this a mess. At first glance, they might seem
+completely incompatible with this API, as it would produce multiple mutable
+references to the same object!
+
+However it actually *does* work, exactly because iterators are one-shot objects.
+Everything an IterMut yields will be yielded at most once, so we don't
+actually ever yield multiple mutable references to the same piece of data.
+
+Perhaps surprisingly, mutable iterators don't require unsafe code to be
+implemented for many types!
+
+For instance here's a singly linked list:
+
+```rust
+# fn main() {}
+type Link<T> = Option<Box<Node<T>>>;
+
+struct Node<T> {
+ elem: T,
+ next: Link<T>,
+}
+
+pub struct LinkedList<T> {
+ head: Link<T>,
+}
+
+pub struct IterMut<'a, T: 'a>(Option<&'a mut Node<T>>);
+
+impl<T> LinkedList<T> {
+ fn iter_mut(&mut self) -> IterMut<T> {
+ IterMut(self.head.as_mut().map(|node| &mut **node))
+ }
+}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.take().map(|node| {
+ self.0 = node.next.as_mut().map(|node| &mut **node);
+ &mut node.elem
+ })
+ }
+}
+```
+
+Here's a mutable slice:
+
+```rust
+# fn main() {}
+use std::mem;
+
+pub struct IterMut<'a, T: 'a>(&'a mut[T]);
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let slice = mem::replace(&mut self.0, &mut []);
+ if slice.is_empty() { return None; }
+
+ let (l, r) = slice.split_at_mut(1);
+ self.0 = r;
+ l.get_mut(0)
+ }
+}
+
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let slice = mem::replace(&mut self.0, &mut []);
+ if slice.is_empty() { return None; }
+
+ let new_len = slice.len() - 1;
+ let (l, r) = slice.split_at_mut(new_len);
+ self.0 = l;
+ r.get_mut(0)
+ }
+}
+```
+
+And here's a binary tree:
+
+```rust
+# fn main() {}
+use std::collections::VecDeque;
+
+type Link<T> = Option<Box<Node<T>>>;
+
+struct Node<T> {
+ elem: T,
+ left: Link<T>,
+ right: Link<T>,
+}
+
+pub struct Tree<T> {
+ root: Link<T>,
+}
+
+struct NodeIterMut<'a, T: 'a> {
+ elem: Option<&'a mut T>,
+ left: Option<&'a mut Node<T>>,
+ right: Option<&'a mut Node<T>>,
+}
+
+enum State<'a, T: 'a> {
+ Elem(&'a mut T),
+ Node(&'a mut Node<T>),
+}
+
+pub struct IterMut<'a, T: 'a>(VecDeque<NodeIterMut<'a, T>>);
+
+impl<T> Tree<T> {
+ pub fn iter_mut(&mut self) -> IterMut<T> {
+ let mut deque = VecDeque::new();
+ self.root.as_mut().map(|root| deque.push_front(root.iter_mut()));
+ IterMut(deque)
+ }
+}
+
+impl<T> Node<T> {
+ pub fn iter_mut(&mut self) -> NodeIterMut<T> {
+ NodeIterMut {
+ elem: Some(&mut self.elem),
+ left: self.left.as_mut().map(|node| &mut **node),
+ right: self.right.as_mut().map(|node| &mut **node),
+ }
+ }
+}
+
+
+impl<'a, T> Iterator for NodeIterMut<'a, T> {
+ type Item = State<'a, T>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.left.take() {
+ Some(node) => Some(State::Node(node)),
+ None => match self.elem.take() {
+ Some(elem) => Some(State::Elem(elem)),
+ None => match self.right.take() {
+ Some(node) => Some(State::Node(node)),
+ None => None,
+ }
+ }
+ }
+ }
+}
+
+impl<'a, T> DoubleEndedIterator for NodeIterMut<'a, T> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.right.take() {
+ Some(node) => Some(State::Node(node)),
+ None => match self.elem.take() {
+ Some(elem) => Some(State::Elem(elem)),
+ None => match self.left.take() {
+ Some(node) => Some(State::Node(node)),
+ None => None,
+ }
+ }
+ }
+ }
+}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ match self.0.front_mut().and_then(|node_it| node_it.next()) {
+ Some(State::Elem(elem)) => return Some(elem),
+ Some(State::Node(node)) => self.0.push_front(node.iter_mut()),
+ None => if let None = self.0.pop_front() { return None },
+ }
+ }
+ }
+}
+
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ loop {
+ match self.0.back_mut().and_then(|node_it| node_it.next_back()) {
+ Some(State::Elem(elem)) => return Some(elem),
+ Some(State::Node(node)) => self.0.push_back(node.iter_mut()),
+ None => if let None = self.0.pop_back() { return None },
+ }
+ }
+ }
+}
+```
+
+All of these are completely safe and work on stable Rust! This ultimately
+falls out of the simple struct case we saw before: Rust understands that you
+can safely split a mutable reference into subfields. We can then encode
+permanently consuming a reference via Options (or in the case of slices,
+replacing with an empty slice).
--- /dev/null
+% Casts
+
+Casts are a superset of coercions: every coercion can be explicitly
+invoked via a cast. However some conversions require a cast.
+While coercions are pervasive and largely harmless, these "true casts"
+are rare and potentially dangerous. As such, casts must be explicitly invoked
+using the `as` keyword: `expr as Type`.
+
+True casts generally revolve around raw pointers and the primitive numeric
+types. Even though they're dangerous, these casts are infallible at runtime.
+If a cast triggers some subtle corner case no indication will be given that
+this occurred. The cast will simply succeed. That said, casts must be valid
+at the type level, or else they will be prevented statically. For instance,
+`7u8 as bool` will not compile.
+
+However, casts aren't `unsafe` because they generally can't violate memory
+safety *on their own*. For instance, converting an integer to a raw pointer can
+very easily lead to terrible things. However the act of creating the pointer
+itself is safe, because actually using a raw pointer is already marked as
+`unsafe`.
+
+Here's an exhaustive list of all the true casts. For brevity, we will use `*`
+to denote either a `*const` or `*mut`, and `integer` to denote any integral
+primitive:
+
+ * `*T as *U` where `T, U: Sized`
+ * `*T as *U` TODO: explain unsized situation
+ * `*T as integer`
+ * `integer as *T`
+ * `number as number`
+ * `C-like-enum as integer`
+ * `bool as integer`
+ * `char as integer`
+ * `u8 as char`
+ * `&[T; n] as *const T`
+ * `fn as *T` where `T: Sized`
+ * `fn as integer`
+
+Note that lengths are not adjusted when casting raw slices -
+`*const [u16] as *const [u8]` creates a slice that only includes
+half of the original memory.
+
+Casting is not transitive, that is, even if `e as U1 as U2` is a valid
+expression, `e as U2` is not necessarily so.
+
+For numeric casts, there are quite a few cases to consider:
+
+* casting between two integers of the same size (e.g. i32 -> u32) is a no-op
+* casting from a larger integer to a smaller integer (e.g. u32 -> u8) will
+ truncate
+* casting from a smaller integer to a larger integer (e.g. u8 -> u32) will
+ * zero-extend if the source is unsigned
+ * sign-extend if the source is signed
+* casting from a float to an integer will round the float towards zero
+ * **[NOTE: currently this will cause Undefined Behaviour if the rounded
+ value cannot be represented by the target integer type][float-int]**.
+ This includes Inf and NaN. This is a bug and will be fixed.
+* casting from an integer to float will produce the floating point
+ representation of the integer, rounded if necessary (rounding strategy
+ unspecified)
+* casting from an f32 to an f64 is perfect and lossless
+* casting from an f64 to an f32 will produce the closest possible value
+ (rounding strategy unspecified)
+ * **[NOTE: currently this will cause Undefined Behaviour if the value
+ is finite but larger or smaller than the largest or smallest finite
+ value representable by f32][float-float]**. This is a bug and will
+ be fixed.
+
+
+[float-int]: https://github.com/rust-lang/rust/issues/10184
+[float-float]: https://github.com/rust-lang/rust/issues/15536
--- /dev/null
+% Checked Uninitialized Memory
+
+Like C, all stack variables in Rust are uninitialized until a value is
+explicitly assigned to them. Unlike C, Rust statically prevents you from ever
+reading them until you do:
+
+```rust,ignore
+fn main() {
+ let x: i32;
+ println!("{}", x);
+}
+```
+
+```text
+src/main.rs:3:20: 3:21 error: use of possibly uninitialized variable: `x`
+src/main.rs:3 println!("{}", x);
+ ^
+```
+
+This is based on a basic branch analysis: every branch must assign a value
+to `x` before it is first used. Interestingly, Rust doesn't require the variable
+to be mutable to perform a delayed initialization if every branch assigns
+exactly once. However the analysis does not take advantage of constant analysis
+or anything like that. So this compiles:
+
+```rust
+fn main() {
+ let x: i32;
+
+ if true {
+ x = 1;
+ } else {
+ x = 2;
+ }
+
+ println!("{}", x);
+}
+```
+
+but this doesn't:
+
+```rust,ignore
+fn main() {
+ let x: i32;
+ if true {
+ x = 1;
+ }
+ println!("{}", x);
+}
+```
+
+```text
+src/main.rs:6:17: 6:18 error: use of possibly uninitialized variable: `x`
+src/main.rs:6 println!("{}", x);
+```
+
+while this does:
+
+```rust
+fn main() {
+ let x: i32;
+ if true {
+ x = 1;
+ println!("{}", x);
+ }
+ // Don't care that there are branches where it's not initialized
+ // since we don't use the value in those branches
+}
+```
+
+Of course, while the analysis doesn't consider actual values, it does
+have a relatively sophisticated understanding of dependencies and control
+flow. For instance, this works:
+
+```rust
+let x: i32;
+
+loop {
+ // Rust doesn't understand that this branch will be taken unconditionally,
+ // because it relies on actual values.
+ if true {
+ // But it does understand that it will only be taken once because
+ // we unconditionally break out of it. Therefore `x` doesn't
+ // need to be marked as mutable.
+ x = 0;
+ break;
+ }
+}
+// It also knows that it's impossible to get here without reaching the break.
+// And therefore that `x` must be initialized here!
+println!("{}", x);
+```
+
+If a value is moved out of a variable, that variable becomes logically
+uninitialized if the type of the value isn't Copy. That is:
+
+```rust
+fn main() {
+ let x = 0;
+ let y = Box::new(0);
+ let z1 = x; // x is still valid because i32 is Copy
+ let z2 = y; // y is now logically uninitialized because Box isn't Copy
+}
+```
+
+However reassigning `y` in this example *would* require `y` to be marked as
+mutable, as a Safe Rust program could observe that the value of `y` changed:
+
+```rust
+fn main() {
+ let mut y = Box::new(0);
+ let z = y; // y is now logically uninitialized because Box isn't Copy
+ y = Box::new(1); // reinitialize y
+}
+```
+
+Otherwise it's like `y` is a brand new variable.
--- /dev/null
+% Coercions
+
+Types can implicitly be coerced to change in certain contexts. These changes are
+generally just *weakening* of types, largely focused around pointers and
+lifetimes. They mostly exist to make Rust "just work" in more cases, and are
+largely harmless.
+
+Here's all the kinds of coercion:
+
+Coercion is allowed between the following types:
+
+* Transitivity: `T_1` to `T_3` where `T_1` coerces to `T_2` and `T_2` coerces to
+ `T_3`
+* Pointer Weakening:
+ * `&mut T` to `&T`
+ * `*mut T` to `*const T`
+ * `&T` to `*const T`
+ * `&mut T` to `*mut T`
+* Unsizing: `T` to `U` if `T` implements `CoerceUnsized<U>`
+
+`CoerceUnsized<Pointer<U>> for Pointer<T> where T: Unsize<U>` is implemented
+for all pointer types (including smart pointers like Box and Rc). Unsize is
+only implemented automatically, and enables the following transformations:
+
+* `[T, ..n]` => `[T]`
+* `T` => `Trait` where `T: Trait`
+* `Foo<..., T, ...>` => `Foo<..., U, ...>` where:
+ * `T: Unsize<U>`
+ * `Foo` is a struct
+ * Only the last field of `Foo` has type `T`
+ * `T` is not part of the type of any other fields
+
+Coercions occur at a *coercion site*. Any location that is explicitly typed
+will cause a coercion to its type. If inference is necessary, the coercion will
+not be performed. Exhaustively, the coercion sites for an expression `e` to
+type `U` are:
+
+* let statements, statics, and consts: `let x: U = e`
+* Arguments to functions: `takes_a_U(e)`
+* Any expression that will be returned: `fn foo() -> U { e }`
+* Struct literals: `Foo { some_u: e }`
+* Array literals: `let x: [U; 10] = [e, ..]`
+* Tuple literals: `let x: (U, ..) = (e, ..)`
+* The last expression in a block: `let x: U = { ..; e }`
+
+Note that we do not perform coercions when matching traits (except for
+receivers, see below). If there is an impl for some type `U` and `T` coerces to
+`U`, that does not constitute an implementation for `T`. For example, the
+following will not type check, even though it is OK to coerce `t` to `&T` and
+there is an impl for `&T`:
+
+```rust,ignore
+trait Trait {}
+
+fn foo<X: Trait>(t: X) {}
+
+impl<'a> Trait for &'a i32 {}
+
+
+fn main() {
+ let t: &mut i32 = &mut 0;
+ foo(t);
+}
+```
+
+```text
+<anon>:10:5: 10:8 error: the trait `Trait` is not implemented for the type `&mut i32` [E0277]
+<anon>:10 foo(t);
+ ^~~
+```
--- /dev/null
+% Concurrency and Parallelism
+
+Rust as a language doesn't *really* have an opinion on how to do concurrency or
+parallelism. The standard library exposes OS threads and blocking sys-calls
+because everyone has those, and they're uniform enough that you can provide
+an abstraction over them in a relatively uncontroversial way. Message passing,
+green threads, and async APIs are all diverse enough that any abstraction over
+them tends to involve trade-offs that we weren't willing to commit to for 1.0.
+
+However the way Rust models concurrency makes it relatively easy to design your
+own
+concurrency paradigm as a library and have everyone else's code Just Work
+with yours. Just require the right lifetimes and Send and Sync where appropriate
+and you're off to the races. Or rather, off to the... not... having... races.
--- /dev/null
+% Constructors
+
+There is exactly one way to create an instance of a user-defined type: name it,
+and initialize all its fields at once:
+
+```rust
+struct Foo {
+ a: u8,
+ b: u32,
+ c: bool,
+}
+
+enum Bar {
+ X(u32),
+ Y(bool),
+}
+
+struct Unit;
+
+let foo = Foo { a: 0, b: 1, c: false };
+let bar = Bar::X(0);
+let empty = Unit;
+```
+
+That's it. Every other way you make an instance of a type is just calling a
+totally vanilla function that does some stuff and eventually bottoms out to The
+One True Constructor.
+
+Unlike C++, Rust does not come with a slew of built-in kinds of constructor.
+There are no Copy, Default, Assignment, Move, or whatever constructors. The
+reasons for this are varied, but it largely boils down to Rust's philosophy of
+*being explicit*.
+
+Move constructors are meaningless in Rust because we don't enable types to
+"care" about their location in memory. Every type must be ready for it to be
+blindly memcopied to somewhere else in memory. This means pure on-the-stack-but-
+still-movable intrusive linked lists are simply not happening in Rust (safely).
+
+Assignment and copy constructors similarly don't exist because move semantics
+are the only semantics in Rust. At most `x = y` just moves the bits of y into
+the x variable. Rust does provide two facilities for providing C++'s copy-
+oriented semantics: `Copy` and `Clone`. Clone is our moral equivalent of a copy
+constructor, but it's never implicitly invoked. You have to explicitly call
+`clone` on an element you want to be cloned. Copy is a special case of Clone
+where the implementation is just "copy the bits". Copy types *are* implicitly
+cloned whenever they're moved, but because of the definition of Copy this just
+means not treating the old copy as uninitialized -- a no-op.
+
+While Rust provides a `Default` trait for specifying the moral equivalent of a
+default constructor, it's incredibly rare for this trait to be used. This is
+because variables [aren't implicitly initialized][uninit]. Default is basically
+only useful for generic programming. In concrete contexts, a type will provide a
+static `new` method for any kind of "default" constructor. This has no relation
+to `new` in other languages and has no special meaning. It's just a naming
+convention.
+
+TODO: talk about "placement new"?
+
+[uninit]: uninitialized.html
--- /dev/null
+% Type Conversions
+
+At the end of the day, everything is just a pile of bits somewhere, and type
+systems are just there to help us use those bits right. There are two common
+problems with typing bits: needing to reinterpret those exact bits as a
+different type, and needing to change the bits to have equivalent meaning for
+a different type. Because Rust encourages encoding important properties in the
+type system, these problems are incredibly pervasive. As such, Rust
+consequently gives you several ways to solve them.
+
+First we'll look at the ways that Safe Rust gives you to reinterpret values.
+The most trivial way to do this is to just destructure a value into its
+constituent parts and then build a new type out of them. e.g.
+
+```rust
+struct Foo {
+ x: u32,
+ y: u16,
+}
+
+struct Bar {
+ a: u32,
+ b: u16,
+}
+
+fn reinterpret(foo: Foo) -> Bar {
+ let Foo { x, y } = foo;
+ Bar { a: x, b: y }
+}
+```
+
+But this is, at best, annoying. For common conversions, Rust provides
+more ergonomic alternatives.
+
--- /dev/null
+% Data Representation in Rust
+
+Low-level programming cares a lot about data layout. It's a big deal. It also
+pervasively influences the rest of the language, so we're going to start by
+digging into how data is represented in Rust.
--- /dev/null
+% Destructors
+
+What the language *does* provide is full-blown automatic destructors through the
+`Drop` trait, which provides the following method:
+
+```rust,ignore
+fn drop(&mut self);
+```
+
+This method gives the type time to somehow finish what it was doing.
+
+**After `drop` is run, Rust will recursively try to drop all of the fields
+of `self`.**
+
+This is a convenience feature so that you don't have to write "destructor
+boilerplate" to drop children. If a struct has no special logic for being
+dropped other than dropping its children, then it means `Drop` doesn't need to
+be implemented at all!
+
+**There is no stable way to prevent this behaviour in Rust 1.0.**
+
+Note that taking `&mut self` means that even if you could suppress recursive
+Drop, Rust will prevent you from e.g. moving fields out of self. For most types,
+this is totally fine.
+
+For instance, a custom implementation of `Box` might write `Drop` like this:
+
+```rust
+#![feature(heap_api, core_intrinsics, unique)]
+
+use std::rt::heap;
+use std::ptr::Unique;
+use std::intrinsics::drop_in_place;
+use std::mem;
+
+struct Box<T>{ ptr: Unique<T> }
+
+impl<T> Drop for Box<T> {
+ fn drop(&mut self) {
+ unsafe {
+ drop_in_place(*self.ptr);
+ heap::deallocate((*self.ptr) as *mut u8,
+ mem::size_of::<T>(),
+ mem::align_of::<T>());
+ }
+ }
+}
+```
+
+and this works fine because when Rust goes to drop the `ptr` field it just sees
+a [Unique][] that has no actual `Drop` implementation. Similarly nothing can
+use-after-free the `ptr` because when drop exits, it becomes inaccessible.
+
+However this wouldn't work:
+
+```rust
+#![feature(heap_api, core_intrinsics, unique)]
+
+use std::rt::heap;
+use std::ptr::Unique;
+use std::intrinsics::drop_in_place;
+use std::mem;
+
+struct Box<T>{ ptr: Unique<T> }
+
+impl<T> Drop for Box<T> {
+ fn drop(&mut self) {
+ unsafe {
+ drop_in_place(*self.ptr);
+ heap::deallocate((*self.ptr) as *mut u8,
+ mem::size_of::<T>(),
+ mem::align_of::<T>());
+ }
+ }
+}
+
+struct SuperBox<T> { my_box: Box<T> }
+
+impl<T> Drop for SuperBox<T> {
+ fn drop(&mut self) {
+ unsafe {
+ // Hyper-optimized: deallocate the box's contents for it
+ // without `drop`ing the contents
+ heap::deallocate((*self.my_box.ptr) as *mut u8,
+ mem::size_of::<T>(),
+ mem::align_of::<T>());
+ }
+ }
+}
+```
+
+After we deallocate the `box`'s ptr in SuperBox's destructor, Rust will
+happily proceed to tell the box to Drop itself and everything will blow up with
+use-after-frees and double-frees.
+
+Note that the recursive drop behaviour applies to all structs and enums
+regardless of whether they implement Drop. Therefore something like
+
+```rust
+struct Boxy<T> {
+ data1: Box<T>,
+ data2: Box<T>,
+ info: u32,
+}
+```
+
+will have its data1 and data2 fields' destructors called whenever it "would" be
+dropped, even though it itself doesn't implement Drop. We say that such a type
+*needs Drop*, even though it is not itself Drop.
+
+Similarly,
+
+```rust
+enum Link {
+ Next(Box<Link>),
+ None,
+}
+```
+
+will have its inner Box field dropped if and only if an instance stores the
+Next variant.
+
+In general this works really nice because you don't need to worry about
+adding/removing drops when you refactor your data layout. Still there's
+certainly many valid usecases for needing to do trickier things with
+destructors.
+
+The classic safe solution to overriding recursive drop and allowing moving out
+of Self during `drop` is to use an Option:
+
+```rust
+#![feature(heap_api, core_intrinsics, unique)]
+
+use std::rt::heap;
+use std::ptr::Unique;
+use std::intrinsics::drop_in_place;
+use std::mem;
+
+struct Box<T>{ ptr: Unique<T> }
+
+impl<T> Drop for Box<T> {
+ fn drop(&mut self) {
+ unsafe {
+ drop_in_place(*self.ptr);
+ heap::deallocate((*self.ptr) as *mut u8,
+ mem::size_of::<T>(),
+ mem::align_of::<T>());
+ }
+ }
+}
+
+struct SuperBox<T> { my_box: Option<Box<T>> }
+
+impl<T> Drop for SuperBox<T> {
+ fn drop(&mut self) {
+ unsafe {
+ // Hyper-optimized: deallocate the box's contents for it
+ // without `drop`ing the contents. Need to set the `box`
+ // field as `None` to prevent Rust from trying to Drop it.
+ let my_box = self.my_box.take().unwrap();
+ heap::deallocate((*my_box.ptr) as *mut u8,
+ mem::size_of::<T>(),
+ mem::align_of::<T>());
+ mem::forget(my_box);
+ }
+ }
+}
+```
+
+However this has fairly odd semantics: you're saying that a field that *should*
+always be Some *may* be None, just because that happens in the destructor. Of
+course this conversely makes a lot of sense: you can call arbitrary methods on
+self during the destructor, and this should prevent you from ever doing so after
+deinitializing the field. Not that it will prevent you from producing any other
+arbitrarily invalid state in there.
+
+On balance this is an ok choice. Certainly what you should reach for by default.
+However, in the future we expect there to be a first-class way to announce that
+a field shouldn't be automatically dropped.
+
+[Unique]: phantom-data.html
--- /dev/null
+% The Dot Operator
+
+The dot operator will perform a lot of magic to convert types. It will perform
+auto-referencing, auto-dereferencing, and coercion until types match.
+
+TODO: steal information from http://stackoverflow.com/questions/28519997/what-are-rusts-exact-auto-dereferencing-rules/28552082#28552082
--- /dev/null
+% Drop Flags
+
+The examples in the previous section introduce an interesting problem for Rust.
+We have seen that it's possible to conditionally initialize, deinitialize, and
+reinitialize locations of memory totally safely. For Copy types, this isn't
+particularly notable since they're just a random pile of bits. However types
+with destructors are a different story: Rust needs to know whether to call a
+destructor whenever a variable is assigned to, or a variable goes out of scope.
+How can it do this with conditional initialization?
+
+Note that this is not a problem that all assignments need worry about. In
+particular, assigning through a dereference unconditionally drops, and assigning
+in a `let` unconditionally doesn't drop:
+
+```
+let mut x = Box::new(0); // let makes a fresh variable, so never need to drop
+let y = &mut x;
+*y = Box::new(1); // Deref assumes the referent is initialized, so always drops
+```
+
+This is only a problem when overwriting a previously initialized variable or
+one of its subfields.
+
+It turns out that Rust actually tracks whether a type should be dropped or not
+*at runtime*. As a variable becomes initialized and uninitialized, a *drop flag*
+for that variable is toggled. When a variable might need to be dropped, this
+flag is evaluated to determine if it should be dropped.
+
+Of course, it is often the case that a value's initialization state can be
+statically known at every point in the program. If this is the case, then the
+compiler can theoretically generate more efficient code! For instance, straight-
+line code has such *static drop semantics*:
+
+```rust
+let mut x = Box::new(0); // x was uninit; just overwrite.
+let mut y = x; // y was uninit; just overwrite and make x uninit.
+x = Box::new(0); // x was uninit; just overwrite.
+y = x; // y was init; Drop y, overwrite it, and make x uninit!
+ // y goes out of scope; y was init; Drop y!
+ // x goes out of scope; x was uninit; do nothing.
+```
+
+Similarly, branched code where all branches have the same behaviour with respect
+to initialization has static drop semantics:
+
+```rust
+# let condition = true;
+let mut x = Box::new(0); // x was uninit; just overwrite.
+if condition {
+ drop(x) // x gets moved out; make x uninit.
+} else {
+ println!("{}", x);
+ drop(x) // x gets moved out; make x uninit.
+}
+x = Box::new(0); // x was uninit; just overwrite.
+ // x goes out of scope; x was init; Drop x!
+```
+
+However code like this *requires* runtime information to correctly Drop:
+
+```rust
+# let condition = true;
+let x;
+if condition {
+ x = Box::new(0); // x was uninit; just overwrite.
+ println!("{}", x);
+}
+ // x goes out of scope; x might be uninit;
+ // check the flag!
+```
+
+Of course, in this case it's trivial to retrieve static drop semantics:
+
+```rust
+# let condition = true;
+if condition {
+ let x = Box::new(0);
+ println!("{}", x);
+}
+```
+
+As of Rust 1.0, the drop flags are actually not-so-secretly stashed in a hidden
+field of any type that implements Drop. Rust sets the drop flag by overwriting
+the entire value with a particular bit pattern. This is pretty obviously Not
+The Fastest and causes a bunch of trouble with optimizing code. It's legacy from
+a time when you could do much more complex conditional initialization.
+
+As such work is currently under way to move the flags out onto the stack frame
+where they more reasonably belong. Unfortunately, this work will take some time
+as it requires fairly substantial changes to the compiler.
+
+Regardless, Rust programs don't need to worry about uninitialized values on
+the stack for correctness. Although they might care for performance. Thankfully,
+Rust makes it easy to take control here! Uninitialized values are there, and
+you can work with them in Safe Rust, but you're never in danger.
--- /dev/null
+% Drop Check
+
+We have seen how lifetimes provide us some fairly simple rules for ensuring
+that we never read dangling references. However up to this point we have only
+ever
+interacted with the *outlives* relationship in an inclusive manner. That is,
+when we talked about `'a: 'b`, it was ok for `'a` to live *exactly* as long as
+`'b`. At first glance, this seems to be a meaningless distinction. Nothing ever
+gets dropped at the same time as another, right? This is why we used the
+following desugaring of `let` statements:
+
+```rust,ignore
+let x;
+let y;
+```
+
+```rust,ignore
+{
+ let x;
+ {
+ let y;
+ }
+}
+```
+
+Each creates its own scope, clearly establishing that one drops before the
+other. However, what if we do the following?
+
+```rust,ignore
+let (x, y) = (vec![], vec![]);
+```
+
+Does either value strictly outlive the other? The answer is in fact *no*,
+neither value strictly outlives the other. Of course, one of x or y will be
+dropped before the other, but the actual order is not specified. Tuples aren't
+special in this regard; composite structures just don't guarantee their
+destruction order as of Rust 1.0.
+
+We *could* specify this for the fields of built-in composites like tuples and
+structs. However, what about something like Vec? Vec has to manually drop its
+elements via pure-library code. In general, anything that implements Drop has
+a chance to fiddle with its innards during its final death knell. Therefore
+the compiler can't sufficiently reason about the actual destruction order
+of the contents of any type that implements Drop.
+
+So why do we care? We care because if the type system isn't careful, it could
+accidentally make dangling pointers. Consider the following simple program:
+
+```rust
+struct Inspector<'a>(&'a u8);
+
+fn main() {
+ let (inspector, days);
+ days = Box::new(1);
+ inspector = Inspector(&days);
+}
+```
+
+This program is totally sound and compiles today. The fact that `days` does
+not *strictly* outlive `inspector` doesn't matter. As long as the `inspector`
+is alive, so is days.
+
+However if we add a destructor, the program will no longer compile!
+
+```rust,ignore
+struct Inspector<'a>(&'a u8);
+
+impl<'a> Drop for Inspector<'a> {
+ fn drop(&mut self) {
+ println!("I was only {} days from retirement!", self.0);
+ }
+}
+
+fn main() {
+ let (inspector, days);
+ days = Box::new(1);
+ inspector = Inspector(&days);
+ // Let's say `days` happens to get dropped first.
+ // Then when Inspector is dropped, it will try to read free'd memory!
+}
+```
+
+```text
+<anon>:12:28: 12:32 error: `days` does not live long enough
+<anon>:12 inspector = Inspector(&days);
+ ^~~~
+<anon>:9:11: 15:2 note: reference must be valid for the block at 9:10...
+<anon>:9 fn main() {
+<anon>:10 let (inspector, days);
+<anon>:11 days = Box::new(1);
+<anon>:12 inspector = Inspector(&days);
+<anon>:13 // Let's say `days` happens to get dropped first.
+<anon>:14 // Then when Inspector is dropped, it will try to read free'd memory!
+ ...
+<anon>:10:27: 15:2 note: ...but borrowed value is only valid for the block suffix following statement 0 at 10:26
+<anon>:10 let (inspector, days);
+<anon>:11 days = Box::new(1);
+<anon>:12 inspector = Inspector(&days);
+<anon>:13 // Let's say `days` happens to get dropped first.
+<anon>:14 // Then when Inspector is dropped, it will try to read free'd memory!
+<anon>:15 }
+```
+
+Implementing Drop lets the Inspector execute some arbitrary code during its
+death. This means it can potentially observe that types that are supposed to
+live as long as it does actually were destroyed first.
+
+Interestingly, only generic types need to worry about this. If they aren't
+generic, then the only lifetimes they can harbor are `'static`, which will truly
+live *forever*. This is why this problem is referred to as *sound generic drop*.
+Sound generic drop is enforced by the *drop checker*. As of this writing, some
+of the finer details of how the drop checker validates types is totally up in
+the air. However The Big Rule is the subtlety that we have focused on this whole
+section:
+
+**For a generic type to soundly implement drop, its generics arguments must
+strictly outlive it.**
+
+This rule is sufficient but not necessary to satisfy the drop checker. That is,
+if your type obeys this rule then it's definitely sound to drop. However
+there are special cases where you can fail to satisfy this, but still
+successfully pass the borrow checker. These are the precise rules that are
+currently up in the air.
+
+It turns out that when writing unsafe code, we generally don't need to
+worry at all about doing the right thing for the drop checker. However there
+is one special case that you need to worry about, which we will look at in
+the next section.
--- /dev/null
+% Exception Safety
+
+Although programs should use unwinding sparingly, there's a lot of code that
+*can* panic. If you unwrap a None, index out of bounds, or divide by 0, your
+program will panic. On debug builds, every arithmetic operation can panic
+if it overflows. Unless you are very careful and tightly control what code runs,
+pretty much everything can unwind, and you need to be ready for it.
+
+Being ready for unwinding is often referred to as *exception safety*
+in the broader programming world. In Rust, there are two levels of exception
+safety that one may concern themselves with:
+
+* In unsafe code, we *must* be exception safe to the point of not violating
+ memory safety. We'll call this *minimal* exception safety.
+
+* In safe code, it is *good* to be exception safe to the point of your program
+ doing the right thing. We'll call this *maximal* exception safety.
+
+As is the case in many places in Rust, Unsafe code must be ready to deal with
+bad Safe code when it comes to unwinding. Code that transiently creates
+unsound states must be careful that a panic does not cause that state to be
+used. Generally this means ensuring that only non-panicking code is run while
+these states exist, or making a guard that cleans up the state in the case of
+a panic. This does not necessarily mean that the state a panic witnesses is a
+fully coherent state. We need only guarantee that it's a *safe* state.
+
+Most Unsafe code is leaf-like, and therefore fairly easy to make exception-safe.
+It controls all the code that runs, and most of that code can't panic. However
+it is not uncommon for Unsafe code to work with arrays of temporarily
+uninitialized data while repeatedly invoking caller-provided code. Such code
+needs to be careful and consider exception safety.
+
+
+
+
+
+## Vec::push_all
+
+`Vec::push_all` is a temporary hack to get extending a Vec by a slice reliably
+efficient without specialization. Here's a simple implementation:
+
+```rust,ignore
+impl<T: Clone> Vec<T> {
+ fn push_all(&mut self, to_push: &[T]) {
+ self.reserve(to_push.len());
+ unsafe {
+ // can't overflow because we just reserved this
+ self.set_len(self.len() + to_push.len());
+
+ for (i, x) in to_push.iter().enumerate() {
+ self.ptr().offset(i as isize).write(x.clone());
+ }
+ }
+ }
+}
+```
+
+We bypass `push` in order to avoid redundant capacity and `len` checks on the
+Vec that we definitely know has capacity. The logic is totally correct, except
+there's a subtle problem with our code: it's not exception-safe! `set_len`,
+`offset`, and `write` are all fine; `clone` is the panic bomb we over-looked.
+
+Clone is completely out of our control, and is totally free to panic. If it
+does, our function will exit early with the length of the Vec set too large. If
+the Vec is looked at or dropped, uninitialized memory will be read!
+
+The fix in this case is fairly simple. If we want to guarantee that the values
+we *did* clone are dropped, we can set the `len` every loop iteration. If we
+just want to guarantee that uninitialized memory can't be observed, we can set
+the `len` after the loop.
+
+
+
+
+
+## BinaryHeap::sift_up
+
+Bubbling an element up a heap is a bit more complicated than extending a Vec.
+The pseudocode is as follows:
+
+```text
+bubble_up(heap, index):
+ while index != 0 && heap[index] < heap[parent(index)]:
+ heap.swap(index, parent(index))
+ index = parent(index)
+
+```
+
+A literal transcription of this code to Rust is totally fine, but has an annoying
+performance characteristic: the `self` element is swapped over and over again
+uselessly. We would rather have the following:
+
+```text
+bubble_up(heap, index):
+ let elem = heap[index]
+ while index != 0 && element < heap[parent(index)]:
+ heap[index] = heap[parent(index)]
+ index = parent(index)
+ heap[index] = elem
+```
+
+This code ensures that each element is copied as little as possible (it is in
+fact necessary that elem be copied twice in general). However it now exposes
+some exception safety trouble! At all times, there exists two copies of one
+value. If we panic in this function something will be double-dropped.
+Unfortunately, we also don't have full control of the code: that comparison is
+user-defined!
+
+Unlike Vec, the fix isn't as easy here. One option is to break the user-defined
+code and the unsafe code into two separate phases:
+
+```text
+bubble_up(heap, index):
+ let end_index = index;
+ while end_index != 0 && heap[end_index] < heap[parent(end_index)]:
+ end_index = parent(end_index)
+
+ let elem = heap[index]
+ while index != end_index:
+ heap[index] = heap[parent(index)]
+ index = parent(index)
+ heap[index] = elem
+```
+
+If the user-defined code blows up, that's no problem anymore, because we haven't
+actually touched the state of the heap yet. Once we do start messing with the
+heap, we're working with only data and functions that we trust, so there's no
+concern of panics.
+
+Perhaps you're not happy with this design. Surely it's cheating! And we have
+to do the complex heap traversal *twice*! Alright, let's bite the bullet. Let's
+intermix untrusted and unsafe code *for reals*.
+
+If Rust had `try` and `finally` like in Java, we could do the following:
+
+```text
+bubble_up(heap, index):
+ let elem = heap[index]
+ try:
+ while index != 0 && element < heap[parent(index)]:
+ heap[index] = heap[parent(index)]
+ index = parent(index)
+ finally:
+ heap[index] = elem
+```
+
+The basic idea is simple: if the comparison panics, we just toss the loose
+element in the logically uninitialized index and bail out. Anyone who observes
+the heap will see a potentially *inconsistent* heap, but at least it won't
+cause any double-drops! If the algorithm terminates normally, then this
+operation happens to coincide precisely with how we finish up regardless.
+
+Sadly, Rust has no such construct, so we're going to need to roll our own! The
+way to do this is to store the algorithm's state in a separate struct with a
+destructor for the "finally" logic. Whether we panic or not, that destructor
+will run and clean up after us.
+
+```rust,ignore
+struct Hole<'a, T: 'a> {
+ data: &'a mut [T],
+ /// `elt` is always `Some` from new until drop.
+ elt: Option<T>,
+ pos: usize,
+}
+
+impl<'a, T> Hole<'a, T> {
+ fn new(data: &'a mut [T], pos: usize) -> Self {
+ unsafe {
+ let elt = ptr::read(&data[pos]);
+ Hole {
+ data: data,
+ elt: Some(elt),
+ pos: pos,
+ }
+ }
+ }
+
+ fn pos(&self) -> usize { self.pos }
+
+ fn removed(&self) -> &T { self.elt.as_ref().unwrap() }
+
+ unsafe fn get(&self, index: usize) -> &T { &self.data[index] }
+
+ unsafe fn move_to(&mut self, index: usize) {
+ let index_ptr: *const _ = &self.data[index];
+ let hole_ptr = &mut self.data[self.pos];
+ ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+ self.pos = index;
+ }
+}
+
+impl<'a, T> Drop for Hole<'a, T> {
+ fn drop(&mut self) {
+ // fill the hole again
+ unsafe {
+ let pos = self.pos;
+ ptr::write(&mut self.data[pos], self.elt.take().unwrap());
+ }
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ fn sift_up(&mut self, pos: usize) {
+ unsafe {
+ // Take out the value at `pos` and create a hole.
+ let mut hole = Hole::new(&mut self.data, pos);
+
+ while hole.pos() != 0 {
+ let parent = parent(hole.pos());
+ if hole.removed() <= hole.get(parent) { break }
+ hole.move_to(parent);
+ }
+ // Hole will be unconditionally filled here; panic or not!
+ }
+ }
+}
+```
--- /dev/null
+% Exotically Sized Types
+
+Most of the time, we think in terms of types with a fixed, positive size. This
+is not always the case, however.
+
+
+
+
+
+# Dynamically Sized Types (DSTs)
+
+Rust in fact supports Dynamically Sized Types (DSTs): types without a statically
+known size or alignment. On the surface, this is a bit nonsensical: Rust *must*
+know the size and alignment of something in order to correctly work with it! In
+this regard, DSTs are not normal types. Due to their lack of a statically known
+size, these types can only exist behind some kind of pointer. Any pointer to a
+DST consequently becomes a *fat* pointer consisting of the pointer and the
+information that "completes" them (more on this below).
+
+There are two major DSTs exposed by the language: trait objects, and slices.
+
+A trait object represents some type that implements the traits it specifies.
+The exact original type is *erased* in favour of runtime reflection
+with a vtable containing all the information necessary to use the type.
+This is the information that completes a trait object: a pointer to its vtable.
+
+A slice is simply a view into some contiguous storage -- typically an array or
+`Vec`. The information that completes a slice is just the number of elements
+it points to.
+
+Structs can actually store a single DST directly as their last field, but this
+makes them a DST as well:
+
+```rust
+// Can't be stored on the stack directly
+struct Foo {
+ info: u32,
+ data: [u8],
+}
+```
+
+**NOTE: [As of Rust 1.0 struct DSTs are broken if the last field has
+a variable position based on its alignment][dst-issue].**
+
+
+
+
+
+# Zero Sized Types (ZSTs)
+
+Rust actually allows types to be specified that occupy no space:
+
+```rust
+struct Foo; // No fields = no size
+
+// All fields have no size = no size
+struct Baz {
+ foo: Foo,
+ qux: (), // empty tuple has no size
+ baz: [u8; 0], // empty array has no size
+}
+```
+
+On their own, Zero Sized Types (ZSTs) are, for obvious reasons, pretty useless.
+However as with many curious layout choices in Rust, their potential is realized
+in a generic context: Rust largely understands that any operation that produces
+or stores a ZST can be reduced to a no-op. First off, storing it doesn't even
+make sense -- it doesn't occupy any space. Also there's only one value of that
+type, so anything that loads it can just produce it from the aether -- which is
+also a no-op since it doesn't occupy any space.
+
+One of the most extreme examples of this is Sets and Maps. Given a
+`Map<Key, Value>`, it is common to implement a `Set<Key>` as just a thin wrapper
+around `Map<Key, UselessJunk>`. In many languages, this would necessitate
+allocating space for UselessJunk and doing work to store and load UselessJunk
+only to discard it. Proving this unnecessary would be a difficult analysis for
+the compiler.
+
+However in Rust, we can just say that `Set<Key> = Map<Key, ()>`. Now Rust
+statically knows that every load and store is useless, and no allocation has any
+size. The result is that the monomorphized code is basically a custom
+implementation of a HashSet with none of the overhead that HashMap would have to
+support values.
+
+Safe code need not worry about ZSTs, but *unsafe* code must be careful about the
+consequence of types with no size. In particular, pointer offsets are no-ops,
+and standard allocators (including jemalloc, the one used by default in Rust)
+generally consider passing in `0` for the size of an allocation as Undefined
+Behaviour.
+
+
+
+
+
+# Empty Types
+
+Rust also enables types to be declared that *cannot even be instantiated*. These
+types can only be talked about at the type level, and never at the value level.
+Empty types can be declared by specifying an enum with no variants:
+
+```rust
+enum Void {} // No variants = EMPTY
+```
+
+Empty types are even more marginal than ZSTs. The primary motivating example for
+Void types is type-level unreachability. For instance, suppose an API needs to
+return a Result in general, but a specific case actually is infallible. It's
+actually possible to communicate this at the type level by returning a
+`Result<T, Void>`. Consumers of the API can confidently unwrap such a Result
+knowing that it's *statically impossible* for this value to be an `Err`, as
+this would require providing a value of type `Void`.
+
+In principle, Rust can do some interesting analyses and optimizations based
+on this fact. For instance, `Result<T, Void>` could be represented as just `T`,
+because the `Err` case doesn't actually exist. The following *could* also
+compile:
+
+```rust,ignore
+enum Void {}
+
+let res: Result<u32, Void> = Ok(0);
+
+// Err doesn't exist anymore, so Ok is actually irrefutable.
+let Ok(num) = res;
+```
+
+But neither of these tricks work today, so all Void types get you is
+the ability to be confident that certain situations are statically impossible.
+
+One final subtle detail about empty types is that raw pointers to them are
+actually valid to construct, but dereferencing them is Undefined Behaviour
+because that doesn't actually make sense. That is, you could model C's `void *`
+type with `*const Void`, but this doesn't necessarily gain anything over using
+e.g. `*const ()`, which *is* safe to randomly dereference.
+
+
+[dst-issue]: https://github.com/rust-lang/rust/issues/26403
--- /dev/null
+% Higher-Rank Trait Bounds (HRTBs)
+
+Rust's `Fn` traits are a little bit magic. For instance, we can write the
+following code:
+
+```rust
+struct Closure<F> {
+ data: (u8, u16),
+ func: F,
+}
+
+impl<F> Closure<F>
+ where F: Fn(&(u8, u16)) -> &u8,
+{
+ fn call(&self) -> &u8 {
+ (self.func)(&self.data)
+ }
+}
+
+fn do_it(data: &(u8, u16)) -> &u8 { &data.0 }
+
+fn main() {
+ let clo = Closure { data: (0, 1), func: do_it };
+ println!("{}", clo.call());
+}
+```
+
+If we try to naively desugar this code in the same way that we did in the
+lifetimes section, we run into some trouble:
+
+```rust,ignore
+struct Closure<F> {
+ data: (u8, u16),
+ func: F,
+}
+
+impl<F> Closure<F>
+ // where F: Fn(&'??? (u8, u16)) -> &'??? u8,
+{
+ fn call<'a>(&'a self) -> &'a u8 {
+ (self.func)(&self.data)
+ }
+}
+
+fn do_it<'b>(data: &'b (u8, u16)) -> &'b u8 { &'b data.0 }
+
+fn main() {
+ 'x: {
+ let clo = Closure { data: (0, 1), func: do_it };
+ println!("{}", clo.call());
+ }
+}
+```
+
+How on earth are we supposed to express the lifetimes on `F`'s trait bound? We
+need to provide some lifetime there, but the lifetime we care about can't be
+named until we enter the body of `call`! Also, that isn't some fixed lifetime;
+`call` works with *any* lifetime `&self` happens to have at that point.
+
+This job requires The Magic of Higher-Rank Trait Bounds (HRTBs). The way we
+desugar this is as follows:
+
+```rust,ignore
+where for<'a> F: Fn(&'a (u8, u16)) -> &'a u8,
+```
+
+(Where `Fn(a, b, c) -> d` is itself just sugar for the unstable *real* `Fn`
+trait)
+
+`for<'a>` can be read as "for all choices of `'a`", and basically produces an
+*infinite list* of trait bounds that F must satisfy. Intense. There aren't many
+places outside of the `Fn` traits where we encounter HRTBs, and even for
+those we have a nice magic sugar for the common cases.
--- /dev/null
+% Leaking
+
+Ownership-based resource management is intended to simplify composition. You
+acquire resources when you create the object, and you release the resources when
+it gets destroyed. Since destruction is handled for you, it means you can't
+forget to release the resources, and it happens as soon as possible! Surely this
+is perfect and all of our problems are solved.
+
+Everything is terrible and we have new and exotic problems to try to solve.
+
+Many people like to believe that Rust eliminates resource leaks. In practice,
+this is basically true. You would be surprised to see a Safe Rust program
+leak resources in an uncontrolled way.
+
+However from a theoretical perspective this is absolutely not the case, no
+matter how you look at it. In the strictest sense, "leaking" is so abstract as
+to be unpreventable. It's quite trivial to initialize a collection at the start
+of a program, fill it with tons of objects with destructors, and then enter an
+infinite event loop that never refers to it. The collection will sit around
+uselessly, holding on to its precious resources until the program terminates (at
+which point all those resources would have been reclaimed by the OS anyway).
+
+We may consider a more restricted form of leak: failing to drop a value that is
+unreachable. Rust also doesn't prevent this. In fact Rust *has a function for
+doing this*: `mem::forget`. This function consumes the value it is passed *and
+then doesn't run its destructor*.
+
+In the past `mem::forget` was marked as unsafe as a sort of lint against using
+it, since failing to call a destructor is generally not a well-behaved thing to
+do (though useful for some special unsafe code). However this was generally
+determined to be an untenable stance to take: there are many ways to fail to
+call a destructor in safe code. The most famous example is creating a cycle of
+reference-counted pointers using interior mutability.
+
+It is reasonable for safe code to assume that destructor leaks do not happen, as
+any program that leaks destructors is probably wrong. However *unsafe* code
+cannot rely on destructors to be run in order to be safe. For most types this
+doesn't matter: if you leak the destructor then the type is by definition
+inaccessible, so it doesn't matter, right? For instance, if you leak a `Box<u8>`
+then you waste some memory but that's hardly going to violate memory-safety.
+
+However where we must be careful with destructor leaks are *proxy* types. These
+are types which manage access to a distinct object, but don't actually own it.
+Proxy objects are quite rare. Proxy objects you'll need to care about are even
+rarer. However we'll focus on three interesting examples in the standard
+library:
+
+* `vec::Drain`
+* `Rc`
+* `thread::scoped::JoinGuard`
+
+
+
+## Drain
+
+`drain` is a collections API that moves data out of the container without
+consuming the container. This enables us to reuse the allocation of a `Vec`
+after claiming ownership over all of its contents. It produces an iterator
+(Drain) that returns the contents of the Vec by-value.
+
+Now, consider Drain in the middle of iteration: some values have been moved out,
+and others haven't. This means that part of the Vec is now full of logically
+uninitialized data! We could backshift all the elements in the Vec every time we
+remove a value, but this would have pretty catastrophic performance
+consequences.
+
+Instead, we would like Drain to fix the Vec's backing storage when it is
+dropped. It should run itself to completion, backshift any elements that weren't
+removed (drain supports subranges), and then fix Vec's `len`. It's even
+unwinding-safe! Easy!
+
+Now consider the following:
+
+```rust,ignore
+let mut vec = vec![Box::new(0); 4];
+
+{
+ // start draining, vec can no longer be accessed
+ let mut drainer = vec.drain(..);
+
+ // pull out two elements and immediately drop them
+ drainer.next();
+ drainer.next();
+
+ // get rid of drainer, but don't call its destructor
+ mem::forget(drainer);
+}
+
+// Oops, vec[0] was dropped, we're reading a pointer into free'd memory!
+println!("{}", vec[0]);
+```
+
+This is pretty clearly Not Good. Unfortunately, we're kind of stuck between a
+rock and a hard place: maintaining consistent state at every step has an
+enormous cost (and would negate any benefits of the API). Failing to maintain
+consistent state gives us Undefined Behaviour in safe code (making the API
+unsound).
+
+So what can we do? Well, we can pick a trivially consistent state: set the Vec's
+len to be 0 when we start the iteration, and fix it up if necessary in the
+destructor. That way, if everything executes like normal we get the desired
+behaviour with minimal overhead. But if someone has the *audacity* to
+mem::forget us in the middle of the iteration, all that does is *leak even more*
+(and possibly leave the Vec in an unexpected but otherwise consistent state).
+Since we've accepted that mem::forget is safe, this is definitely safe. We call
+leaks causing more leaks a *leak amplification*.
+
+
+
+
+## Rc
+
+Rc is an interesting case because at first glance it doesn't appear to be a
+proxy value at all. After all, it manages the data it points to, and dropping
+all the Rcs for a value will drop that value. Leaking an Rc doesn't seem like it
+would be particularly dangerous. It will leave the refcount permanently
+incremented and prevent the data from being freed or dropped, but that seems
+just like Box, right?
+
+Nope.
+
+Let's consider a simplified implementation of Rc:
+
+```rust,ignore
+struct Rc<T> {
+ ptr: *mut RcBox<T>,
+}
+
+struct RcBox<T> {
+ data: T,
+ ref_count: usize,
+}
+
+impl<T> Rc<T> {
+ fn new(data: T) -> Self {
+ unsafe {
+ // Wouldn't it be nice if heap::allocate worked like this?
+ let ptr = heap::allocate<RcBox<T>>();
+ ptr::write(ptr, RcBox {
+ data: data,
+ ref_count: 1,
+ });
+ Rc { ptr: ptr }
+ }
+ }
+
+ fn clone(&self) -> Self {
+ unsafe {
+ (*self.ptr).ref_count += 1;
+ }
+ Rc { ptr: self.ptr }
+ }
+}
+
+impl<T> Drop for Rc<T> {
+ fn drop(&mut self) {
+        unsafe {
+            (*self.ptr).ref_count -= 1;
+ if (*self.ptr).ref_count == 0 {
+ // drop the data and then free it
+ ptr::read(self.ptr);
+ heap::deallocate(self.ptr);
+ }
+ }
+ }
+}
+```
+
+This code contains an implicit and subtle assumption: `ref_count` can fit in a
+`usize`, because there can't be more than `usize::MAX` Rcs in memory. However
+this itself assumes that the `ref_count` accurately reflects the number of Rcs
+in memory, which we know is false with `mem::forget`. Using `mem::forget` we can
+overflow the `ref_count`, and then get it down to 0 with outstanding Rcs. Then
+we can happily use-after-free the inner data. Bad Bad Not Good.
+
+This can be solved by just checking the `ref_count` and doing *something*. The
+standard library's stance is to just abort, because your program has become
+horribly degenerate. Also *oh my gosh* it's such a ridiculous corner case.
+
+
+
+
+## thread::scoped::JoinGuard
+
+The thread::scoped API intends to allow threads to be spawned that reference
+data on their parent's stack without any synchronization over that data by
+ensuring the parent joins the thread before any of the shared data goes out
+of scope.
+
+```rust,ignore
+pub fn scoped<'a, F>(f: F) -> JoinGuard<'a>
+ where F: FnOnce() + Send + 'a
+```
+
+Here `f` is some closure for the other thread to execute. Saying that
+`F: Send + 'a` is saying that it closes over data that lives for `'a`, and it
+either owns that data or the data was Sync (implying `&data` is Send).
+
+Because JoinGuard has a lifetime, it keeps all the data it closes over
+borrowed in the parent thread. This means the JoinGuard can't outlive
+the data that the other thread is working on. When the JoinGuard *does* get
+dropped it blocks the parent thread, ensuring the child terminates before any
+of the closed-over data goes out of scope in the parent.
+
+Usage looked like:
+
+```rust,ignore
+let mut data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+{
+    let mut guards = vec![];
+ for x in &mut data {
+ // Move the mutable reference into the closure, and execute
+ // it on a different thread. The closure has a lifetime bound
+ // by the lifetime of the mutable reference `x` we store in it.
+ // The guard that is returned is in turn assigned the lifetime
+ // of the closure, so it also mutably borrows `data` as `x` did.
+ // This means we cannot access `data` until the guard goes away.
+ let guard = thread::scoped(move || {
+ *x *= 2;
+ });
+ // store the thread's guard for later
+ guards.push(guard);
+ }
+ // All guards are dropped here, forcing the threads to join
+ // (this thread blocks here until the others terminate).
+ // Once the threads join, the borrow expires and the data becomes
+ // accessible again in this thread.
+}
+// data is definitely mutated here.
+```
+
+In principle, this totally works! Rust's ownership system perfectly ensures it!
+...except it relies on a destructor being called to be safe.
+
+```rust,ignore
+let mut data = Box::new(0);
+{
+ let guard = thread::scoped(|| {
+ // This is at best a data race. At worst, it's also a use-after-free.
+ *data += 1;
+ });
+    // The guard is forgotten, expiring the loan without blocking this
+ // thread.
+ mem::forget(guard);
+}
+// So the Box is dropped here while the scoped thread may or may not be trying
+// to access it.
+```
+
+Dang. Here the destructor running was pretty fundamental to the API, and it had
+to be scrapped in favour of a completely different design.
--- /dev/null
+% Lifetime Elision
+
+In order to make common patterns more ergonomic, Rust allows lifetimes to be
+*elided* in function signatures.
+
+A *lifetime position* is anywhere you can write a lifetime in a type:
+
+```rust,ignore
+&'a T
+&'a mut T
+T<'a>
+```
+
+Lifetime positions can appear as either "input" or "output":
+
+* For `fn` definitions, input refers to the types of the formal arguments
+ in the `fn` definition, while output refers to
+ result types. So `fn foo(s: &str) -> (&str, &str)` has elided one lifetime in
+ input position and two lifetimes in output position.
+ Note that the input positions of a `fn` method definition do not
+ include the lifetimes that occur in the method's `impl` header
+ (nor lifetimes that occur in the trait header, for a default method).
+
+* In the future, it should be possible to elide `impl` headers in the same manner.
+
+Elision rules are as follows:
+
+* Each elided lifetime in input position becomes a distinct lifetime
+ parameter.
+
+* If there is exactly one input lifetime position (elided or not), that lifetime
+ is assigned to *all* elided output lifetimes.
+
+* If there are multiple input lifetime positions, but one of them is `&self` or
+ `&mut self`, the lifetime of `self` is assigned to *all* elided output lifetimes.
+
+* Otherwise, it is an error to elide an output lifetime.
+
+Examples:
+
+```rust,ignore
+fn print(s: &str); // elided
+fn print<'a>(s: &'a str); // expanded
+
+fn debug(lvl: uint, s: &str); // elided
+fn debug<'a>(lvl: uint, s: &'a str); // expanded
+
+fn substr(s: &str, until: uint) -> &str; // elided
+fn substr<'a>(s: &'a str, until: uint) -> &'a str; // expanded
+
+fn get_str() -> &str; // ILLEGAL
+
+fn frob(s: &str, t: &str) -> &str; // ILLEGAL
+
+fn get_mut(&mut self) -> &mut T; // elided
+fn get_mut<'a>(&'a mut self) -> &'a mut T; // expanded
+
+fn args<T:ToCStr>(&mut self, args: &[T]) -> &mut Command // elided
+fn args<'a, 'b, T:ToCStr>(&'a mut self, args: &'b [T]) -> &'a mut Command // expanded
+
+fn new(buf: &mut [u8]) -> BufWriter; // elided
+fn new<'a>(buf: &'a mut [u8]) -> BufWriter<'a> // expanded
+
+```
--- /dev/null
+% Limits of Lifetimes
+
+Given the following code:
+
+```rust,ignore
+struct Foo;
+
+impl Foo {
+ fn mutate_and_share(&mut self) -> &Self { &*self }
+ fn share(&self) {}
+}
+
+fn main() {
+ let mut foo = Foo;
+ let loan = foo.mutate_and_share();
+ foo.share();
+}
+```
+
+One might expect it to compile. We call `mutate_and_share`, which mutably borrows
+`foo` temporarily, but then returns only a shared reference. Therefore we
+would expect `foo.share()` to succeed as `foo` shouldn't be mutably borrowed.
+
+However when we try to compile it:
+
+```text
+<anon>:11:5: 11:8 error: cannot borrow `foo` as immutable because it is also borrowed as mutable
+<anon>:11 foo.share();
+ ^~~
+<anon>:10:16: 10:19 note: previous borrow of `foo` occurs here; the mutable borrow prevents subsequent moves, borrows, or modification of `foo` until the borrow ends
+<anon>:10 let loan = foo.mutate_and_share();
+ ^~~
+<anon>:12:2: 12:2 note: previous borrow ends here
+<anon>:8 fn main() {
+<anon>:9 let mut foo = Foo;
+<anon>:10 let loan = foo.mutate_and_share();
+<anon>:11 foo.share();
+<anon>:12 }
+ ^
+```
+
+What happened? Well, we got the exact same reasoning as we did for
+[Example 2 in the previous section][ex2]. We desugar the program and we get
+the following:
+
+```rust,ignore
+struct Foo;
+
+impl Foo {
+ fn mutate_and_share<'a>(&'a mut self) -> &'a Self { &'a *self }
+ fn share<'a>(&'a self) {}
+}
+
+fn main() {
+ 'b: {
+ let mut foo: Foo = Foo;
+ 'c: {
+ let loan: &'c Foo = Foo::mutate_and_share::<'c>(&'c mut foo);
+ 'd: {
+ Foo::share::<'d>(&'d foo);
+ }
+ }
+ }
+}
+```
+
+The lifetime system is forced to extend the `&mut foo` to have lifetime `'c`,
+due to the lifetime of `loan` and mutate_and_share's signature. Then when we
+try to call `share`, it sees we're trying to alias that `&'c mut foo` and
+blows up in our face!
+
+This program is clearly correct according to the reference semantics we actually
+care about, but the lifetime system is too coarse-grained to handle that.
+
+
+TODO: other common problems? SEME regions stuff, mostly?
+
+
+
+
+[ex2]: lifetimes.html#example-2:-aliasing-a-mutable-reference
--- /dev/null
+% Lifetimes
+
+Rust enforces these rules through *lifetimes*. Lifetimes are effectively
+just names for scopes somewhere in the program. Each reference,
+and anything that contains a reference, is tagged with a lifetime specifying
+the scope it's valid for.
+
+Within a function body, Rust generally doesn't let you explicitly name the
+lifetimes involved. This is because it's generally not really necessary
+to talk about lifetimes in a local context; Rust has all the information and
+can work out everything as optimally as possible. Many anonymous scopes and
+temporaries that you would otherwise have to write are often introduced to
+make your code Just Work.
+
+However once you cross the function boundary, you need to start talking about
+lifetimes. Lifetimes are denoted with an apostrophe: `'a`, `'static`. To dip
+our toes with lifetimes, we're going to pretend that we're actually allowed
+to label scopes with lifetimes, and desugar the examples from the start of
+this chapter.
+
+Originally, our examples made use of *aggressive* sugar -- high fructose corn
+syrup even -- around scopes and lifetimes, because writing everything out
+explicitly is *extremely noisy*. All Rust code relies on aggressive inference
+and elision of "obvious" things.
+
+One particularly interesting piece of sugar is that each `let` statement implicitly
+introduces a scope. For the most part, this doesn't really matter. However it
+does matter for variables that refer to each other. As a simple example, let's
+completely desugar this simple piece of Rust code:
+
+```rust
+let x = 0;
+let y = &x;
+let z = &y;
+```
+
+The borrow checker always tries to minimize the extent of a lifetime, so it will
+likely desugar to the following:
+
+```rust,ignore
+// NOTE: `'a: {` and `&'b x` is not valid syntax!
+'a: {
+ let x: i32 = 0;
+ 'b: {
+ // lifetime used is 'b because that's good enough.
+ let y: &'b i32 = &'b x;
+ 'c: {
+ // ditto on 'c
+ let z: &'c &'b i32 = &'c y;
+ }
+ }
+}
+```
+
+Wow. That’s... awful. Let’s all take a moment to thank Rust for making this easier.
+
+Actually passing references to outer scopes will cause Rust to infer
+a larger lifetime:
+
+```rust
+let x = 0;
+let z;
+let y = &x;
+z = y;
+```
+
+```rust,ignore
+'a: {
+ let x: i32 = 0;
+ 'b: {
+ let z: &'b i32;
+ 'c: {
+ // Must use 'b here because this reference is
+ // being passed to that scope.
+ let y: &'b i32 = &'b x;
+ z = y;
+ }
+ }
+}
+```
+
+
+
+# Example: references that outlive referents
+
+Alright, let's look at some of those examples from before:
+
+```rust,ignore
+fn as_str(data: &u32) -> &str {
+ let s = format!("{}", data);
+ &s
+}
+```
+
+desugars to:
+
+```rust,ignore
+fn as_str<'a>(data: &'a u32) -> &'a str {
+ 'b: {
+ let s = format!("{}", data);
+ return &'a s;
+ }
+}
+```
+
+This signature of `as_str` takes a reference to a u32 with *some* lifetime, and
+promises that it can produce a reference to a str that can live *just as long*.
+Already we can see why this signature might be trouble. That basically implies
+that we're going to find a str somewhere in the scope the reference
+to the u32 originated in, or somewhere *even earlier*. That's a bit of a big
+ask.
+
+We then proceed to compute the string `s`, and return a reference to it. Since
+the contract of our function says the reference must outlive `'a`, that's the
+lifetime we infer for the reference. Unfortunately, `s` was defined in the
+scope `'b`, so the only way this is sound is if `'b` contains `'a` -- which is
+clearly false since `'a` must contain the function call itself. We have therefore
+created a reference whose lifetime outlives its referent, which is *literally*
+the first thing we said that references can't do. The compiler rightfully blows
+up in our face.
+
+To make this more clear, we can expand the example:
+
+```rust,ignore
+fn as_str<'a>(data: &'a u32) -> &'a str {
+ 'b: {
+ let s = format!("{}", data);
+ return &'a s
+ }
+}
+
+fn main() {
+ 'c: {
+ let x: u32 = 0;
+ 'd: {
+ // An anonymous scope is introduced because the borrow does not
+ // need to last for the whole scope x is valid for. The return
+ // of as_str must find a str somewhere before this function
+ // call. Obviously not happening.
+ println!("{}", as_str::<'d>(&'d x));
+ }
+ }
+}
+```
+
+Shoot!
+
+Of course, the right way to write this function is as follows:
+
+```rust
+fn to_string(data: &u32) -> String {
+ format!("{}", data)
+}
+```
+
+We must produce an owned value inside the function to return it! The only way
+we could have returned an `&'a str` would have been if it was in a field of the
+`&'a u32`, which is obviously not the case.
+
+(Actually we could have also just returned a string literal, which as a global
+can be considered to reside at the bottom of the stack; though this limits
+our implementation *just a bit*.)
+
+
+
+
+
+# Example: aliasing a mutable reference
+
+How about the other example:
+
+```rust,ignore
+let mut data = vec![1, 2, 3];
+let x = &data[0];
+data.push(4);
+println!("{}", x);
+```
+
+```rust,ignore
+'a: {
+ let mut data: Vec<i32> = vec![1, 2, 3];
+ 'b: {
+ // 'b is as big as we need this borrow to be
+ // (just need to get to `println!`)
+ let x: &'b i32 = Index::index::<'b>(&'b data, 0);
+ 'c: {
+ // Temporary scope because we don't need the
+ // &mut to last any longer.
+ Vec::push(&'c mut data, 4);
+ }
+ println!("{}", x);
+ }
+}
+```
+
+The problem here is a bit more subtle and interesting. We want Rust to
+reject this program for the following reason: We have a live shared reference `x`
+to a descendent of `data` when we try to take a mutable reference to `data`
+to `push`. This would create an aliased mutable reference, which would
+violate the *second* rule of references.
+
+However this is *not at all* how Rust reasons that this program is bad. Rust
+doesn't understand that `x` is a reference to a subpath of `data`. It doesn't
+understand Vec at all. What it *does* see is that `x` has to live for `'b` to
+be printed. The signature of `Index::index` subsequently demands that the
+reference we take to `data` has to survive for `'b`. When we try to call `push`,
+it then sees us try to make an `&'c mut data`. Rust knows that `'c` is contained
+within `'b`, and rejects our program because the `&'b data` must still be live!
+
+Here we see that the lifetime system is much more coarse than the reference
+semantics we're actually interested in preserving. For the most part, *that's
+totally ok*, because it keeps us from spending all day explaining our program
+to the compiler. However it does mean that several programs that are totally
+correct with respect to Rust's *true* semantics are rejected because lifetimes
+are too dumb.
--- /dev/null
+% Meet Safe and Unsafe
+
+Programmers in safe "high-level" languages face a fundamental dilemma. On one
+hand, it would be *really* great to just say what you want and not worry about
+how it's done. On the other hand, that can lead to unacceptably poor
+performance. It may be necessary to drop down to less clear or idiomatic
+practices to get the performance characteristics you want. Or maybe you just
+throw up your hands in disgust and decide to shell out to an implementation in
+a less sugary-wonderful *unsafe* language.
+
+Worse, when you want to talk directly to the operating system, you *have* to
+talk to an unsafe language: *C*. C is ever-present and unavoidable. It's the
+lingua-franca of the programming world.
+Even other safe languages generally expose C interfaces for the world at large!
+Regardless of why you're doing it, as soon as your program starts talking to
+C it stops being safe.
+
+With that said, Rust is *totally* a safe programming language.
+
+Well, Rust *has* a safe programming language. Let's step back a bit.
+
+Rust can be thought of as being composed of two programming languages: *Safe
+Rust* and *Unsafe Rust*. Safe Rust is For Reals Totally Safe. Unsafe Rust,
+unsurprisingly, is *not* For Reals Totally Safe. In fact, Unsafe Rust lets you
+do some really crazy unsafe things.
+
+Safe Rust is the *true* Rust programming language. If all you do is write Safe
+Rust, you will never have to worry about type-safety or memory-safety. You will
+never endure a null or dangling pointer, or any of that Undefined Behaviour
+nonsense.
+
+*That's totally awesome.*
+
+The standard library also gives you enough utilities out-of-the-box that you'll
+be able to write awesome high-performance applications and libraries in pure
+idiomatic Safe Rust.
+
+But maybe you want to talk to another language. Maybe you're writing a
+low-level abstraction not exposed by the standard library. Maybe you're
+*writing* the standard library (which is written entirely in Rust). Maybe you
+need to do something the type-system doesn't understand and just *frob some dang
+bits*. Maybe you need Unsafe Rust.
+
+Unsafe Rust is exactly like Safe Rust with all the same rules and semantics.
+However Unsafe Rust lets you do some *extra* things that are Definitely Not Safe.
+
+The only things that are different in Unsafe Rust are that you can:
+
+* Dereference raw pointers
+* Call `unsafe` functions (including C functions, intrinsics, and the raw allocator)
+* Implement `unsafe` traits
+* Mutate statics
+
+That's it. The reason these operations are relegated to Unsafe is that misusing
+any of these things will cause the ever dreaded Undefined Behaviour. Invoking
+Undefined Behaviour gives the compiler full rights to do arbitrarily bad things
+to your program. You definitely *should not* invoke Undefined Behaviour.
+
+Unlike C, Undefined Behaviour is pretty limited in scope in Rust. All the core
+language cares about is preventing the following things:
+
+* Dereferencing null or dangling pointers
+* Reading [uninitialized memory][]
+* Breaking the [pointer aliasing rules][]
+* Producing invalid primitive values:
+ * dangling/null references
+ * a `bool` that isn't 0 or 1
+ * an undefined `enum` discriminant
+ * a `char` outside the ranges [0x0, 0xD7FF] and [0xE000, 0x10FFFF]
+ * A non-utf8 `str`
+* Unwinding into another language
+* Causing a [data race][race]
+
+That's it. That's all the causes of Undefined Behaviour baked into Rust. Of
+course, unsafe functions and traits are free to declare arbitrary other
+constraints that a program must maintain to avoid Undefined Behaviour. However,
+generally violations of these constraints will just transitively lead to one of
+the above problems. Some additional constraints may also derive from compiler
+intrinsics that make special assumptions about how code can be optimized.
+
+Rust is otherwise quite permissive with respect to other dubious operations.
+Rust considers it "safe" to:
+
+* Deadlock
+* Have a [race condition][race]
+* Leak memory
+* Fail to call destructors
+* Overflow integers
+* Abort the program
+* Delete the production database
+
+However any program that actually manages to do such a thing is *probably*
+incorrect. Rust provides lots of tools to make these things rare, but
+these problems are considered impractical to categorically prevent.
+
+[pointer aliasing rules]: references.html
+[uninitialized memory]: uninitialized.html
+[race]: races.html
--- /dev/null
+% The Perils Of Ownership Based Resource Management (OBRM)
+
+OBRM (AKA RAII: Resource Acquisition Is Initialization) is something you'll
+interact with a lot in Rust. Especially if you use the standard library.
+
+Roughly speaking the pattern is as follows: to acquire a resource, you create an
+object that manages it. To release the resource, you simply destroy the object,
+and it cleans up the resource for you. The most common "resource" this pattern
+manages is simply *memory*. `Box`, `Rc`, and basically everything in
+`std::collections` is a convenience to enable correctly managing memory. This is
+particularly important in Rust because we have no pervasive GC to rely on for
+memory management. Which is the point, really: Rust is about control. However we
+are not limited to just memory. Pretty much every other system resource like a
+thread, file, or socket is exposed through this kind of API.
--- /dev/null
+% Alternative representations
+
+Rust allows you to specify alternative data layout strategies from the default.
+
+
+
+
+# repr(C)
+
+This is the most important `repr`. It has fairly simple intent: do what C does.
+The order, size, and alignment of fields is exactly what you would expect from C
+or C++. Any type you expect to pass through an FFI boundary should have
+`repr(C)`, as C is the lingua-franca of the programming world. This is also
+necessary to soundly do more elaborate tricks with data layout such as
+reinterpreting values as a different type.
+
+However, the interaction with Rust's more exotic data layout features must be
+kept in mind. Due to its dual purpose as "for FFI" and "for layout control",
+`repr(C)` can be applied to types that will be nonsensical or problematic if
+passed through the FFI boundary.
+
+* ZSTs are still zero-sized, even though this is not a standard behaviour in
+C, and is explicitly contrary to the behaviour of an empty type in C++, which
+still consumes a byte of space.
+
+* DSTs, tuples, and tagged unions are not a concept in C and as such are never
+FFI safe.
+
+* **If the type would have any [drop flags][], they will still be added**
+
+* This is equivalent to one of `repr(u*)` (see the next section) for enums. The
+chosen size is the default enum size for the target platform's C ABI. Note that
+enum representation in C is implementation defined, so this is really a "best
+guess". In particular, this may be incorrect when the C code of interest is
+compiled with certain flags.
+
+
+
+# repr(u8), repr(u16), repr(u32), repr(u64)
+
+These specify the size to make a C-like enum. If the discriminant overflows the
+integer it has to fit in, it will produce a compile-time error. You can manually
+ask Rust to allow this by setting the overflowing element to explicitly be 0.
+However Rust will not allow you to create an enum where two variants have the
+same discriminant.
+
+On non-C-like enums, this will inhibit certain optimizations like the null-
+pointer optimization.
+
+These reprs have no effect on a struct.
+
+
+
+
+# repr(packed)
+
+`repr(packed)` forces rust to strip any padding, and only align the type to a
+byte. This may improve the memory footprint, but will likely have other negative
+side-effects.
+
+In particular, most architectures *strongly* prefer values to be aligned. This
+may mean the unaligned loads are penalized (x86), or even fault (some ARM
+chips). For simple cases like directly loading or storing a packed field, the
+compiler might be able to paper over alignment issues with shifts and masks.
+However if you take a reference to a packed field, it's unlikely that the
+compiler will be able to emit code to avoid an unaligned load.
+
+**[As of Rust 1.0 this can cause undefined behaviour.][ub loads]**
+
+`repr(packed)` is not to be used lightly. Unless you have extreme requirements,
+this should not be used.
+
+This repr is a modifier on `repr(C)` and `repr(rust)`.
+
+[drop flags]: drop-flags.html
+[ub loads]: https://github.com/rust-lang/rust/issues/27060
--- /dev/null
+% Ownership and Lifetimes
+
+Ownership is the breakout feature of Rust. It allows Rust to be completely
+memory-safe and efficient, while avoiding garbage collection. Before getting
+into the ownership system in detail, we will consider the motivation of this
+design.
+
+We will assume that you accept that garbage collection (GC) is not always an
+optimal solution, and that it is desirable to manually manage memory in some
+contexts. If you do not accept this, might I interest you in a different
+language?
+
+Regardless of your feelings on GC, it is pretty clearly a *massive* boon to
+making code safe. You never have to worry about things going away *too soon*
+(although whether you still wanted to be pointing at that thing is a different
+issue...). This is a pervasive problem that C and C++ programs need to deal
+with. Consider this simple mistake that all of us who have used a non-GC'd
+language have made at one point:
+
+```rust,ignore
+fn as_str(data: &u32) -> &str {
+ // compute the string
+ let s = format!("{}", data);
+
+ // OH NO! We returned a reference to something that
+ // exists only in this function!
+ // Dangling pointer! Use after free! Alas!
+ // (this does not compile in Rust)
+ &s
+}
+```
+
+This is exactly what Rust's ownership system was built to solve.
+Rust knows the scope in which the `&s` lives, and as such can prevent it from
+escaping. However this is a simple case that even a C compiler could plausibly
+catch. Things get more complicated as code gets bigger and pointers get fed through
+various functions. Eventually, a C compiler will fall down and won't be able to
+perform sufficient escape analysis to prove your code unsound. It will consequently
+be forced to accept your program on the assumption that it is correct.
+
+This will never happen to Rust. It's up to the programmer to prove to the
+compiler that everything is sound.
+
+Of course, Rust's story around ownership is much more complicated than just
+verifying that references don't escape the scope of their referent. That's
+because ensuring pointers are always valid is much more complicated than this.
+For instance in this code,
+
+```rust,ignore
+let mut data = vec![1, 2, 3];
+// get an internal reference
+let x = &data[0];
+
+// OH NO! `push` causes the backing storage of `data` to be reallocated.
+// Dangling pointer! Use after free! Alas!
+// (this does not compile in Rust)
+data.push(4);
+
+println!("{}", x);
+```
+
+naive scope analysis would be insufficient to prevent this bug, because `data`
+does in fact live as long as we needed. However it was *changed* while we had
+a reference into it. This is why Rust requires any references to freeze the
+referent and its owners.
+
+
--- /dev/null
+% PhantomData
+
+When working with unsafe code, we can often end up in a situation where
+types or lifetimes are logically associated with a struct, but not actually
+part of a field. This most commonly occurs with lifetimes. For instance, the
+`Iter` for `&'a [T]` is (approximately) defined as follows:
+
+```rust,ignore
+struct Iter<'a, T: 'a> {
+ ptr: *const T,
+ end: *const T,
+}
+```
+
+However because `'a` is unused within the struct's body, it's *unbounded*.
+Because of the troubles this has historically caused, unbounded lifetimes and
+types are *forbidden* in struct definitions. Therefore we must somehow refer
+to these types in the body. Correctly doing this is necessary to have
+correct variance and drop checking.
+
+We do this using `PhantomData`, which is a special marker type. `PhantomData`
+consumes no space, but simulates a field of the given type for the purpose of
+static analysis. This was deemed to be less error-prone than explicitly telling
+the type-system the kind of variance that you want, while also providing other
+useful information such as that needed by drop check.
+
+Iter logically contains a bunch of `&'a T`s, so this is exactly what we tell
+the PhantomData to simulate:
+
+```
+use std::marker;
+
+struct Iter<'a, T: 'a> {
+ ptr: *const T,
+ end: *const T,
+ _marker: marker::PhantomData<&'a T>,
+}
+```
+
+and that's it. The lifetime will be bounded, and your iterator will be variant
+over `'a` and `T`. Everything Just Works.
+
+Another important example is Vec, which is (approximately) defined as follows:
+
+```
+struct Vec<T> {
+ data: *const T, // *const for variance!
+ len: usize,
+ cap: usize,
+}
+```
+
+Unlike the previous example it *appears* that everything is exactly as we
+want. Every generic argument to Vec shows up in at least one field.
+Good to go!
+
+Nope.
+
+The drop checker will generously determine that Vec<T> does not own any values
+of type T. This will in turn make it conclude that it doesn't need to worry
+about Vec dropping any T's in its destructor for determining drop check
+soundness. This will in turn allow people to create unsoundness using
+Vec's destructor.
+
+In order to tell dropck that we *do* own values of type T, and therefore may
+drop some T's when *we* drop, we must add an extra PhantomData saying exactly
+that:
+
+```
+use std::marker;
+
+struct Vec<T> {
+ data: *const T, // *const for covariance!
+ len: usize,
+ cap: usize,
+ _marker: marker::PhantomData<T>,
+}
+```
+
+Raw pointers that own an allocation is such a pervasive pattern that the
+standard library made a utility for itself called `Unique<T>` which:
+
+* wraps a `*const T` for variance
+* includes a `PhantomData<T>`,
+* auto-derives Send/Sync as if T was contained
+* marks the pointer as NonZero for the null-pointer optimization
+
--- /dev/null
+% Poisoning
+
+Although all unsafe code *must* ensure it has minimal exception safety, not all
+types ensure *maximal* exception safety. Even if the type does, your code may
+ascribe additional meaning to it. For instance, an integer is certainly
+exception-safe, but has no semantics on its own. It's possible that code that
+panics could fail to correctly update the integer, producing an inconsistent
+program state.
+
+This is *usually* fine, because anything that witnesses an exception is about
+to get destroyed. For instance, if you send a Vec to another thread and that
+thread panics, it doesn't matter if the Vec is in a weird state. It will be
+dropped and go away forever. However some types are especially good at smuggling
+values across the panic boundary.
+
+These types may choose to explicitly *poison* themselves if they witness a panic.
+Poisoning doesn't entail anything in particular. Generally it just means
+preventing normal usage from proceeding. The most notable example of this is the
+standard library's Mutex type. A Mutex will poison itself if one of its
+MutexGuards (the thing it returns when a lock is obtained) is dropped during a
+panic. Any future attempts to lock the Mutex will return an `Err` or panic.
+
+Mutex poisons not for true safety in the sense that Rust normally cares about. It
+poisons as a safety-guard against blindly using the data that comes out of a Mutex
+that has witnessed a panic while locked. The data in such a Mutex was likely in the
+middle of being modified, and as such may be in an inconsistent or incomplete state.
+It is important to note that one cannot violate memory safety with such a type
+if it is correctly written. After all, it must be minimally exception-safe!
+
+However if the Mutex contained, say, a BinaryHeap that does not actually have the
+heap property, it's unlikely that any code that uses it will do
+what the author intended. As such, the program should not proceed normally.
+Still, if you're double-plus-sure that you can do *something* with the value,
+the Mutex exposes a method to get the lock anyway. It *is* safe, after all.
+Just maybe nonsense.
--- /dev/null
+% Data Races and Race Conditions
+
+Safe Rust guarantees an absence of data races, which are defined as:
+
+* two or more threads concurrently accessing a location of memory
+* one of them is a write
+* one of them is unsynchronized
+
+A data race has Undefined Behaviour, and is therefore impossible to perform
+in Safe Rust. Data races are *mostly* prevented through rust's ownership system:
+it's impossible to alias a mutable reference, so it's impossible to perform a
+data race. Interior mutability makes this more complicated, which is largely why
+we have the Send and Sync traits (see below).
+
+**However Rust does not prevent general race conditions.**
+
+This is pretty fundamentally impossible, and probably honestly undesirable. Your
+hardware is racy, your OS is racy, the other programs on your computer are racy,
+and the world this all runs in is racy. Any system that could genuinely claim to
+prevent *all* race conditions would be pretty awful to use, if not just
+incorrect.
+
+So it's perfectly "fine" for a Safe Rust program to get deadlocked or do
+something incredibly stupid with incorrect synchronization. Obviously such a
+program isn't very good, but Rust can only hold your hand so far. Still, a
+race condition can't violate memory safety in a Rust program on
+its own. Only in conjunction with some other unsafe code can a race condition
+actually violate memory safety. For instance:
+
+```rust,no_run
+use std::thread;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+
+let data = vec![1, 2, 3, 4];
+// Arc so that the memory the AtomicUsize is stored in still exists for
+// the other thread to increment, even if we completely finish executing
+// before it. Rust won't compile the program without it, because of the
+// lifetime requirements of thread::spawn!
+let idx = Arc::new(AtomicUsize::new(0));
+let other_idx = idx.clone();
+
+// `move` captures other_idx by-value, moving it into this thread
+thread::spawn(move || {
+ // It's ok to mutate idx because this value
+ // is an atomic, so it can't cause a Data Race.
+ other_idx.fetch_add(10, Ordering::SeqCst);
+});
+
+// Index with the value loaded from the atomic. This is safe because we
+// read the atomic memory only once, and then pass a copy of that value
+// to the Vec's indexing implementation. This indexing will be correctly
+// bounds checked, and there's no chance of the value getting changed
+// in the middle. However our program may panic if the thread we spawned
+// managed to increment before this ran. A race condition because correct
+program execution (panicking is rarely correct) depends on order of
+// thread execution.
+println!("{}", data[idx.load(Ordering::SeqCst)]);
+```
+
+```rust,no_run
+use std::thread;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+
+let data = vec![1, 2, 3, 4];
+
+let idx = Arc::new(AtomicUsize::new(0));
+let other_idx = idx.clone();
+
+// `move` captures other_idx by-value, moving it into this thread
+thread::spawn(move || {
+ // It's ok to mutate idx because this value
+ // is an atomic, so it can't cause a Data Race.
+ other_idx.fetch_add(10, Ordering::SeqCst);
+});
+
+if idx.load(Ordering::SeqCst) < data.len() {
+ unsafe {
+ // Incorrectly loading the idx after we did the bounds check.
+ // It could have changed. This is a race condition, *and dangerous*
+ // because we decided to do `get_unchecked`, which is `unsafe`.
+ println!("{}", data.get_unchecked(idx.load(Ordering::SeqCst)));
+ }
+}
+```
--- /dev/null
+% References
+
+This section gives a high-level view of the memory model that *all* Rust
+programs must satisfy to be correct. Safe code is statically verified
+to obey this model by the borrow checker. Unsafe code may go above
+and beyond the borrow checker while still satisfying this model. The borrow
+checker may also be extended to allow more programs to compile, as long as
+this more fundamental model is satisfied.
+
+There are two kinds of reference:
+
+* Shared reference: `&`
+* Mutable reference: `&mut`
+
+Which obey the following rules:
+
+* A reference cannot outlive its referent
+* A mutable reference cannot be aliased
+
+That's it. That's the whole model. Of course, we should probably define
+what *aliased* means. To define aliasing, we must define the notion of
+*paths* and *liveness*.
+
+
+**NOTE: The model that follows is generally agreed to be dubious and have
+issues. It's ok-ish as an intuitive model, but fails to capture the desired
+semantics. We leave this here to be able to use notions introduced here in later
+sections. This will be significantly changed in the future. TODO: do that.**
+
+
+# Paths
+
+If all Rust had were values (no pointers), then every value would be uniquely
+owned by a variable or composite structure. From this we naturally derive a
+*tree* of ownership. The stack itself is the root of the tree, with every
+variable as its direct children. Each variable's direct children would be their
+fields (if any), and so on.
+
+From this view, every value in Rust has a unique *path* in the tree of
+ownership. Of particular interest are *ancestors* and *descendants*: if `x` owns
+`y`, then `x` is an ancestor of `y`, and `y` is a descendant of `x`. Note
+that this is an inclusive relationship: `x` is a descendant and ancestor of
+itself.
+
+We can then define references as simply *names* for paths. When you create a
+reference, you're declaring that an ownership path exists to this address
+of memory.
+
+Tragically, plenty of data doesn't reside on the stack, and we must also
+accommodate this. Globals and thread-locals are simple enough to model as
+residing at the bottom of the stack (though we must be careful with mutable
+globals). Data on the heap poses a different problem.
+
+If all Rust had on the heap was data uniquely owned by a pointer on the stack,
+then we could just treat such a pointer as a struct that owns the value on the
+heap. Box, Vec, String, and HashMap, are examples of types which uniquely
+own data on the heap.
+
+Unfortunately, data on the heap is not *always* uniquely owned. Rc for instance
+introduces a notion of *shared* ownership. Shared ownership of a value means
+there is no unique path to it. A value with no unique path limits what we can do
+with it.
+
+In general, only shared references can be created to non-unique paths. However
+mechanisms which ensure mutual exclusion may establish One True Owner
+temporarily, establishing a unique path to that value (and therefore all
+its children). If this is done, the value may be mutated. In particular, a
+mutable reference can be taken.
+
+The most common way to establish such a path is through *interior mutability*,
+in contrast to the *inherited mutability* that everything in Rust normally uses.
+Cell, RefCell, Mutex, and RwLock are all examples of interior mutability types.
+These types provide exclusive access through runtime restrictions.
+
+An interesting case of this effect is Rc itself: if an Rc has refcount 1,
+then it is safe to mutate or even move its internals. Note however that the
+refcount itself uses interior mutability.
+
+In order to correctly communicate to the type system that a variable or field of
+a struct can have interior mutability, it must be wrapped in an UnsafeCell. This
+does not in itself make it safe to perform interior mutability operations on
+that value. You still must yourself ensure that mutual exclusion is upheld.
+
+
+
+
+# Liveness
+
+Note: Liveness is not the same thing as a *lifetime*, which will be explained
+in detail in the next section of this chapter.
+
+Roughly, a reference is *live* at some point in a program if it can be
+dereferenced. Shared references are always live unless they are literally
+unreachable (for instance, they reside in freed or leaked memory). Mutable
+references can be reachable but *not* live through the process of *reborrowing*.
+
+A mutable reference can be reborrowed to either a shared or mutable reference to
+one of its descendants. A reborrowed reference will only be live again once all
+reborrows derived from it expire. For instance, a mutable reference can be
+reborrowed to point to a field of its referent:
+
+```rust
+let x = &mut (1, 2);
+{
+ // reborrow x to a subfield
+ let y = &mut x.0;
+ // y is now live, but x isn't
+ *y = 3;
+}
+// y goes out of scope, so x is live again
+*x = (5, 7);
+```
+
+It is also possible to reborrow into *multiple* mutable references, as long as
+they are *disjoint*: no reference is an ancestor of another. Rust
+explicitly enables this to be done with disjoint struct fields, because
+disjointness can be statically proven:
+
+```rust
+let x = &mut (1, 2);
+{
+ // reborrow x to two disjoint subfields
+ let y = &mut x.0;
+ let z = &mut x.1;
+
+ // y and z are now live, but x isn't
+ *y = 3;
+ *z = 4;
+}
+// y and z go out of scope, so x is live again
+*x = (5, 7);
+```
+
+However it's often the case that Rust isn't sufficiently smart to prove that
+multiple borrows are disjoint. *This does not mean it is fundamentally illegal
+to make such a borrow*, just that Rust isn't as smart as you want.
+
+To simplify things, we can model variables as a fake type of reference: *owned*
+references. Owned references have much the same semantics as mutable references:
+they can be re-borrowed in a mutable or shared manner, which makes them no
+longer live. Live owned references have the unique property that they can be
+moved out of (though mutable references *can* be swapped out of). This power is
+only given to *live* owned references because moving its referent would of
+course invalidate all outstanding references prematurely.
+
+As a local lint against inappropriate mutation, only variables that are marked
+as `mut` can be borrowed mutably.
+
+It is interesting to note that Box behaves exactly like an owned reference. It
+can be moved out of, and Rust understands it sufficiently to reason about its
+paths like a normal variable.
+
+
+
+
+# Aliasing
+
+With liveness and paths defined, we can now properly define *aliasing*:
+
+**A mutable reference is aliased if there exists another live reference to one
+of its ancestors or descendants.**
+
+(If you prefer, you may also say the two live references alias *each other*.
+This has no semantic consequences, but is probably a more useful notion when
+verifying the soundness of a construct.)
+
+That's it. Super simple right? Except for the fact that it took us two pages to
+define all of the terms in that definition. You know: Super. Simple.
+
+Actually it's a bit more complicated than that. In addition to references, Rust
+has *raw pointers*: `*const T` and `*mut T`. Raw pointers have no inherent
+ownership or aliasing semantics. As a result, Rust makes absolutely no effort to
+track that they are used correctly, and they are wildly unsafe.
+
+**It is an open question to what degree raw pointers have alias semantics.
+However it is important for these definitions to be sound that the existence of
+a raw pointer does not imply some kind of live path.**
--- /dev/null
+% repr(Rust)
+
+First and foremost, all types have an alignment specified in bytes. The
+alignment of a type specifies what addresses are valid to store the value at. A
+value of alignment `n` must only be stored at an address that is a multiple of
+`n`. So alignment 2 means you must be stored at an even address, and 1 means
+that you can be stored anywhere. Alignment is at least 1, and always a power of
+2. Most primitives are generally aligned to their size, although this is
+platform-specific behaviour. In particular, on x86 `u64` and `f64` may be only
+aligned to 32 bits.
+
+A type's size must always be a multiple of its alignment. This ensures that an
+array of that type may always be indexed by offsetting by a multiple of its
+size. Note that the size and alignment of a type may not be known
+statically in the case of [dynamically sized types][dst].
+
+Rust gives you the following ways to lay out composite data:
+
+* structs (named product types)
+* tuples (anonymous product types)
+* arrays (homogeneous product types)
+* enums (named sum types -- tagged unions)
+
+An enum is said to be *C-like* if none of its variants have associated data.
+
+Composite structures will have an alignment equal to the maximum
+of their fields' alignment. Rust will consequently insert padding where
+necessary to ensure that all fields are properly aligned and that the overall
+type's size is a multiple of its alignment. For instance:
+
+```rust
+struct A {
+ a: u8,
+ b: u32,
+ c: u16,
+}
+```
+
+will be 32-bit aligned assuming these primitives are aligned to their size.
+It will therefore have a size that is a multiple of 32-bits. It will potentially
+*really* become:
+
+```rust
+struct A {
+ a: u8,
+ _pad1: [u8; 3], // to align `b`
+ b: u32,
+ c: u16,
+ _pad2: [u8; 2], // to make overall size multiple of 4
+}
+```
+
+There is *no indirection* for these types; all data is stored contiguously as
+you would expect in C. However with the exception of arrays (which are densely
+packed and in-order), the layout of data is not by default specified in Rust.
+Given the two following struct definitions:
+
+```rust
+struct A {
+ a: i32,
+ b: u64,
+}
+
+struct B {
+ x: i32,
+ b: u64,
+}
+```
+
+Rust *does* guarantee that two instances of A have their data laid out in
+exactly the same way. However Rust *does not* guarantee that an instance of A
+has the same field ordering or padding as an instance of B (in practice there's
+no particular reason why they wouldn't, other than that it's not currently
+guaranteed).
+
+With A and B as written, this is basically nonsensical, but several other
+features of Rust make it desirable for the language to play with data layout in
+complex ways.
+
+For instance, consider this struct:
+
+```rust
+struct Foo<T, U> {
+ count: u16,
+ data1: T,
+ data2: U,
+}
+```
+
+Now consider the monomorphizations of `Foo<u32, u16>` and `Foo<u16, u32>`. If
+Rust lays out the fields in the order specified, we expect it to pad the
+values in the struct to satisfy their alignment requirements. So if Rust
+didn't reorder fields, we would expect it to produce the following:
+
+```rust,ignore
+struct Foo<u16, u32> {
+ count: u16,
+ data1: u16,
+ data2: u32,
+}
+
+struct Foo<u32, u16> {
+ count: u16,
+ _pad1: u16,
+ data1: u32,
+ data2: u16,
+ _pad2: u16,
+}
+```
+
+The latter case quite simply wastes space. An optimal use of space therefore
+requires different monomorphizations to have *different field orderings*.
+
+**Note: this is a hypothetical optimization that is not yet implemented in Rust
+1.0**
+
+Enums make this consideration even more complicated. Naively, an enum such as:
+
+```rust
+enum Foo {
+ A(u32),
+ B(u64),
+ C(u8),
+}
+```
+
+would be laid out as:
+
+```rust
+struct FooRepr {
+ data: u64, // this is either a u64, u32, or u8 based on `tag`
+ tag: u8, // 0 = A, 1 = B, 2 = C
+}
+```
+
+And indeed this is approximately how it would be laid out in general
+(modulo the size and position of `tag`). However there are several cases where
+such a representation is inefficient. The classic case of this is Rust's
+"null pointer optimization". Given a pointer that is known to not be null
+(e.g. `&u32`), an enum can *store* a discriminant bit *inside* the pointer
+by using null as a special value. The net result is that
+`size_of::<Option<&T>>() == size_of::<&T>()`
+
+There are many types in Rust that are, or contain, "not null" pointers such as
+`Box<T>`, `Vec<T>`, `String`, `&T`, and `&mut T`. Similarly, one can imagine
+nested enums pooling their tags into a single discriminant, as they are by
+definition known to have a limited range of valid values. In principle enums can
+use fairly elaborate algorithms to cache bits throughout nested types with
+special constrained representations. As such it is *especially* desirable that
+we leave enum layout unspecified today.
+
+[dst]: exotic-sizes.html#dynamically-sized-types-(dsts)
--- /dev/null
+% How Safe and Unsafe Interact
+
+So what's the relationship between Safe and Unsafe Rust? How do they interact?
+
+Rust models the separation between Safe and Unsafe Rust with the `unsafe`
+keyword, which can be thought as a sort of *foreign function interface* (FFI)
+between Safe and Unsafe Rust. This is the magic behind why we can say Safe Rust
+is a safe language: all the scary unsafe bits are relegated exclusively to FFI
+*just like every other safe language*.
+
+However because one language is a subset of the other, the two can be cleanly
+intermixed as long as the boundary between Safe and Unsafe Rust is denoted with
+the `unsafe` keyword. No need to write headers, initialize runtimes, or any of
+that other FFI boiler-plate.
+
+There are several places `unsafe` can appear in Rust today, which can largely be
+grouped into two categories:
+
+* There are unchecked contracts here. To declare you understand this, I require
+you to write `unsafe` elsewhere:
+ * On functions, `unsafe` is declaring the function to be unsafe to call.
+ Users of the function must check the documentation to determine what this
+ means, and then have to write `unsafe` somewhere to identify that they're
+ aware of the danger.
+ * On trait declarations, `unsafe` is declaring that *implementing* the trait
+ is an unsafe operation, as it has contracts that other unsafe code is free
+ to trust blindly. (More on this below.)
+
+* I am declaring that I have, to the best of my knowledge, adhered to the
+unchecked contracts:
+ * On trait implementations, `unsafe` is declaring that the contract of the
+ `unsafe` trait has been upheld.
+ * On blocks, `unsafe` is declaring any unsafety from an unsafe
+ operation within to be handled, and therefore the parent function is safe.
+
+There is also `#[unsafe_no_drop_flag]`, which is a special case that exists for
+historical reasons and is in the process of being phased out. See the section on
+[drop flags][] for details.
+
+Some examples of unsafe functions:
+
+* `slice::get_unchecked` will perform unchecked indexing, allowing memory
+ safety to be freely violated.
+* `ptr::offset` is an intrinsic that invokes Undefined Behaviour if it is
+ not "in bounds" as defined by LLVM.
+* `mem::transmute` reinterprets some value as having the given type,
+ bypassing type safety in arbitrary ways. (see [conversions][] for details)
+* All FFI functions are `unsafe` because they can do arbitrary things.
+ C being an obvious culprit, but generally any language can do something
+ that Rust isn't happy about.
+
+As of Rust 1.0 there are exactly two unsafe traits:
+
+* `Send` is a marker trait (it has no actual API) that promises implementors
+ are safe to send (move) to another thread.
+* `Sync` is a marker trait that promises that threads can safely share
+ implementors through a shared reference.
+
+The need for unsafe traits boils down to the fundamental property of safe code:
+
+**No matter how completely awful Safe code is, it can't cause Undefined
+Behaviour.**
+
+This means that Unsafe Rust, **the royal vanguard of Undefined Behaviour**, has to be
+*super paranoid* about generic safe code. To be clear, Unsafe Rust is totally free to trust
+specific safe code. Anything else would degenerate into infinite spirals of
+paranoid despair. In particular it's generally regarded as ok to trust the standard library
+to be correct. `std` is effectively an extension of the language, and you
+really just have to trust the language. If `std` fails to uphold the
+guarantees it declares, then it's basically a language bug.
+
+That said, it would be best to minimize *needlessly* relying on properties of
+concrete safe code. Bugs happen! Of course, I must reinforce that this is only
+a concern for Unsafe code. Safe code can blindly trust anyone and everyone
+as far as basic memory-safety is concerned.
+
+On the other hand, safe traits are free to declare arbitrary contracts, but because
+implementing them is safe, unsafe code can't trust those contracts to actually
+be upheld. This is different from the concrete case because *anyone* can
+randomly implement the interface. There is something fundamentally different
+about trusting a particular piece of code to be correct, and trusting *all the
+code that will ever be written* to be correct.
+
+For instance Rust has `PartialOrd` and `Ord` traits to try to differentiate
+between types which can "just" be compared, and those that actually implement a
+total ordering. Pretty much every API that wants to work with data that can be
+compared wants Ord data. For instance, a sorted map like BTreeMap
+*doesn't even make sense* for partially ordered types. If you claim to implement
+Ord for a type, but don't actually provide a proper total ordering, BTreeMap will
+get *really confused* and start making a total mess of itself. Data that is
+inserted may be impossible to find!
+
+But that's okay. BTreeMap is safe, so it guarantees that even if you give it a
+completely garbage Ord implementation, it will still do something *safe*. You
+won't start reading uninitialized or unallocated memory. In fact, BTreeMap
+manages to not actually lose any of your data. When the map is dropped, all the
+destructors will be successfully called! Hooray!
+
+However BTreeMap is implemented using a modest spoonful of Unsafe Rust (most collections
+are). That means that it's not necessarily *trivially true* that a bad Ord
+implementation will make BTreeMap behave safely. BTreeMap must be sure not to rely
+on Ord *where safety is at stake*. Ord is provided by safe code, and safety is not
+safe code's responsibility to uphold.
+
+But wouldn't it be grand if there was some way for Unsafe to trust some trait
+contracts *somewhere*? This is the problem that unsafe traits tackle: by marking
+*the trait itself* as unsafe to implement, unsafe code can trust the implementation
+to uphold the trait's contract. Although the trait implementation may be
+incorrect in arbitrary other ways.
+
+For instance, given a hypothetical UnsafeOrd trait, this is technically a valid
+implementation:
+
+```rust
+# use std::cmp::Ordering;
+# struct MyType;
+# unsafe trait UnsafeOrd { fn cmp(&self, other: &Self) -> Ordering; }
+unsafe impl UnsafeOrd for MyType {
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ordering::Equal
+ }
+}
+```
+
+But it's probably not the implementation you want.
+
+Rust has traditionally avoided making traits unsafe because it makes Unsafe
+pervasive, which is not desirable. The reason Send and Sync are unsafe is that thread
+safety is a *fundamental property* that unsafe code cannot possibly hope to defend
+against in the same way it would defend against a bad Ord implementation. The
+only way to possibly defend against thread-unsafety would be to *not use
+threading at all*. Making every load and store atomic isn't even sufficient,
+because it's possible for complex invariants to exist between disjoint locations
+in memory. For instance, the pointer and capacity of a Vec must be in sync.
+
+Even concurrent paradigms that are traditionally regarded as Totally Safe like
+message passing implicitly rely on some notion of thread safety -- are you
+really message-passing if you pass a pointer? Send and Sync therefore require
+some fundamental level of trust that Safe code can't provide, so they must be
+unsafe to implement. To help obviate the pervasive unsafety that this would
+introduce, Send (resp. Sync) is automatically derived for all types composed only
+of Send (resp. Sync) values. 99% of types are Send and Sync, and 99% of those
+never actually say it (the remaining 1% is overwhelmingly synchronization
+primitives).
+
+
+
+
+[drop flags]: drop-flags.html
+[conversions]: conversions.html
--- /dev/null
+% Send and Sync
+
+Not everything obeys inherited mutability, though. Some types allow you to
+multiply alias a location in memory while mutating it. Unless these types use
+synchronization to manage this access, they are absolutely not thread safe. Rust
+captures this through the `Send` and `Sync` traits.
+
+* A type is Send if it is safe to send it to another thread.
+* A type is Sync if it is safe to share between threads (`&T` is Send).
+
+Send and Sync are fundamental to Rust's concurrency story. As such, a
+substantial amount of special tooling exists to make them work right. First and
+foremost, they're [unsafe traits][]. This means that they are unsafe to
+implement, and other unsafe code can trust that they are correctly
+implemented. Since they're *marker traits* (they have no associated items like
+methods), correctly implemented simply means that they have the intrinsic
+properties an implementor should have. Incorrectly implementing Send or Sync can
+cause Undefined Behaviour.
+
+Send and Sync are also automatically derived traits. This means that, unlike
+every other trait, if a type is composed entirely of Send or Sync types, then it
+is Send or Sync. Almost all primitives are Send and Sync, and as a consequence
+pretty much all types you'll ever interact with are Send and Sync.
+
+Major exceptions include:
+
+* raw pointers are neither Send nor Sync (because they have no safety guards).
+* `UnsafeCell` isn't Sync (and therefore `Cell` and `RefCell` aren't).
+* `Rc` isn't Send or Sync (because the refcount is shared and unsynchronized).
+
+`Rc` and `UnsafeCell` are very fundamentally not thread-safe: they enable
+unsynchronized shared mutable state. However raw pointers are, strictly
+speaking, marked as thread-unsafe as more of a *lint*. Doing anything useful
+with a raw pointer requires dereferencing it, which is already unsafe. In that
+sense, one could argue that it would be "fine" for them to be marked as thread
+safe.
+
+However it's important that they aren't thread safe to prevent types that
+contain them from being automatically marked as thread safe. These types have
+non-trivial untracked ownership, and it's unlikely that their author was
+necessarily thinking hard about thread safety. In the case of Rc, we have a nice
+example of a type that contains a `*mut` that is definitely not thread safe.
+
+Types that aren't automatically derived can simply implement them if desired:
+
+```rust
+struct MyBox(*mut u8);
+
+unsafe impl Send for MyBox {}
+unsafe impl Sync for MyBox {}
+```
+
+In the *incredibly rare* case that a type is inappropriately automatically
+derived to be Send or Sync, then one can also unimplement Send and Sync:
+
+```rust
+#![feature(optin_builtin_traits)]
+
+// I have some magic semantics for some synchronization primitive!
+struct SpecialThreadToken(u8);
+
+impl !Send for SpecialThreadToken {}
+impl !Sync for SpecialThreadToken {}
+```
+
+Note that *in and of itself* it is impossible to incorrectly derive Send and
+Sync. Only types that are ascribed special meaning by other unsafe code can
+possibly cause trouble by being incorrectly Send or Sync.
+
+Most uses of raw pointers should be encapsulated behind a sufficient abstraction
+that Send and Sync can be derived. For instance all of Rust's standard
+collections are Send and Sync (when they contain Send and Sync types) in spite
+of their pervasive use of raw pointers to manage allocations and complex ownership.
+Similarly, most iterators into these collections are Send and Sync because they
+largely behave like an `&` or `&mut` into the collection.
+
+TODO: better explain what can or can't be Send or Sync. Sufficient to appeal
+only to data races?
+
+[unsafe traits]: safe-unsafe-meaning.html
--- /dev/null
+% Subtyping and Variance
+
+Although Rust doesn't have any notion of structural inheritance, it *does*
+include subtyping. In Rust, subtyping derives entirely from lifetimes. Since
+lifetimes are scopes, we can partially order them based on the *contains*
+(outlives) relationship. We can even express this as a generic bound.
+
+Subtyping on lifetimes is in terms of that relationship: if `'a: 'b` ("a contains
+b" or "a outlives b"), then `'a` is a subtype of `'b`. This is a large source of
+confusion, because it seems intuitively backwards to many: the bigger scope is a
+*subtype* of the smaller scope.
+
+This does in fact make sense, though. The intuitive reason for this is that if
+you expect an `&'a u8`, then it's totally fine for me to hand you an `&'static
+u8`, in the same way that if you expect an Animal in Java, it's totally fine for
+me to hand you a Cat. Cats are just Animals *and more*, just as `'static` is
+just `'a` *and more*.
+
+(Note, the subtyping relationship and typed-ness of lifetimes is a fairly
+arbitrary construct that some disagree with. However it simplifies our analysis
+to treat lifetimes and types uniformly.)
+
+Higher-ranked lifetimes are also subtypes of every concrete lifetime. This is
+because taking an arbitrary lifetime is strictly more general than taking a
+specific one.
+
+
+
+# Variance
+
+Variance is where things get a bit complicated.
+
+Variance is a property that *type constructors* have with respect to their
+arguments. A type constructor in Rust is a generic type with unbound arguments.
+For instance `Vec` is a type constructor that takes a `T` and returns a
+`Vec<T>`. `&` and `&mut` are type constructors that take two inputs: a
+lifetime, and a type to point to.
+
+A type constructor's *variance* is how the subtyping of its inputs affects the
+subtyping of its outputs. There are two kinds of variance in Rust:
+
+* F is *variant* over `T` if `T` being a subtype of `U` implies
+ `F<T>` is a subtype of `F<U>` (subtyping "passes through")
+* F is *invariant* over `T` otherwise (no subtyping relation can be derived)
+
+(For those of you who are familiar with variance from other languages, what we
+refer to as "just" variance is in fact *covariance*. Rust does not have
+contravariance. Historically Rust did have some contravariance but it was
+scrapped due to poor interactions with other features. If you experience
+contravariance in Rust call your local compiler developer for medical advice.)
+
+Some important variances:
+
+* `&'a T` is variant over `'a` and `T` (as is `*const T` by metaphor)
+* `&'a mut T` is variant over `'a` but invariant over `T`
+* `Fn(T) -> U` is invariant over `T`, but variant over `U`
+* `Box`, `Vec`, and all other collections are variant over the types of
+ their contents
+* `UnsafeCell<T>`, `Cell<T>`, `RefCell<T>`, `Mutex<T>` and all other
+ interior mutability types are invariant over T (as is `*mut T` by metaphor)
+
+To understand why these variances are correct and desirable, we will consider
+several examples.
+
+
+We have already covered why `&'a T` should be variant over `'a` when
+introducing subtyping: it's desirable to be able to pass longer-lived things
+where shorter-lived things are needed.
+
+Similar reasoning applies to why it should be variant over T. It is reasonable
+to be able to pass `&&'static str` where an `&&'a str` is expected. The
+additional level of indirection does not change the desire to be able to pass
+longer lived things where shorter lived things are expected.
+
+However this logic doesn't apply to `&mut`. To see why `&mut` should
+be invariant over T, consider the following code:
+
+```rust,ignore
+fn overwrite<T: Copy>(input: &mut T, new: &mut T) {
+ *input = *new;
+}
+
+fn main() {
+ let mut forever_str: &'static str = "hello";
+ {
+ let string = String::from("world");
+ overwrite(&mut forever_str, &mut &*string);
+ }
+ // Oops, printing free'd memory
+ println!("{}", forever_str);
+}
+```
+
+The signature of `overwrite` is clearly valid: it takes mutable references to
+two values of the same type, and overwrites one with the other. If `&mut T` was
+variant over T, then `&mut &'a str` would be a subtype of `&mut &'static str`,
+since `&'a str` is a subtype of `&'static str`. Therefore the lifetime of
+`forever_str` would successfully be "shrunk" down to the shorter lifetime of
+`string`, and `overwrite` would be called successfully. `string` would
+subsequently be dropped, and `forever_str` would point to freed memory when we
+print it! Therefore `&mut` should be invariant.
+
+This is the general theme of variance vs invariance: if variance would allow you
+to store a short-lived value into a longer-lived slot, then you must be
+invariant.
+
+However it *is* sound for `&'a mut T` to be variant over `'a`. The key difference
+between `'a` and T is that `'a` is a property of the reference itself,
+while T is something the reference is borrowing. If you change T's type, then
+the source still remembers the original type. However if you change the
+lifetime's type, no one but the reference knows this information, so it's fine.
+Put another way: `&'a mut T` owns `'a`, but only *borrows* T.
+
+`Box` and `Vec` are interesting cases because they're variant, but you can
+definitely store values in them! This is where Rust gets really clever: it's
+fine for them to be variant because you can only store values
+in them *via a mutable reference*! The mutable reference makes the whole type
+invariant, and therefore prevents you from smuggling a short-lived type into
+them.
+
+Being variant allows `Box` and `Vec` to be weakened when shared
+immutably. So you can pass a `&Box<&'static str>` where a `&Box<&'a str>` is
+expected.
+
+However what should happen when passing *by-value* is less obvious. It turns out
+that, yes, you can use subtyping when passing by-value. That is, this works:
+
+```rust
+fn get_box<'a>(str: &'a str) -> Box<&'a str> {
+ // string literals are `&'static str`s
+ Box::new("hello")
+}
+```
+
+Weakening when you pass by-value is fine because there's no one else who
+"remembers" the old lifetime in the Box. The reason a variant `&mut` was
+trouble was because there's always someone else who remembers the original
+subtype: the actual owner.
+
+The invariance of the cell types can be seen as follows: `&` is like an `&mut`
+for a cell, because you can still store values in them through an `&`. Therefore
+cells must be invariant to avoid lifetime smuggling.
+
+`Fn` is the most subtle case because it has mixed variance. To see why
+`Fn(T) -> U` should be invariant over T, consider the following function
+signature:
+
+```rust,ignore
+// 'a is derived from some parent scope
+fn foo(&'a str) -> usize;
+```
+
+This signature claims that it can handle any `&str` that lives at least as
+long as `'a`. Now if this signature was variant over `&'a str`, that
+would mean
+
+```rust,ignore
+fn foo(&'static str) -> usize;
+```
+
+could be provided in its place, as it would be a subtype. However this function
+has a stronger requirement: it says that it can only handle `&'static str`s,
+and nothing else. Giving `&'a str`s to it would be unsound, as it's free to
+assume that what it's given lives forever. Therefore functions are not variant
+over their arguments.
+
+To see why `Fn(T) -> U` should be variant over U, consider the following
+function signature:
+
+```rust,ignore
+// 'a is derived from some parent scope
+fn foo(usize) -> &'a str;
+```
+
+This signature claims that it will return something that outlives `'a`. It is
+therefore completely reasonable to provide
+
+```rust,ignore
+fn foo(usize) -> &'static str;
+```
+
+in its place. Therefore functions are variant over their return type.
+
+`*const` has the exact same semantics as `&`, so variance follows. `*mut` on the
+other hand can dereference to an `&mut` whether shared or not, so it is marked
+as invariant just like cells.
+
+This is all well and good for the types the standard library provides, but
+how is variance determined for types that *you* define? A struct, informally
+speaking, inherits the variance of its fields. If a struct `Foo`
+has a generic argument `A` that is used in a field `a`, then Foo's variance
+over `A` is exactly `a`'s variance. However this is complicated if `A` is used
+in multiple fields.
+
+* If all uses of A are variant, then Foo is variant over A
+* Otherwise, Foo is invariant over A
+
+```rust
+use std::cell::Cell;
+
+struct Foo<'a, 'b, A: 'a, B: 'b, C, D, E, F, G, H> {
+ a: &'a A, // variant over 'a and A
+ b: &'b mut B, // invariant over 'b and B
+ c: *const C, // variant over C
+ d: *mut D, // invariant over D
+ e: Vec<E>, // variant over E
+ f: Cell<F>, // invariant over F
+ g: G, // variant over G
+ h1: H, // would also be variant over H except...
+ h2: Cell<H>, // invariant over H, because invariance wins
+}
+```
--- /dev/null
+% Transmutes
+
+Get out of our way type system! We're going to reinterpret these bits or die
+trying! Even though this book is all about doing things that are unsafe, I
+really can't emphasize that you should deeply think about finding Another Way
+than the operations covered in this section. This is really, truly, the most
+horribly unsafe thing you can do in Rust. The railguards here are dental floss.
+
+`mem::transmute<T, U>` takes a value of type `T` and reinterprets it to have
+type `U`. The only restriction is that the `T` and `U` are verified to have the
+same size. The ways to cause Undefined Behaviour with this are mind boggling.
+
+* First and foremost, creating an instance of *any* type with an invalid state
+ is going to cause arbitrary chaos that can't really be predicted.
+* Transmute has an overloaded return type. If you do not specify the return type
+ it may produce a surprising type to satisfy inference.
+* Making a primitive with an invalid value is UB
+* Transmuting between non-repr(C) types is UB
+* Transmuting an & to &mut is UB
+ * Transmuting an & to &mut is *always* UB
+ * No you can't do it
+ * No you're not special
+* Transmuting to a reference without an explicitly provided lifetime
+ produces an [unbounded lifetime][]
+
+`mem::transmute_copy<T, U>` somehow manages to be *even more* wildly unsafe than
+this. It copies `size_of<U>` bytes out of an `&T` and interprets them as a `U`.
+The size check that `mem::transmute` has is gone (as it may be valid to copy
+out a prefix), though it is Undefined Behaviour for `U` to be larger than `T`.
+
+Also of course you can get most of the functionality of these functions using
+pointer casts.
+
+
+[unbounded lifetime]: unbounded-lifetimes.html
--- /dev/null
+% Unbounded Lifetimes
+
+Unsafe code can often end up producing references or lifetimes out of thin air.
+Such lifetimes come into the world as *unbounded*. The most common source of this
+is dereferencing a raw pointer, which produces a reference with an unbounded lifetime.
+Such a lifetime becomes as big as context demands. This is in fact more powerful
+than simply becoming `'static`, because for instance `&'static &'a T`
+will fail to typecheck, but the unbound lifetime will perfectly mold into
+`&'a &'a T` as needed. However for most intents and purposes, such an unbounded
+lifetime can be regarded as `'static`.
+
+Almost no reference is `'static`, so this is probably wrong. `transmute` and
+`transmute_copy` are the two other primary offenders. One should endeavour to
+bound an unbounded lifetime as quickly as possible, especially across function
+boundaries.
+
+Given a function, any output lifetimes that don't derive from inputs are
+unbounded. For instance:
+
+```rust,ignore
+fn get_str<'a>() -> &'a str;
+```
+
+will produce an `&str` with an unbounded lifetime. The easiest way to avoid
+unbounded lifetimes is to use lifetime elision at the function boundary.
+If an output lifetime is elided, then it *must* be bounded by an input lifetime.
+Of course it might be bounded by the *wrong* lifetime, but this will usually
+just cause a compiler error, rather than allow memory safety to be trivially
+violated.
+
+Within a function, bounding lifetimes is more error-prone. The safest and easiest
+way to bound a lifetime is to return it from a function with a bound lifetime.
+However if this is unacceptable, the reference can be placed in a location with
+a specific lifetime. Unfortunately it's impossible to name all lifetimes involved
+in a function. To get around this, you can in principle use `copy_lifetime`, though
+these are unstable due to their awkward nature and questionable utility.
+
--- /dev/null
+% Unchecked Uninitialized Memory
+
+One interesting exception to this rule is working with arrays. Safe Rust doesn't
+permit you to partially initialize an array. When you initialize an array, you
+can either set every value to the same thing with `let x = [val; N]`, or you can
+specify each member individually with `let x = [val1, val2, val3]`.
+Unfortunately this is pretty rigid, especially if you need to initialize your
+array in a more incremental or dynamic way.
+
+Unsafe Rust gives us a powerful tool to handle this problem:
+`mem::uninitialized`. This function pretends to return a value when really
+it does nothing at all. Using it, we can convince Rust that we have initialized
+a variable, allowing us to do trickier things with conditional and incremental
+initialization.
+
+Unfortunately, this opens us up to all kinds of problems. Assignment has a
+different meaning to Rust based on whether it believes that a variable is
+initialized or not. If it's believed uninitialized, then Rust will semantically
+just memcopy the bits over the uninitialized ones, and do nothing else. However
+if Rust believes a value to be initialized, it will try to `Drop` the old value!
+Since we've tricked Rust into believing that the value is initialized, we can no
+longer safely use normal assignment.
+
+This is also a problem if you're working with a raw system allocator, which
+returns a pointer to uninitialized memory.
+
+To handle this, we must use the `ptr` module. In particular, it provides
+three functions that allow us to assign bytes to a location in memory without
+dropping the old value: `write`, `copy`, and `copy_nonoverlapping`.
+
+* `ptr::write(ptr, val)` takes a `val` and moves it into the address pointed
+ to by `ptr`.
+* `ptr::copy(src, dest, count)` copies the bits that `count` T's would occupy
+ from src to dest. (this is equivalent to memmove -- note that the argument
+ order is reversed!)
+* `ptr::copy_nonoverlapping(src, dest, count)` does what `copy` does, but a
+ little faster on the assumption that the two ranges of memory don't overlap.
+ (this is equivalent to memcpy -- note that the argument order is reversed!)
+
+It should go without saying that these functions, if misused, will cause serious
+havoc or just straight up Undefined Behaviour. The only things that these
+functions *themselves* require is that the locations you want to read and write
+are allocated. However the ways writing arbitrary bits to arbitrary
+locations of memory can break things are basically uncountable!
+
+Putting this all together, we get the following:
+
+```rust
+use std::mem;
+use std::ptr;
+
+// size of the array is hard-coded but easy to change. This means we can't
+// use [a, b, c] syntax to initialize the array, though!
+const SIZE: usize = 10;
+
+let mut x: [Box<u32>; SIZE];
+
+unsafe {
+ // convince Rust that x is Totally Initialized
+ x = mem::uninitialized();
+ for i in 0..SIZE {
+ // very carefully overwrite each index without reading it
+ // NOTE: exception safety is not a concern; Box can't panic
+ ptr::write(&mut x[i], Box::new(i as u32));
+ }
+}
+
+println!("{:?}", x);
+```
+
+It's worth noting that you don't need to worry about `ptr::write`-style
+shenanigans with types which don't implement `Drop` or contain `Drop` types,
+because Rust knows not to try to drop them. Similarly you should be able to
+assign to fields of partially initialized structs directly if those fields don't
+contain any `Drop` types.
+
+However when working with uninitialized memory you need to be ever-vigilant for
+Rust trying to drop values you make like this before they're fully initialized.
+Every control path through that variable's scope must initialize the value
+before it ends, if it has a destructor.
+*[This includes code panicking](unwinding.html)*.
+
+And that's about it for working with uninitialized memory! Basically nothing
+anywhere expects to be handed uninitialized memory, so if you're going to pass
+it around at all, be sure to be *really* careful.
--- /dev/null
+% Working With Uninitialized Memory
+
+All runtime-allocated memory in a Rust program begins its life as
+*uninitialized*. In this state the value of the memory is an indeterminate pile
+of bits that may or may not even reflect a valid state for the type that is
+supposed to inhabit that location of memory. Attempting to interpret this memory
+as a value of *any* type will cause Undefined Behaviour. Do Not Do This.
+
+Rust provides mechanisms to work with uninitialized memory in checked (safe) and
+unchecked (unsafe) ways.
\ No newline at end of file
--- /dev/null
+% Unwinding
+
+Rust has a *tiered* error-handling scheme:
+
+* If something might reasonably be absent, Option is used.
+* If something goes wrong and can reasonably be handled, Result is used.
+* If something goes wrong and cannot reasonably be handled, the thread panics.
+* If something catastrophic happens, the program aborts.
+
+Option and Result are overwhelmingly preferred in most situations, especially
+since they can be promoted into a panic or abort at the API user's discretion.
+Panics cause the thread to halt normal execution and unwind its stack, calling
+destructors as if every function instantly returned.
+
+As of 1.0, Rust is of two minds when it comes to panics. In the long-long-ago,
+Rust was much more like Erlang. Like Erlang, Rust had lightweight tasks,
+and tasks were intended to kill themselves with a panic when they reached an
+untenable state. Unlike an exception in Java or C++, a panic could not be
+caught at any time. Panics could only be caught by the owner of the task, at which
+point they had to be handled or *that* task would itself panic.
+
+Unwinding was important to this story because if a task's
+destructors weren't called, it would cause memory and other system resources to
+leak. Since tasks were expected to die during normal execution, this would make
+Rust very poor for long-running systems!
+
+As the Rust we know today came to be, this style of programming grew out of
+fashion in the push for less-and-less abstraction. Light-weight tasks were
+killed in the name of heavy-weight OS threads. Still, on stable Rust as of 1.0
+panics can only be caught by the parent thread. This means catching a panic
+requires spinning up an entire OS thread! This unfortunately stands in conflict
+to Rust's philosophy of zero-cost abstractions.
+
+There is an unstable API called `catch_panic` that enables catching a panic
+without spawning a thread. Still, we would encourage you to only do this
+sparingly. In particular, Rust's current unwinding implementation is heavily
+optimized for the "doesn't unwind" case. If a program doesn't unwind, there
+should be no runtime cost for the program being *ready* to unwind. As a
+consequence, actually unwinding will be more expensive than in e.g. Java.
+Don't build your programs to unwind under normal circumstances. Ideally, you
+should only panic for programming errors or *extreme* problems.
+
+Rust's unwinding strategy is not specified to be fundamentally compatible
+with any other language's unwinding. As such, unwinding into Rust from another
+language, or unwinding into another language from Rust is Undefined Behaviour.
+You must *absolutely* catch any panics at the FFI boundary! What you do at that
+point is up to you, but *something* must be done. If you fail to do this,
+at best, your application will crash and burn. At worst, your application *won't*
+crash and burn, and will proceed with completely clobbered state.
--- /dev/null
+% Allocating Memory
+
+Using Unique throws a wrench in an important feature of Vec (and indeed all of
+the std collections): an empty Vec doesn't actually allocate at all. So if we
+can't allocate, but also can't put a null pointer in `ptr`, what do we do in
+`Vec::new`? Well, we just put some other garbage in there!
+
+This is perfectly fine because we already have `cap == 0` as our sentinel for no
+allocation. We don't even need to handle it specially in almost any code because
+we usually need to check if `cap > len` or `len > 0` anyway. The traditional
+Rust value to put here is `0x01`. The standard library actually exposes this
+as `std::rt::heap::EMPTY`. There are quite a few places where we'll
+want to use `heap::EMPTY` because there's no real allocation to talk about but
+`null` would make the compiler do bad things.
+
+All of the `heap` API is totally unstable under the `heap_api` feature, though.
+We could trivially define `heap::EMPTY` ourselves, but we'll want the rest of
+the `heap` API anyway, so let's just get that dependency over with.
+
+So:
+
+```rust,ignore
+#![feature(heap_api)]
+
+use std::rt::heap::EMPTY;
+use std::mem;
+
+impl<T> Vec<T> {
+ fn new() -> Self {
+ assert!(mem::size_of::<T>() != 0, "We're not ready to handle ZSTs");
+ unsafe {
+ // need to cast EMPTY to the actual ptr type we want, let
+ // inference handle it.
+ Vec { ptr: Unique::new(heap::EMPTY as *mut _), len: 0, cap: 0 }
+ }
+ }
+}
+```
+
+I slipped in that assert there because zero-sized types will require some
+special handling throughout our code, and I want to defer the issue for now.
+Without this assert, some of our early drafts will do some Very Bad Things.
+
+Next we need to figure out what to actually do when we *do* want space. For
+that, we'll need to use the rest of the heap APIs. These basically allow us to
+talk directly to Rust's allocator (jemalloc by default).
+
+We'll also need a way to handle out-of-memory (OOM) conditions. The standard
+library calls the `abort` intrinsic, which just calls an illegal instruction to
+crash the whole program. The reason we abort and don't panic is because
+unwinding can cause allocations to happen, and that seems like a bad thing to do
+when your allocator just came back with "hey I don't have any more memory".
+
+Of course, this is a bit silly since most platforms don't actually run out of
+memory in a conventional way. Your operating system will probably kill the
+application by another means if you legitimately start using up all the memory.
+The most likely way we'll trigger OOM is by just asking for ludicrous quantities
+of memory at once (e.g. half the theoretical address space). As such it's
+*probably* fine to panic and nothing bad will happen. Still, we're trying to be
+like the standard library as much as possible, so we'll just kill the whole
+program.
+
+We said we don't want to use intrinsics, so doing exactly what `std` does is
+out. Instead, we'll call `std::process::exit` with some random number.
+
+```rust
+fn oom() {
+ ::std::process::exit(-9999);
+}
+```
+
+Okay, now we can write growing. Roughly, we want to have this logic:
+
+```text
+if cap == 0:
+ allocate()
+ cap = 1
+else:
+ reallocate()
+ cap *= 2
+```
+
+But Rust's only supported allocator API is so low level that we'll need to do a
+fair bit of extra work. We also need to guard against some special
+conditions that can occur with really large allocations or empty allocations.
+
+In particular, `ptr::offset` will cause us a lot of trouble, because it has
+the semantics of LLVM's GEP inbounds instruction. If you're fortunate enough to
+not have dealt with this instruction, here's the basic story with GEP: alias
+analysis, alias analysis, alias analysis. It's super important to an optimizing
+compiler to be able to reason about data dependencies and aliasing.
+
+As a simple example, consider the following fragment of code:
+
+```rust
+# let x = &mut 0;
+# let y = &mut 0;
+*x *= 7;
+*y *= 3;
+```
+
+If the compiler can prove that `x` and `y` point to different locations in
+memory, the two operations can in theory be executed in parallel (by e.g.
+loading them into different registers and working on them independently).
+However the compiler can't do this in general because if x and y point to
+the same location in memory, the operations need to be done to the same value,
+and they can't just be merged afterwards.
+
+When you use GEP inbounds, you are specifically telling LLVM that the offsets
+you're about to do are within the bounds of a single "allocated" entity. The
+ultimate payoff being that LLVM can assume that if two pointers are known to
+point to two disjoint objects, all the offsets of those pointers are *also*
+known to not alias (because you won't just end up in some random place in
+memory). LLVM is heavily optimized to work with GEP offsets, and inbounds
+offsets are the best of all, so it's important that we use them as much as
+possible.
+
+So that's what GEP's about, how can it cause us trouble?
+
+The first problem is that we index into arrays with unsigned integers, but
+GEP (and as a consequence `ptr::offset`) takes a signed integer. This means
+that half of the seemingly valid indices into an array will overflow GEP and
+actually go in the wrong direction! As such we must limit all allocations to
+`isize::MAX` elements. This actually means we only need to worry about
+byte-sized objects, because e.g. `> isize::MAX` `u16`s will truly exhaust all of
+the system's memory. However in order to avoid subtle corner cases where someone
+reinterprets some array of `< isize::MAX` objects as bytes, std limits all
+allocations to `isize::MAX` bytes.
+
+On all 64-bit targets that Rust currently supports we're artificially limited
+to significantly less than all 64 bits of the address space (modern x64
+platforms only expose 48-bit addressing), so we can rely on just running out of
+memory first. However on 32-bit targets, particularly those with extensions to
+use more of the address space (PAE x86 or x32), it's theoretically possible to
+successfully allocate more than `isize::MAX` bytes of memory.
+
+However since this is a tutorial, we're not going to be particularly optimal
+here, and just unconditionally check, rather than use clever platform-specific
+`cfg`s.
+
+The other corner-case we need to worry about is empty allocations. There will
+be two kinds of empty allocations we need to worry about: `cap = 0` for all T,
+and `cap > 0` for zero-sized types.
+
+These cases are tricky because they come
+down to what LLVM means by "allocated". LLVM's notion of an
+allocation is significantly more abstract than how we usually use it. Because
+LLVM needs to work with different languages' semantics and custom allocators,
+it can't really intimately understand allocation. Instead, the main idea behind
+allocation is "doesn't overlap with other stuff". That is, heap allocations,
+stack allocations, and globals don't randomly overlap. Yep, it's about alias
+analysis. As such, Rust can technically play a bit fast and loose with the notion of
+an allocation as long as it's *consistent*.
+
+Getting back to the empty allocation case, there are a couple of places where
+we want to offset by 0 as a consequence of generic code. The question is then:
+is it consistent to do so? For zero-sized types, we have concluded that it is
+indeed consistent to do a GEP inbounds offset by an arbitrary number of
+elements. This is a runtime no-op because every element takes up no space,
+and it's fine to pretend that there's infinite zero-sized types allocated
+at `0x01`. No allocator will ever allocate that address, because they won't
+allocate `0x00` and they generally allocate to some minimal alignment higher
+than a byte. Also generally the whole first page of memory is
+protected from being allocated anyway (a whole 4k, on many platforms).
+
+However what about for positive-sized types? That one's a bit trickier. In
+principle, you can argue that offsetting by 0 gives LLVM no information: either
+there's an element before the address or after it, but it can't know which.
+However we've chosen to conservatively assume that it may do bad things. As
+such we will guard against this case explicitly.
+
+*Phew*
+
+Ok with all the nonsense out of the way, let's actually allocate some memory:
+
+```rust,ignore
+fn grow(&mut self) {
+ // this is all pretty delicate, so let's say it's all unsafe
+ unsafe {
+ // current API requires us to specify size and alignment manually.
+ let align = mem::align_of::<T>();
+ let elem_size = mem::size_of::<T>();
+
+ let (new_cap, ptr) = if self.cap == 0 {
+ let ptr = heap::allocate(elem_size, align);
+ (1, ptr)
+ } else {
+ // as an invariant, we can assume that `self.cap < isize::MAX`,
+ // so this doesn't need to be checked.
+ let new_cap = self.cap * 2;
+ // Similarly this can't overflow due to previously allocating this
+ let old_num_bytes = self.cap * elem_size;
+
+ // check that the new allocation doesn't exceed `isize::MAX` at all
+ // regardless of the actual size of the capacity. This combines the
+ // `new_cap <= isize::MAX` and `new_num_bytes <= usize::MAX` checks
+ // we need to make. We lose the ability to allocate e.g. 2/3rds of
+ // the address space with a single Vec of i16's on 32-bit though.
+ // Alas, poor Yorick -- I knew him, Horatio.
+ assert!(old_num_bytes <= (::std::isize::MAX as usize) / 2,
+ "capacity overflow");
+
+ let new_num_bytes = old_num_bytes * 2;
+ let ptr = heap::reallocate(*self.ptr as *mut _,
+ old_num_bytes,
+ new_num_bytes,
+ align);
+ (new_cap, ptr)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom(); }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+}
+```
+
+Nothing particularly tricky here. Just computing sizes and alignments and doing
+some careful multiplication checks.
+
--- /dev/null
+% Deallocating
+
+Next we should implement Drop so that we don't massively leak tons of resources.
+The easiest way is to just call `pop` until it yields None, and then deallocate
+our buffer. Note that calling `pop` is unneeded if `T: !Drop`. In theory we can
+ask Rust if `T` `needs_drop` and omit the calls to `pop`. However in practice
+LLVM is *really* good at removing simple side-effect free code like this, so I
+wouldn't bother unless you notice it's not being stripped (in this case it is).
+
+We must not call `heap::deallocate` when `self.cap == 0`, as in this case we
+haven't actually allocated any memory.
+
+
+```rust,ignore
+impl<T> Drop for Vec<T> {
+ fn drop(&mut self) {
+ if self.cap != 0 {
+ while let Some(_) = self.pop() { }
+
+ let align = mem::align_of::<T>();
+ let elem_size = mem::size_of::<T>();
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.ptr, num_bytes, align);
+ }
+ }
+ }
+}
+```
--- /dev/null
+% Deref
+
+Alright! We've got a decent minimal stack implemented. We can push, we can
+pop, and we can clean up after ourselves. However there's a whole mess of
+functionality we'd reasonably want. In particular, we have a proper array, but
+none of the slice functionality. That's actually pretty easy to solve: we can
+implement `Deref<Target=[T]>`. This will magically make our Vec coerce to, and
+behave like, a slice in all sorts of conditions.
+
+All we need is `slice::from_raw_parts`. It will correctly handle empty slices
+for us. Later once we set up zero-sized type support it will also Just Work
+for those too.
+
+```rust,ignore
+use std::ops::Deref;
+
+impl<T> Deref for Vec<T> {
+ type Target = [T];
+ fn deref(&self) -> &[T] {
+ unsafe {
+ ::std::slice::from_raw_parts(*self.ptr, self.len)
+ }
+ }
+}
+```
+
+And let's do DerefMut too:
+
+```rust,ignore
+use std::ops::DerefMut;
+
+impl<T> DerefMut for Vec<T> {
+ fn deref_mut(&mut self) -> &mut [T] {
+ unsafe {
+ ::std::slice::from_raw_parts_mut(*self.ptr, self.len)
+ }
+ }
+}
+```
+
+Now we have `len`, `first`, `last`, indexing, slicing, sorting, `iter`,
+`iter_mut`, and all other sorts of bells and whistles provided by slice. Sweet!
--- /dev/null
+% Drain
+
+Let's move on to Drain. Drain is largely the same as IntoIter, except that
+instead of consuming the Vec, it borrows the Vec and leaves its allocation
+untouched. For now we'll only implement the "basic" full-range version.
+
+```rust,ignore
+use std::marker::PhantomData;
+
+struct Drain<'a, T: 'a> {
+ // Need to bound the lifetime here, so we do it with `&'a mut Vec<T>`
+ // because that's semantically what we contain. We're "just" calling
+ // `pop()` and `remove(0)`.
+ vec: PhantomData<&'a mut Vec<T>>,
+ start: *const T,
+ end: *const T,
+}
+
+impl<'a, T> Iterator for Drain<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+```
+
+-- wait, this is seeming familiar. Let's do some more compression. Both
+IntoIter and Drain have the exact same structure, let's just factor it out.
+
+```rust
+struct RawValIter<T> {
+ start: *const T,
+ end: *const T,
+}
+
+impl<T> RawValIter<T> {
+ // unsafe to construct because it has no associated lifetimes.
+ // This is necessary to store a RawValIter in the same struct as
+ // its actual allocation. OK since it's a private implementation
+ // detail.
+ unsafe fn new(slice: &[T]) -> Self {
+ RawValIter {
+ start: slice.as_ptr(),
+ end: if slice.len() == 0 {
+ // if `len = 0`, then this is not actually allocated memory.
+ // Need to avoid offsetting because that will give wrong
+ // information to LLVM via GEP.
+ slice.as_ptr()
+ } else {
+ slice.as_ptr().offset(slice.len() as isize)
+ }
+ }
+ }
+}
+
+// Iterator and DoubleEndedIterator impls identical to IntoIter.
+```
+
+And IntoIter becomes the following:
+
+```rust,ignore
+pub struct IntoIter<T> {
+ _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
+ iter: RawValIter<T>,
+}
+
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
+}
+
+impl<T> Drop for IntoIter<T> {
+ fn drop(&mut self) {
+ for _ in &mut self.iter {}
+ }
+}
+
+impl<T> Vec<T> {
+ pub fn into_iter(self) -> IntoIter<T> {
+ unsafe {
+ let iter = RawValIter::new(&self);
+
+ let buf = ptr::read(&self.buf);
+ mem::forget(self);
+
+ IntoIter {
+ iter: iter,
+ _buf: buf,
+ }
+ }
+ }
+}
+```
+
+Note that I've left a few quirks in this design to make upgrading Drain to work
+with arbitrary subranges a bit easier. In particular we *could* have RawValIter
+drain itself on drop, but that won't work right for a more complex Drain.
+We also take a slice to simplify Drain initialization.
+
+Alright, now Drain is really easy:
+
+```rust,ignore
+use std::marker::PhantomData;
+
+pub struct Drain<'a, T: 'a> {
+ vec: PhantomData<&'a mut Vec<T>>,
+ iter: RawValIter<T>,
+}
+
+impl<'a, T> Iterator for Drain<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+
+impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
+ fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
+}
+
+impl<'a, T> Drop for Drain<'a, T> {
+ fn drop(&mut self) {
+ for _ in &mut self.iter {}
+ }
+}
+
+impl<T> Vec<T> {
+ pub fn drain(&mut self) -> Drain<T> {
+ // this is a mem::forget safety thing. If Drain is forgotten, we just
+ // leak the whole Vec's contents. Also we need to do this eventually
+ // anyway, so why not do it now?
+ self.len = 0;
+
+ unsafe {
+ Drain {
+ iter: RawValIter::new(&self),
+ vec: PhantomData,
+ }
+ }
+ }
+}
+```
+
+For more details on the `mem::forget` problem, see the
+[section on leaks][leaks].
+
+[leaks]: leaking.html
--- /dev/null
+% The Final Code
+
+```rust
+#![feature(unique)]
+#![feature(heap_api)]
+
+use std::ptr::{Unique, self};
+use std::rt::heap;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+use std::marker::PhantomData;
+
+
+
+
+
+struct RawVec<T> {
+ ptr: Unique<T>,
+ cap: usize,
+}
+
+impl<T> RawVec<T> {
+ fn new() -> Self {
+ unsafe {
+ // !0 is usize::MAX. This branch should be stripped at compile time.
+ let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
+
+ // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
+ RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
+ }
+ }
+
+ fn grow(&mut self) {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+
+ // since we set the capacity to usize::MAX when elem_size is
+ // 0, getting to here necessarily means the Vec is overfull.
+ assert!(elem_size != 0, "capacity overflow");
+
+ let align = mem::align_of::<T>();
+
+ let (new_cap, ptr) = if self.cap == 0 {
+ let ptr = heap::allocate(elem_size, align);
+ (1, ptr)
+ } else {
+ let new_cap = 2 * self.cap;
+ let ptr = heap::reallocate(*self.ptr as *mut _,
+ self.cap * elem_size,
+ new_cap * elem_size,
+ align);
+ (new_cap, ptr)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+}
+
+impl<T> Drop for RawVec<T> {
+ fn drop(&mut self) {
+ let elem_size = mem::size_of::<T>();
+ if self.cap != 0 && elem_size != 0 {
+ let align = mem::align_of::<T>();
+
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+ }
+ }
+ }
+}
+
+
+
+
+
+pub struct Vec<T> {
+ buf: RawVec<T>,
+ len: usize,
+}
+
+impl<T> Vec<T> {
+ fn ptr(&self) -> *mut T { *self.buf.ptr }
+
+ fn cap(&self) -> usize { self.buf.cap }
+
+ pub fn new() -> Self {
+ Vec { buf: RawVec::new(), len: 0 }
+ }
+ pub fn push(&mut self, elem: T) {
+ if self.len == self.cap() { self.buf.grow(); }
+
+ unsafe {
+ ptr::write(self.ptr().offset(self.len as isize), elem);
+ }
+
+ // Can't fail, we'll OOM first.
+ self.len += 1;
+ }
+
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.len -= 1;
+ unsafe {
+ Some(ptr::read(self.ptr().offset(self.len as isize)))
+ }
+ }
+ }
+
+ pub fn insert(&mut self, index: usize, elem: T) {
+ assert!(index <= self.len, "index out of bounds");
+ if self.cap() == self.len { self.buf.grow(); }
+
+ unsafe {
+ if index < self.len {
+ ptr::copy(self.ptr().offset(index as isize),
+ self.ptr().offset(index as isize + 1),
+ self.len - index);
+ }
+ ptr::write(self.ptr().offset(index as isize), elem);
+ self.len += 1;
+ }
+ }
+
+ pub fn remove(&mut self, index: usize) -> T {
+ assert!(index < self.len, "index out of bounds");
+ unsafe {
+ self.len -= 1;
+ let result = ptr::read(self.ptr().offset(index as isize));
+ ptr::copy(self.ptr().offset(index as isize + 1),
+ self.ptr().offset(index as isize),
+ self.len - index);
+ result
+ }
+ }
+
+ pub fn into_iter(self) -> IntoIter<T> {
+ unsafe {
+ let iter = RawValIter::new(&self);
+ let buf = ptr::read(&self.buf);
+ mem::forget(self);
+
+ IntoIter {
+ iter: iter,
+ _buf: buf,
+ }
+ }
+ }
+
+ pub fn drain(&mut self) -> Drain<T> {
+ // this is a mem::forget safety thing. If this is forgotten, we just
+ // leak the whole Vec's contents. Also we need to do this *eventually*
+ // anyway, so why not do it now?
+ self.len = 0;
+ unsafe {
+ Drain {
+ iter: RawValIter::new(&self),
+ vec: PhantomData,
+ }
+ }
+ }
+}
+
+impl<T> Drop for Vec<T> {
+ fn drop(&mut self) {
+ while let Some(_) = self.pop() {}
+ // allocation is handled by RawVec
+ }
+}
+
+impl<T> Deref for Vec<T> {
+ type Target = [T];
+ fn deref(&self) -> &[T] {
+ unsafe {
+ ::std::slice::from_raw_parts(self.ptr(), self.len)
+ }
+ }
+}
+
+impl<T> DerefMut for Vec<T> {
+ fn deref_mut(&mut self) -> &mut [T] {
+ unsafe {
+ ::std::slice::from_raw_parts_mut(self.ptr(), self.len)
+ }
+ }
+}
+
+
+
+
+
+struct RawValIter<T> {
+ start: *const T,
+ end: *const T,
+}
+
+impl<T> RawValIter<T> {
+ unsafe fn new(slice: &[T]) -> Self {
+ RawValIter {
+ start: slice.as_ptr(),
+ end: if mem::size_of::<T>() == 0 {
+ ((slice.as_ptr() as usize) + slice.len()) as *const _
+ } else if slice.len() == 0 {
+ slice.as_ptr()
+ } else {
+ slice.as_ptr().offset(slice.len() as isize)
+ }
+ }
+ }
+}
+
+impl<T> Iterator for RawValIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ let result = ptr::read(self.start);
+ self.start = self.start.offset(1);
+ Some(result)
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let elem_size = mem::size_of::<T>();
+ let len = (self.end as usize - self.start as usize)
+ / if elem_size == 0 { 1 } else { elem_size };
+ (len, Some(len))
+ }
+}
+
+impl<T> DoubleEndedIterator for RawValIter<T> {
+ fn next_back(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ self.end = self.end.offset(-1);
+ Some(ptr::read(self.end))
+ }
+ }
+ }
+}
+
+
+
+
+pub struct IntoIter<T> {
+ _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
+ iter: RawValIter<T>,
+}
+
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
+}
+
+impl<T> Drop for IntoIter<T> {
+ fn drop(&mut self) {
+ for _ in &mut *self {}
+ }
+}
+
+
+
+
+pub struct Drain<'a, T: 'a> {
+ vec: PhantomData<&'a mut Vec<T>>,
+ iter: RawValIter<T>,
+}
+
+impl<'a, T> Iterator for Drain<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+
+impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
+ fn next_back(&mut self) -> Option<T> { self.iter.next_back() }
+}
+
+impl<'a, T> Drop for Drain<'a, T> {
+ fn drop(&mut self) {
+ // pre-drain the iter
+ for _ in &mut self.iter {}
+ }
+}
+
+/// Abort the process, we're out of memory!
+///
+/// In practice this is probably dead code on most OSes
+fn oom() {
+ ::std::process::exit(-9999);
+}
+
+# fn main() {}
+```
--- /dev/null
+% Insert and Remove
+
+Something *not* provided by slice is `insert` and `remove`, so let's do those
+next.
+
+Insert needs to shift all the elements at the target index to the right by one.
+To do this we need to use `ptr::copy`, which is our version of C's `memmove`.
+This copies some chunk of memory from one location to another, correctly
+handling the case where the source and destination overlap (which will
+definitely happen here).
+
+If we insert at index `i`, we want to shift the `[i .. len]` to `[i+1 .. len+1]`
+using the old len.
+
+```rust,ignore
+pub fn insert(&mut self, index: usize, elem: T) {
+ // Note: `<=` because it's valid to insert after everything
+ // which would be equivalent to push.
+ assert!(index <= self.len, "index out of bounds");
+ if self.cap == self.len { self.grow(); }
+
+ unsafe {
+ if index < self.len {
+ // ptr::copy(src, dest, len): "copy from source to dest len elems"
+ ptr::copy(self.ptr.offset(index as isize),
+ self.ptr.offset(index as isize + 1),
+ self.len - index);
+ }
+ ptr::write(self.ptr.offset(index as isize), elem);
+ self.len += 1;
+ }
+}
+```
+
+Remove behaves in the opposite manner. We need to shift all the elements from
+`[i+1 .. len + 1]` to `[i .. len]` using the *new* len.
+
+```rust,ignore
+pub fn remove(&mut self, index: usize) -> T {
+ // Note: `<` because it's *not* valid to remove after everything
+ assert!(index < self.len, "index out of bounds");
+ unsafe {
+ self.len -= 1;
+ let result = ptr::read(self.ptr.offset(index as isize));
+ ptr::copy(self.ptr.offset(index as isize + 1),
+ self.ptr.offset(index as isize),
+ self.len - index);
+ result
+ }
+}
+```
--- /dev/null
+% IntoIter
+
+Let's move on to writing iterators. `iter` and `iter_mut` have already been
+written for us thanks to The Magic of Deref. However there's two interesting
+iterators that Vec provides that slices can't: `into_iter` and `drain`.
+
+IntoIter consumes the Vec by-value, and can consequently yield its elements
+by-value. In order to enable this, IntoIter needs to take control of Vec's
+allocation.
+
+IntoIter needs to be DoubleEnded as well, to enable reading from both ends.
+Reading from the back could just be implemented as calling `pop`, but reading
+from the front is harder. We could call `remove(0)` but that would be insanely
+expensive. Instead we're going to just use ptr::read to copy values out of
+either end of the Vec without mutating the buffer at all.
+
+To do this we're going to use a very common C idiom for array iteration. We'll
+make two pointers; one that points to the start of the array, and one that
+points to one-element past the end. When we want an element from one end, we'll
+read out the value pointed to at that end and move the pointer over by one. When
+the two pointers are equal, we know we're done.
+
+Note that the order of read and offset is reversed for `next` and `next_back`.
+For `next_back` the pointer is always after the element it wants to read next,
+while for `next` the pointer is always at the element it wants to read next.
+To see why this is, consider the case where every element but one has been
+yielded.
+
+The array looks like this:
+
+```text
+ S E
+[X, X, X, O, X, X, X]
+```
+
+If E pointed directly at the element it wanted to yield next, it would be
+indistinguishable from the case where there are no more elements to yield.
+
+Although we don't actually care about it during iteration, we also need to hold
+onto the Vec's allocation information in order to free it once IntoIter is
+dropped.
+
+So we're going to use the following struct:
+
+```rust,ignore
+struct IntoIter<T> {
+ buf: Unique<T>,
+ cap: usize,
+ start: *const T,
+ end: *const T,
+}
+```
+
+And this is what we end up with for initialization:
+
+```rust,ignore
+impl<T> Vec<T> {
+ fn into_iter(self) -> IntoIter<T> {
+ // Can't destructure Vec since it's Drop
+ let ptr = self.ptr;
+ let cap = self.cap;
+ let len = self.len;
+
+ // Make sure not to drop Vec since that will free the buffer
+ mem::forget(self);
+
+ unsafe {
+ IntoIter {
+ buf: ptr,
+ cap: cap,
+ start: *ptr,
+ end: if cap == 0 {
+ // can't offset off this pointer, it's not allocated!
+ *ptr
+ } else {
+ ptr.offset(len as isize)
+ }
+ }
+ }
+ }
+}
+```
+
+Here's iterating forward:
+
+```rust,ignore
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ let result = ptr::read(self.start);
+ self.start = self.start.offset(1);
+ Some(result)
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = (self.end as usize - self.start as usize)
+ / mem::size_of::<T>();
+ (len, Some(len))
+ }
+}
+```
+
+And here's iterating backwards.
+
+```rust,ignore
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ fn next_back(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ self.end = self.end.offset(-1);
+ Some(ptr::read(self.end))
+ }
+ }
+ }
+}
+```
+
+Because IntoIter takes ownership of its allocation, it needs to implement Drop
+to free it. However it also wants to implement Drop to drop any elements it
+contains that weren't yielded.
+
+
+```rust,ignore
+impl<T> Drop for IntoIter<T> {
+ fn drop(&mut self) {
+ if self.cap != 0 {
+ // drop any remaining elements
+ for _ in &mut *self {}
+
+ let align = mem::align_of::<T>();
+ let elem_size = mem::size_of::<T>();
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.buf as *mut _, num_bytes, align);
+ }
+ }
+ }
+}
+```
--- /dev/null
+% Layout
+
+First off, we need to come up with the struct layout. A Vec has three parts:
+a pointer to the allocation, the size of the allocation, and the number of
+elements that have been initialized.
+
+Naively, this means we just want this design:
+
+```rust
+pub struct Vec<T> {
+ ptr: *mut T,
+ cap: usize,
+ len: usize,
+}
+# fn main() {}
+```
+
+And indeed this would compile. Unfortunately, it would be incorrect. First, the
+compiler will give us too strict variance. So a `&Vec<&'static str>`
+couldn't be used where an `&Vec<&'a str>` was expected. More importantly, it
+will give incorrect ownership information to the drop checker, as it will
+conservatively assume we don't own any values of type `T`. See [the chapter
+on ownership and lifetimes][ownership] for all the details on variance and
+drop check.
+
+As we saw in the ownership chapter, we should use `Unique<T>` in place of
+`*mut T` when we have a raw pointer to an allocation we own. Unique is unstable,
+so we'd like to not use it if possible, though.
+
+As a recap, Unique is a wrapper around a raw pointer that declares that:
+
+* We are variant over `T`
+* We may own a value of type `T` (for drop check)
+* We are Send/Sync if `T` is Send/Sync
+* We deref to `*mut T` (so it largely acts like a `*mut` in our code)
+* Our pointer is never null (so `Option<Vec<T>>` is null-pointer-optimized)
+
+We can implement all of the above requirements except for the last
+one in stable Rust:
+
+```rust
+use std::marker::PhantomData;
+use std::ops::Deref;
+use std::mem;
+
+struct Unique<T> {
+ ptr: *const T, // *const for variance
+ _marker: PhantomData<T>, // For the drop checker
+}
+
+// Deriving Send and Sync is safe because we are the Unique owners
+// of this data. It's like Unique<T> is "just" T.
+unsafe impl<T: Send> Send for Unique<T> {}
+unsafe impl<T: Sync> Sync for Unique<T> {}
+
+impl<T> Unique<T> {
+ pub fn new(ptr: *mut T) -> Self {
+ Unique { ptr: ptr, _marker: PhantomData }
+ }
+}
+
+impl<T> Deref for Unique<T> {
+ type Target = *mut T;
+ fn deref(&self) -> &*mut T {
+ // There's no way to cast the *const to a *mut
+ // while also taking a reference. So we just
+ // transmute it since it's all "just pointers".
+ unsafe { mem::transmute(&self.ptr) }
+ }
+}
+# fn main() {}
+```
+
+Unfortunately the mechanism for stating that your value is non-zero is
+unstable and unlikely to be stabilized soon. As such we're just going to
+take the hit and use std's Unique:
+
+
+```rust
+#![feature(unique)]
+
+use std::ptr::{Unique, self};
+
+pub struct Vec<T> {
+ ptr: Unique<T>,
+ cap: usize,
+ len: usize,
+}
+
+# fn main() {}
+```
+
+If you don't care about the null-pointer optimization, then you can use the
+stable code. However we will be designing the rest of the code around enabling
+the optimization. In particular, `Unique::new` is unsafe to call, because
+putting `null` inside of it is Undefined Behaviour. Our stable Unique doesn't
+need `new` to be unsafe because it doesn't make any interesting guarantees about
+its contents.
+
+[ownership]: ownership.html
--- /dev/null
+% Push and Pop
+
+Alright. We can initialize. We can allocate. Let's actually implement some
+functionality! Let's start with `push`. All it needs to do is check if we're
+full to grow, unconditionally write to the next index, and then increment our
+length.
+
+To do the write we have to be careful not to evaluate the memory we want to write
+to. At worst, it's truly uninitialized memory from the allocator. At best it's the
+bits of some old value we popped off. Either way, we can't just index to the memory
+and dereference it, because that will evaluate the memory as a valid instance of
+T. Worse, `foo[idx] = x` will try to call `drop` on the old value of `foo[idx]`!
+
+The correct way to do this is with `ptr::write`, which just blindly overwrites the
+target address with the bits of the value we provide. No evaluation involved.
+
+For `push`, if the old len (before push was called) is 0, then we want to write
+to the 0th index. So we should offset by the old len.
+
+```rust,ignore
+pub fn push(&mut self, elem: T) {
+ if self.len == self.cap { self.grow(); }
+
+ unsafe {
+ ptr::write(self.ptr.offset(self.len as isize), elem);
+ }
+
+ // Can't fail, we'll OOM first.
+ self.len += 1;
+}
+```
+
+Easy! How about `pop`? Although this time the index we want to access is
+initialized, Rust won't just let us dereference the location of memory to move
+the value out, because that would leave the memory uninitialized! For this we
+need `ptr::read`, which just copies out the bits from the target address and
+interprets it as a value of type T. This will leave the memory at this address
+logically uninitialized, even though there is in fact a perfectly good instance
+of T there.
+
+For `pop`, if the old len is 1, we want to read out of the 0th index. So we
+should offset by the new len.
+
+```rust,ignore
+pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.len -= 1;
+ unsafe {
+ Some(ptr::read(self.ptr.offset(self.len as isize)))
+ }
+ }
+}
+```
--- /dev/null
+% RawVec
+
+We've actually reached an interesting situation here: we've duplicated the logic
+for specifying a buffer and freeing its memory in Vec and IntoIter. Now that
+we've implemented it and identified *actual* logic duplication, this is a good
+time to perform some logic compression.
+
+We're going to abstract out the `(ptr, cap)` pair and give them the logic for
+allocating, growing, and freeing:
+
+```rust,ignore
+struct RawVec<T> {
+ ptr: Unique<T>,
+ cap: usize,
+}
+
+impl<T> RawVec<T> {
+ fn new() -> Self {
+ assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
+ unsafe {
+ RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: 0 }
+ }
+ }
+
+ // unchanged from Vec
+ fn grow(&mut self) {
+ unsafe {
+ let align = mem::align_of::<T>();
+ let elem_size = mem::size_of::<T>();
+
+ let (new_cap, ptr) = if self.cap == 0 {
+ let ptr = heap::allocate(elem_size, align);
+ (1, ptr)
+ } else {
+ let new_cap = 2 * self.cap;
+ let ptr = heap::reallocate(*self.ptr as *mut _,
+ self.cap * elem_size,
+ new_cap * elem_size,
+ align);
+ (new_cap, ptr)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+}
+
+
+impl<T> Drop for RawVec<T> {
+ fn drop(&mut self) {
+ if self.cap != 0 {
+ let align = mem::align_of::<T>();
+ let elem_size = mem::size_of::<T>();
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+ }
+ }
+ }
+}
+```
+
+And change Vec as follows:
+
+```rust,ignore
+pub struct Vec<T> {
+ buf: RawVec<T>,
+ len: usize,
+}
+
+impl<T> Vec<T> {
+ fn ptr(&self) -> *mut T { *self.buf.ptr }
+
+ fn cap(&self) -> usize { self.buf.cap }
+
+ pub fn new() -> Self {
+ Vec { buf: RawVec::new(), len: 0 }
+ }
+
+ // push/pop/insert/remove largely unchanged:
+ // * `self.ptr -> self.ptr()`
+ // * `self.cap -> self.cap()`
+ // * `self.grow -> self.buf.grow()`
+}
+
+impl<T> Drop for Vec<T> {
+ fn drop(&mut self) {
+ while let Some(_) = self.pop() {}
+ // deallocation is handled by RawVec
+ }
+}
+```
+
+And finally we can really simplify IntoIter:
+
+```rust,ignore
+struct IntoIter<T> {
+ _buf: RawVec<T>, // we don't actually care about this. Just need it to live.
+ start: *const T,
+ end: *const T,
+}
+
+// next and next_back literally unchanged since they never referred to the buf
+
+impl<T> Drop for IntoIter<T> {
+ fn drop(&mut self) {
+ // only need to ensure all our elements are read;
+ // buffer will clean itself up afterwards.
+ for _ in &mut *self {}
+ }
+}
+
+impl<T> Vec<T> {
+ pub fn into_iter(self) -> IntoIter<T> {
+ unsafe {
+ // need to use ptr::read to unsafely move the buf out since it's
+ // not Copy, and Vec implements Drop (so we can't destructure it).
+ let buf = ptr::read(&self.buf);
+ let len = self.len;
+ mem::forget(self);
+
+ IntoIter {
+ start: *buf.ptr,
+ end: buf.ptr.offset(len as isize),
+ _buf: buf,
+ }
+ }
+ }
+}
+```
+
+Much better.
--- /dev/null
+% Handling Zero-Sized Types
+
+It's time. We're going to fight the spectre that is zero-sized types. Safe Rust
+*never* needs to care about this, but Vec is very intensive on raw pointers and
+raw allocations, which are exactly the two things that care about
+zero-sized types. We need to be careful of two things:
+
+* The raw allocator API has undefined behaviour if you pass in 0 for an
+ allocation size.
+* raw pointer offsets are no-ops for zero-sized types, which will break our
+ C-style pointer iterator.
+
+Thankfully we abstracted out pointer-iterators and allocating handling into
+RawValIter and RawVec respectively. How mysteriously convenient.
+
+
+
+
+## Allocating Zero-Sized Types
+
+So if the allocator API doesn't support zero-sized allocations, what on earth
+do we store as our allocation? Why, `heap::EMPTY` of course! Almost every operation
+with a ZST is a no-op since ZSTs have exactly one value, and therefore no state needs
+to be considered to store or load them. This actually extends to `ptr::read` and
+`ptr::write`: they won't actually look at the pointer at all. As such we never need
+to change the pointer.
+
+Note however that our previous reliance on running out of memory before overflow is
+no longer valid with zero-sized types. We must explicitly guard against capacity
+overflow for zero-sized types.
+
+Due to our current architecture, all this means is writing 3 guards, one in each
+method of RawVec.
+
+```rust,ignore
+impl<T> RawVec<T> {
+ fn new() -> Self {
+ unsafe {
+ // !0 is usize::MAX. This branch should be stripped at compile time.
+ let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
+
+ // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
+ RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
+ }
+ }
+
+ fn grow(&mut self) {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+
+ // since we set the capacity to usize::MAX when elem_size is
+ // 0, getting to here necessarily means the Vec is overfull.
+ assert!(elem_size != 0, "capacity overflow");
+
+ let align = mem::align_of::<T>();
+
+ let (new_cap, ptr) = if self.cap == 0 {
+ let ptr = heap::allocate(elem_size, align);
+ (1, ptr)
+ } else {
+ let new_cap = 2 * self.cap;
+ let ptr = heap::reallocate(*self.ptr as *mut _,
+ self.cap * elem_size,
+ new_cap * elem_size,
+ align);
+ (new_cap, ptr)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+}
+
+impl<T> Drop for RawVec<T> {
+ fn drop(&mut self) {
+ let elem_size = mem::size_of::<T>();
+
+ // don't free zero-sized allocations, as they were never allocated.
+ if self.cap != 0 && elem_size != 0 {
+ let align = mem::align_of::<T>();
+
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+ }
+ }
+ }
+}
+```
+
+That's it. We support pushing and popping zero-sized types now. Our iterators
+(that aren't provided by slice Deref) are still busted, though.
+
+
+
+
+## Iterating Zero-Sized Types
+
+Zero-sized offsets are no-ops. This means that our current design will always
+initialize `start` and `end` as the same value, and our iterators will yield
+nothing. The current solution to this is to cast the pointers to integers,
+increment, and then cast them back:
+
+```rust,ignore
+impl<T> RawValIter<T> {
+ unsafe fn new(slice: &[T]) -> Self {
+ RawValIter {
+ start: slice.as_ptr(),
+ end: if mem::size_of::<T>() == 0 {
+ ((slice.as_ptr() as usize) + slice.len()) as *const _
+ } else if slice.len() == 0 {
+ slice.as_ptr()
+ } else {
+ slice.as_ptr().offset(slice.len() as isize)
+ }
+ }
+ }
+}
+```
+
+Now we have a different bug. Instead of our iterators not running at all, our
+iterators now run *forever*. We need to do the same trick in our iterator impls.
+Also, our size_hint computation code will divide by 0 for ZSTs. Since we'll
+basically be treating the two pointers as if they point to bytes, we'll just
+map size 0 to divide by 1.
+
+```rust,ignore
+impl<T> Iterator for RawValIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ let result = ptr::read(self.start);
+ self.start = if mem::size_of::<T>() == 0 {
+ (self.start as usize + 1) as *const _
+ } else {
+ self.start.offset(1)
+ };
+ Some(result)
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let elem_size = mem::size_of::<T>();
+ let len = (self.end as usize - self.start as usize)
+ / if elem_size == 0 { 1 } else { elem_size };
+ (len, Some(len))
+ }
+}
+
+impl<T> DoubleEndedIterator for RawValIter<T> {
+ fn next_back(&mut self) -> Option<T> {
+ if self.start == self.end {
+ None
+ } else {
+ unsafe {
+ self.end = if mem::size_of::<T>() == 0 {
+ (self.end as usize - 1) as *const _
+ } else {
+ self.end.offset(-1)
+ };
+ Some(ptr::read(self.end))
+ }
+ }
+ }
+}
+```
+
+And that's it. Iteration works!
--- /dev/null
+% Example: Implementing Vec
+
+To bring everything together, we're going to write `std::Vec` from scratch.
+Because all the best tools for writing unsafe code are unstable, this
+project will only work on nightly (as of Rust 1.2.0). With the exception of the
+allocator API, much of the unstable code we'll use is expected to be stabilized
+in a similar form as it is today.
+
+However we will generally try to avoid unstable code where possible. In
+particular we won't use any intrinsics that could make the code a little
+bit nicer or more efficient because intrinsics are permanently unstable. Although
+many intrinsics *do* become stabilized elsewhere (`std::ptr` and `std::mem`
+consist of many intrinsics).
+
+Ultimately this means our implementation may not take advantage of all
+possible optimizations, though it will be by no means *naive*. We will
+definitely get into the weeds over nitty-gritty details, even
+when the problem doesn't *really* merit it.
+
+You wanted advanced. We're gonna go advanced.
--- /dev/null
+% Working with Unsafe
+
+Rust generally only gives us the tools to talk about Unsafe Rust in a scoped and
+binary manner. Unfortunately, reality is significantly more complicated than
+that. For instance, consider the following toy function:
+
+```rust
+fn index(idx: usize, arr: &[u8]) -> Option<u8> {
+ if idx < arr.len() {
+ unsafe {
+ Some(*arr.get_unchecked(idx))
+ }
+ } else {
+ None
+ }
+}
+```
+
+Clearly, this function is safe. We check that the index is in bounds, and if it
+is, index into the array in an unchecked manner. But even in such a trivial
+function, the scope of the unsafe block is questionable. Consider changing the
+`<` to a `<=`:
+
+```rust
+fn index(idx: usize, arr: &[u8]) -> Option<u8> {
+ if idx <= arr.len() {
+ unsafe {
+ Some(*arr.get_unchecked(idx))
+ }
+ } else {
+ None
+ }
+}
+```
+
+This program is now unsound, and yet *we only modified safe code*. This is the
+fundamental problem of safety: it's non-local. The soundness of our unsafe
+operations necessarily depends on the state established by otherwise
+"safe" operations.
+
+Safety is modular in the sense that opting into unsafety doesn't require you
+to consider arbitrary other kinds of badness. For instance, doing an unchecked
+index into a slice doesn't mean you suddenly need to worry about the slice being
+null or containing uninitialized memory. Nothing fundamentally changes. However
+safety *isn't* modular in the sense that programs are inherently stateful and
+your unsafe operations may depend on arbitrary other state.
+
+Trickier than that is when we get into actual statefulness. Consider a simple
+implementation of `Vec`:
+
+```rust
+use std::ptr;
+
+// Note this definition is insufficient. See the section on implementing Vec.
+pub struct Vec<T> {
+ ptr: *mut T,
+ len: usize,
+ cap: usize,
+}
+
+// Note this implementation does not correctly handle zero-sized types.
+// We currently live in a nice imaginary world of only positive fixed-size
+// types.
+impl<T> Vec<T> {
+ pub fn push(&mut self, elem: T) {
+ if self.len == self.cap {
+ // not important for this example
+ self.reallocate();
+ }
+ unsafe {
+ ptr::write(self.ptr.offset(self.len as isize), elem);
+ self.len += 1;
+ }
+ }
+
+ # fn reallocate(&mut self) { }
+}
+
+# fn main() {}
+```
+
+This code is simple enough to reasonably audit and verify. Now consider
+adding the following method:
+
+```rust,ignore
+fn make_room(&mut self) {
+ // grow the capacity
+ self.cap += 1;
+}
+```
+
+This code is 100% Safe Rust but it is also completely unsound. Changing the
+capacity violates the invariants of Vec (that `cap` reflects the allocated space
+in the Vec). This is not something the rest of Vec can guard against. It *has*
+to trust the capacity field because there's no way to verify it.
+
+`unsafe` does more than pollute a whole function: it pollutes a whole *module*.
+Generally, the only bullet-proof way to limit the scope of unsafe code is at the
+module boundary with privacy.
+
+However this works *perfectly*. The existence of `make_room` is *not* a
+problem for the soundness of Vec because we didn't mark it as public. Only the
+module that defines this function can call it. Also, `make_room` directly
+accesses the private fields of Vec, so it can only be written in the same module
+as Vec.
+
+It is therefore possible for us to write a completely safe abstraction that
+relies on complex invariants. This is *critical* to the relationship between
+Safe Rust and Unsafe Rust. We have already seen that Unsafe code must trust
+*some* Safe code, but can't trust *generic* Safe code. It can't trust an
+arbitrary implementor of a trait or any function that was passed to it to be
+well-behaved in a way that safe code doesn't care about.
+
+However if unsafe code couldn't prevent client safe code from messing with its
+state in arbitrary ways, safety would be a lost cause. Thankfully, it *can*
+prevent arbitrary code from messing with critical state due to privacy.
+
+Safety lives!
+
integral types: `u8`, `i8`, `u16`, `i16`, `u32`, `i32`, `u64`, `i64`,
`isize`, or `usize`.
-The type of an _unsuffixed_ integer literal is determined by type inference.
-If an integer type can be _uniquely_ determined from the surrounding program
-context, the unsuffixed integer literal has that type. If the program context
-underconstrains the type, it defaults to the signed 32-bit integer `i32`; if
-the program context overconstrains the type, it is considered a static type
-error.
+The type of an _unsuffixed_ integer literal is determined by type inference:
+
+* If an integer type can be _uniquely_ determined from the surrounding
+ program context, the unsuffixed integer literal has that type.
+
+* If the program context under-constrains the type, it defaults to the
+ signed 32-bit integer `i32`.
+
+* If the program context over-constrains the type, it is considered a
+ static type error.
Examples of integer literals of various forms:
_floating-point suffixes_, `f32` and `f64` (the 32-bit and 64-bit floating point
types), which explicitly determine the type of the literal.
-The type of an _unsuffixed_ floating-point literal is determined by type
-inference. If a floating-point type can be _uniquely_ determined from the
-surrounding program context, the unsuffixed floating-point literal has that type.
-If the program context underconstrains the type, it defaults to double-precision `f64`;
-if the program context overconstrains the type, it is considered a static type
-error.
+The type of an _unsuffixed_ floating-point literal is determined by
+type inference:
+
+* If a floating-point type can be _uniquely_ determined from the
+ surrounding program context, the unsuffixed floating-point literal
+ has that type.
+
+* If the program context under-constrains the type, it defaults to `f64`.
+
+* If the program context over-constrains the type, it is considered a
+ static type error.
Examples of floating-point literals of various forms:
compiler.
Rust's semantics obey a *phase distinction* between compile-time and
-run-time.[^phase-distinction] Those semantic rules that have a *static
-interpretation* govern the success or failure of compilation. Those semantics
+run-time.[^phase-distinction] Semantic rules that have a *static
+interpretation* govern the success or failure of compilation, while
+semantic rules
that have a *dynamic interpretation* govern the behavior of the program at
run-time.
be undesired.
* Deadlocks
-* Reading data from private fields (`std::repr`)
* Leaks of memory and other resources
* Exiting without calling destructors
-* Sending signals
-* Accessing/modifying the file system
* Integer overflow
- Overflow is considered "unexpected" behavior and is always user-error,
unless the `wrapping` primitives are used. In non-optimized builds, the compiler
```
enum Animal {
- Dog,
- Cat
+ Dog,
+ Cat,
}
let mut a: Animal = Animal::Dog;
statics:
* Statics may not contain any destructors.
-* The types of static values must ascribe to `Sync` to allow threadsafe access.
+* The types of static values must ascribe to `Sync` to allow thread-safe access.
* Statics may not refer to other statics by value, only by reference.
* Constants cannot refer to statics.
An) -> R`, where `A1...An` are the declared types of its arguments and `R` is
the declared return type.
+It is valid to add the `link` attribute on an empty extern block. You can use
+this to satisfy the linking requirements of extern blocks elsewhere in your code
+(including upstream crates) instead of adding the attribute to each extern block.
+
## Visibility and Privacy
These two terms are often used interchangeably, and what they are attempting to
* A crate needs a global available "helper module" to itself, but it doesn't
want to expose the helper module as a public API. To accomplish this, the
root of the crate's hierarchy would have a private module which then
- internally has a "public api". Because the entire crate is a descendant of
+ internally has a "public API". Because the entire crate is a descendant of
the root, then the entire local crate can access this private module through
the second case.
object file that this item's contents will be placed into.
- `no_mangle` - on any item, do not apply the standard name mangling. Set the
symbol for this item to its identifier.
-- `packed` - on structs or enums, eliminate any padding that would be used to
- align fields.
- `simd` - on certain tuple structs, derive the arithmetic operators, which
lower to the target's SIMD instructions, if any; the `simd` feature gate
is necessary to use this attribute.
as a configuration itself, like `unix` or `windows`.
* `target_os = "..."`. Operating system of the target, examples include
`"windows"`, `"macos"`, `"ios"`, `"linux"`, `"android"`, `"freebsd"`, `"dragonfly"`,
- `"bitrig"` or `"openbsd"`.
+ `"bitrig"`, `"openbsd"` or `"netbsd"`.
* `target_pointer_width = "..."`. Target pointer width in bits. This is set
to `"32"` for targets with 32-bit pointers, and likewise set to `"64"` for
64-bit pointers.
internally without imposing on callers
(i.e. making them behave like function calls in
terms of encapsulation).
+* - `default_type_parameter_fallback` - Allows type parameter defaults to
+ influence type inference.
If a feature is promoted to a language feature, then all existing programs will
start to receive compilation warnings about `#![feature]` directives which enabled
#### Moved and copied types
When a [local variable](#variables) is used as an
-[rvalue](#lvalues,-rvalues-and-temporaries) the variable will either be moved
-or copied, depending on its type. All values whose type implements `Copy` are
-copied, all others are moved.
+[rvalue](#lvalues,-rvalues-and-temporaries), the variable will be copied
+if its type implements `Copy`. All others are moved.
### Literal expressions
```
# let mut x = 0;
# let y = 0;
-
x = y;
```
expression's captured environment.
In this example, we define a function `ten_times` that takes a higher-order
-function argument, and call it with a lambda expression as an argument:
+function argument, and we then call it with a lambda expression as an argument:
```
fn ten_times<F>(f: F) where F: Fn(i32) {
- let mut i = 0i32;
- while i < 10 {
- f(i);
- i += 1;
+ for index in 0..10 {
+ f(index);
}
}
```
type Pair<'a> = (i32, &'a str);
-let p: Pair<'static> = (10, "hello");
+let p: Pair<'static> = (10, "ten");
let (a, b) = p;
-assert!(b != "world");
-assert!(p.0 == 10);
+
+assert_eq!(a, 10);
+assert_eq!(b, "ten");
+assert_eq!(p.0, 10);
+assert_eq!(p.1, "ten");
```
For historical reasons and convenience, the tuple type with no elements (`()`)
Rust has two different types for a list of items:
-* `[T; N]`, an 'array'.
-* `&[T]`, a 'slice'.
+* `[T; N]`, an 'array'
+* `&[T]`, a 'slice'
An array has a fixed size, and can be allocated on either the stack or the
heap.
A slice is a 'view' into an array. It doesn't own the data it points
to, it borrows it.
-An example of each kind:
+Examples:
```{rust}
-let vec: Vec<i32> = vec![1, 2, 3];
-let arr: [i32; 3] = [1, 2, 3];
-let s: &[i32] = &vec[..];
+// A stack-allocated array
+let array: [i32; 3] = [1, 2, 3];
+
+// A heap-allocated array
+let vector: Vec<i32> = vec![1, 2, 3];
+
+// A slice into an array
+let slice: &[i32] = &vector[..];
```
As you can see, the `vec!` macro allows you to create a `Vec<T>` easily. The
`vec!` macro is also part of the standard library, rather than the language.
-All in-bounds elements of arrays, and slices are always initialized, and access
+All in-bounds elements of arrays and slices are always initialized, and access
to an array or slice is always bounds-checked.
### Structure types
#### Function types for specific items
-Internally to the compiler, there are also function types that are specific to a particular
+Internal to the compiler, there are also function types that are specific to a particular
function item. In the following snippet, for example, the internal types of the functions
`foo` and `bar` are different, despite the fact that they have the same signature:
* `FnMut`
: The closure can be called multiple times as mutable. A closure called as
- `FnMut` can mutate values from its environment. `FnMut` implies
- `FnOnce`.
+ `FnMut` can mutate values from its environment. `FnMut` inherits from
+ `FnOnce` (i.e. anything implementing `FnMut` also implements `FnOnce`).
* `Fn`
: The closure can be called multiple times through a shared reference.
A closure called as `Fn` can neither move out from nor mutate values
- from its environment. `Fn` implies `FnMut` and `FnOnce`.
+ from its environment. `Fn` inherits from `FnMut`, which itself
+ inherits from `FnOnce`.
### Trait objects
### Coercion sites
A coercion can only occur at certain coercion sites in a program; these are
-typically places where the desired type is explicit or can be dervied by
+typically places where the desired type is explicit or can be derived by
propagation from explicit types (without type inference). Possible coercion
sites are:
* `let` statements where an explicit type is given.
- In `let _: U = e;`, `e` is coerced to have type `U`.
+ For example, `128` is coerced to have type `i8` in the following:
+
+ ```rust
+ let _: i8 = 128;
+ ```
* `static` and `const` statements (similar to `let` statements).
-* arguments for function calls.
+* Arguments for function calls
+
+ The value being coerced is the actual parameter, and it is coerced to
+ the type of the formal parameter.
- The value being coerced is the
- actual parameter and it is coerced to the type of the formal parameter. For
- example, let `foo` be defined as `fn foo(x: U) { ... }` and call it as
- `foo(e);`. Then `e` is coerced to have type `U`;
+ For example, `128` is coerced to have type `i8` in the following:
-* instantiations of struct or variant fields.
+ ```rust
+ fn bar(_: i8) { }
- Assume we have a `struct
- Foo { x: U }` and instantiate it as `Foo { x: e }`. Then `e` is coerced to
- have type `U`.
+ fn main() {
+ bar(128);
+ }
+ ```
-* function results (either the final line of a block if it is not semicolon
-terminated or any expression in a `return` statement).
+* Instantiations of struct or variant fields
- In `fn foo() -> U { e }`, `e` is coerced to to have type `U`.
+ For example, `128` is coerced to have type `i8` in the following:
+
+ ```rust
+ struct Foo { x: i8 }
+
+ fn main() {
+ Foo { x: 128 };
+ }
+ ```
+
+* Function results, either the final line of a block if it is not
+ semicolon-terminated or any expression in a `return` statement
+
+ For example, `128` is coerced to have type `i8` in the following:
+
+ ```rust
+ fn foo() -> i8 {
+ 128
+ }
+ ```
If the expression in one of these coercion sites is a coercion-propagating
expression, then the relevant sub-expressions in that expression are also
coercion sites. Propagation recurses from these new coercion sites.
Propagating expressions and their relevant sub-expressions are:
-* array literals, where the array has type `[U; n]`. Each sub-expression in
+* Array literals, where the array has type `[U; n]`. Each sub-expression in
the array literal is a coercion site for coercion to type `U`.
-* array literals with repeating syntax, where the array has type `[U; n]`. The
+* Array literals with repeating syntax, where the array has type `[U; n]`. The
repeated sub-expression is a coercion site for coercion to type `U`.
-* tuples, where a tuple is a coercion site to type `(U_0, U_1, ..., U_n)`.
+* Tuples, where a tuple is a coercion site to type `(U_0, U_1, ..., U_n)`.
Each sub-expression is a coercion site to the respective type, e.g. the
zeroth sub-expression is a coercion site to type `U_0`.
-* parenthesised sub-expressions (`(e)`). If the expression has type `U`, then
+* Parenthesised sub-expressions (`(e)`): if the expression has type `U`, then
the sub-expression is a coercion site to `U`.
-* blocks. If a block has type `U`, then the last expression in the block (if
+* Blocks: if a block has type `U`, then the last expression in the block (if
it is not semicolon-terminated) is a coercion site to `U`. This includes
blocks which are part of control flow statements, such as `if`/`else`, if
the block has a known type.
Coercion is allowed between the following types:
-* `T` to `U` if `T` is a subtype of `U` (*reflexive case*).
+* `T` to `U` if `T` is a subtype of `U` (*reflexive case*)
* `T_1` to `T_3` where `T_1` coerces to `T_2` and `T_2` coerces to `T_3`
-(*transitive case*).
+(*transitive case*)
Note that this is not fully supported yet
-* `&mut T` to `&T`.
+* `&mut T` to `&T`
-* `*mut T` to `*const T`.
+* `*mut T` to `*const T`
-* `&T` to `*const T`.
+* `&T` to `*const T`
-* `&mut T` to `*mut T`.
+* `&mut T` to `*mut T`
* `&T` to `&U` if `T` implements `Deref<Target = U>`. For example:
-```rust
-use std::ops::Deref;
+ ```rust
+ use std::ops::Deref;
-struct CharContainer {
- value: char
-}
+ struct CharContainer {
+ value: char
+ }
-impl Deref for CharContainer {
- type Target = char;
+ impl Deref for CharContainer {
+ type Target = char;
- fn deref<'a>(&'a self) -> &'a char {
- &self.value
- }
-}
+ fn deref<'a>(&'a self) -> &'a char {
+ &self.value
+ }
+ }
-fn foo(arg: &char) {}
+ fn foo(arg: &char) {}
+
+ fn main() {
+ let x = &mut CharContainer { value: 'y' };
+ foo(x); //&mut CharContainer is coerced to &char.
+ }
+ ```
-fn main() {
- let x = &mut CharContainer { value: 'y' };
- foo(x); //&mut CharContainer is coerced to &char.
-}
-```
* `&mut T` to `&mut U` if `T` implements `DerefMut<Target = U>`.
* TyCtor(`T`) to TyCtor(coerce_inner(`T`)), where TyCtor(`T`) is one of
all compilation needs, and the other options are just available if more
fine-grained control is desired over the output format of a Rust crate.
-# Appendix: Rationales and design tradeoffs
+# Appendix: Rationales and design trade-offs
*TODO*.
a wide range of sources. Some of these are listed below (including elements
that have since been removed):
-* SML, OCaml: algebraic datatypes, pattern matching, type inference,
+* SML, OCaml: algebraic data types, pattern matching, type inference,
semicolon statement separation
* C++: references, RAII, smart pointers, move semantics, monomorphisation,
memory model
color: #428BCA;
}
+.section-header > a > code {
+ color: #8D1A38;
+}
+
/* Code highlighting */
pre.rust .kw { color: #8959A8; }
pre.rust .kw-2, pre.rust .prelude-ty { color: #4271AE; }
explicit conversions or other method calls would usually be necessary. See the
[overloading/implicits use case](#use-case:-limited-overloading-and/or-implicit-conversions)
below.
-* _Precise types_. Because generic give a _name_ to the specific type
+* _Precise types_. Because generics give a _name_ to the specific type
implementing a trait, it is possible to be precise about places where that
exact type is required or produced. For example, a function
* [Iterators](iterators.md)
* [Concurrency](concurrency.md)
* [Error Handling](error-handling.md)
+ * [Choosing your Guarantees](choosing-your-guarantees.md)
* [FFI](ffi.md)
* [Borrow and AsRef](borrow-and-asref.md)
* [Release Channels](release-channels.md)
* [Primitive Types](primitive-types.md)
* [Comments](comments.md)
* [if](if.md)
- * [for loops](for-loops.md)
- * [while loops](while-loops.md)
+ * [Loops](loops.md)
* [Ownership](ownership.md)
* [References and Borrowing](references-and-borrowing.md)
* [Lifetimes](lifetimes.md)
* [No stdlib](no-stdlib.md)
* [Intrinsics](intrinsics.md)
* [Lang items](lang-items.md)
- * [Link args](link-args.md)
+ * [Advanced linking](advanced-linking.md)
* [Benchmark Tests](benchmark-tests.md)
* [Box Syntax and Patterns](box-syntax-and-patterns.md)
* [Slice Patterns](slice-patterns.md)
* [Macros that work together](https://www.cs.utah.edu/plt/publications/jfp12-draft-fcdf.pdf)
* [Traits: composable units of behavior](http://scg.unibe.ch/archive/papers/Scha03aTraits.pdf)
* [Alias burying](http://www.cs.uwm.edu/faculty/boyland/papers/unique-preprint.ps) - We tried something similar and abandoned it.
-* [External uniqueness is unique enough](http://www.computingscience.nl/research/techreps/repo/CS-2002/2002-048.pdf)
+* [External uniqueness is unique enough](http://www.cs.uu.nl/research/techreps/UU-CS-2002-048.html)
* [Uniqueness and Reference Immutability for Safe Parallelism](https://research.microsoft.com/pubs/170528/msr-tr-2012-79.pdf)
* [Region Based Memory Management](http://www.cs.ucla.edu/~palsberg/tba/papers/tofte-talpin-iandc97.pdf)
* [Dynamic circular work stealing deque](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.170.1097&rep=rep1&type=pdf) - The Chase/Lev deque
* [Work-first and help-first scheduling policies for async-finish task parallelism](http://www.cs.rice.edu/%7Eyguo/pubs/PID824943.pdf) - More general than fully-strict work stealing
* [A Java fork/join calamity](http://www.coopsoft.com/ar/CalamityArticle.html) - critique of Java's fork/join library, particularly its application of work stealing to non-strict computation
-* [Scheduling techniques for concurrent systems](http://www.ece.rutgers.edu/%7Eparashar/Classes/ece572-papers/05/ps-ousterhout.pdf)
+* [Scheduling techniques for concurrent systems](http://www.stanford.edu/~ouster/cgi-bin/papers/coscheduling.pdf)
* [Contention aware scheduling](http://www.blagodurov.net/files/a8-blagodurov.pdf)
* [Balanced work stealing for time-sharing multicores](http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/papers/TR-12-1.pdf)
-* [Three layer cake](http://www.upcrc.illinois.edu/workshops/paraplop10/papers/paraplop10_submission_8.pdf)
+* [Three layer cake for shared-memory programming](http://dl.acm.org/citation.cfm?id=1953616&dl=ACM&coll=DL&CFID=524387192&CFTOKEN=44362705)
* [Non-blocking steal-half work queues](http://www.cs.bgu.ac.il/%7Ehendlerd/papers/p280-hendler.pdf)
* [Reagents: expressing and composing fine-grained concurrency](http://www.mpi-sws.org/~turon/reagents.pdf)
* [Algorithms for scalable synchronization of shared-memory multiprocessors](https://www.cs.rochester.edu/u/scott/papers/1991_TOCS_synch.pdf)
### Papers *about* Rust
-* [GPU programming in Rust](http://www.cs.indiana.edu/~eholk/papers/hips2013.pdf)
-* [Parallel closures: a new twist on an old idea](https://www.usenix.org/conference/hotpar12/parallel-closures-new-twist-old-idea) - not exactly about rust, but by nmatsakis
+* [GPU Programming in Rust: Implementing High Level Abstractions in a
+Systems Level
+Language](http://www.cs.indiana.edu/~eholk/papers/hips2013.pdf). Early GPU work by Eric Holk.
+* [Parallel closures: a new twist on an old
+ idea](https://www.usenix.org/conference/hotpar12/parallel-closures-new-twist-old-idea)
+ - not exactly about rust, but by nmatsakis
+* [Patina: A Formalization of the Rust Programming
+ Language](ftp://ftp.cs.washington.edu/tr/2015/03/UW-CSE-15-03-02.pdf). Early
+ formalization of a subset of the type system, by Eric Reed.
+* [Experience Report: Developing the Servo Web Browser Engine using
+ Rust](http://arxiv.org/abs/1505.07383). By Lars Bergstrom.
+* [Implementing a Generic Radix Trie in
+ Rust](https://michaelsproul.github.io/rust_radix_paper/rust-radix-sproul.pdf). Undergrad
+ paper by Michael Sproul.
+* [Reenix: Implementing a Unix-Like Operating System in
+ Rust](http://scialex.github.io/reenix.pdf). Undergrad paper by Alex
+ Light.
+* [Evaluation of performance and productivity metrics of potential
+ programming languages in the HPC environment](). Bachelor's thesis by
+ Florian Wilkens. Compares C, Go and Rust.
+* [Nom, a byte oriented, streaming, zero copy, parser combinators library
+ in Rust](http://spw15.langsec.org/papers/couprie-nom.pdf). By
+ Geoffroy Couprie, research for VLC.
+* [Graph-Based Higher-Order Intermediate
+ Representation](http://compilers.cs.uni-saarland.de/papers/lkh15_cgo.pdf). An
+ experimental IR implemented in Impala, a Rust-like language.
+* [Code Refinement of Stencil
+ Codes](http://compilers.cs.uni-saarland.de/papers/ppl14_web.pdf). Another
+ paper using Impala.
--- /dev/null
+% Advanced Linking
+
+The common cases of linking with Rust have been covered earlier in this book,
+but supporting the range of linking possibilities made available by other
+languages is important for Rust to achieve seamless interaction with native
+libraries.
+
+# Link args
+
+There is one other way to tell `rustc` how to customize linking, and that is via
+the `link_args` attribute. This attribute is applied to `extern` blocks and
+specifies raw flags which need to get passed to the linker when producing an
+artifact. An example usage would be:
+
+``` no_run
+#![feature(link_args)]
+
+#[link_args = "-foo -bar -baz"]
+extern {}
+# fn main() {}
+```
+
+Note that this feature is currently hidden behind the `feature(link_args)` gate
+because this is not a sanctioned way of performing linking. Right now `rustc`
+shells out to the system linker (`gcc` on most systems, `link.exe` on MSVC),
+so it makes sense to provide extra command line
+arguments, but this will not always be the case. In the future `rustc` may use
+LLVM directly to link native libraries, in which case `link_args` will have no
+meaning. You can achieve the same effect as the `link_args` attribute with the
+`-C link-args` argument to `rustc`.
+
+It is highly recommended to *not* use this attribute, and rather use the more
+formal `#[link(...)]` attribute on `extern` blocks instead.
+
+# Static linking
+
+Static linking refers to the process of creating output that contains all
+required libraries and so doesn't need libraries installed on every system where
+you want to use your compiled project. Pure-Rust dependencies are statically
+linked by default so you can use created binaries and libraries without
+installing Rust everywhere. By contrast, native libraries
+(e.g. `libc` and `libm`) are usually dynamically linked, but it is possible to
+change this and statically link them as well.
+
+Linking is a very platform dependent topic — on some platforms, static linking
+may not be possible at all! This section assumes some basic familiarity with
+linking on your platform of choice.
+
+## Linux
+
+By default, all Rust programs on Linux will link to the system `libc` along with
+a number of other libraries. Let's look at an example on a 64-bit Linux machine
+with GCC and `glibc` (by far the most common `libc` on Linux):
+
+``` text
+$ cat example.rs
+fn main() {}
+$ rustc example.rs
+$ ldd example
+ linux-vdso.so.1 => (0x00007ffd565fd000)
+ libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007fa81889c000)
+ libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007fa81867e000)
+ librt.so.1 => /lib/x86_64-linux-gnu/librt.so.1 (0x00007fa818475000)
+ libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007fa81825f000)
+ libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007fa817e9a000)
+ /lib64/ld-linux-x86-64.so.2 (0x00007fa818cf9000)
+ libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007fa817b93000)
+```
+
+Dynamic linking on Linux can be undesirable if you wish to use new library
+features on old systems or target systems which do not have the required
+dependencies for your program to run.
+
+Static linking is supported via an alternative `libc`, `musl` - this must be
+enabled at Rust compile-time with some prerequisites available. You can compile
+your own version of Rust with `musl` enabled and install it into a custom
+directory with the instructions below:
+
+```text
+$ mkdir musldist
+$ PREFIX=$(pwd)/musldist
+$
+$ # Build musl
+$ wget http://www.musl-libc.org/releases/musl-1.1.10.tar.gz
+[...]
+$ tar xf musl-1.1.10.tar.gz
+$ cd musl-1.1.10/
+musl-1.1.10 $ ./configure --disable-shared --prefix=$PREFIX
+[...]
+musl-1.1.10 $ make
+[...]
+musl-1.1.10 $ make install
+[...]
+musl-1.1.10 $ cd ..
+$ du -h musldist/lib/libc.a
+2.2M musldist/lib/libc.a
+$
+$ # Build libunwind.a
+$ wget http://llvm.org/releases/3.6.1/llvm-3.6.1.src.tar.xz
+$ tar xf llvm-3.6.1.src.tar.xz
+$ cd llvm-3.6.1.src/projects/
+llvm-3.6.1.src/projects $ svn co http://llvm.org/svn/llvm-project/libcxxabi/trunk/ libcxxabi
+llvm-3.6.1.src/projects $ svn co http://llvm.org/svn/llvm-project/libunwind/trunk/ libunwind
+llvm-3.6.1.src/projects $ sed -i 's#^\(include_directories\).*$#\0\n\1(../libcxxabi/include)#' libunwind/CMakeLists.txt
+llvm-3.6.1.src/projects $ mkdir libunwind/build
+llvm-3.6.1.src/projects $ cd libunwind/build
+llvm-3.6.1.src/projects/libunwind/build $ cmake -DLLVM_PATH=../../.. -DLIBUNWIND_ENABLE_SHARED=0 ..
+llvm-3.6.1.src/projects/libunwind/build $ make
+llvm-3.6.1.src/projects/libunwind/build $ cp lib/libunwind.a $PREFIX/lib/
+llvm-3.6.1.src/projects/libunwind/build $ cd ../../../../
+$ du -h musldist/lib/libunwind.a
+164K musldist/lib/libunwind.a
+$
+$ # Build musl-enabled rust
+$ git clone https://github.com/rust-lang/rust.git muslrust
+$ cd muslrust
+muslrust $ ./configure --target=x86_64-unknown-linux-musl --musl-root=$PREFIX --prefix=$PREFIX
+muslrust $ make
+muslrust $ make install
+muslrust $ cd ..
+$ du -h musldist/bin/rustc
+12K musldist/bin/rustc
+```
+
+You now have a build of a `musl`-enabled Rust! Because we've installed it to a
+custom prefix we need to make sure our system can find the binaries and appropriate
+libraries when we try and run it:
+
+```text
+$ export PATH=$PREFIX/bin:$PATH
+$ export LD_LIBRARY_PATH=$PREFIX/lib:$LD_LIBRARY_PATH
+```
+
+Let's try it out!
+
+```text
+$ echo 'fn main() { println!("hi!"); panic!("failed"); }' > example.rs
+$ rustc --target=x86_64-unknown-linux-musl example.rs
+$ ldd example
+ not a dynamic executable
+$ ./example
+hi!
+thread '<main>' panicked at 'failed', example.rs:1
+```
+
+Success! This binary can be copied to almost any Linux machine with the same
+machine architecture and run without issues.
+
+`cargo build` also permits the `--target` option so you should be able to build
+your crates as normal. However, you may need to recompile your native libraries
+against `musl` before they can be linked against.
--- /dev/null
+% Choosing your Guarantees
+
+One important feature of Rust as a language is that it lets us control the costs and guarantees
+of a program.
+
+There are various “wrapper type” abstractions in the Rust standard library which embody
+a multitude of trade-offs between cost, ergonomics, and guarantees. Many let one choose between
+run time and compile time enforcement. This section will explain a few selected abstractions in
+detail.
+
+Before proceeding, it is highly recommended that one reads about [ownership][ownership] and
+[borrowing][borrowing] in Rust.
+
+[ownership]: ownership.html
+[borrowing]: references-and-borrowing.html
+
+# Basic pointer types
+
+## `Box<T>`
+
+[`Box<T>`][box] is a pointer which is “owned”, or a “box”. While it can hand
+out references to the contained data, it is the only owner of the data. In particular, when
+something like the following occurs:
+
+```rust
+let x = Box::new(1);
+let y = x;
+// x no longer accessible here
+```
+
+Here, the box was _moved_ into `y`. As `x` no longer owns it, the compiler will no longer allow the
+programmer to use `x` after this. A box can similarly be moved _out_ of a function by returning it.
+
+When a box (that hasn't been moved) goes out of scope, destructors are run. These destructors take
+care of deallocating the inner data.
+
+This is a zero-cost abstraction for dynamic allocation. If you want to allocate some memory on the
+heap and safely pass around a pointer to that memory, this is ideal. Note that you will only be
+allowed to share references to this by the regular borrowing rules, checked at compile time.
+
+[box]: ../std/boxed/struct.Box.html
+
+## `&T` and `&mut T`
+
+These are immutable and mutable references respectively. They follow the “read-write lock”
+pattern, such that one may either have only one mutable reference to some data, or any number of
+immutable ones, but not both. This guarantee is enforced at compile time, and has no visible cost at
+runtime. In most cases these two pointer types suffice for sharing cheap references between sections
+of code.
+
+These pointers cannot be copied in such a way that they outlive the lifetime associated with them.
+
+## `*const T` and `*mut T`
+
+These are C-like raw pointers with no lifetime or ownership attached to them. They just point to
+some location in memory with no other restrictions. The only guarantee that these provide is that
+they cannot be dereferenced except in code marked `unsafe`.
+
+These are useful when building safe, low cost abstractions like `Vec<T>`, but should be avoided in
+safe code.
+
+## `Rc<T>`
+
+This is the first wrapper we will cover that has a runtime cost.
+
+[`Rc<T>`][rc] is a reference counted pointer. In other words, this lets us have multiple "owning"
+pointers to the same data, and the data will be dropped (destructors will be run) when all pointers
+are out of scope.
+
+Internally, it contains a shared “reference count” (also called “refcount”),
+which is incremented each time the `Rc` is cloned, and decremented each time one of the `Rc`s goes
+out of scope. The main responsibility of `Rc<T>` is to ensure that destructors are called for shared
+data.
+
+The internal data here is immutable, and if a cycle of references is created, the data will be
+leaked. If we want data that doesn't leak when there are cycles, we need a garbage collector.
+
+#### Guarantees
+
+The main guarantee provided here is that the data will not be destroyed until all references to it
+are out of scope.
+
+This should be used when we wish to dynamically allocate and share some data (read-only) between
+various portions of your program, where it is not certain which portion will finish using the pointer
+last. It's a viable alternative to `&T` when `&T` is either impossible to statically check for
+correctness, or creates extremely unergonomic code where the programmer does not wish to spend the
+development cost of working with.
+
+This pointer is _not_ thread safe, and Rust will not let it be sent or shared with other threads.
+This lets one avoid the cost of atomics in situations where they are unnecessary.
+
+There is a sister smart pointer to this one, `Weak<T>`. This is a non-owning, but also non-borrowed,
+smart pointer. It is also similar to `&T`, but it is not restricted in lifetime—a `Weak<T>`
+can be held on to forever. However, it is possible that an attempt to access the inner data may fail
+and return `None`, since this can outlive the owned `Rc`s. This is useful for cyclic
+data structures and other things.
+
+#### Cost
+
+As far as memory goes, `Rc<T>` is a single allocation, though it will allocate two extra words (i.e.
+two `usize` values) as compared to a regular `Box<T>` (for "strong" and "weak" refcounts).
+
+`Rc<T>` has the computational cost of incrementing/decrementing the refcount whenever it is cloned
+or goes out of scope respectively. Note that a clone will not do a deep copy, rather it will simply
+increment the inner reference count and return a copy of the `Rc<T>`.
+
+[rc]: ../std/rc/struct.Rc.html
+
+# Cell types
+
+`Cell`s provide interior mutability. In other words, they contain data which can be manipulated even
+if the type cannot be obtained in a mutable form (for example, when it is behind an `&`-ptr or
+`Rc<T>`).
+
+[The documentation for the `cell` module has a pretty good explanation for these][cell-mod].
+
+These types are _generally_ found in struct fields, but they may be found elsewhere too.
+
+## `Cell<T>`
+
+[`Cell<T>`][cell] is a type that provides zero-cost interior mutability, but only for `Copy` types.
+Since the compiler knows that all the data owned by the contained value is on the stack, there's
+no worry of leaking any data behind references (or worse!) by simply replacing the data.
+
+It is still possible to violate your own invariants using this wrapper, so be careful when using it.
+If a field is wrapped in `Cell`, it's a nice indicator that the chunk of data is mutable and may not
+stay the same between the time you first read it and when you intend to use it.
+
+```rust
+use std::cell::Cell;
+
+let x = Cell::new(1);
+let y = &x;
+let z = &x;
+x.set(2);
+y.set(3);
+z.set(4);
+println!("{}", x.get());
+```
+
+Note that here we were able to mutate the same value from various immutable references.
+
+This has the same runtime cost as the following:
+
+```rust,ignore
+let mut x = 1;
+let y = &mut x;
+let z = &mut x;
+x = 2;
+*y = 3;
+*z = 4;
+println!("{}", x);
+```
+
+but it has the added benefit of actually compiling successfully.
+
+#### Guarantees
+
+This relaxes the “no aliasing with mutability” restriction in places where it's
+unnecessary. However, this also relaxes the guarantees that the restriction provides; so if your
+invariants depend on data stored within `Cell`, you should be careful.
+
+This is useful for mutating primitives and other `Copy` types when there is no easy way of
+doing it in line with the static rules of `&` and `&mut`.
+
+`Cell` does not let you obtain interior references to the data, which makes it safe to freely
+mutate.
+
+#### Cost
+
+There is no runtime cost to using `Cell<T>`, however if you are using it to wrap larger (`Copy`)
+structs, it might be worthwhile to instead wrap individual fields in `Cell<T>` since each write is
+otherwise a full copy of the struct.
+
+
+## `RefCell<T>`
+
+[`RefCell<T>`][refcell] also provides interior mutability, but isn't restricted to `Copy` types.
+
+Instead, it has a runtime cost. `RefCell<T>` enforces the read-write lock pattern at runtime (it's
+like a single-threaded mutex), unlike `&T`/`&mut T` which do so at compile time. This is done by the
+`borrow()` and `borrow_mut()` functions, which modify an internal reference count and return smart
+pointers which can be dereferenced immutably and mutably respectively. The refcount is restored when
+the smart pointers go out of scope. With this system, we can dynamically ensure that there are never
+any other borrows active when a mutable borrow is active. If the programmer attempts to make such a
+borrow, the thread will panic.
+
+```rust
+use std::cell::RefCell;
+
+let x = RefCell::new(vec![1,2,3,4]);
+{
+ println!("{:?}", *x.borrow())
+}
+
+{
+ let mut my_ref = x.borrow_mut();
+ my_ref.push(1);
+}
+```
+
+Similar to `Cell`, this is mainly useful for situations where it's hard or impossible to satisfy the
+borrow checker. Generally we know that such mutations won't happen in a nested form, but it's good
+to check.
+
+For large, complicated programs, it becomes useful to put some things in `RefCell`s to make things
+simpler. For example, a lot of the maps in [the `ctxt` struct][ctxt] in the rust compiler internals
+are inside this wrapper. These are only modified once (during creation, which is not right after
+initialization) or a couple of times in well-separated places. However, since this struct is
+pervasively used everywhere, juggling mutable and immutable pointers would be hard (perhaps
+impossible) and probably form a soup of `&`-ptrs which would be hard to extend. On the other hand,
+the `RefCell` provides a cheap (not zero-cost) way of safely accessing these. In the future, if
+someone adds some code that attempts to modify the cell when it's already borrowed, it will cause a
+(usually deterministic) panic which can be traced back to the offending borrow.
+
+Similarly, in Servo's DOM there is a lot of mutation, most of which is local to a DOM type, but some
+of which crisscrosses the DOM and modifies various things. Using `RefCell` and `Cell` to guard all
+mutation lets us avoid worrying about mutability everywhere, and it simultaneously highlights the
+places where mutation is _actually_ happening.
+
+Note that `RefCell` should be avoided if a mostly simple solution is possible with `&` pointers.
+
+#### Guarantees
+
+`RefCell` relaxes the _static_ restrictions preventing aliased mutation, and replaces them with
+_dynamic_ ones. As such the guarantees have not changed.
+
+#### Cost
+
+`RefCell` does not allocate, but it contains an additional "borrow state"
+indicator (one word in size) along with the data.
+
+At runtime each borrow causes a modification/check of the refcount.
+
+[cell-mod]: ../std/cell/
+[cell]: ../std/cell/struct.Cell.html
+[refcell]: ../std/cell/struct.RefCell.html
+[ctxt]: ../rustc/middle/ty/struct.ctxt.html
+
+# Synchronous types
+
+Many of the types above cannot be used in a threadsafe manner. Particularly, `Rc<T>` and
+`RefCell<T>`, which both use non-atomic reference counts (_atomic_ reference counts are those which
+can be incremented from multiple threads without causing a data race), cannot be used this way. This
+makes them cheaper to use, but we need thread safe versions of these too. They exist, in the form of
+`Arc<T>` and `Mutex<T>`/`RwLock<T>`.
+
+Note that the non-threadsafe types _cannot_ be sent between threads, and this is checked at compile
+time.
+
+There are many useful wrappers for concurrent programming in the [sync][sync] module, but only the
+major ones will be covered below.
+
+[sync]: ../std/sync/index.html
+
+## `Arc<T>`
+
+[`Arc<T>`][arc] is just a version of `Rc<T>` that uses an atomic reference count (hence, "Arc").
+This can be sent freely between threads.
+
+C++'s `shared_ptr` is similar to `Arc`, however in the case of C++ the inner data is always mutable.
+For semantics similar to that from C++, we should use `Arc<Mutex<T>>`, `Arc<RwLock<T>>`, or
+`Arc<UnsafeCell<T>>`[^4] (`UnsafeCell<T>` is a cell type that can be used to hold any data and has
+no runtime cost, but accessing it requires `unsafe` blocks). The last one should only be used if we
+are certain that the usage won't cause any memory unsafety. Remember that writing to a struct is not
+an atomic operation, and many functions like `vec.push()` can reallocate internally and cause unsafe
+behavior, so even monotonicity may not be enough to justify `UnsafeCell`.
+
+[^4]: `Arc<UnsafeCell<T>>` actually won't compile since `UnsafeCell<T>` isn't `Send` or `Sync`, but we can wrap it in a type and implement `Send`/`Sync` for it manually to get `Arc<Wrapper<T>>` where `Wrapper` is `struct Wrapper<T>(UnsafeCell<T>)`.
+
+#### Guarantees
+
+Like `Rc`, this provides the (thread safe) guarantee that the destructor for the internal data will
+be run when the last `Arc` goes out of scope (barring any cycles).
+
+#### Cost
+
+This has the added cost of using atomics for changing the refcount (which will happen whenever it is
+cloned or goes out of scope). When sharing data from an `Arc` in a single thread, it is preferable
+to share `&` pointers whenever possible.
+
+[arc]: ../std/sync/struct.Arc.html
+
+## `Mutex<T>` and `RwLock<T>`
+
+[`Mutex<T>`][mutex] and [`RwLock<T>`][rwlock] provide mutual-exclusion via RAII guards (guards are
+objects which maintain some state, like a lock, until their destructor is called). For both of
+these, the mutex is opaque until we call `lock()` on it, at which point the thread will block
+until a lock can be acquired, and then a guard will be returned. This guard can be used to access
+the inner data (mutably), and the lock will be released when the guard goes out of scope.
+
+```rust,ignore
+{
+ let guard = mutex.lock();
+ // guard dereferences mutably to the inner type
+ *guard += 1;
+} // lock released when destructor runs
+```
+
+
+`RwLock` has the added benefit of being efficient for multiple reads. It is always safe to have
+multiple readers to shared data as long as there are no writers; and `RwLock` lets readers acquire a
+"read lock". Such locks can be acquired concurrently and are kept track of via a reference count.
+Writers must obtain a "write lock" which can only be obtained when all readers have gone out of
+scope.
+
+#### Guarantees
+
+Both of these provide safe shared mutability across threads, however they are prone to deadlocks.
+Some level of additional protocol safety can be obtained via the type system.
+
+#### Costs
+
+These use internal atomic-like types to maintain the locks, which are pretty costly (they can block
+all memory reads across processors till they're done). Waiting on these locks can also be slow when
+there's a lot of concurrent access happening.
+
+[rwlock]: ../std/sync/struct.RwLock.html
+[mutex]: ../std/sync/struct.Mutex.html
+[sessions]: https://github.com/Munksgaard/rust-sessions
+
+# Composition
+
+A common gripe when reading Rust code is with types like `Rc<RefCell<Vec<T>>>` (or even more
+complicated compositions of such types). It's not always clear what the composition does, or why the
+author chose one like this (and when one should be using such a composition in one's own code).
+
+Usually, it's a case of composing together the guarantees that you need, without paying for stuff
+that is unnecessary.
+
+For example, `Rc<RefCell<T>>` is one such composition. `Rc<T>` itself can't be dereferenced mutably;
+because `Rc<T>` provides sharing and shared mutability can lead to unsafe behavior, so we put
+`RefCell<T>` inside to get dynamically verified shared mutability. Now we have shared mutable data,
+but it's shared in a way that there can only be one mutator (and no readers) or multiple readers.
+
+Now, we can take this a step further, and have `Rc<RefCell<Vec<T>>>` or `Rc<Vec<RefCell<T>>>`. These
+are both shareable, mutable vectors, but they're not the same.
+
+With the former, the `RefCell<T>` is wrapping the `Vec<T>`, so the `Vec<T>` in its entirety is
+mutable. At the same time, there can only be one mutable borrow of the whole `Vec` at a given time.
+This means that your code cannot simultaneously work on different elements of the vector from
+different `Rc` handles. However, we are able to push and pop from the `Vec<T>` at will. This is
+similar to an `&mut Vec<T>` with the borrow checking done at runtime.
+
+With the latter, the borrowing is of individual elements, but the overall vector is immutable. Thus,
+we can independently borrow separate elements, but we cannot push or pop from the vector. This is
+similar to an `&mut [T]`[^3], but, again, the borrow checking is at runtime.
+
+In concurrent programs, we have a similar situation with `Arc<Mutex<T>>`, which provides shared
+mutability and ownership.
+
+When reading code that uses these, go in step by step and look at the guarantees/costs provided.
+
+When choosing a composed type, we must do the reverse; figure out which guarantees we want, and at
+which point of the composition we need them. For example, if there is a choice between
+`Vec<RefCell<T>>` and `RefCell<Vec<T>>`, we should figure out the tradeoffs as done above and pick
+one.
+
+[^3]: `&[T]` and `&mut [T]` are _slices_; they consist of a pointer and a length and can refer to a portion of a vector or array. `&mut [T]` can have its elements mutated, however its length cannot be touched.
}
```
+There is another style of doc comment, `//!`, which documents the containing
+item (e.g. a crate, module or function) instead of the item following it. It is
+commonly used inside the crate root (lib.rs) or a module root (mod.rs):
+
+```
+//! # The Rust Standard Library
+//!
+//! The Rust Standard Library provides the essential runtime
+//! functionality for building portable Rust software.
+```
+
When writing doc comments, providing some examples of usage is very, very
helpful. You’ll notice we’ve used a new macro here: `assert_eq!`. This compares
two values, and `panic!`s if they’re not equal to each other. It’s very helpful
("I", 1)];
let text = match args {
- [TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
+ [TtToken(_, token::Ident(s, _))] => s.to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
- let name = token::get_ident(it.ident);
- if name.get() == "lintme" {
+ if it.ident.name == "lintme" {
cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'");
}
}
concurrent code at compile time.
Before we talk about the concurrency features that come with Rust, it's important
-to understand something: Rust is low-level enough that all of this is provided
-by the standard library, not by the language. This means that if you don't like
-some aspect of the way Rust handles concurrency, you can implement an alternative
-way of doing things. [mio](https://github.com/carllerche/mio) is a real-world
-example of this principle in action.
+to understand something: Rust is low-level enough that the vast majority of
+this is provided by the standard library, not by the language. This means that
+if you don't like some aspect of the way Rust handles concurrency, you can
+implement an alternative way of doing things.
+[mio](https://github.com/carllerche/mio) is a real-world example of this
+principle in action.
## Background: `Send` and `Sync`
use std::thread;
fn main() {
- let mut data = vec![1u32, 2, 3];
+ let mut data = vec![1, 2, 3];
for i in 0..3 {
thread::spawn(move || {
use std::sync::Mutex;
fn main() {
- let mut data = Mutex::new(vec![1u32, 2, 3]);
+ let mut data = Mutex::new(vec![1, 2, 3]);
for i in 0..3 {
let data = data.lock().unwrap();
use std::thread;
fn main() {
- let data = Arc::new(Mutex::new(vec![1u32, 2, 3]));
+ let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data = data.clone();
# use std::sync::{Arc, Mutex};
# use std::thread;
# fn main() {
-# let data = Arc::new(Mutex::new(vec![1u32, 2, 3]));
+# let data = Arc::new(Mutex::new(vec![1, 2, 3]));
# for i in 0..3 {
# let data = data.clone();
thread::spawn(move || {
use std::sync::mpsc;
fn main() {
- let data = Arc::new(Mutex::new(0u32));
+ let data = Arc::new(Mutex::new(0));
let (tx, rx) = mpsc::channel();
let tx = tx.clone();
thread::spawn(move || {
- let answer = 42u32;
+ let answer = 42;
tx.send(answer);
});
Goodbye in English: Goodbye.
```
+`pub` also applies to `struct`s and their member fields. In keeping with Rust’s
+tendency toward safety, simply making a `struct` public won't automatically
+make its members public: you must mark the fields individually with `pub`.
+
Now that our functions are public, we can use them. Great! However, typing out
`phrases::english::greetings::hello()` is very long and repetitive. Rust has
another keyword for importing names into the current scope, so that you can
`::foo::bar()`, it refers to a different `foo`, an absolute path from your
crate root.
-Also, note that we `pub use`d before we declared our `mod`s. Rust requires that
-`use` declarations go first.
-
This will build and run:
```bash
# struct Philosopher {
# name: String,
# }
-#
+#
# impl Philosopher {
# fn new(name: &str) -> Philosopher {
# Philosopher {
# }
# }
# }
-#
+#
fn main() {
let p1 = Philosopher::new("Judith Butler");
let p2 = Philosopher::new("Gilles Deleuze");
```rust
struct Philosopher {
name: String,
-}
+}
-impl Philosopher {
+impl Philosopher {
fn new(name: &str) -> Philosopher {
Philosopher {
name: name.to_string(),
}
}
-
+
fn eat(&self) {
println!("{} is done eating.", self.name);
}
struct Philosopher {
name: String,
-}
+}
-impl Philosopher {
+impl Philosopher {
fn new(name: &str) -> Philosopher {
Philosopher {
name: name.to_string(),
}
}
-
+
fn eat(&self) {
println!("{} is eating.", self.name);
struct Philosopher {
name: String,
-}
+}
-impl Philosopher {
+impl Philosopher {
fn new(name: &str) -> Philosopher {
Philosopher {
name: name.to_string(),
While this is only five lines, they’re a dense five. Let’s break it down.
```rust,ignore
-let handles: Vec<_> =
+let handles: Vec<_> =
```
We introduce a new binding, called `handles`. We’ve given it this name because
We have multi-threading!
```text
+Judith Butler is eating.
Gilles Deleuze is eating.
-Gilles Deleuze is done eating.
+Karl Marx is eating.
Emma Goldman is eating.
-Emma Goldman is done eating.
Michel Foucault is eating.
-Judith Butler is eating.
Judith Butler is done eating.
-Karl Marx is eating.
+Gilles Deleuze is done eating.
Karl Marx is done eating.
+Emma Goldman is done eating.
Michel Foucault is done eating.
```
```
This code generates documentation that looks [like this][rc-new]. I've left the
-implementation out, with a regular comment in its place. That's the first thing
-to notice about this annotation: it uses `///`, instead of `//`. The triple slash
+implementation out, with a regular comment in its place.
+
+The first thing to notice about this annotation is that it uses
+`///` instead of `//`. The triple slash
indicates a documentation comment.
Documentation comments are written in Markdown.
#### Special sections
Next, are special sections. These are indicated with a header, `#`. There
-are three kinds of headers that are commonly used. They aren't special syntax,
+are four kinds of headers that are commonly used. They aren't special syntax,
just convention, for now.
```rust
# fn foo() {}
```
-Third, `Examples`. Include one or more examples of using your function or
+Fourth, `Examples`. Include one or more examples of using your function or
method, and your users will love you for it. These examples go inside of
code block annotations, which we'll talk about in a moment, and can have
more than one section:
$ cargo test
```
-That's right, `cargo test` tests embedded documentation too. However,
+That's right, `cargo test` tests embedded documentation too. However,
`cargo test` will not test binary crates, only library ones. This is
due to the way `rustdoc` works: it links against the library to be tested,
but with a binary, there’s nothing to link to.
state. Another example is using the `unreachable!()` macro:
```rust,ignore
+use Event::NewRelease;
+
enum Event {
NewRelease,
}
}
fn main() {
- std::io::println(descriptive_probability(NewRelease));
+ println!("{}", descriptive_probability(NewRelease));
}
```
In these cases access to Rust data structures inside the callbacks is
especially unsafe and proper synchronization mechanisms must be used.
Besides classical synchronization mechanisms like mutexes, one possibility in
-Rust is to use channels (in `std::comm`) to forward data from the C thread
-that invoked the callback into a Rust thread.
+Rust is to use channels (in `std::sync::mpsc`) to forward data from the C
+thread that invoked the callback into a Rust thread.
If an asynchronous callback targets a special object in the Rust address space
it is also absolutely necessary that no more callbacks are performed by the
Note that frameworks are only available on OSX targets.
The different `kind` values are meant to differentiate how the native library
-participates in linkage. From a linkage perspective, the rust compiler creates
+participates in linkage. From a linkage perspective, the Rust compiler creates
two flavors of artifacts: partial (rlib/staticlib) and final (dylib/binary).
Native dynamic library and framework dependencies are propagated to the final
artifact boundary, while static library dependencies are not propagated at
A few examples of how this model can be used are:
* A native build dependency. Sometimes some C/C++ glue is needed when writing
- some rust code, but distribution of the C/C++ code in a library format is just
+ some Rust code, but distribution of the C/C++ code in a library format is just
a burden. In this case, the code will be archived into `libfoo.a` and then the
- rust crate would declare a dependency via `#[link(name = "foo", kind =
+ Rust crate would declare a dependency via `#[link(name = "foo", kind =
"static")]`.
Regardless of the flavor of output for the crate, the native static library
* A normal dynamic dependency. Common system libraries (like `readline`) are
available on a large number of systems, and often a static copy of these
- libraries cannot be found. When this dependency is included in a rust crate,
+ libraries cannot be found. When this dependency is included in a Rust crate,
partial targets (like rlibs) will not link to the library, but when the rlib
is included in a final target (like a binary), the native library will be
linked in.
# FFI and panics
-It’s important to be mindful of `panic!`s when working with FFI. This code,
-when called from C, will `abort`:
-
-```rust
-#[no_mangle]
-pub extern fn oh_no() -> ! {
- panic!("Oops!");
-}
-# fn main() {}
-```
-
-If you’re writing code that may panic, you should run it in another thread,
-so that the panic doesn’t bubble up to C:
+It’s important to be mindful of `panic!`s when working with FFI. A `panic!`
+across an FFI boundary is undefined behavior. If you’re writing code that may
+panic, you should run it in another thread, so that the panic doesn’t bubble up
+to C:
```rust
use std::thread;
+++ /dev/null
-% for Loops
-
-The `for` loop is used to loop a particular number of times. Rust’s `for` loops
-work a bit differently than in other systems languages, however. Rust’s `for`
-loop doesn’t look like this “C-style” `for` loop:
-
-```c
-for (x = 0; x < 10; x++) {
- printf( "%d\n", x );
-}
-```
-
-Instead, it looks like this:
-
-```rust
-for x in 0..10 {
- println!("{}", x); // x: i32
-}
-```
-
-In slightly more abstract terms,
-
-```ignore
-for var in expression {
- code
-}
-```
-
-The expression is an [iterator][iterator]. The iterator gives back a series of
-elements. Each element is one iteration of the loop. That value is then bound
-to the name `var`, which is valid for the loop body. Once the body is over, the
-next value is fetched from the iterator, and we loop another time. When there
-are no more values, the `for` loop is over.
-
-[iterator]: iterators.html
-
-In our example, `0..10` is an expression that takes a start and an end position,
-and gives an iterator over those values. The upper bound is exclusive, though,
-so our loop will print `0` through `9`, not `10`.
-
-Rust does not have the “C-style” `for` loop on purpose. Manually controlling
-each element of the loop is complicated and error prone, even for experienced C
-developers.
-
-# Enumerate
-
-When you need to keep track of how many times you already looped, you can use the `.enumerate()` function.
-
-## On ranges:
-
-```rust
-for (i,j) in (5..10).enumerate() {
- println!("i = {} and j = {}", i, j);
-}
-```
-
-Outputs:
-
-```text
-i = 0 and j = 5
-i = 1 and j = 6
-i = 2 and j = 7
-i = 3 and j = 8
-i = 4 and j = 9
-```
-
-Don't forget to add the parentheses around the range.
-
-## On iterators:
-
-```rust
-# let lines = "hello\nworld".lines();
-for (linenumber, line) in lines.enumerate() {
- println!("{}: {}", linenumber, line);
-}
-```
-
-Outputs:
-
-```text
-0: Content of line one
-1: Content of line two
-2: Content of line tree
-3: Content of line four
-```
Not every Rustacean has a background in systems programming, nor in computer
science, so we've added explanations of terms that might be unfamiliar.
-### Arity
-
-Arity refers to the number of arguments a function or operation takes.
-
-```rust
-let x = (2, 3);
-let y = (4, 6);
-let z = (8, 2, 6);
-```
-
-In the example above `x` and `y` have arity 2. `z` has arity 3.
-
### Abstract Syntax Tree
-When a compiler is compiling your program, it does a number of different
-things. One of the things that it does is turn the text of your program into an
-‘abstract syntax tree’, or ‘AST’. This tree is a representation of the
-structure of your program. For example, `2 + 3` can be turned into a tree:
+When a compiler is compiling your program, it does a number of different things.
+One of the things that it does is turn the text of your program into an
+‘abstract syntax tree’, or ‘AST’. This tree is a representation of the structure
+of your program. For example, `3 + 4` can be turned into a tree:
```text
+
/ \
3 4
```
+
+### Arity
+
+Arity refers to the number of arguments a function or operation takes.
+
+```rust
+let x = (2, 3);
+let y = (4, 6);
+let z = (8, 2, 6);
+```
+
+In the example above `x` and `y` have arity 2. `z` has arity 3.
+
+### Expression
+
+In computer programming, an expression is a combination of values, constants,
+variables, operators and functions that evaluate to a single value. For example,
+`2 + (3 * 4)` is an expression that returns the value 14. It is worth noting
+that expressions can have side-effects. For example, a function included in an
+expression might perform actions other than simply returning a value.
+
+### Expression-Oriented Language
+
+In early programming languages, [expressions][expression] and
+[statements][statement] were two separate syntactic categories: expressions had
+a value and statements did things. However, later languages blurred this
+distinction, allowing expressions to do things and statements to have a value.
+In an expression-oriented language, (nearly) every statement is an expression
+and therefore returns a value. Consequently, these expression statements can
+themselves form part of larger expressions.
+
+[expression]: glossary.html#expression
+[statement]: glossary.html#statement
+
+### Statement
+
+In computer programming, a statement is the smallest standalone element of a
+programming language that commands a computer to perform an action.
We’ll need to take user input, and then print the result as output. As such, we
need the `io` library from the standard library. Rust only imports a few things
-into every program, [the ‘prelude’][prelude]. If it’s not in the prelude,
-you’ll have to `use` it directly.
+by default into every program, [the ‘prelude’][prelude]. If it’s not in the
+prelude, you’ll have to `use` it directly.
[prelude]: ../std/prelude/index.html
The `[dependencies]` section of `Cargo.toml` is like the `[package]` section:
everything that follows it is part of it, until the next section starts.
Cargo uses the dependencies section to know what dependencies on external
-crates you have, and what versions you require. In this case, we’ve used version `0.3.0`.
+crates you have, and what versions you require. In this case, we’ve specified version `0.3.0`,
+which Cargo understands to be any release that’s compatible with this specific version.
Cargo understands [Semantic Versioning][semver], which is a standard for writing version
-numbers. If we wanted to use the latest version we could use `*` or we could use a range
-of versions. [Cargo’s documentation][cargodoc] contains more details.
+numbers. If we wanted to use only `0.3.0` exactly, we could use `=0.3.0`. If we
+wanted to use the latest version we could use `*`, or we could use a range of
+versions. [Cargo’s documentation][cargodoc] contains more details.
[semver]: http://semver.org
[cargodoc]: http://doc.crates.io/crates-io.html
we’re in. Because we `use rand::Rng`’d above, it has a `gen_range()` method
available. This method takes two arguments, and generates a number between
them. It’s inclusive on the lower bound, but exclusive on the upper bound,
-so we need `1` and `101` to get a number between one and a hundred.
+so we need `1` and `101` to get a number ranging from one to a hundred.
[concurrency]: concurrency.html
[cratesio]: http://doc.crates.io
Cargo manages three things: building your code, downloading the dependencies
-your code needs, and building those dependencies. At first, your
-program doesn’t have any dependencies, so we’ll only be using the first part of
-its functionality. Eventually, we’ll add more. Since we started off by using
-Cargo, it'll be easy to add later.
+your code needs, and building those dependencies. At first, your program doesn’t
+have any dependencies, so we’ll only be using the first part of its
+functionality. Eventually, we’ll add more. Since we started off by using Cargo,
+it'll be easy to add later.
-If you installed Rust via the official installers you will also have Cargo. If
-you installed Rust some other way, you may want to [check the Cargo
+If we installed Rust via the official installers we will also have Cargo. If we
+installed Rust some other way, we may want to [check the Cargo
README][cargoreadme] for specific instructions about installing it.
[cargoreadme]: https://github.com/rust-lang/cargo#installing-cargo-from-nightlies
Let’s convert Hello World to Cargo.
-To Cargo-ify our project, we need to do two things: Make a `Cargo.toml`
-configuration file, and put our source file in the right place. Let's
-do that part first:
+To Cargo-ify our project, we need to do three things: Make a `Cargo.toml`
+configuration file, put our source file in the right place, and get rid of the
+old executable (`main.exe` on Windows, `main` everywhere else). Let's do that part first:
```bash
$ mkdir src
$ mv main.rs src/main.rs
+$ rm main # or main.exe on Windows
```
-Note that since we're creating an executable, we used `main.rs`. If we
-want to make a library instead, we should use `lib.rs`. This convention is required
-for Cargo to successfully compile our projects, but it can be overridden if we wish.
-Custom file locations for the entry point can be specified
-with a [`[lib]` or `[[bin]]`][crates-custom] key in the TOML file.
+Note that since we're creating an executable, we retain `main.rs` as the source
+filename. If we want to make a library instead, we should use `lib.rs`. This
+convention is used by Cargo to successfully compile our projects, but it can be
+overridden if we wish. Custom file locations for the entry point can be
+specified with a [`[lib]` or `[[bin]]`][crates-custom] key in the TOML file.
[crates-custom]: http://doc.crates.io/manifest.html#configuring-a-target
authors = [ "Your name <you@example.com>" ]
```
-This file is in the [TOML][toml] format. TOML is similar to INI, but has some
-extra goodies. According to the TOML docs,
+This file is in the [TOML][toml] format. TOML is similar to INI, but has some
+extra goodies. According to the TOML docs,
> TOML aims to be a minimal configuration file format that's easy to read due
> to obvious semantics. TOML is designed to map unambiguously to a hash table.
[toml]: https://github.com/toml-lang/toml
-Once you have this file in place, we should be ready to build! To do so, run:
+Once we have this file in place in our project's root directory, we should be
+ready to build! To do so, run:
```bash
$ cargo build
[allocation]: the-stack-and-the-heap.html
-Finally, the line ends with a semicolon (`;`). Rust is an ‘expression oriented’
-language, which means that most things are expressions, rather than statements.
-The `;` is used to indicate that this expression is over, and the next one is
-ready to begin. Most lines of Rust code end with a `;`.
+Finally, the line ends with a semicolon (`;`). Rust is an [‘expression oriented’
+language][expression-oriented language], which means that most things are
+expressions, rather than statements. The `;` is used to indicate that this
+expression is over, and the next one is ready to begin. Most lines of Rust code
+end with a `;`.
+
+[expression-oriented language]: glossary.html#expression-oriented-language
Finally, actually compiling and running our program. We can compile with our
compiler, `rustc`, by passing it the name of our source file:
If you would like to use real operands in this position, however,
you are required to put curly braces `{}` around the register that
you want, and you are required to put the specific size of the
-operand. This is useful for very low level programming, where
+operand. This is useful for very low level programming, where
which register you use is important:
```rust
println!("eax is currently {}", result);
# }
```
+
+## More Information
+
+The current implementation of the `asm!` macro is a direct binding to [LLVM's
+inline assembler expressions][llvm-docs], so be sure to check out [their
+documentation as well][llvm-docs] for more information about clobbers,
+constraints, etc.
+
+[llvm-docs]: http://llvm.org/docs/LangRef.html#inline-assembler-expressions
The first step to using Rust is to install it! There are a number of ways to
install Rust, but the easiest is to use the `rustup` script. If you're on Linux
-or a Mac, all you need to do is this:
+or a Mac, all you need to do is this:
> Note: you don't need to type in the `$`s, they just indicate the start of
> each command. You’ll see many tutorials and examples around the web that
[insecurity]: http://curlpipesh.tumblr.com
If you're on Windows, please download the appropriate [installer][install-page].
+**NOTE:** By default, the Windows installer will not add Rust to the %PATH%
+system variable. If this is the only version of Rust you are installing and you
+want to be able to run it from the command line, click on "Advanced" on the
+install dialog and on the "Product Features" page ensure "Add to PATH" is
+installed on the local hard drive.
+
[install-page]: http://www.rust-lang.org/install.html
If you used the Windows installer, just re-run the `.msi` and it will give you
an uninstall option.
+## That disclaimer we promised
+
Some people, and somewhat rightfully so, get very upset when we tell you to
`curl | sh`. Basically, when you do this, you are trusting that the good
people who maintain Rust aren't going to hack your computer and do bad things.
[from-source]: https://github.com/rust-lang/rust#building-from-source
+## Platform support
+
Oh, we should also mention the officially supported platforms:
* Windows (7, 8, Server 2008 R2)
does not work, it is a bug. Please let us know if that happens. Each and every
commit is tested against Windows just like any other platform.
+## After installation
+
If you've got Rust installed, you can open up a shell, and type this:
```bash
If you did, Rust has been installed successfully! Congrats!
+If you didn't and you're on Windows, check that Rust is in your %PATH% system
+variable. If it isn't, run the installer again, select "Change" on the "Change,
+repair, or remove installation" page and ensure "Add to PATH" is installed on
+the local hard drive.
+
This installer also installs a copy of the documentation locally, so you can
read it offline. On UNIX systems, `/usr/local/share/doc/rust` is the location.
On Windows, it's in a `share/doc` directory, inside wherever you installed Rust
[irc]: irc://irc.mozilla.org/#rust
[mibbit]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust
-[users]: http://users.rust-lang.org/
+[users]: http://users.rust-lang.org/
[stackoverflow]: http://stackoverflow.com/questions/tagged/rust
via a declaration like
```rust
-# #![feature(intrinsics)]
+#![feature(intrinsics)]
# fn main() {}
extern "rust-intrinsic" {
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
```
Note the use of `abort`: the `exchange_malloc` lang item is assumed to
i32` as ‘a mutable reference to an i32’ and `&'a mut i32` as ‘a mutable
reference to an `i32` with the lifetime `'a`’.
+# In `struct`s
+
You’ll also need explicit lifetimes when working with [`struct`][structs]s:
```rust
uses it. So why do we need a lifetime here? We need to ensure that any reference
to a `Foo` cannot outlive the reference to an `i32` it contains.
+## `impl` blocks
+
+Let’s implement a method on `Foo`:
+
+```rust
+struct Foo<'a> {
+ x: &'a i32,
+}
+
+impl<'a> Foo<'a> {
+ fn x(&self) -> &'a i32 { self.x }
+}
+
+fn main() {
+ let y = &5; // this is the same as `let _y = 5; let y = &_y;`
+ let f = Foo { x: y };
+
+ println!("x is: {}", f.x());
+}
+```
+
+As you can see, we need to declare a lifetime for `Foo` in the `impl` line. We repeat
+`'a` twice, just like on functions: `impl<'a>` defines a lifetime `'a`, and `Foo<'a>`
+uses it.
+
+## Multiple lifetimes
+
If you have multiple references, you can use the same lifetime multiple times:
```rust
+++ /dev/null
-% Link args
-
-There is one other way to tell rustc how to customize linking, and that is via
-the `link_args` attribute. This attribute is applied to `extern` blocks and
-specifies raw flags which need to get passed to the linker when producing an
-artifact. An example usage would be:
-
-``` no_run
-#![feature(link_args)]
-
-#[link_args = "-foo -bar -baz"]
-extern {}
-# fn main() {}
-```
-
-Note that this feature is currently hidden behind the `feature(link_args)` gate
-because this is not a sanctioned way of performing linking. Right now rustc
-shells out to the system linker, so it makes sense to provide extra command line
-arguments, but this will not always be the case. In the future rustc may use
-LLVM directly to link native libraries in which case `link_args` will have no
-meaning.
-
-It is highly recommended to *not* use this attribute, and rather use the more
-formal `#[link(...)]` attribute on `extern` blocks instead.
-
--- /dev/null
+% Loops
+
+Rust currently provides three approaches to performing some kind of iterative activity. They are: `loop`, `while` and `for`. Each approach has its own set of uses.
+
+## loop
+
+The infinite `loop` is the simplest form of loop available in Rust. Using the keyword `loop`, Rust provides a way to loop indefinitely until some terminating statement is reached. Rust's infinite `loop`s look like this:
+
+```rust,ignore
+loop {
+ println!("Loop forever!");
+}
+```
+
+## while
+
+Rust also has a `while` loop. It looks like this:
+
+```rust
+let mut x = 5; // mut x: i32
+let mut done = false; // mut done: bool
+
+while !done {
+ x += x - 3;
+
+ println!("{}", x);
+
+ if x % 5 == 0 {
+ done = true;
+ }
+}
+```
+
+`while` loops are the correct choice when you’re not sure how many times
+you need to loop.
+
+If you need an infinite loop, you may be tempted to write this:
+
+```rust,ignore
+while true {
+```
+
+However, `loop` is far better suited to handle this case:
+
+```rust,ignore
+loop {
+```
+
+Rust’s control-flow analysis treats this construct differently than a `while
+true`, since we know that it will always loop. In general, the more information
+we can give to the compiler, the better it can do with safety and code
+generation, so you should always prefer `loop` when you plan to loop
+infinitely.
+
+## for
+
+The `for` loop is used to loop a particular number of times. Rust’s `for` loops
+work a bit differently than in other systems languages, however. Rust’s `for`
+loop doesn’t look like this “C-style” `for` loop:
+
+```c
+for (x = 0; x < 10; x++) {
+ printf( "%d\n", x );
+}
+```
+
+Instead, it looks like this:
+
+```rust
+for x in 0..10 {
+ println!("{}", x); // x: i32
+}
+```
+
+In slightly more abstract terms,
+
+```ignore
+for var in expression {
+ code
+}
+```
+
+The expression is an [iterator][iterator]. The iterator gives back a series of
+elements. Each element is one iteration of the loop. That value is then bound
+to the name `var`, which is valid for the loop body. Once the body is over, the
+next value is fetched from the iterator, and we loop another time. When there
+are no more values, the `for` loop is over.
+
+[iterator]: iterators.html
+
+In our example, `0..10` is an expression that takes a start and an end position,
+and gives an iterator over those values. The upper bound is exclusive, though,
+so our loop will print `0` through `9`, not `10`.
+
+Rust does not have the “C-style” `for` loop on purpose. Manually controlling
+each element of the loop is complicated and error prone, even for experienced C
+developers.
+
+### Enumerate
+
+When you need to keep track of how many times you already looped, you can use the `.enumerate()` function.
+
+#### On ranges:
+
+```rust
+for (i,j) in (5..10).enumerate() {
+ println!("i = {} and j = {}", i, j);
+}
+```
+
+Outputs:
+
+```text
+i = 0 and j = 5
+i = 1 and j = 6
+i = 2 and j = 7
+i = 3 and j = 8
+i = 4 and j = 9
+```
+
+Don't forget to add the parentheses around the range.
+
+#### On iterators:
+
+```rust
+# let lines = "hello\nworld".lines();
+for (linenumber, line) in lines.enumerate() {
+ println!("{}: {}", linenumber, line);
+}
+```
+
+Outputs:
+
+```text
+0: Content of line one
+1: Content of line two
+2: Content of line three
+3: Content of line four
+```
+
+## Ending iteration early
+
+Let’s take a look at that `while` loop we had earlier:
+
+```rust
+let mut x = 5;
+let mut done = false;
+
+while !done {
+ x += x - 3;
+
+ println!("{}", x);
+
+ if x % 5 == 0 {
+ done = true;
+ }
+}
+```
+
+We had to keep a dedicated `mut` boolean variable binding, `done`, to know
+when we should exit out of the loop. Rust has two keywords to help us with
+modifying iteration: `break` and `continue`.
+
+In this case, we can write the loop in a better way with `break`:
+
+```rust
+let mut x = 5;
+
+loop {
+ x += x - 3;
+
+ println!("{}", x);
+
+ if x % 5 == 0 { break; }
+}
+```
+
+We now loop forever with `loop` and use `break` to break out early. Issuing an explicit `return` statement will also serve to terminate the loop early.
+
+`continue` is similar, but instead of ending the loop, goes to the next
+iteration. This will only print the odd numbers:
+
+```rust
+for x in 0..10 {
+ if x % 2 == 0 { continue; }
+
+ println!("{}", x);
+}
+```
+
+## Loop labels
+
+You may also encounter situations where you have nested loops and need to
+specify which one your `break` or `continue` statement is for. Like most
+other languages, by default a `break` or `continue` will apply to the innermost
+loop. In a situation where you would like to `break` or `continue` for one
+of the outer loops, you can use labels to specify which loop the `break` or
+`continue` statement applies to. This will only print when both `x` and `y` are
+odd:
+
+```rust
+'outer: for x in 0..10 {
+ 'inner: for y in 0..10 {
+ if x % 2 == 0 { continue 'outer; } // continues the loop over x
+ if y % 2 == 0 { continue 'inner; } // continues the loop over y
+ println!("x: {}, y: {}", x, y);
+ }
+}
+```
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
# // fn main() {} tricked you, rustdoc!
```
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
# // fn main() {} tricked you, rustdoc!
```
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
+# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
# #[start] fn start(argc: isize, argv: *const *const u8) -> isize { 0 }
# fn main() {}
```
# Ownership
[Variable bindings][bindings] have a property in Rust: they ‘have ownership’
-of what they’re bound to. This means that when a binding goes out of scope, the
-resource that they’re bound to are freed. For example:
+of what they’re bound to. This means that when a binding goes out of scope,
+Rust will free the bound resources. For example:
```rust
fn foo() {
}
```
-# Ignoring variants
+# Ignoring bindings
-If you’re matching on an enum which has variants, you can use `..` to
-ignore the value and type in the variant:
+You can use `_` in a pattern to disregard the type and value.
+For example, here’s a `match` against a `Result<T, E>`:
```rust
-enum OptionalInt {
- Value(i32),
+# let some_value: Result<i32, &'static str> = Err("There was an error");
+match some_value {
+ Ok(value) => println!("got a value: {}", value),
+ Err(_) => println!("an error occurred"),
+}
+```
+
+In the first arm, we bind the value inside the `Ok` variant to `value`. But
+in the `Err` arm, we use `_` to disregard the specific error, and just print
+a general error message.
+
+`_` is valid in any pattern that creates a binding. This can be useful to
+ignore parts of a larger structure:
+
+```rust
+fn coordinate() -> (i32, i32, i32) {
+ // generate and return some sort of triple tuple
+# (1, 2, 3)
+}
+
+let (x, _, z) = coordinate();
+```
+
+Here, we bind the first and last element of the tuple to `x` and `z`, but
+ignore the middle element.
+
+Similarly, you can use `..` in a pattern to disregard multiple values.
+
+```rust
+enum OptionalTuple {
+ Value(i32, i32, i32),
Missing,
}
-let x = OptionalInt::Value(5);
+let x = OptionalTuple::Value(5, -2, 3);
match x {
- OptionalInt::Value(..) => println!("Got an int!"),
- OptionalInt::Missing => println!("No such luck."),
+ OptionalTuple::Value(..) => println!("Got a tuple!"),
+ OptionalTuple::Missing => println!("No such luck."),
}
```
-This prints `Got an int!`.
+This prints `Got a tuple!`.
# Guards
the borrow ‘doesn’t live long enough’ because it’s not valid for the right
amount of time.
-The same problem occurs when the reference is declared _before_ the variable it refers to:
+The same problem occurs when the reference is declared _before_ the variable it
+refers to. This is because resources within the same scope are freed in the
+opposite order they were declared:
```rust,ignore
let y: &i32;
println!("{}", y);
}
```
+
+In the above example, `y` is declared before `x`, meaning that `y` lives longer
+than `x`, which is not allowed.
Additionally, testing against nightly can catch regressions even sooner, and so
if you don’t mind a third build, we’d appreciate testing against all channels.
+As an example, many Rust programmers use [Travis](https://travis-ci.org/) to
+test their crates, which is free for open source projects. Travis [supports
+Rust directly][travis], and you can use a `.travis.yml` file like this to
+test on all channels:
+
+```yaml
+language: rust
+rust:
+ - nightly
+ - beta
+ - stable
+
+matrix:
+ allow_failures:
+ - rust: nightly
+```
+
+[travis]: http://docs.travis-ci.com/user/languages/rust/
+
+With this configuration, Travis will test all three channels, but if something
+breaks on nightly, it won’t fail your build. A similar configuration is
+recommended for any CI system, check the documentation of the one you’re
+using for more details.
The second change is the `use` declaration. Because we're in an inner module,
we need to bring our test function into scope. This can be annoying if you have
-a large module, and so this is a common use of the `glob` feature. Let's change
-our `src/lib.rs` to make use of it:
+a large module, and so this is a common use of globs. Let's change our
+`src/lib.rs` to make use of it:
```rust,ignore
-
pub fn add_two(a: i32) -> i32 {
a + 2
}
visualize what’s going on with memory. Your operating system presents a view of
memory to your program that’s pretty simple: a huge list of addresses, from 0
to a large number, representing how much RAM your computer has. For example, if
-you have a gigabyte of RAM, your addresses go from `0` to `1,073,741,824`. That
+you have a gigabyte of RAM, your addresses go from `0` to `1,073,741,823`. That
number comes from 2<sup>30</sup>, the number of bytes in a gigabyte.
This memory is kind of like a giant array: addresses start at zero and go
| 1 | a | 5 |
| 0 | x | 42 |
-And then `foo()` ends, leaving just `main()`
+And then `foo()` ends, leaving just `main()`:
| Address | Name | Value |
|---------|------|-------|
default. The LIFO model of the stack is simpler, at a fundamental level. This
has two big impacts: runtime efficiency and semantic impact.
-## Runtime Efficiency.
+## Runtime Efficiency
Managing the memory for the stack is trivial: The machine just
increments or decrements a single value, the so-called “stack pointer”.
[wilson]: http://www.cs.northwestern.edu/~pdinda/icsclass/doc/dsa.pdf
-## Semantic impact
+## Semantic impact
Stack-allocation impacts the Rust language itself, and thus the developer’s
mental model. The LIFO semantics is what drives how the Rust language handles
```rust
trait Foo {
- fn bar(&self);
+ fn is_valid(&self) -> bool;
- fn baz(&self) { println!("We called baz."); }
+ fn is_invalid(&self) -> bool { !self.is_valid() }
}
```
-Implementors of the `Foo` trait need to implement `bar()`, but they don’t
-need to implement `baz()`. They’ll get this default behavior. They can
+Implementors of the `Foo` trait need to implement `is_valid()`, but they don’t
+need to implement `is_invalid()`. They’ll get this default behavior. They can
override the default if they so choose:
```rust
# trait Foo {
-# fn bar(&self);
-# fn baz(&self) { println!("We called baz."); }
+# fn is_valid(&self) -> bool;
+#
+# fn is_invalid(&self) -> bool { !self.is_valid() }
# }
struct UseDefault;
impl Foo for UseDefault {
- fn bar(&self) { println!("We called bar."); }
+ fn is_valid(&self) -> bool {
+ println!("Called UseDefault.is_valid.");
+ true
+ }
}
struct OverrideDefault;
impl Foo for OverrideDefault {
- fn bar(&self) { println!("We called bar."); }
+ fn is_valid(&self) -> bool {
+ println!("Called OverrideDefault.is_valid.");
+ true
+ }
- fn baz(&self) { println!("Override baz!"); }
+ fn is_invalid(&self) -> bool {
+ println!("Called OverrideDefault.is_invalid!");
+ true // this implementation is a self-contradiction!
+ }
}
let default = UseDefault;
-default.baz(); // prints "We called baz."
+assert!(!default.is_invalid()); // prints "Called UseDefault.is_valid."
let over = OverrideDefault;
-over.baz(); // prints "Override baz!"
+assert!(over.is_invalid()); // prints "Called OverrideDefault.is_invalid!"
```
# Inheritance
than normal code does.
Let’s go over the syntax, and then we’ll talk semantics. `unsafe` is used in
-two contexts. The first one is to mark a function as unsafe:
+four contexts. The first one is to mark a function as unsafe:
```rust
unsafe fn danger_will_robinson() {
- // scary stuff
+ // scary stuff
}
```
}
```
+The third is for unsafe traits:
+
+```rust
+unsafe trait Scary { }
+```
+
+And the fourth is for `impl`ementing one of those traits:
+
+```rust
+# unsafe trait Scary { }
+unsafe impl Scary for i32 {}
+```
+
It’s important to be able to explicitly delineate code that may have bugs that
cause big problems. If a Rust program segfaults, you can be sure it’s somewhere
in the sections marked `unsafe`.
# What does ‘safe’ mean?
-Safe, in the context of Rust, means “doesn’t do anything unsafe.” Easy!
+Safe, in the context of Rust, means ‘doesn’t do anything unsafe’. It’s also
+important to know that there are certain behaviors that are probably not
+desirable in your code, but are expressly _not_ unsafe:
-Okay, let’s try again: what is not safe to do? Here’s a list:
+* Deadlocks
+* Leaks of memory or other resources
+* Exiting without calling destructors
+* Integer overflow
+
+Rust cannot prevent all kinds of software problems. Buggy code can and will be
+written in Rust. These things aren’t great, but they don’t qualify as `unsafe`
+specifically.
+
+In addition, the following are all undefined behaviors in Rust, and must be
+avoided, even when writing `unsafe` code:
* Data races
* Dereferencing a null/dangling raw pointer
[undef]: http://llvm.org/docs/LangRef.html#undefined-values
[aliasing]: http://llvm.org/docs/LangRef.html#pointer-aliasing-rules
-Whew! That’s a bunch of stuff. It’s also important to notice all kinds of
-behaviors that are certainly bad, but are expressly _not_ unsafe:
-
-* Deadlocks
-* Reading data from private fields
-* Leaks due to reference count cycles
-* Exiting without calling destructors
-* Sending signals
-* Accessing/modifying the file system
-* Integer overflow
-
-Rust cannot prevent all kinds of software problems. Buggy code can and will be
-written in Rust. These things aren’t great, but they don’t qualify as `unsafe`
-specifically.
-
# Unsafe Superpowers
In both unsafe functions and unsafe blocks, Rust will let you do three things
That’s it. It’s important that `unsafe` does not, for example, ‘turn off the
borrow checker’. Adding `unsafe` to some random Rust code doesn’t change its
-semantics, it won’t just start accepting anything.
+semantics, it won’t just start accepting anything. But it will let you write
+things that _do_ break some of the rules.
+
+You will also encounter the `unsafe` keyword when writing bindings to foreign
+(non-Rust) interfaces. You're encouraged to write a safe, native Rust interface
+around the methods provided by the library.
-But it will let you write things that _do_ break some of the rules. Let’s go
-over these three abilities in order.
+Let’s go over the basic three abilities listed, in order.
## Access or update a `static mut`
+++ /dev/null
-% while Loops
-
-Rust also has a `while` loop. It looks like this:
-
-```rust
-let mut x = 5; // mut x: i32
-let mut done = false; // mut done: bool
-
-while !done {
- x += x - 3;
-
- println!("{}", x);
-
- if x % 5 == 0 {
- done = true;
- }
-}
-```
-
-`while` loops are the correct choice when you’re not sure how many times
-you need to loop.
-
-If you need an infinite loop, you may be tempted to write this:
-
-```rust,ignore
-while true {
-```
-
-However, Rust has a dedicated keyword, `loop`, to handle this case:
-
-```rust,ignore
-loop {
-```
-
-Rust’s control-flow analysis treats this construct differently than a `while
-true`, since we know that it will always loop. In general, the more information
-we can give to the compiler, the better it can do with safety and code
-generation, so you should always prefer `loop` when you plan to loop
-infinitely.
-
-## Ending iteration early
-
-Let’s take a look at that `while` loop we had earlier:
-
-```rust
-let mut x = 5;
-let mut done = false;
-
-while !done {
- x += x - 3;
-
- println!("{}", x);
-
- if x % 5 == 0 {
- done = true;
- }
-}
-```
-
-We had to keep a dedicated `mut` boolean variable binding, `done`, to know
-when we should exit out of the loop. Rust has two keywords to help us with
-modifying iteration: `break` and `continue`.
-
-In this case, we can write the loop in a better way with `break`:
-
-```rust
-let mut x = 5;
-
-loop {
- x += x - 3;
-
- println!("{}", x);
-
- if x % 5 == 0 { break; }
-}
-```
-
-We now loop forever with `loop` and use `break` to break out early.
-
-`continue` is similar, but instead of ending the loop, goes to the next
-iteration. This will only print the odd numbers:
-
-```rust
-for x in 0..10 {
- if x % 2 == 0 { continue; }
-
- println!("{}", x);
-}
-```
-
-Both `continue` and `break` are valid in both `while` loops and [`for` loops][for].
-
-[for]: for-loops.html
--regex-Rust=/^[ \t]*(pub[ \t]+)?enum[ \t]+([a-zA-Z0-9_]+)/\2/g,enum,enumeration names/
--regex-Rust=/^[ \t]*(pub[ \t]+)?struct[ \t]+([a-zA-Z0-9_]+)/\2/s,structure names/
--regex-Rust=/^[ \t]*(pub[ \t]+)?mod[ \t]+([a-zA-Z0-9_]+)/\2/m,modules,module names/
---regex-Rust=/^[ \t]*(pub[ \t]+)?static[ \t]+([a-zA-Z0-9_]+)/\2/c,consts,static constants/
---regex-Rust=/^[ \t]*(pub[ \t]+)?trait[ \t]+([a-zA-Z0-9_]+)/\2/t,traits,traits/
---regex-Rust=/^[ \t]*(pub[ \t]+)?impl([ \t\n]*<[^>]*>)?[ \t]+(([a-zA-Z0-9_:]+)[ \t]*(<[^>]*>)?[ \t]+(for)[ \t]+)?([a-zA-Z0-9_]+)/\4 \6 \7/i,impls,trait implementations/
+--regex-Rust=/^[ \t]*(pub[ \t]+)?(static|const)[ \t]+(mut[ \t]+)?([a-zA-Z0-9_]+)/\4/c,consts,static constants/
+--regex-Rust=/^[ \t]*(pub[ \t]+)?(unsafe[ \t]+)?trait[ \t]+([a-zA-Z0-9_]+)/\3/t,traits,traits/
+--regex-Rust=/^[ \t]*(pub[ \t]+)?(unsafe[ \t]+)?impl([ \t\n]*<[^>]*>)?[ \t]+(([a-zA-Z0-9_:]+)[ \t]*(<[^>]*>)?[ \t]+(for)[ \t]+)?([a-zA-Z0-9_]+)/\5 \7 \8/i,impls,trait implementations/
--regex-Rust=/^[ \t]*macro_rules![ \t]+([a-zA-Z0-9_]+)/\1/d,macros,macro definitions/
SLICE_FIELD_NAMES = [SLICE_FIELD_NAME_DATA_PTR, SLICE_FIELD_NAME_LENGTH]
# std::Vec<> related constants
-STD_VEC_FIELD_NAME_DATA_PTR = "ptr"
STD_VEC_FIELD_NAME_LENGTH = "len"
-STD_VEC_FIELD_NAME_CAPACITY = "cap"
-STD_VEC_FIELD_NAMES = [STD_VEC_FIELD_NAME_DATA_PTR,
- STD_VEC_FIELD_NAME_LENGTH,
- STD_VEC_FIELD_NAME_CAPACITY]
+STD_VEC_FIELD_NAME_BUF = "buf"
+STD_VEC_FIELD_NAMES = [STD_VEC_FIELD_NAME_BUF,
+ STD_VEC_FIELD_NAME_LENGTH]
# std::String related constants
STD_STRING_FIELD_NAMES = ["vec"]
def extract_length_ptr_and_cap_from_std_vec(vec_val):
assert vec_val.type.get_type_kind() == TYPE_KIND_STD_VEC
length_field_index = STD_VEC_FIELD_NAMES.index(STD_VEC_FIELD_NAME_LENGTH)
- ptr_field_index = STD_VEC_FIELD_NAMES.index(STD_VEC_FIELD_NAME_DATA_PTR)
- cap_field_index = STD_VEC_FIELD_NAMES.index(STD_VEC_FIELD_NAME_CAPACITY)
+ buf_field_index = STD_VEC_FIELD_NAMES.index(STD_VEC_FIELD_NAME_BUF)
length = vec_val.get_child_at_index(length_field_index).as_integer()
- vec_ptr_val = vec_val.get_child_at_index(ptr_field_index)
- capacity = vec_val.get_child_at_index(cap_field_index).as_integer()
+ buf = vec_val.get_child_at_index(buf_field_index)
+ vec_ptr_val = buf.get_child_at_index(0)
+ capacity = buf.get_child_at_index(1).as_integer()
unique_ptr_val = vec_ptr_val.get_child_at_index(0)
data_ptr = unique_ptr_val.get_child_at_index(0)
assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR
errcode_map = {}
error_re = re.compile("(E\d\d\d\d)")
+# In the register_long_diagnostics! macro, entries look like this:
+#
+# EXXXX: r##"
+# <Long diagnostic message>
+# "##,
+#
+# These two variables are for detecting the beginning and end of diagnostic
+# messages so that duplicate error codes are not reported when a code occurs
+# inside a diagnostic message
+long_diag_begin = "r##\""
+long_diag_end = "\"##"
+
for (dirpath, dirnames, filenames) in os.walk(src_dir):
if "src/test" in dirpath or "src/llvm" in dirpath:
# Short circuit for fast
path = os.path.join(dirpath, filename)
with open(path, 'r') as f:
+ inside_long_diag = False
for line_num, line in enumerate(f, start=1):
+ if inside_long_diag:
+ # Skip duplicate error code checking for this line
+ if long_diag_end in line:
+ inside_long_diag = False
+ continue
+
match = error_re.search(line)
if match:
errcode = match.group(1)
else:
errcode_map[errcode] = new_record
+ if long_diag_begin in line:
+ inside_long_diag = True
+
errors = False
all_errors = []
f = open(sys.argv[1], 'wb')
-components = sys.argv[2].split(' ')
-components = [i for i in components if i] # ignore extra whitespaces
+components = sys.argv[2].split() # splits on whitespace
enable_static = sys.argv[3]
-llconfig = sys.argv[4]
+llvm_config = sys.argv[4]
f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
out, err = proc.communicate()
if err:
- print("failed to run llconfig: args = `{}`".format(args))
+ print("failed to run llvm_config: args = `{}`".format(args))
print(err)
sys.exit(1)
return out
f.write("\n")
# LLVM libs
-args = [llconfig, '--libs', '--system-libs']
+args = [llvm_config, '--libs', '--system-libs']
args.extend(components)
out = run(args)
f.write(")]\n")
# LLVM ldflags
-out = run([llconfig, '--ldflags'])
+out = run([llvm_config, '--ldflags'])
for lib in out.strip().split(' '):
if lib[:2] == "-l":
f.write("#[link(name = \"" + lib[2:] + "\")]\n")
# C++ runtime library
-out = run([llconfig, '--cxxflags'])
+out = run([llvm_config, '--cxxflags'])
if enable_static == '1':
assert('stdlib=libc++' not in out)
f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
download_unpack_base = os.path.join(download_dir_base, "unpack")
snapshot_files = {
+ "bitrig": ["bin/rustc"],
+ "dragonfly": ["bin/rustc"],
+ "freebsd": ["bin/rustc"],
"linux": ["bin/rustc"],
"macos": ["bin/rustc"],
- "winnt": ["bin/rustc.exe"],
- "freebsd": ["bin/rustc"],
- "dragonfly": ["bin/rustc"],
- "bitrig": ["bin/rustc"],
+ "netbsd": ["bin/rustc"],
"openbsd": ["bin/rustc"],
+ "winnt": ["bin/rustc.exe"],
}
winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll", "libstdc++-6.dll"]
return "dragonfly"
if os_name == "bitrig":
return "bitrig"
+ if os_name == "netbsd":
+ return "netbsd"
if os_name == "openbsd":
return "openbsd"
return "linux"
}
}
- pub fn to_title(c: char) -> [char; 3] {
- match bsearch_case_table(c, to_titlecase_table) {
- None => [c, '\\0', '\\0'],
- Some(index) => to_titlecase_table[index].1
- }
- }
-
fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> {
match table.binary_search_by(|&(key, _)| {
if c == key { Equal }
emit_table(f, "to_uppercase_table",
sorted(to_upper.iteritems(), key=operator.itemgetter(0)),
is_pub=False, t_type = t_type, pfun=pfun)
- emit_table(f, "to_titlecase_table",
- sorted(to_title.iteritems(), key=operator.itemgetter(0)),
- is_pub=False, t_type = t_type, pfun=pfun)
f.write("}\n\n")
def emit_grapheme_module(f, grapheme_table, grapheme_cats):
-0.12.0-10860-g082e4763615bdbe7b4dd3dfd6fc2210b7773edf5
+0.12.0-12255-g9a92aaf19a64603b02b4130fe52958cc12488900
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
-use core::intrinsics::drop_in_place;
+use core::intrinsics::{drop_in_place, abort};
use core::mem;
use core::nonzero::NonZero;
use core::ops::{Deref, CoerceUnsized};
+use core::ptr;
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
+use core::{usize, isize};
use heap::deallocate;
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
/// An atomically reference counted wrapper for shared state.
///
/// # Examples
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> { }
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> { }
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct ArcInner<T: ?Sized> {
strong: atomic::AtomicUsize,
+
+ // the value usize::MAX acts as a sentinel for temporarily "locking" the
+ // ability to upgrade weak pointers or downgrade strong ones; this is used
+ // to avoid races in `make_unique` and `get_mut`.
weak: atomic::AtomicUsize,
+
data: T,
}
/// # Examples
///
/// ```
- /// # #![feature(arc_weak)]
+ /// #![feature(arc_weak)]
+ ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
#[unstable(feature = "arc_weak",
reason = "Weak pointers may not belong in this module.")]
pub fn downgrade(&self) -> Weak<T> {
- // See the clone() impl for why this is relaxed
- self.inner().weak.fetch_add(1, Relaxed);
- Weak { _ptr: self._ptr }
+ loop {
+ // This Relaxed is OK because we're checking the value in the CAS
+ // below.
+ let cur = self.inner().weak.load(Relaxed);
+
+ // check if the weak counter is currently "locked"; if so, spin.
+ if cur == usize::MAX { continue }
+
+ // NOTE: this code currently ignores the possibility of overflow
+ // into usize::MAX; in general both Rc and Arc need to be adjusted
+ // to deal with overflow.
+
+ // Unlike with Clone(), we need this to be an Acquire read to
+ // synchronize with the write coming from `is_unique`, so that the
+ // events prior to that write happen before this read.
+ if self.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
+ return Weak { _ptr: self._ptr }
+ }
+ }
}
/// Get the number of weak references to this value.
#[deprecated(since = "1.2.0", reason = "renamed to Arc::strong_count")]
pub fn strong_count<T: ?Sized>(this: &Arc<T>) -> usize { Arc::strong_count(this) }
-
-/// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
-///
-/// Returns `None` if the `Arc<T>` is not unique.
-///
-/// This function is marked **unsafe** because it is racy if weak pointers
-/// are active.
-///
-/// # Examples
-///
-/// ```
-/// # #![feature(arc_unique, alloc)]
-/// extern crate alloc;
-/// # fn main() {
-/// use alloc::arc::{Arc, get_mut};
-///
-/// # unsafe {
-/// let mut x = Arc::new(3);
-/// *get_mut(&mut x).unwrap() = 4;
-/// assert_eq!(*x, 4);
-///
-/// let _y = x.clone();
-/// assert!(get_mut(&mut x).is_none());
-/// # }
-/// # }
-/// ```
-#[inline]
-#[unstable(feature = "arc_unique")]
-#[deprecated(since = "1.2.0",
- reason = "this function is unsafe with weak pointers")]
-pub unsafe fn get_mut<T: ?Sized>(this: &mut Arc<T>) -> Option<&mut T> {
- // FIXME(#24880) potential race with upgraded weak pointers here
- if Arc::strong_count(this) == 1 && Arc::weak_count(this) == 0 {
- // This unsafety is ok because we're guaranteed that the pointer
- // returned is the *only* pointer that will ever be returned to T. Our
- // reference count is guaranteed to be 1 at this point, and we required
- // the Arc itself to be `mut`, so we're returning the only possible
- // reference to the inner data.
- let inner = &mut **this._ptr;
- Some(&mut inner.data)
- } else {
- None
- }
-}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
/// Makes a clone of the `Arc<T>`.
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- self.inner().strong.fetch_add(1, Relaxed);
+ let old_size = self.inner().strong.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+ // and users will use-after free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size > MAX_REFCOUNT {
+ unsafe { abort(); }
+ }
+
Arc { _ptr: self._ptr }
}
}
/// Make a mutable reference from the given `Arc<T>`.
///
/// This is also referred to as a copy-on-write operation because the inner
- /// data is cloned if the reference count is greater than one.
- ///
- /// This method is marked **unsafe** because it is racy if weak pointers
- /// are active.
+ /// data is cloned if the (strong) reference count is greater than one. If
+ /// we hold the only strong reference, any existing weak references will no
+ /// longer be upgradeable.
///
/// # Examples
///
/// ```
- /// # #![feature(arc_unique)]
+ /// #![feature(arc_unique)]
+ ///
/// use std::sync::Arc;
///
- /// # unsafe {
/// let mut five = Arc::new(5);
///
- /// let mut_five = five.make_unique();
- /// # }
+ /// let mut_five = Arc::make_unique(&mut five);
/// ```
#[inline]
#[unstable(feature = "arc_unique")]
- #[deprecated(since = "1.2.0",
- reason = "this function is unsafe with weak pointers")]
- pub unsafe fn make_unique(&mut self) -> &mut T {
- // FIXME(#24880) potential race with upgraded weak pointers here
+ pub fn make_unique(this: &mut Arc<T>) -> &mut T {
+ // Note that we hold both a strong reference and a weak reference.
+ // Thus, releasing our strong reference only will not, by itself, cause
+ // the memory to be deallocated.
//
- // Note that we hold a strong reference, which also counts as a weak
- // reference, so we only clone if there is an additional reference of
- // either kind.
- if self.inner().strong.load(SeqCst) != 1 ||
- self.inner().weak.load(SeqCst) != 1 {
- *self = Arc::new((**self).clone())
+ // Use Acquire to ensure that we see any writes to `weak` that happen
+ // before release writes (i.e., decrements) to `strong`. Since we hold a
+ // weak count, there's no chance the ArcInner itself could be
+ // deallocated.
+ if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
+ // Another strong pointer exists; clone
+ *this = Arc::new((**this).clone());
+ } else if this.inner().weak.load(Relaxed) != 1 {
+ // Relaxed suffices in the above because this is fundamentally an
+ // optimization: we are always racing with weak pointers being
+ // dropped. Worst case, we end up allocating a new Arc unnecessarily.
+
+ // We removed the last strong ref, but there are additional weak
+ // refs remaining. We'll move the contents to a new Arc, and
+ // invalidate the other weak refs.
+
+ // Note that it is not possible for the read of `weak` to yield
+ // usize::MAX (i.e., locked), since the weak count can only be
+ // locked by a thread with a strong reference.
+
+ // Materialize our own implicit weak pointer, so that it can clean
+ // up the ArcInner as needed.
+ let weak = Weak { _ptr: this._ptr };
+
+ // mark the data itself as already deallocated
+ unsafe {
+ // there is no data race in the implicit write caused by `read`
+ // here (due to zeroing) because data is no longer accessed by
+ // other threads (due to there being no more strong refs at this
+ // point).
+ let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
+ mem::swap(this, &mut swap);
+ mem::forget(swap);
+ }
+ } else {
+ // We were the sole reference of either kind; bump back up the
+ // strong ref count.
+ this.inner().strong.store(1, Release);
}
+
// As with `get_mut()`, the unsafety is ok because our reference was
// either unique to begin with, or became one upon cloning the contents.
- let inner = &mut **self._ptr;
- &mut inner.data
+ unsafe {
+ let inner = &mut **this._ptr;
+ &mut inner.data
+ }
}
}
+impl<T: ?Sized> Arc<T> {
+ /// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
+ ///
+ /// Returns `None` if the `Arc<T>` is not unique.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(arc_unique, alloc)]
+ ///
+ /// extern crate alloc;
+ /// # fn main() {
+ /// use alloc::arc::Arc;
+ ///
+ /// let mut x = Arc::new(3);
+ /// *Arc::get_mut(&mut x).unwrap() = 4;
+ /// assert_eq!(*x, 4);
+ ///
+ /// let _y = x.clone();
+ /// assert!(Arc::get_mut(&mut x).is_none());
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "arc_unique")]
+ pub fn get_mut(this: &mut Arc<T>) -> Option<&mut T> {
+ if this.is_unique() {
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ unsafe {
+ let inner = &mut **this._ptr;
+ Some(&mut inner.data)
+ }
+ } else {
+ None
+ }
+ }
+
+ /// Determine whether this is the unique reference (including weak refs) to
+ /// the underlying data.
+ ///
+ /// Note that this requires locking the weak ref count.
+ fn is_unique(&mut self) -> bool {
+ // lock the weak pointer count if we appear to be the sole weak pointer
+ // holder.
+ //
+ // The acquire label here ensures a happens-before relationship with any
+ // writes to `strong` prior to decrements of the `weak` count (via drop,
+ // which uses Release).
+ if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
+ // Due to the previous acquire read, this will observe any writes to
+ // `strong` that were due to upgrading weak pointers; only strong
+ // clones remain, which require that the strong count is > 1 anyway.
+ let unique = self.inner().strong.load(Relaxed) == 1;
+
+ // The release write here synchronizes with a read in `downgrade`,
+ // effectively preventing the above read of `strong` from happening
+ // after the write.
+ self.inner().weak.store(1, Release); // release the lock
+ unique
+ } else {
+ false
+ }
+ }
+}
+
+#[inline]
+#[unstable(feature = "arc_unique")]
+#[deprecated(since = "1.2", reason = "use Arc::get_mut instead")]
+pub fn get_mut<T: ?Sized>(this: &mut Arc<T>) -> Option<&mut T> {
+ Arc::get_mut(this)
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Arc<T> {
/// Drops the `Arc<T>`.
/// # Examples
///
/// ```
- /// # #![feature(arc_weak)]
+ /// #![feature(arc_weak)]
+ ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
// fetch_add because once the count hits 0 it must never be above 0.
let inner = self.inner();
loop {
- let n = inner.strong.load(SeqCst);
+ // Relaxed load because any write of 0 that we can observe
+ // leaves the field in a permanently zero state (so a
+ // "stale" read of 0 is fine), and any other value is
+ // confirmed via the CAS below.
+ let n = inner.strong.load(Relaxed);
if n == 0 { return None }
- let old = inner.strong.compare_and_swap(n, n + 1, SeqCst);
+
+ // Relaxed is valid for the same reason it is on Arc's Clone impl
+ let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
if old == n { return Some(Arc { _ptr: self._ptr }) }
}
}
/// # Examples
///
/// ```
- /// # #![feature(arc_weak)]
+ /// #![feature(arc_weak)]
+ ///
/// use std::sync::Arc;
///
/// let weak_five = Arc::new(5).downgrade();
/// ```
#[inline]
fn clone(&self) -> Weak<T> {
- // See comments in Arc::clone() for why this is relaxed
- self.inner().weak.fetch_add(1, Relaxed);
- Weak { _ptr: self._ptr }
+ // See comments in Arc::clone() for why this is relaxed. This can use a
+ // fetch_add (ignoring the lock) because the weak count is only locked
+ // when there are *no other* weak pointers in existence. (So we can't be
+ // running this code in that case).
+ let old_size = self.inner().weak.fetch_add(1, Relaxed);
+
+ // See comments in Arc::clone() for why we do this (for mem::forget).
+ if old_size > MAX_REFCOUNT {
+ unsafe { abort(); }
+ }
+
+ return Weak { _ptr: self._ptr }
}
}
/// # Examples
///
/// ```
- /// # #![feature(arc_weak)]
+ /// #![feature(arc_weak)]
+ ///
/// use std::sync::Arc;
///
/// {
// If we find out that we were the last weak pointer, then its time to
// deallocate the data entirely. See the discussion in Arc::drop() about
// the memory orderings
+ //
+ // It's not necessary to check for the locked state here, because the
+ // weak count can only be locked if there was precisely one weak ref,
+ // meaning that drop could only subsequently run ON that remaining weak
+ // ref, which can only happen after the lock is released.
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8,
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
- assert!(75 == *cow0.make_unique());
- assert!(75 == *cow1.make_unique());
- assert!(75 == *cow2.make_unique());
+ assert!(75 == *Arc::make_unique(&mut cow0));
+ assert!(75 == *Arc::make_unique(&mut cow1));
+ assert!(75 == *Arc::make_unique(&mut cow2));
- *cow0.make_unique() += 1;
- *cow1.make_unique() += 2;
- *cow2.make_unique() += 3;
+ *Arc::make_unique(&mut cow0) += 1;
+ *Arc::make_unique(&mut cow1) += 2;
+ *Arc::make_unique(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(75 == *cow2);
unsafe {
- *cow0.make_unique() += 1;
+ *Arc::make_unique(&mut cow0) += 1;
}
assert!(76 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
unsafe {
- *cow0.make_unique() += 1;
+ *Arc::make_unique(&mut cow0) += 1;
}
assert!(76 == *cow0);
use core::prelude::*;
+use heap;
+use raw_vec::RawVec;
+
use core::any::Any;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
-use core::marker::Unsize;
+use core::marker::{self, Unsize};
use core::mem;
use core::ops::{CoerceUnsized, Deref, DerefMut};
-use core::ptr::{Unique};
+use core::ops::{Placer, Boxed, Place, InPlace, BoxPlace};
+use core::ptr::{self, Unique};
use core::raw::{TraitObject};
/// A value that represents the heap. This is the default place that the `box`
/// The following two examples are equivalent:
///
/// ```
-/// # #![feature(box_heap)]
-/// #![feature(box_syntax)]
+/// #![feature(box_heap)]
+///
+/// #![feature(box_syntax, placement_in_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
#[lang = "exchange_heap"]
#[unstable(feature = "box_heap",
reason = "may be renamed; uncertain about custom allocator design")]
-pub const HEAP: () = ();
+#[allow(deprecated)]
+pub const HEAP: ExchangeHeapSingleton =
+ ExchangeHeapSingleton { _force_singleton: () };
+
+/// This is the singleton type used solely for `boxed::HEAP`.
+#[unstable(feature = "box_heap",
+ reason = "may be renamed; uncertain about custom allocator design")]
+#[derive(Copy, Clone)]
+pub struct ExchangeHeapSingleton { _force_singleton: () }
/// A pointer type for heap allocation.
///
#[lang = "owned_box"]
#[stable(feature = "rust1", since = "1.0.0")]
#[fundamental]
-pub struct Box<T>(Unique<T>);
+pub struct Box<T: ?Sized>(Unique<T>);
+
+/// `IntermediateBox` represents uninitialized backing storage for `Box`.
+///
+/// FIXME (pnkfelix): Ideally we would just reuse `Box<T>` instead of
+/// introducing a separate `IntermediateBox<T>`; but then you hit
+/// issues when you e.g. attempt to destructure an instance of `Box`,
+/// since it is a lang item and so it gets special handling by the
+/// compiler. Easier just to make this parallel type for now.
+///
+/// FIXME (pnkfelix): Currently the `box` protocol only supports
+/// creating instances of sized types. This IntermediateBox is
+/// designed to be forward-compatible with a future protocol that
+/// supports creating instances of unsized types; that is why the type
+/// parameter has the `?Sized` generalization marker, and is also why
+/// this carries an explicit size. However, it probably does not need
+/// to carry the explicit alignment; that is just a work-around for
+/// the fact that the `align_of` intrinsic currently requires the
+/// input type to be Sized (which I do not think is strictly
+/// necessary).
+#[unstable(feature = "placement_in", reason = "placement box design is still being worked out.")]
+pub struct IntermediateBox<T: ?Sized>{
+ ptr: *mut u8,
+ size: usize,
+ align: usize,
+ marker: marker::PhantomData<*mut T>,
+}
+
+impl<T> Place<T> for IntermediateBox<T> {
+ fn pointer(&mut self) -> *mut T {
+ unsafe { ::core::mem::transmute(self.ptr) }
+ }
+}
+
+unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> {
+ let p = b.ptr as *mut T;
+ mem::forget(b);
+ mem::transmute(p)
+}
+
+fn make_place<T>() -> IntermediateBox<T> {
+ let size = mem::size_of::<T>();
+ let align = mem::align_of::<T>();
+
+ let p = if size == 0 {
+ heap::EMPTY as *mut u8
+ } else {
+ let p = unsafe {
+ heap::allocate(size, align)
+ };
+ if p.is_null() {
+ panic!("Box make_place allocation failure.");
+ }
+ p
+ };
+
+ IntermediateBox { ptr: p, size: size, align: align, marker: marker::PhantomData }
+}
+
+impl<T> BoxPlace<T> for IntermediateBox<T> {
+ fn make_place() -> IntermediateBox<T> { make_place() }
+}
+
+impl<T> InPlace<T> for IntermediateBox<T> {
+ type Owner = Box<T>;
+ unsafe fn finalize(self) -> Box<T> { finalize(self) }
+}
+
+impl<T> Boxed for Box<T> {
+ type Data = T;
+ type Place = IntermediateBox<T>;
+ unsafe fn finalize(b: IntermediateBox<T>) -> Box<T> { finalize(b) }
+}
+
+impl<T> Placer<T> for ExchangeHeapSingleton {
+ type Place = IntermediateBox<T>;
+
+ fn make_place(self) -> IntermediateBox<T> {
+ make_place()
+ }
+}
+
+impl<T: ?Sized> Drop for IntermediateBox<T> {
+ fn drop(&mut self) {
+ if self.size > 0 {
+ unsafe {
+ heap::deallocate(self.ptr, self.size, self.align)
+ }
+ }
+ }
+}
impl<T> Box<T> {
/// Allocates memory on the heap and then moves `x` into it.
/// of `T` and releases memory. Since the way `Box` allocates and
/// releases memory is unspecified, the only valid pointer to pass
/// to this function is the one taken from another `Box` with
- /// `boxed::into_raw` function.
+ /// `Box::into_raw` function.
///
/// Function is unsafe, because improper use of this function may
/// lead to memory problems like double-free, for example if the
///
/// # Examples
/// ```
- /// # #![feature(box_raw)]
- /// use std::boxed;
+ /// #![feature(box_raw)]
///
/// let seventeen = Box::new(17u32);
- /// let raw = boxed::into_raw(seventeen);
+ /// let raw = Box::into_raw(seventeen);
/// let boxed_again = unsafe { Box::from_raw(raw) };
/// ```
#[unstable(feature = "box_raw", reason = "may be renamed")]
///
/// # Examples
/// ```
-/// # #![feature(box_raw)]
+/// #![feature(box_raw)]
+///
/// use std::boxed;
///
/// let seventeen = Box::new(17u32);
/// ```
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
-
/// Copies `source`'s contents into `self` without creating a new allocation.
///
/// # Examples
///
/// ```
- /// # #![feature(box_raw)]
+ /// #![feature(box_raw)]
+ ///
/// let x = Box::new(5);
/// let mut y = Box::new(10);
///
}
}
+
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl Clone for Box<str> {
+ fn clone(&self) -> Self {
+ let len = self.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
+ mem::transmute(buf.into_box()) // bytes to str ~magic
+ }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
#[inline]
}
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl<T: Clone> Clone for Box<[T]> {
+ fn clone(&self) -> Self {
+ let mut new = BoxBuilder {
+ data: RawVec::with_capacity(self.len()),
+ len: 0
+ };
+
+ let mut target = new.data.ptr();
+
+ for item in self.iter() {
+ unsafe {
+ ptr::write(target, item.clone());
+ target = target.offset(1);
+ };
+
+ new.len += 1;
+ }
+
+ return unsafe { new.into_box() };
+
+ // Helper type for responding to panics correctly.
+ struct BoxBuilder<T> {
+ data: RawVec<T>,
+ len: usize,
+ }
+
+ impl<T> BoxBuilder<T> {
+ unsafe fn into_box(self) -> Box<[T]> {
+ let raw = ptr::read(&self.data);
+ mem::forget(self);
+ raw.into_box()
+ }
+ }
+
+ impl<T> Drop for BoxBuilder<T> {
+ fn drop(&mut self) {
+ let mut data = self.data.ptr();
+ let max = unsafe { data.offset(self.len as isize) };
+
+ while data != max {
+ unsafe {
+ ptr::read(data);
+ data = data.offset(1);
+ }
+ }
+ }
+ }
+ }
+}
+
#[test]
fn raw_sized() {
+ let x = Box::new(17);
+ let p = Box::into_raw(x);
unsafe {
- let x = Box::new(17);
- let p = boxed::into_raw(x);
assert_eq!(17, *p);
*p = 19;
let y = Box::from_raw(p);
}
}
+ let x: Box<Foo> = Box::new(Bar(17));
+ let p = Box::into_raw(x);
unsafe {
- let x: Box<Foo> = Box::new(Bar(17));
- let p = boxed::into_raw(x);
assert_eq!(17, (*p).get());
(*p).set(19);
let y: Box<Foo> = Box::from_raw(p);
#![feature(core)]
#![feature(core_intrinsics)]
#![feature(core_prelude)]
+#![feature(core_slice_ext)]
#![feature(custom_attribute)]
#![feature(fundamental)]
#![feature(lang_items)]
#![feature(no_std)]
#![feature(nonzero)]
#![feature(optin_builtin_traits)]
+#![feature(placement_in_syntax)]
+#![feature(placement_new_protocol)]
#![feature(raw)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(unsize)]
+#![feature(core_slice_ext)]
+#![feature(core_str_ext)]
#![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
#![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
mod boxed_test;
pub mod arc;
pub mod rc;
+pub mod raw_vec;
/// Common out-of-memory routine
#[cold]
// allocate.
unsafe { core::intrinsics::abort() }
}
-
-// FIXME(#14344): When linking liballoc with libstd, this library will be linked
-// as an rlib (it only exists as an rlib). It turns out that an
-// optimized standard library doesn't actually use *any* symbols
-// from this library. Everything is inlined and optimized away.
-// This means that linkers will actually omit the object for this
-// file, even though it may be needed in the future.
-//
-// To get around this for now, we define a dummy symbol which
-// will never get inlined so the stdlib can call it. The stdlib's
-// reference to this symbol will cause this library's object file
-// to get linked in to libstd successfully (the linker won't
-// optimize it out).
-#[doc(hidden)]
-#[unstable(feature = "issue_14344_fixme")]
-pub fn fixme_14344_be_sure_to_link_to_collections() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::ptr::Unique;
+use core::mem;
+use core::slice::{self, SliceExt};
+use heap;
+use super::oom;
+use super::boxed::Box;
+use core::ops::Drop;
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces heap::EMPTY on zero-sized types
+/// * Produces heap::EMPTY on zero-length allocations
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes
+/// * Guards against overflowing your length
+/// * Aborts on OOM
+/// * Avoids freeing heap::EMPTY
+/// * Contains a ptr::Unique and thus endows the user with all related benefits
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
+/// to handle the actual things *stored* inside of a RawVec.
+///
+/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
+/// This enables you to use capacity-growing logic to catch the overflows in your length
+/// that might occur with zero-sized types.
+///
+/// However this means that you need to be careful when roundtripping this type
+/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
+/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
+/// field. This allows zero-sized types to not be special-cased by consumers of
+/// this type.
+#[unsafe_no_drop_flag]
+pub struct RawVec<T> {
+ ptr: Unique<T>,
+ cap: usize,
+}
+
+impl<T> RawVec<T> {
+ /// Creates the biggest possible RawVec without allocating. If T has positive
+ /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
+ /// makes a RawVec with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ pub fn new() -> Self {
+ unsafe {
+ // !0 is usize::MAX. This branch should be stripped at compile time.
+ let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
+
+ // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
+ RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
+ }
+ }
+
+ /// Creates a RawVec with exactly the capacity and alignment requirements
+ /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
+ /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
+ /// get a RawVec with the requested capacity!
+ ///
+ /// # Panics
+ ///
+ /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+ /// * Panics on 32-bit platforms if the requested capacity exceeds
+ /// `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM
+ pub fn with_capacity(cap: usize) -> Self {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+
+ let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
+ alloc_guard(alloc_size);
+
+ // handles ZSTs and `cap = 0` alike
+ let ptr = if alloc_size == 0 {
+ heap::EMPTY as *mut u8
+ } else {
+ let align = mem::align_of::<T>();
+ let ptr = heap::allocate(alloc_size, align);
+ if ptr.is_null() { oom() }
+ ptr
+ };
+
+ RawVec { ptr: Unique::new(ptr as *mut _), cap: cap }
+ }
+ }
+
+ /// Reconstitutes a RawVec from a pointer and capacity.
+ ///
+ /// # Undefined Behaviour
+ ///
+ /// The ptr must be allocated, and with the given capacity. The
+ /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+ /// If the ptr and capacity come from a RawVec, then this is guaranteed.
+ pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
+ RawVec { ptr: Unique::new(ptr), cap: cap }
+ }
+
+ /// Converts a `Box<[T]>` into a `RawVec<T>`.
+ pub fn from_box(mut slice: Box<[T]>) -> Self {
+ unsafe {
+ let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
+ mem::forget(slice);
+ result
+ }
+ }
+}
+
+impl<T> RawVec<T> {
+ /// Gets a raw pointer to the start of the allocation. Note that this is
+ /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
+ /// be careful.
+ pub fn ptr(&self) -> *mut T {
+ *self.ptr
+ }
+
+ /// Gets the capacity of the allocation.
+ ///
+ /// This will always be `usize::MAX` if `T` is zero-sized.
+ pub fn cap(&self) -> usize {
+ if mem::size_of::<T>() == 0 { !0 } else { self.cap }
+ }
+
+ /// Doubles the size of the type's backing allocation. This is common enough
+ /// to want to do that it's easiest to just have a dedicated method. Slightly
+ /// more efficient logic can be provided for this than the general case.
+ ///
+ /// This function is ideal for when pushing elements one-at-a-time because
+ /// you don't need to incur the costs of the more general computations
+ /// reserve needs to do to guard against overflow. You do however need to
+ /// manually check if your `len == cap`.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if T is zero-sized on the assumption that you managed to exhaust
+ /// all `usize::MAX` slots in your imaginary buffer.
+ /// * Panics on 32-bit platforms if the requested capacity exceeds
+ /// `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM
+ ///
+ /// # Examples
+ ///
+ /// ```ignore
+ /// struct MyVec<T> {
+ /// buf: RawVec<T>,
+ /// len: usize,
+ /// }
+ ///
+ /// impl<T> MyVec<T> {
+ /// pub fn push(&mut self, elem: T) {
+ /// if self.len == self.buf.cap() { self.buf.double(); }
+ /// // double would have aborted or panicked if the len exceeded
+ /// // `isize::MAX` so this is safe to do unchecked now.
+ /// unsafe {
+ /// ptr::write(self.buf.ptr().offset(self.len as isize), elem);
+ /// }
+ /// self.len += 1;
+ /// }
+ /// }
+ /// ```
+ #[inline(never)]
+ #[cold]
+ pub fn double(&mut self) {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+
+ // since we set the capacity to usize::MAX when elem_size is
+ // 0, getting to here necessarily means the RawVec is overfull.
+ assert!(elem_size != 0, "capacity overflow");
+
+ let align = mem::align_of::<T>();
+
+ let (new_cap, ptr) = if self.cap == 0 {
+ // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
+ let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
+ let ptr = heap::allocate(new_cap * elem_size, align);
+ (new_cap, ptr)
+ } else {
+ // Since we guarantee that we never allocate more than isize::MAX bytes,
+ // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
+ let new_cap = 2 * self.cap;
+ let new_alloc_size = new_cap * elem_size;
+ alloc_guard(new_alloc_size);
+ let ptr = heap::reallocate(self.ptr() as *mut _,
+ self.cap * elem_size,
+ new_alloc_size,
+ align);
+ (new_cap, ptr)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold
+ /// `used_cap + needed_extra_cap` elements. If it doesn't already,
+ /// will reallocate the minimum possible amount of memory necessary.
+ /// Generally this will be exactly the amount of memory necessary,
+ /// but in principle the allocator is free to give back more than
+ /// we asked for.
+ ///
+ /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behaviour of this function may break.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+ /// * Panics on 32-bit platforms if the requested capacity exceeds
+ /// `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM
+ pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+ let align = mem::align_of::<T>();
+
+ // NOTE: we don't early branch on ZSTs here because we want this
+ // to actually catch "asking for more than usize::MAX" in that case.
+ // If we make it past the first branch then we are guaranteed to
+ // panic.
+
+ // Don't actually need any more capacity.
+ // Wrapping in case they gave a bad `used_cap`.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
+
+ // Nothing we can really do about these checks :(
+ let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
+ let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
+ alloc_guard(new_alloc_size);
+
+ let ptr = if self.cap == 0 {
+ heap::allocate(new_alloc_size, align)
+ } else {
+ heap::reallocate(self.ptr() as *mut _,
+ self.cap * elem_size,
+ new_alloc_size,
+ align)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold
+ /// `used_cap + needed_extra_cap` elements. If it doesn't already have
+ /// enough capacity, will reallocate enough space plus comfortable slack
+ /// space to get amortized `O(1)` behaviour. Will limit this behaviour
+ /// if it would needlessly cause itself to panic.
+ ///
+ /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behaviour of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+ /// * Panics on 32-bit platforms if the requested capacity exceeds
+ /// `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM
+ ///
+ /// # Examples
+ ///
+ /// ```ignore
+ /// struct MyVec<T> {
+ /// buf: RawVec<T>,
+ /// len: usize,
+ /// }
+ ///
+ /// impl<T> MyVec<T> {
+ /// pub fn push_all(&mut self, elems: &[T]) {
+ /// self.buf.reserve(self.len, elems.len());
+ /// // reserve would have aborted or panicked if the len exceeded
+ /// // `isize::MAX` so this is safe to do unchecked now.
+ /// for x in elems {
+ /// unsafe {
+ /// ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
+ /// }
+ /// self.len += 1;
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
+ unsafe {
+ let elem_size = mem::size_of::<T>();
+ let align = mem::align_of::<T>();
+
+ // NOTE: we don't early branch on ZSTs here because we want this
+ // to actually catch "asking for more than usize::MAX" in that case.
+ // If we make it past the first branch then we are guaranteed to
+ // panic.
+
+ // Don't actually need any more capacity.
+ // Wrapping in case they give a bad `used_cap`.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
+
+ // Nothing we can really do about these checks :(
+ let new_cap = used_cap.checked_add(needed_extra_cap)
+ .and_then(|cap| cap.checked_mul(2))
+ .expect("capacity overflow");
+ let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
+ // FIXME: may crash and burn on over-reserve
+ alloc_guard(new_alloc_size);
+
+ let ptr = if self.cap == 0 {
+ heap::allocate(new_alloc_size, align)
+ } else {
+ heap::reallocate(self.ptr() as *mut _,
+ self.cap * elem_size,
+ new_alloc_size,
+ align)
+ };
+
+ // If allocate or reallocate fail, we'll get `null` back
+ if ptr.is_null() { oom() }
+
+ self.ptr = Unique::new(ptr as *mut _);
+ self.cap = new_cap;
+ }
+ }
+
+ /// Shrinks the allocation down to the specified amount. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ pub fn shrink_to_fit(&mut self, amount: usize) {
+ let elem_size = mem::size_of::<T>();
+ let align = mem::align_of::<T>();
+
+ // Set the `cap` because they might be about to promote to a `Box<[T]>`
+ if elem_size == 0 {
+ self.cap = amount;
+ return;
+ }
+
+ // This check is my waterloo; it's the only thing Vec wouldn't have to do.
+ assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
+
+ if amount == 0 {
+ mem::replace(self, RawVec::new());
+ } else if self.cap != amount {
+ unsafe {
+ // Overflow check is unnecessary as the vector is already at
+ // least this large.
+ let ptr = heap::reallocate(self.ptr() as *mut _,
+ self.cap * elem_size,
+ amount * elem_size,
+ align);
+ if ptr.is_null() { oom() }
+ self.ptr = Unique::new(ptr as *mut _);
+ }
+ self.cap = amount;
+ }
+ }
+
+ /// Converts the entire buffer into `Box<[T]>`.
+ ///
+ /// While it is not *strictly* Undefined Behaviour to call
+ /// this procedure while some of the RawVec is uninitialized,
+ /// it certainly makes it trivial to trigger it.
+ ///
+ /// Note that this will correctly reconstitute any `cap` changes
+ /// that may have been performed. (see description of type for details)
+ pub unsafe fn into_box(self) -> Box<[T]> {
+ // NOTE: not calling `cap()` here, actually using the real `cap` field!
+ let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
+ let output: Box<[T]> = Box::from_raw(slice);
+ mem::forget(self);
+ output
+ }
+
+ /// This is a stupid name in the hopes that someone will find this in the
+ /// not too distant future and remove it with the rest of
+ /// #[unsafe_no_drop_flag]
+ pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
+ self.cap != mem::POST_DROP_USIZE
+ }
+}
+
+impl<T> Drop for RawVec<T> {
+ /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
+ fn drop(&mut self) {
+ let elem_size = mem::size_of::<T>();
+ if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
+ let align = mem::align_of::<T>();
+
+ let num_bytes = elem_size * self.cap;
+ unsafe {
+ heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+ }
+ }
+ }
+}
+
+
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects
+// * We don't overflow `usize::MAX` and actually allocate too little
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
+// guard for this in case we're running on a platform which can use all 4GB in
+// user-space. e.g. PAE or x32
+
+#[inline]
+#[cfg(target_pointer_width = "64")]
+fn alloc_guard(_alloc_size: usize) { }
+
+#[inline]
+#[cfg(target_pointer_width = "32")]
+fn alloc_guard(alloc_size: usize) {
+ assert!(alloc_size <= ::core::isize::MAX as usize, "capacity overflow");
+}
//! documentation for more details on interior mutability.
//!
//! ```rust
-//! # #![feature(rc_weak)]
+//! #![feature(rc_weak)]
+//!
//! use std::rc::Rc;
//! use std::rc::Weak;
//! use std::cell::RefCell;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hasher, Hash};
-use core::intrinsics::{assume, drop_in_place};
+use core::intrinsics::{assume, drop_in_place, abort};
use core::marker::{self, Unsize};
use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
use core::nonzero::NonZero;
/// # Examples
///
/// ```
- /// # #![feature(rc_unique)]
+ /// #![feature(rc_unique)]
+ ///
/// use std::rc::Rc;
///
/// let x = Rc::new(3);
/// # Examples
///
/// ```
- /// # #![feature(rc_weak)]
+ /// #![feature(rc_weak)]
+ ///
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// # Examples
///
/// ```
- /// # #![feature(rc_unique)]
+ /// #![feature(rc_unique)]
+ ///
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// # Examples
///
/// ```
- /// # #![feature(rc_unique)]
+ /// #![feature(rc_unique)]
+ ///
/// use std::rc::Rc;
///
/// let mut x = Rc::new(3);
/// # Examples
///
/// ```
-/// # #![feature(rc_unique)]
+/// #![feature(rc_unique)]
+///
/// use std::rc;
/// use std::rc::Rc;
///
/// # Examples
///
/// ```
-/// # #![feature(rc_unique)]
+/// #![feature(rc_unique)]
+///
/// use std::rc::{self, Rc};
///
/// let x = Rc::new(3);
/// # Examples
///
/// ```
-/// # #![feature(rc_unique)]
+/// #![feature(rc_unique)]
+///
/// use std::rc::{self, Rc};
///
/// let mut x = Rc::new(3);
/// # Examples
///
/// ```
- /// # #![feature(rc_unique)]
+ /// #![feature(rc_unique)]
+ ///
/// use std::rc::Rc;
///
/// let mut five = Rc::new(5);
impl<T: ?Sized> !marker::Send for Weak<T> {}
impl<T: ?Sized> !marker::Sync for Weak<T> {}
+impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
#[unstable(feature = "rc_weak",
reason = "Weak pointers may not belong in this module.")]
impl<T: ?Sized> Weak<T> {
/// # Examples
///
/// ```
- /// # #![feature(rc_weak)]
+ /// #![feature(rc_weak)]
+ ///
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// # Examples
///
/// ```
- /// # #![feature(rc_weak)]
+ /// #![feature(rc_weak)]
+ ///
/// use std::rc::Rc;
///
/// {
/// # Examples
///
/// ```
- /// # #![feature(rc_weak)]
+ /// #![feature(rc_weak)]
+ ///
/// use std::rc::Rc;
///
/// let weak_five = Rc::new(5).downgrade();
}
}
+// NOTE: We use checked_add here to deal with mem::forget safety. In particular
+// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then
+// you can free the allocation while outstanding Rcs (or Weaks) exist.
+// We abort because this is such a degenerate scenario that we don't care about
+// what happens -- no real program should ever experience this.
+//
+// This should have negligible overhead since you don't actually need to
+// clone these much in Rust thanks to ownership and move-semantics.
+
#[doc(hidden)]
trait RcBoxPtr<T: ?Sized> {
fn inner(&self) -> &RcBox<T>;
fn strong(&self) -> usize { self.inner().strong.get() }
#[inline]
- fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
+ fn inc_strong(&self) {
+ self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+ }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
fn weak(&self) -> usize { self.inner().weak.get() }
#[inline]
- fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
+ fn inc_weak(&self) {
+ self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+ }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let heap = BinaryHeap::from_vec(vec![9, 1, 2, 7, 3, 2]);
/// ```
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let heap = BinaryHeap::from_vec(vec![1, 2, 3, 4]);
///
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::from_vec(vec![1, 3]);
///
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::new();
/// heap.push(1);
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::new();
///
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let heap = BinaryHeap::from_vec(vec![1, 2, 3, 4, 5, 6, 7]);
/// let vec = heap.into_vec();
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from_vec(vec![1, 2, 4, 5, 7]);
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::BinaryHeap;
/// let heap = BinaryHeap::from_vec(vec![1, 2, 3, 4]);
///
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![deprecated(reason = "BitVec and BitSet have been migrated to cargo as bit-vec and bit-set",
+ since = "1.3.0")]
+#![unstable(feature = "collections", reason = "deprecated")]
+#![allow(deprecated)]
+
// FIXME(Gankro): BitVec and BitSet are very tightly coupled. Ideally (for
// maintenance), they should be in separate files/modules, with BitSet only
// using BitVec's public API. This will be hard for performance though, because
//! [sieve]: http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
//!
//! ```
-//! # #![feature(bitset, bitvec, range_inclusive, step_by)]
+//! #![feature(bitset, bitvec, range_inclusive, step_by)]
+//!
//! use std::collections::{BitSet, BitVec};
//! use std::iter;
//!
/// # Examples
///
/// ```
-/// # #![feature(bitvec)]
+/// #![feature(bitvec)]
+///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(10, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
/// let mut bv = BitVec::new();
/// ```
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_elem(10, false);
pub fn from_elem(nbits: usize, bit: bool) -> BitVec {
let nblocks = blocks_for_bits(nbits);
let mut bit_vec = BitVec {
- storage: repeat(if bit { !0 } else { 0 }).take(nblocks).collect(),
+ storage: vec![if bit { !0 } else { 0 }; nblocks],
nbits: nbits
};
bit_vec.fix_last_block();
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_bytes(&[0b10100000, 0b00010010]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_fn(5, |i| { i % 2 == 0 });
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_bytes(&[0b01100000]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(5, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let before = 0b01100000;
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let before = 0b01100000;
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let a = 0b01100100;
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let a = 0b01100100;
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let a = 0b01100100;
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(5, true);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_bytes(&[0b01110100, 0b10010010]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec, append)]
+ /// #![feature(bitvec, append)]
+ ///
/// use std::collections::BitVec;
///
/// let mut a = BitVec::from_bytes(&[0b10000000]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec, split_off)]
+ /// #![feature(bitvec, split_off)]
+ ///
/// use std::collections::BitVec;
/// let mut a = BitVec::new();
/// a.push(true);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(10, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(10, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(3, true);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let bv = BitVec::from_bytes(&[0b10100000]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_bytes(&[0b01001011]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(3, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_elem(3, false);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::new();
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_bytes(&[0b01001011]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::from_bytes(&[0b01001001]);
/// # Examples
///
/// ```
- /// # #![feature(bitvec)]
+ /// #![feature(bitvec)]
+ ///
/// use std::collections::BitVec;
///
/// let mut bv = BitVec::new();
/// # Examples
///
/// ```
-/// # #![feature(bitvec, bitset)]
+/// #![feature(bitvec, bitset)]
+///
/// use std::collections::{BitSet, BitVec};
///
/// // It's a regular set
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::with_capacity(100);
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitVec, BitSet};
///
/// let bv = BitVec::from_bytes(&[0b01100000]);
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::with_capacity(100);
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset)]
+ /// #![feature(bitset)]
+ ///
/// use std::collections::BitSet;
///
/// let mut s = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitVec, BitSet};
///
/// let s = BitSet::from_bit_vec(BitVec::from_bytes(&[0b01001010]));
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitVec, BitSet};
///
/// let a = BitSet::from_bit_vec(BitVec::from_bytes(&[0b01101000]));
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitVec, BitSet};
///
/// let a = BitSet::from_bit_vec(BitVec::from_bytes(&[0b01101000]));
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = BitSet::from_bit_vec(BitVec::from_bytes(&[0b01101000]));
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = BitSet::from_bit_vec(BitVec::from_bytes(&[0b01101000]));
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = 0b01101000;
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = 0b01101000;
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = 0b01101000;
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec)]
+ /// #![feature(bitset, bitvec)]
+ ///
/// use std::collections::{BitSet, BitVec};
///
/// let a = 0b01101000;
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec, append)]
+ /// #![feature(bitset, bitvec, append)]
+ ///
/// use std::collections::{BitVec, BitSet};
///
/// let mut a = BitSet::new();
/// # Examples
///
/// ```
- /// # #![feature(bitset, bitvec, split_off)]
+ /// #![feature(bitset, bitvec, split_off)]
+ ///
/// use std::collections::{BitSet, BitVec};
/// let mut a = BitSet::new();
/// a.insert(2);
impl<'a, B: ?Sized> Cow<'a, B> where B: ToOwned {
/// Acquires a mutable reference to the owned form of the data.
///
- /// Copies the data if it is not already owned.
+ /// Clones the data if it is not already owned.
///
/// # Examples
///
/// Extracts the owned data.
///
- /// Copies the data if it is not already owned.
+ /// Clones the data if it is not already owned.
///
/// # Examples
///
/// # Examples
///
/// ```
- /// # #![feature(btree_range, collections_bound)]
+ /// #![feature(btree_range, collections_bound)]
+ ///
/// use std::collections::BTreeMap;
/// use std::collections::Bound::{Included, Unbounded};
///
/// # Examples
///
/// ```
- /// # #![feature(btree_range, collections_bound)]
+ /// #![feature(btree_range, collections_bound)]
+ ///
/// use std::collections::BTreeMap;
/// use std::collections::Bound::{Included, Excluded};
///
/// # Examples
///
/// ```
- /// # #![feature(btree_range, collections_bound)]
+ /// #![feature(btree_range, collections_bound)]
+ ///
/// use std::collections::BTreeSet;
/// use std::collections::Bound::{Included, Unbounded};
///
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
-//! It is illegal to put positional parameters (those without names) after
-//! arguments which have names. Like with positional parameters, it is illegal
-//! to provide named parameters that are unused by the format string.
+//! It is not valid to put positional parameters (those without names) after
+//! arguments which have names. Like with positional parameters, it is not
+//! valid to provide named parameters that are unused by the format string.
//!
//! ## Argument types
//!
//! hexadecimal as well as an
//! octal.
//!
-//! There are various parameters which do require a particular type, however. Namely, the `{:.*}`
-//! syntax, which sets the number of numbers after the decimal in floating-point types:
+//! There are various parameters which do require a particular type, however.
+//! Namely, the `{:.*}` syntax, which sets the number of numbers after the
+//! decimal in floating-point types:
//!
//! ```
//! let formatted_number = format!("{:.*}", 2, 1.234567);
//! assert_eq!("1.23", formatted_number)
//! ```
//!
-//! If this syntax is used, then the number of characters to print precedes the actual object being
-//! formatted, and the number of characters must have the type `usize`. Although a `usize` can be
-//! printed with `{}`, it is illegal to reference an argument as such. For example this is another
-//! invalid format string:
+//! If this syntax is used, then the number of characters to print precedes the
+//! actual object being formatted, and the number of characters must have the
+//! type `usize`. Although a `usize` can be printed with `{}`, it is invalid to
+//! reference an argument as such. For example this is another invalid format
+//! string:
//!
//! ```text
//! {:.*} {0}
//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
//! well as `isize`). The current mapping of types to traits is:
//!
-//! * *nothing* ⇒ `Display`
-//! * `?` ⇒ `Debug`
-//! * `o` ⇒ `Octal`
-//! * `x` ⇒ `LowerHex`
-//! * `X` ⇒ `UpperHex`
-//! * `p` ⇒ `Pointer`
-//! * `b` ⇒ `Binary`
-//! * `e` ⇒ `LowerExp`
-//! * `E` ⇒ `UpperExp`
+//! * *nothing* ⇒ [`Display`](trait.Display.html)
+//! * `?` ⇒ [`Debug`](trait.Debug.html)
+//! * `o` ⇒ [`Octal`](trait.Octal.html)
+//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
+//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
+//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
+//! * `b` ⇒ [`Binary`](trait.Binary.html)
+//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
+//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! `fmt::Binary` trait can then be formatted with `{:b}`. Implementations
//! like:
//!
//! ```
-//! # #![feature(fmt_flags)]
+//! #![feature(fmt_flags)]
//! use std::fmt;
//!
//! #[derive(Debug)]
//! should always be printed.
//! * '-' - Currently not used
//! * '#' - This flag is indicates that the "alternate" form of printing should
-//! be used. For array slices, the alternate form omits the brackets.
-//! For the integer formatting traits, the alternate forms are:
+//! be used. The alternate forms are:
+//! * `#?` - pretty-print the `Debug` formatting
//! * `#x` - precedes the argument with a "0x"
//! * `#X` - precedes the argument with a "0x"
-//! * `#t` - precedes the argument with a "0b"
+//! * `#b` - precedes the argument with a "0b"
//! * `#o` - precedes the argument with a "0o"
//! * '0' - This is used to indicate for integer formats that the padding should
//! both be done with a `0` character as well as be sign-aware. A format
//!
//! There are three possible ways to specify the desired `precision`:
//!
-//! There are three possible ways to specify the desired `precision`:
-//! 1. An integer `.N`,
-//! 2. an integer followed by dollar sign `.N$`, or
-//! 3. an asterisk `.*`.
+//! 1. An integer `.N`:
+//!
+//! the integer `N` itself is the precision.
+//!
+//! 2. An integer followed by dollar sign `.N$`:
//!
-//! The first specification, `.N`, means the integer `N` itself is the precision.
+//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
-//! The second, `.N$`, means use format *argument* `N` (which must be a `usize`) as the precision.
+//! 3. An asterisk `.*`:
//!
-//! Finally, `.*` means that this `{...}` is associated with *two* format inputs rather than one:
-//! the first input holds the `usize` precision, and the second holds the value to print. Note
-//! that in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part
-//! refers to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
+//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
+//! first input holds the `usize` precision, and the second holds the value to print. Note that
+//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
+//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, these:
//!
#![feature(alloc)]
#![feature(box_patterns)]
-#![feature(box_raw)]
#![feature(box_syntax)]
#![feature(core)]
#![feature(core_intrinsics)]
#[cfg(test)] extern crate test;
pub use binary_heap::BinaryHeap;
+#[allow(deprecated)]
pub use bit_vec::BitVec;
+#[allow(deprecated)]
pub use bit_set::BitSet;
pub use btree_map::BTreeMap;
pub use btree_set::BTreeSet;
pub use vec_deque::VecDeque;
pub use string::String;
pub use vec::Vec;
+#[allow(deprecated)]
pub use vec_map::VecMap;
// Needed for the vec! macro
pub mod string;
pub mod vec;
pub mod vec_deque;
+#[allow(deprecated)]
pub mod vec_map;
#[unstable(feature = "bitvec", reason = "RFC 509")]
pub mod bit_vec {
+ #![allow(deprecated)]
pub use bit::{BitVec, Iter};
}
#[unstable(feature = "bitset", reason = "RFC 509")]
pub mod bit_set {
+ #![allow(deprecated)]
pub use bit::{BitSet, Union, Intersection, Difference, SymmetricDifference};
pub use bit::SetIter as Iter;
}
pub use btree::set::*;
}
-
-// FIXME(#14344) this shouldn't be necessary
-#[doc(hidden)]
-#[unstable(feature = "issue_14344_fixme")]
-pub fn fixme_14344_be_sure_to_link_to_collections() {}
-
#[cfg(not(test))]
mod std {
pub use core::ops; // RangeFull
/// # Examples
///
/// ```
- /// # #![feature(linked_list_extras)]
+ /// #![feature(linked_list_extras)]
+ ///
/// use std::collections::LinkedList;
///
/// let mut list: LinkedList<_> = vec![1, 3, 4].into_iter().collect();
/// # Examples
///
/// ```
- /// # #![feature(linked_list_extras)]
+ /// #![feature(linked_list_extras)]
+ ///
/// use std::collections::LinkedList;
///
/// let mut list: LinkedList<_> = vec![1, 2, 3].into_iter().collect();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Utilities for slice manipulation
+//! A dynamically-sized view into a contiguous sequence, `[T]`.
//!
-//! The `slice` module contains useful code to help work with slice values.
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//!
//! iterators.
//! * Further methods that return iterators are `.split()`, `.splitn()`,
//! `.chunks()`, `.windows()` and more.
-#![doc(primitive = "slice")]
+//!
+//! *[See also the slice primitive type](../primitive.slice.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
/// Returns all but the first element of a slice.
#[unstable(feature = "slice_extras", reason = "likely to be renamed")]
+ #[deprecated(since = "1.3.0", reason = "superseded by split_first")]
#[inline]
pub fn tail(&self) -> &[T] {
core_slice::SliceExt::tail(self)
}
+ /// Returns the first and all the rest of the elements of a slice.
+ #[unstable(feature = "slice_splits", reason = "new API")]
+ #[inline]
+ pub fn split_first(&self) -> Option<(&T, &[T])> {
+ core_slice::SliceExt::split_first(self)
+ }
+
/// Returns all but the first element of a mutable slice
- #[unstable(feature = "slice_extras",
- reason = "likely to be renamed or removed")]
+ #[unstable(feature = "slice_extras", reason = "likely to be renamed or removed")]
+ #[deprecated(since = "1.3.0", reason = "superseded by split_first_mut")]
#[inline]
pub fn tail_mut(&mut self) -> &mut [T] {
core_slice::SliceExt::tail_mut(self)
}
+ /// Returns the first and all the rest of the elements of a slice.
+ #[unstable(feature = "slice_splits", reason = "new API")]
+ #[inline]
+ pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ core_slice::SliceExt::split_first_mut(self)
+ }
+
/// Returns all but the last element of a slice.
#[unstable(feature = "slice_extras", reason = "likely to be renamed")]
+ #[deprecated(since = "1.3.0", reason = "superseded by split_last")]
#[inline]
pub fn init(&self) -> &[T] {
core_slice::SliceExt::init(self)
}
+ /// Returns the last and all the rest of the elements of a slice.
+ #[unstable(feature = "slice_splits", reason = "new API")]
+ #[inline]
+ pub fn split_last(&self) -> Option<(&T, &[T])> {
+ core_slice::SliceExt::split_last(self)
+
+ }
+
/// Returns all but the last element of a mutable slice
- #[unstable(feature = "slice_extras",
- reason = "likely to be renamed or removed")]
+ #[unstable(feature = "slice_extras", reason = "likely to be renamed or removed")]
+ #[deprecated(since = "1.3.0", reason = "superseded by split_last_mut")]
#[inline]
pub fn init_mut(&mut self) -> &mut [T] {
core_slice::SliceExt::init_mut(self)
}
+ /// Returns the last and all the rest of the elements of a slice.
+ #[unstable(feature = "slice_splits", since = "1.3.0")]
+ #[inline]
+ pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ core_slice::SliceExt::split_last_mut(self)
+ }
+
/// Returns the last element of a slice, or `None` if it is empty.
///
/// # Examples
/// Find the first index containing a matching value.
#[unstable(feature = "slice_position_elem")]
+ #[deprecated(since = "1.3.0",
+ reason = "less idiomatic than .iter().position()")]
pub fn position_elem(&self, t: &T) -> Option<usize> where T: PartialEq {
core_slice::SliceExt::position_elem(self, t)
}
/// Find the last index containing a matching value.
#[unstable(feature = "slice_position_elem")]
+ #[deprecated(since = "1.3.0",
+ reason = "less idiomatic than .iter().rev().position()")]
pub fn rposition_elem(&self, t: &T) -> Option<usize> where T: PartialEq {
core_slice::SliceExt::rposition_elem(self, t)
}
/// # Examples
///
/// ```rust
- /// # #![feature(permutations)]
+ /// #![feature(permutations)]
+ ///
/// let v = [1, 2, 3];
/// let mut perms = v.permutations();
///
/// Iterating through permutations one by one.
///
/// ```rust
- /// # #![feature(permutations)]
+ /// #![feature(permutations)]
+ ///
/// let v = [1, 2, 3];
/// let mut perms = v.permutations();
///
/// # Example
///
/// ```rust
- /// # #![feature(permutations)]
+ /// #![feature(permutations)]
+ ///
/// let v: &mut [_] = &mut [0, 1, 2];
/// v.next_permutation();
/// let b: &mut [_] = &mut [0, 2, 1];
/// # Example
///
/// ```rust
- /// # #![feature(permutations)]
+ /// #![feature(permutations)]
+ ///
/// let v: &mut [_] = &mut [1, 0, 2];
/// v.prev_permutation();
/// let b: &mut [_] = &mut [0, 2, 1];
/// # Example
///
/// ```rust
- /// # #![feature(clone_from_slice)]
+ /// #![feature(clone_from_slice)]
+ ///
/// let mut dst = [0, 0, 0];
/// let src = [1, 2];
///
/// # Examples
///
/// ```rust
- /// # #![feature(move_from)]
+ /// #![feature(move_from)]
+ ///
/// let mut a = [1, 2, 3, 4, 5];
/// let b = vec![6, 7, 8];
/// let num_moved = a.move_from(b, 0, 3);
/// ```
#[unstable(feature = "move_from",
reason = "uncertain about this API approach")]
+ #[deprecated(since = "1.3.0",
+ reason = "unclear that it must belong in the standard library")]
#[inline]
pub fn move_from(&mut self, mut src: Vec<T>, start: usize, end: usize) -> usize {
for (a, b) in self.iter_mut().zip(&mut src[start .. end]) {
#[stable(feature = "rust1", since = "1.0.0")]
fn concat(&self) -> Self::Output;
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].join(" "), "hello world");
+ /// ```
+ #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
+ fn join(&self, sep: &T) -> Self::Output;
+
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// assert_eq!(["hello", "world"].connect(" "), "hello world");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.3.0", reason = "renamed to join")]
fn connect(&self, sep: &T) -> Self::Output;
}
result
}
- fn connect(&self, sep: &T) -> Vec<T> {
+ fn join(&self, sep: &T) -> Vec<T> {
let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
let mut result = Vec::with_capacity(size + self.len());
let mut first = true;
}
result
}
+
+ fn connect(&self, sep: &T) -> Vec<T> {
+ self.join(sep)
+ }
}
/// An iterator that yields the element swaps needed to produce
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Unicode string manipulation (the `str` type).
+//! Unicode string slices
//!
-//! Rust's `str` type is one of the core primitive types of the language. `&str`
-//! is the borrowed string type. This type of string can only be created from
-//! other strings, unless it is a `&'static str` (see below). It is not possible
-//! to move out of borrowed strings because they are owned elsewhere.
-//!
-//! # Examples
-//!
-//! Here's some code that uses a `&str`:
-//!
-//! ```
-//! let s = "Hello, world.";
-//! ```
-//!
-//! This `&str` is a `&'static str`, which is the type of string literals.
-//! They're `'static` because literals are available for the entire lifetime of
-//! the program.
-//!
-//! You can get a non-`'static` `&str` by taking a slice of a `String`:
-//!
-//! ```
-//! let some_string = "Hello, world.".to_string();
-//! let s = &some_string;
-//! ```
-//!
-//! # Representation
-//!
-//! Rust's string type, `str`, is a sequence of Unicode scalar values encoded as
-//! a stream of UTF-8 bytes. All [strings](../../reference.html#literals) are
-//! guaranteed to be validly encoded UTF-8 sequences. Additionally, strings are
-//! not null-terminated and can thus contain null bytes.
-//!
-//! The actual representation of `str`s have direct mappings to slices: `&str`
-//! is the same as `&[u8]`.
+//! *[See also the `str` primitive type](../primitive.str.html).*
+
-#![doc(primitive = "str")]
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
use core::str as core_str;
use core::str::pattern::Pattern;
use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
+use core::mem;
use rustc_unicode::str::{UnicodeStr, Utf16Encoder};
use vec_deque::VecDeque;
use rustc_unicode;
use vec::Vec;
use slice::SliceConcatExt;
+use boxed::Box;
pub use core::str::{FromStr, Utf8Error};
pub use core::str::{Lines, LinesAny, CharRange};
pub use rustc_unicode::str::{SplitWhitespace, Words, Graphemes, GraphemeIndices};
pub use core::str::pattern;
-/*
-Section: Creating a string
-*/
-
impl<S: Borrow<str>> SliceConcatExt<str> for [S] {
type Output = String;
result
}
- fn connect(&self, sep: &str) -> String {
+ fn join(&self, sep: &str) -> String {
if self.is_empty() {
return String::new();
}
}
result
}
-}
-/*
-Section: Iterators
-*/
+ fn connect(&self, sep: &str) -> String {
+ self.join(sep)
+ }
+}
// Helper functions used for Unicode normalization
fn canonical_sort(comb: &mut [(char, u8)]) {
fn size_hint(&self) -> (usize, Option<usize>) { self.encoder.size_hint() }
}
-/*
-Section: Misc
-*/
-
// Return the initial codepoint accumulator for the first byte.
// The first byte is special, only want bottom 5 bits for width 2, 4 bits
// for width 3, and 3 bits for width 4
}
}
-/*
-Section: CowString
-*/
-
-/*
-Section: Trait implementations
-*/
-
-
/// Any string that can be represented as a slice.
#[lang = "str"]
#[cfg(not(test))]
since = "1.0.0")]
#[unstable(feature = "unicode",
reason = "this functionality may only be provided by libunicode")]
+ #[inline]
pub fn width(&self, is_cjk: bool) -> usize {
UnicodeStr::width(self, is_cjk)
}
/// considered to be
/// boundaries.
///
- /// # Panics
- ///
- /// Panics if `index` is greater than `self.len()`.
+ /// Returns `false` if `index` is greater than `self.len()`.
///
/// # Examples
///
/// ```
- /// # #![feature(str_char)]
+ /// #![feature(str_char)]
+ ///
/// let s = "Löwe 老虎 Léopard";
/// assert!(s.is_char_boundary(0));
/// // start of `老`
with the existence of the char_indices iterator or \
this method may want to be replaced with checked \
slicing")]
+ #[inline]
pub fn is_char_boundary(&self, index: usize) -> bool {
core_str::StrExt::is_char_boundary(self, index)
}
///
/// # Unsafety
///
- /// Caller must check both UTF-8 character boundaries and the boundaries
+ /// Caller must check both UTF-8 sequence boundaries and the boundaries
/// of the entire slice as
/// well.
///
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
core_str::StrExt::slice_unchecked(self, begin, end)
}
- /// Returns a slice of the string from the character range [`begin`..`end`).
+ /// Takes a bytewise mutable slice from a string.
+ ///
+ /// Same as `slice_unchecked`, but works with `&mut str` instead of `&str`.
+ #[unstable(feature = "str_slice_mut", reason = "recently added")]
+ #[inline]
+ pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
+ core_str::StrExt::slice_mut_unchecked(self, begin, end)
+ }
+
+ /// Returns a slice of the string from the range [`begin`..`end`) where indices
+ /// are counted in code points.
///
/// That is, start at the `begin`-th code point of the string and continue
/// to the `end`-th code point. This does not detect or handle edge cases
- /// such as leaving a combining character as the first code point of the
+ /// such as leaving a combining character as the first `char` of the
/// string.
///
/// Due to the design of UTF-8, this operation is `O(end)`. Use slicing
- /// syntax if you want to use byte indices rather than codepoint indices.
+ /// syntax if you want to use `O(1)` byte indices instead.
///
/// # Panics
///
/// # Examples
///
/// ```
- /// # #![feature(slice_chars)]
+ /// #![feature(slice_chars)]
+ ///
/// let s = "Löwe 老虎 Léopard";
///
/// assert_eq!(s.slice_chars(0, 4), "Löwe");
/// ```
#[unstable(feature = "slice_chars",
reason = "may have yet to prove its worth")]
+ #[deprecated(since = "1.3.0",
+ reason = "can be implemented with char_indices and \
+ hasn't seen enough use to justify inclusion")]
+ #[inline]
pub fn slice_chars(&self, begin: usize, end: usize) -> &str {
core_str::StrExt::slice_chars(self, begin, end)
}
- /// Given a byte position, return the next char and its index.
+ /// Given a byte position, return the next code point and its index.
///
- /// This can be used to iterate over the Unicode characters of a string.
+ /// This can be used to iterate over the Unicode code points of a string.
///
/// # Panics
///
/// If `i` is greater than or equal to the length of the string.
- /// If `i` is not the index of the beginning of a valid UTF-8 character.
+ /// If `i` is not the index of the beginning of a valid UTF-8 sequence.
///
/// # Examples
///
- /// This example manually iterates through the characters of a string;
+ /// This example manually iterates through the code points of a string;
/// this should normally be
/// done by `.chars()` or `.char_indices()`.
///
/// ```
- /// # #![feature(str_char, core)]
+ /// #![feature(str_char, core)]
+ ///
/// use std::str::CharRange;
///
- /// let s = "中华Việt Nam";
+ /// let s = "中华Việt Nam";
/// let mut i = 0;
/// while i < s.len() {
/// let CharRange {ch, next} = s.char_range_at(i);
/// 3: 华
/// 6: V
/// 7: i
- /// 8: ệ
- /// 11: t
- /// 12:
- /// 13: N
- /// 14: a
- /// 15: m
+ /// 8: e
+ /// 9: ̣
+ /// 11: ̂
+ /// 13: t
+ /// 14:
+ /// 15: N
+ /// 16: a
+ /// 17: m
/// ```
#[unstable(feature = "str_char",
reason = "often replaced by char_indices, this method may \
be removed in favor of just char_at() or eventually \
removed altogether")]
+ #[inline]
pub fn char_range_at(&self, start: usize) -> CharRange {
core_str::StrExt::char_range_at(self, start)
}
/// Given a byte position, return the previous `char` and its position.
///
- /// This function can be used to iterate over a Unicode string in reverse.
+ /// This function can be used to iterate over a Unicode code points in reverse.
+ ///
+ /// Note that Unicode has many features, such as combining marks, ligatures,
+ /// and direction marks, that need to be taken into account to correctly reverse a string.
///
/// Returns 0 for next index if called on start index 0.
///
/// # Panics
///
/// If `i` is greater than the length of the string.
- /// If `i` is not an index following a valid UTF-8 character.
+ /// If `i` is not an index following a valid UTF-8 sequence.
///
/// # Examples
///
- /// This example manually iterates through the characters of a string;
+ /// This example manually iterates through the code points of a string;
/// this should normally be
/// done by `.chars().rev()` or `.char_indices()`.
///
/// ```
- /// # #![feature(str_char, core)]
+ /// #![feature(str_char, core)]
+ ///
/// use std::str::CharRange;
///
- /// let s = "中华Việt Nam";
+ /// let s = "中华Việt Nam";
/// let mut i = s.len();
/// while i > 0 {
/// let CharRange {ch, next} = s.char_range_at_reverse(i);
/// This outputs:
///
/// ```text
- /// 16: m
- /// 15: a
- /// 14: N
- /// 13:
- /// 12: t
- /// 11: ệ
+ /// 18: m
+ /// 17: a
+ /// 16: N
+ /// 15:
+ /// 14: t
+ /// 13: ̂
+ /// 11: ̣
+ /// 9: e
/// 8: i
/// 7: V
/// 6: 华
reason = "often replaced by char_indices, this method may \
be removed in favor of just char_at_reverse() or \
eventually removed altogether")]
+ #[inline]
pub fn char_range_at_reverse(&self, start: usize) -> CharRange {
core_str::StrExt::char_range_at_reverse(self, start)
}
/// # Panics
///
/// If `i` is greater than or equal to the length of the string.
- /// If `i` is not the index of the beginning of a valid UTF-8 character.
+ /// If `i` is not the index of the beginning of a valid UTF-8 sequence.
///
/// # Examples
///
/// ```
- /// # #![feature(str_char)]
+ /// #![feature(str_char)]
+ ///
/// let s = "abπc";
/// assert_eq!(s.char_at(1), 'b');
/// assert_eq!(s.char_at(2), 'π');
+ /// assert_eq!(s.char_at(4), 'c');
/// ```
#[unstable(feature = "str_char",
reason = "frequently replaced by the chars() iterator, this \
future; it is normally replaced by chars/char_indices \
iterators or by getting the first char from a \
subslice")]
+ #[inline]
pub fn char_at(&self, i: usize) -> char {
core_str::StrExt::char_at(self, i)
}
/// # Panics
///
/// If `i` is greater than the length of the string.
- /// If `i` is not an index following a valid UTF-8 character.
+ /// If `i` is not an index following a valid UTF-8 sequence.
///
/// # Examples
///
/// ```
- /// # #![feature(str_char)]
+ /// #![feature(str_char)]
+ ///
/// let s = "abπc";
/// assert_eq!(s.char_at_reverse(1), 'a');
/// assert_eq!(s.char_at_reverse(2), 'b');
+ /// assert_eq!(s.char_at_reverse(3), 'π');
/// ```
#[unstable(feature = "str_char",
reason = "see char_at for more details, but reverse semantics \
are also somewhat unclear, especially with which \
cases generate panics")]
+ #[inline]
pub fn char_at_reverse(&self, i: usize) -> char {
core_str::StrExt::char_at_reverse(self, i)
}
- /// Retrieves the first character from a `&str` and returns it.
+ /// Retrieves the first code point from a `&str` and returns it.
+ ///
+ /// Note that a single Unicode character (grapheme cluster)
+ /// can be composed of multiple `char`s.
///
/// This does not allocate a new string; instead, it returns a slice that
- /// points one character
- /// beyond the character that was shifted.
+ /// points one code point beyond the code point that was shifted.
///
- /// If the slice does not contain any characters, None is returned instead.
+ /// `None` is returned if the slice is empty.
///
/// # Examples
///
/// ```
- /// # #![feature(str_char)]
- /// let s = "Löwe 老虎 Léopard";
+ /// #![feature(str_char)]
+ ///
+ /// let s = "Łódź"; // \u{141}o\u{301}dz\u{301}
/// let (c, s1) = s.slice_shift_char().unwrap();
///
- /// assert_eq!(c, 'L');
- /// assert_eq!(s1, "öwe 老虎 Léopard");
+ /// assert_eq!(c, 'Ł');
+ /// assert_eq!(s1, "ódź");
///
/// let (c, s2) = s1.slice_shift_char().unwrap();
///
- /// assert_eq!(c, 'ö');
- /// assert_eq!(s2, "we 老虎 Léopard");
+ /// assert_eq!(c, 'o');
+ /// assert_eq!(s2, "\u{301}dz\u{301}");
/// ```
#[unstable(feature = "str_char",
reason = "awaiting conventions about shifting and slices and \
may not be warranted with the existence of the chars \
and/or char_indices iterators")]
+ #[inline]
pub fn slice_shift_char(&self) -> Option<(char, &str)> {
core_str::StrExt::slice_shift_char(self)
}
/// Divide one string slice into two at an index.
///
/// The index `mid` is a byte offset from the start of the string
- /// that must be on a character boundary.
+ /// that must be on a `char` boundary.
///
/// Return slices `&self[..mid]` and `&self[mid..]`.
///
/// # Panics
///
- /// Panics if `mid` is beyond the last character of the string,
- /// or if it is not on a character boundary.
+ /// Panics if `mid` is beyond the last code point of the string,
+ /// or if it is not on a `char` boundary.
///
/// # Examples
/// ```
- /// # #![feature(collections)]
+ /// #![feature(str_split_at)]
+ ///
/// let s = "Löwe 老虎 Léopard";
/// let first_space = s.find(' ').unwrap_or(s.len());
/// let (a, b) = s.split_at(first_space);
/// assert_eq!(b, " 老虎 Léopard");
/// ```
#[inline]
+ #[unstable(feature = "str_split_at", reason = "recently added")]
pub fn split_at(&self, mid: usize) -> (&str, &str) {
core_str::StrExt::split_at(self, mid)
}
- /// An iterator over the codepoints of `self`.
+ /// Divide one mutable string slice into two at an index.
+ #[inline]
+ #[unstable(feature = "str_split_at", reason = "recently added")]
+ pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) {
+ core_str::StrExt::split_at_mut(self, mid)
+ }
+
+ /// An iterator over the code points of `self`.
+ ///
+ /// In Unicode relationship between code points and characters is complex.
+ /// A single character may be composed of multiple code points
+ /// (e.g. diacritical marks added to a letter), and a single code point
+ /// (e.g. Hangul syllable) may contain multiple characters.
+ ///
+ /// For iteration over human-readable characters a grapheme cluster iterator
+ /// may be more appropriate. See the [unicode-segmentation crate][1].
+ ///
+ /// [1]: https://crates.io/crates/unicode-segmentation
///
/// # Examples
///
/// ```
- /// let v: Vec<char> = "abc åäö".chars().collect();
+ /// let v: Vec<char> = "ASCII żółć 🇨🇭 한".chars().collect();
///
- /// assert_eq!(v, ['a', 'b', 'c', ' ', 'å', 'ä', 'ö']);
+ /// assert_eq!(v, ['A', 'S', 'C', 'I', 'I', ' ',
+ /// 'z', '\u{307}', 'o', '\u{301}', 'ł', 'c', '\u{301}', ' ',
+ /// '\u{1f1e8}', '\u{1f1ed}', ' ', '한']);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn chars(&self) -> Chars {
core_str::StrExt::chars(self)
}
- /// An iterator over the characters of `self` and their byte offsets.
+ /// An iterator over the `char`s of `self` and their byte offsets.
///
/// # Examples
///
/// ```
- /// let v: Vec<(usize, char)> = "abc".char_indices().collect();
- /// let b = vec![(0, 'a'), (1, 'b'), (2, 'c')];
+ /// let v: Vec<(usize, char)> = "A🇨🇭".char_indices().collect();
+ /// let b = vec![(0, 'A'), (1, '\u{1f1e8}'), (5, '\u{1f1ed}')];
///
/// assert_eq!(v, b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn char_indices(&self) -> CharIndices {
core_str::StrExt::char_indices(self)
}
/// assert_eq!(v, b"bors".to_vec());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn bytes(&self) -> Bytes {
core_str::StrExt::bytes(self)
}
/// # Examples
///
/// ```
- /// let some_words = " Mary had\ta little \n\t lamb";
+ /// let some_words = " Mary had\ta\u{2009}little \n\t lamb";
/// let v: Vec<&str> = some_words.split_whitespace().collect();
///
/// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]);
/// ```
#[stable(feature = "split_whitespace", since = "1.1.0")]
+ #[inline]
pub fn split_whitespace(&self) -> SplitWhitespace {
UnicodeStr::split_whitespace(self)
}
/// # Examples
///
/// ```
- /// # #![feature(str_words)]
- /// # #![allow(deprecated)]
- /// let some_words = " Mary had\ta little \n\t lamb";
+ /// #![feature(str_words)]
+ /// #![allow(deprecated)]
+ ///
+ /// let some_words = " Mary had\ta\u{2009}little \n\t lamb";
/// let v: Vec<&str> = some_words.words().collect();
///
/// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]);
#[unstable(feature = "str_words",
reason = "the precise algorithm to use is unclear")]
#[allow(deprecated)]
+ #[inline]
pub fn words(&self) -> Words {
UnicodeStr::words(self)
}
/// assert_eq!(v, ["foo", "bar", "", "baz"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn lines(&self) -> Lines {
core_str::StrExt::lines(self)
}
/// assert_eq!(v, ["foo", "bar", "", "baz"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn lines_any(&self) -> LinesAny {
core_str::StrExt::lines_any(self)
}
/// # Examples
///
/// ```
- /// # #![feature(unicode, core)]
+ /// #![feature(unicode, core)]
+ ///
/// let gr1 = "a\u{310}e\u{301}o\u{308}\u{332}".graphemes(true).collect::<Vec<&str>>();
/// let b: &[_] = &["a\u{310}", "e\u{301}", "o\u{308}\u{332}"];
///
/// # Examples
///
/// ```
- /// # #![feature(unicode, core)]
+ /// #![feature(unicode, core)]
+ ///
/// let gr_inds = "a̐éö̲\r\n".grapheme_indices(true).collect::<Vec<(usize, &str)>>();
/// let b: &[_] = &[(0, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")];
///
/// # Examples
///
/// ```
- /// # #![feature(str_match_indices)]
+ /// #![feature(str_match_indices)]
+ ///
/// let v: Vec<(usize, usize)> = "abcXXXabcYYYabc".match_indices("abc").collect();
/// assert_eq!(v, [(0, 3), (6, 9), (12, 15)]);
///
/// # Examples
///
/// ```
- /// # #![feature(str_match_indices)]
+ /// #![feature(str_match_indices)]
+ ///
/// let v: Vec<(usize, usize)> = "abcXXXabcYYYabc".rmatch_indices("abc").collect();
/// assert_eq!(v, [(12, 15), (6, 9), (0, 3)]);
///
/// # Examples
///
/// ```
- /// # #![feature(subslice_offset)]
+ /// #![feature(subslice_offset)]
+ ///
/// let string = "a\nb\nc";
/// let lines: Vec<&str> = string.lines().collect();
///
/// ```
#[unstable(feature = "subslice_offset",
reason = "awaiting convention about comparability of arbitrary slices")]
+ #[deprecated(since = "1.3.0",
+ reason = "replaced with other pattern-related methods")]
pub fn subslice_offset(&self, inner: &str) -> usize {
core_str::StrExt::subslice_offset(self, inner)
}
/// # Examples
///
/// ```
- /// #![feature(str_casing)]
- ///
/// let s = "HELLO";
/// assert_eq!(s.to_lowercase(), "hello");
/// ```
/// # Examples
///
/// ```
- /// #![feature(str_casing)]
- ///
/// let s = "hello";
/// assert_eq!(s.to_uppercase(), "HELLO");
/// ```
pub fn escape_unicode(&self) -> String {
self.chars().flat_map(|c| c.escape_unicode()).collect()
}
+
+ /// Converts the `Box<str>` into a `String` without copying or allocating.
+ #[unstable(feature = "box_str",
+ reason = "recently added, matches RFC")]
+ pub fn into_string(self: Box<str>) -> String {
+ unsafe {
+ let slice = mem::transmute::<Box<str>, Box<[u8]>>(self);
+ String::from_utf8_unchecked(slice.into_vec())
+ }
+ }
}
use borrow::{Cow, IntoCow};
use range::RangeArgument;
use str::{self, FromStr, Utf8Error, Chars};
-use vec::{DerefVec, Vec, as_vec};
+use vec::Vec;
+use boxed::Box;
/// A growable string stored as a UTF-8 encoded buffer.
#[derive(Clone, PartialOrd, Eq, Ord)]
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// let s = String::from("hello");
/// assert_eq!(&s[..], "hello");
/// ```
/// Creates a new `String` from a length, capacity, and pointer.
///
- /// This is unsafe because:
+ /// # Unsafety
+ ///
+ /// This is _very_ unsafe because:
///
- /// * We call `Vec::from_raw_parts` to get a `Vec<u8>`;
+ /// * We call `Vec::from_raw_parts` to get a `Vec<u8>`. Therefore, this
+ /// function inherits all of its unsafety, see [its
+ /// documentation](../vec/struct.Vec.html#method.from_raw_parts)
+ /// for the invariants it expects, they also apply to this function.
/// * We assume that the `Vec` contains valid UTF-8.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// # Examples
///
/// ```
- /// # #![feature(drain)]
+ /// #![feature(drain)]
///
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
string: self_ptr,
}
}
+
+ /// Converts the string into `Box<str>`.
+ ///
+ /// Note that this will drop any excess capacity.
+ #[unstable(feature = "box_str",
+ reason = "recently added, matches RFC")]
+ pub fn into_boxed_slice(self) -> Box<str> {
+ let slice = self.vec.into_boxed_slice();
+ unsafe { mem::transmute::<Box<[u8]>, Box<str>>(slice) }
+ }
}
impl FromUtf8Error {
}
}
+#[stable(feature = "derefmut_for_string", since = "1.2.0")]
+impl ops::IndexMut<ops::Range<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.2.0")]
+impl ops::IndexMut<ops::RangeTo<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.2.0")]
+impl ops::IndexMut<ops::RangeFrom<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.2.0")]
+impl ops::IndexMut<ops::RangeFull> for String {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
+ unsafe { mem::transmute(&mut *self.vec) }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Deref for String {
type Target = str;
}
}
-/// Wrapper type providing a `&String` reference via `Deref`.
-#[unstable(feature = "collections")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-#[allow(deprecated)]
-pub struct DerefString<'a> {
- x: DerefVec<'a, u8>
-}
-
-#[allow(deprecated)]
-impl<'a> Deref for DerefString<'a> {
- type Target = String;
-
+#[stable(feature = "derefmut_for_string", since = "1.2.0")]
+impl ops::DerefMut for String {
#[inline]
- fn deref<'b>(&'b self) -> &'b String {
- unsafe { mem::transmute(&*self.x) }
+ fn deref_mut(&mut self) -> &mut str {
+ unsafe { mem::transmute(&mut self.vec[..]) }
}
}
-/// Converts a string slice to a wrapper type providing a `&String` reference.
-///
-/// # Examples
-///
-/// ```
-/// # #![feature(collections)]
-/// use std::string::as_string;
-///
-/// // Let's pretend we have a function that requires `&String`
-/// fn string_consumer(s: &String) {
-/// assert_eq!(s, "foo");
-/// }
-///
-/// // Provide a `&String` from a `&str` without allocating
-/// string_consumer(&as_string("foo"));
-/// ```
-#[unstable(feature = "collections")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-#[allow(deprecated)]
-pub fn as_string<'a>(x: &'a str) -> DerefString<'a> {
- DerefString { x: as_vec(x.as_bytes()) }
-}
-
/// Error returned from `String::from`
#[unstable(feature = "str_parse_error", reason = "may want to be replaced with \
Void if it ever exists")]
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
-
+use alloc::raw_vec::RawVec;
use alloc::boxed::Box;
-use alloc::heap::{EMPTY, allocate, reallocate, deallocate};
-use core::cmp::max;
+use alloc::heap::EMPTY;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
-use core::intrinsics::{arith_offset, assume};
-use core::iter::{repeat, FromIterator};
+use core::intrinsics::{arith_offset, assume, drop_in_place};
+use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Index, IndexMut, Deref};
use core::ops;
use core::ptr;
-use core::ptr::Unique;
use core::slice;
-use core::isize;
-use core::usize;
use borrow::{Cow, IntoCow};
use super::range::RangeArgument;
-// FIXME- fix places which assume the max vector allowed has memory usize::MAX.
-const MAX_MEMORY_SIZE: usize = isize::MAX as usize;
-
/// A growable list type, written `Vec<T>` but pronounced 'vector.'
///
/// # Examples
/// assert_eq!(vec, [1, 2, 3, 4]);
/// ```
///
+/// It can also initialize each element of a `Vec<T>` with a given value:
+///
+/// ```
+/// let vec = vec![0; 5];
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+/// ```
+///
/// Use a `Vec<T>` as an efficient stack:
///
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Vec<T> {
- ptr: Unique<T>,
+ buf: RawVec<T>,
len: usize,
- cap: usize,
}
-unsafe impl<T: Send> Send for Vec<T> { }
-unsafe impl<T: Sync> Sync for Vec<T> { }
-
////////////////////////////////////////////////////////////////////////////////
// Inherent methods
////////////////////////////////////////////////////////////////////////////////
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> Vec<T> {
- // We want ptr to never be NULL so instead we set it to some arbitrary
- // non-null value which is fine since we never call deallocate on the ptr
- // if cap is 0. The reason for this is because the pointer of a slice
- // being NULL would break the null pointer optimization for enums.
- unsafe { Vec::from_raw_parts(EMPTY as *mut T, 0, 0) }
+ Vec { buf: RawVec::new(), len: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(capacity: usize) -> Vec<T> {
- if mem::size_of::<T>() == 0 {
- unsafe { Vec::from_raw_parts(EMPTY as *mut T, 0, usize::MAX) }
- } else if capacity == 0 {
- Vec::new()
- } else {
- let size = capacity.checked_mul(mem::size_of::<T>())
- .expect("capacity overflow");
- let ptr = unsafe { allocate(size, mem::align_of::<T>()) };
- if ptr.is_null() { ::alloc::oom() }
- unsafe { Vec::from_raw_parts(ptr as *mut T, 0, capacity) }
- }
+ Vec { buf: RawVec::with_capacity(capacity), len: 0 }
}
/// Creates a `Vec<T>` directly from the raw components of another vector.
///
- /// This is highly unsafe, due to the number of invariants that aren't checked.
+ /// # Unsafety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via `String`/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures.
///
/// # Examples
///
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize,
capacity: usize) -> Vec<T> {
Vec {
- ptr: Unique::new(ptr),
+ buf: RawVec::from_raw_parts(ptr, capacity),
len: length,
- cap: capacity,
}
}
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
- self.cap
+ self.buf.cap()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
- if self.cap - self.len < additional {
- const ERR_MSG: &'static str = "Vec::reserve: `isize` overflow";
-
- let new_min_cap = self.len.checked_add(additional).expect(ERR_MSG);
- if new_min_cap > MAX_MEMORY_SIZE { panic!(ERR_MSG) }
- self.grow_capacity(match new_min_cap.checked_next_power_of_two() {
- Some(x) if x > MAX_MEMORY_SIZE => MAX_MEMORY_SIZE,
- None => MAX_MEMORY_SIZE,
- Some(x) => x,
- });
- }
+ self.buf.reserve(self.len, additional);
}
/// Reserves the minimum capacity for exactly `additional` more elements to
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
- if self.cap - self.len < additional {
- match self.len.checked_add(additional) {
- None => panic!("Vec::reserve: `usize` overflow"),
- Some(new_cap) => self.grow_capacity(new_cap)
- }
- }
+ self.buf.reserve_exact(self.len, additional);
}
/// Shrinks the capacity of the vector as much as possible.
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
- if mem::size_of::<T>() == 0 { return }
-
- if self.len == 0 {
- if self.cap != 0 {
- unsafe {
- dealloc(*self.ptr, self.cap)
- }
- self.cap = 0;
- }
- } else if self.cap != self.len {
- unsafe {
- // Overflow check is unnecessary as the vector is already at
- // least this large.
- let ptr = reallocate(*self.ptr as *mut u8,
- self.cap * mem::size_of::<T>(),
- self.len * mem::size_of::<T>(),
- mem::align_of::<T>()) as *mut T;
- if ptr.is_null() { ::alloc::oom() }
- self.ptr = Unique::new(ptr);
- }
- self.cap = self.len;
- }
+ self.buf.shrink_to_fit(self.len);
}
/// Converts the vector into Box<[T]>.
/// `shrink_to_fit()`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_boxed_slice(mut self) -> Box<[T]> {
- self.shrink_to_fit();
unsafe {
- let xs: Box<[T]> = Box::from_raw(&mut *self);
+ self.shrink_to_fit();
+ let buf = ptr::read(&self.buf);
mem::forget(self);
- xs
+ buf.into_box()
}
}
pub fn insert(&mut self, index: usize, element: T) {
let len = self.len();
assert!(index <= len);
+
// space for the new element
- self.reserve(1);
+ if len == self.buf.cap() { self.buf.double(); }
unsafe { // infallible
// The spot to put the new value
let p = self.as_mut_ptr().offset(index as isize);
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
- ptr::copy(&*p, p.offset(1), len - index);
+ ptr::copy(p, p.offset(1), len - index);
// Write it in, overwriting the first copy of the `index`th
// element.
- ptr::write(&mut *p, element);
+ ptr::write(p, element);
}
self.set_len(len + 1);
}
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
- ptr::copy(&*ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.offset(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
/// # Examples
///
/// ```
- /// let mut vec = vec!(1, 2);
+ /// let mut vec = vec![1, 2];
/// vec.push(3);
/// assert_eq!(vec, [1, 2, 3]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push(&mut self, value: T) {
- #[cold]
- #[inline(never)]
- fn resize<T>(vec: &mut Vec<T>) {
- let old_size = vec.cap * mem::size_of::<T>();
- if old_size >= MAX_MEMORY_SIZE { panic!("capacity overflow") }
- let mut size = max(old_size, 2 * mem::size_of::<T>()) * 2;
- if old_size > size || size > MAX_MEMORY_SIZE {
- size = MAX_MEMORY_SIZE;
- }
- unsafe {
- let ptr = alloc_or_realloc(*vec.ptr, old_size, size);
- if ptr.is_null() { ::alloc::oom() }
- vec.ptr = Unique::new(ptr);
- }
- vec.cap = max(vec.cap, 2) * 2;
- }
-
- if mem::size_of::<T>() == 0 {
- // zero-size types consume no memory, so we can't rely on the
- // address space running out
- self.len = self.len.checked_add(1).expect("length overflow");
- mem::forget(value);
- return
- }
-
- if self.len == self.cap {
- resize(self);
- }
-
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.cap() { self.buf.double(); }
unsafe {
- let end = (*self.ptr).offset(self.len as isize);
- ptr::write(&mut *end, value);
+ let end = self.as_mut_ptr().offset(self.len as isize);
+ ptr::write(end, value);
self.len += 1;
}
}
/// # Examples
///
/// ```
- /// # #![feature(append)]
+ /// #![feature(append)]
+ ///
/// let mut vec = vec![1, 2, 3];
/// let mut vec2 = vec![4, 5, 6];
/// vec.append(&mut vec2);
#[unstable(feature = "append",
reason = "new API, waiting for dust to settle")]
pub fn append(&mut self, other: &mut Self) {
- if mem::size_of::<T>() == 0 {
- // zero-size types consume no memory, so we can't rely on the
- // address space running out
- self.len = self.len.checked_add(other.len()).expect("length overflow");
- unsafe { other.set_len(0) }
- return;
- }
self.reserve(other.len());
let len = self.len();
unsafe {
/// # Examples
///
/// ```
- /// # #![feature(drain)]
+ /// #![feature(drain)]
///
/// // Draining using `..` clears the whole vector.
/// let mut v = vec![1, 2, 3];
/// # Examples
///
/// ```
- /// # #![feature(map_in_place)]
+ /// #![feature(map_in_place)]
+ ///
/// let v = vec![0, 1, 2];
/// let w = v.map_in_place(|i| i + 3);
/// assert_eq!(&w[..], &[3, 4, 5]);
/// ```
#[unstable(feature = "map_in_place",
reason = "API may change to provide stronger guarantees")]
+ #[deprecated(since = "1.3.0",
+ reason = "unclear that the API is strong enough and has \
+ not proven itself")]
pub fn map_in_place<U, F>(self, mut f: F) -> Vec<U> where F: FnMut(T) -> U {
// FIXME: Assert statically that the types `T` and `U` have the same
// size.
/// # Examples
///
/// ```
- /// # #![feature(split_off)]
+ /// #![feature(split_off)]
+ ///
/// let mut vec = vec![1,2,3];
/// let vec2 = vec.split_off(1);
/// assert_eq!(vec, [1]);
/// # Examples
///
/// ```
- /// # #![feature(vec_resize)]
+ /// #![feature(vec_resize)]
+ ///
/// let mut vec = vec!["hello"];
/// vec.resize(3, "world");
/// assert_eq!(vec, ["hello", "world", "world"]);
let len = self.len();
if new_len > len {
- self.extend(repeat(value).take(new_len - len));
+ self.extend_with_element(new_len - len, value);
} else {
self.truncate(new_len);
}
}
+ /// Extends the vector by `n` additional clones of `value`.
+ fn extend_with_element(&mut self, n: usize, value: T) {
+ self.reserve(n);
+
+ unsafe {
+ let len = self.len();
+ let mut ptr = self.as_mut_ptr().offset(len as isize);
+ // Write all elements except the last one
+ for i in 1..n {
+ ptr::write(ptr, value.clone());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case clone() panics
+ self.set_len(len + i);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value);
+ self.set_len(len + n);
+ }
+ }
+ }
+
/// Appends all elements in a slice to the `Vec`.
///
/// Iterates over the slice `other`, clones each element, and then appends
/// # Examples
///
/// ```
- /// # #![feature(vec_push_all)]
+ /// #![feature(vec_push_all)]
+ ///
/// let mut vec = vec![1];
/// vec.push_all(&[2, 3, 4]);
/// assert_eq!(vec, [1, 2, 3, 4]);
// Internal methods and functions
////////////////////////////////////////////////////////////////////////////////
-impl<T> Vec<T> {
- /// Reserves capacity for exactly `capacity` elements in the given vector.
- ///
- /// If the capacity for `self` is already equal to or greater than the
- /// requested capacity, then no action is taken.
- fn grow_capacity(&mut self, capacity: usize) {
- if mem::size_of::<T>() == 0 { return }
-
- if capacity > self.cap {
- let size = capacity.checked_mul(mem::size_of::<T>())
- .expect("capacity overflow");
- unsafe {
- let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size);
- if ptr.is_null() { ::alloc::oom() }
- self.ptr = Unique::new(ptr);
- }
- self.cap = capacity;
- }
- }
-}
-
-// FIXME: #13996: need a way to mark the return value as `noalias`
-#[inline(never)]
-unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: usize, size: usize) -> *mut T {
- if old_size == 0 {
- allocate(size, mem::align_of::<T>()) as *mut T
- } else {
- reallocate(ptr as *mut u8, old_size, size, mem::align_of::<T>()) as *mut T
- }
-}
-
-#[inline]
-unsafe fn dealloc<T>(ptr: *mut T, len: usize) {
- if mem::size_of::<T>() != 0 {
- deallocate(ptr as *mut u8,
- len * mem::size_of::<T>(),
- mem::align_of::<T>())
- }
-}
-
#[doc(hidden)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
- unsafe {
- let mut v = Vec::with_capacity(n);
- let mut ptr = v.as_mut_ptr();
-
- // Write all elements except the last one
- for i in 1..n {
- ptr::write(ptr, Clone::clone(&elem));
- ptr = ptr.offset(1);
- v.set_len(i); // Increment the length in every step in case Clone::clone() panics
- }
-
- if n > 0 {
- // We can write the last element directly without cloning needlessly
- ptr::write(ptr, elem);
- v.set_len(n);
- }
-
- v
- }
+ let mut v = Vec::with_capacity(n);
+ v.extend_with_element(n, elem);
+ v
}
////////////////////////////////////////////////////////////////////////////////
fn deref(&self) -> &[T] {
unsafe {
- let p = *self.ptr;
+ let p = self.buf.ptr();
assume(p != 0 as *mut T);
slice::from_raw_parts(p, self.len)
}
impl<T> ops::DerefMut for Vec<T> {
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
- let ptr = *self.ptr;
+ let ptr = self.buf.ptr();
assume(!ptr.is_null());
slice::from_raw_parts_mut(ptr, self.len)
}
/// }
/// ```
#[inline]
- fn into_iter(self) -> IntoIter<T> {
+ fn into_iter(mut self) -> IntoIter<T> {
unsafe {
- let ptr = *self.ptr;
+ let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
- let cap = self.cap;
let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
arith_offset(ptr as *const i8, self.len() as isize) as *const T
} else {
ptr.offset(self.len() as isize) as *const T
};
+ let buf = ptr::read(&self.buf);
mem::forget(self);
- IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end }
+ IntoIter { buf: buf, ptr: begin, end: end }
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Vec<T> {
fn drop(&mut self) {
- // This is (and should always remain) a no-op if the fields are
- // zeroed (when moving out, because of #[unsafe_no_drop_flag]).
- if self.cap != 0 && self.cap != mem::POST_DROP_USIZE {
- unsafe {
- for x in self.iter() {
- ptr::read(x);
- }
- dealloc(*self.ptr, self.cap)
+ // NOTE: this is currently abusing the fact that ZSTs can't impl Drop.
+ // Or rather, that impl'ing Drop makes them not zero-sized. This is
+ // OK because exactly when this stops being a valid assumption, we
+ // don't need unsafe_no_drop_flag shenanigans anymore.
+ if self.buf.unsafe_no_drop_flag_needs_drop() {
+ for x in self.iter_mut() {
+ unsafe { drop_in_place(x); }
}
}
+ // RawVec handles deallocation
}
}
/// An iterator that moves out of a vector.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
- allocation: *mut T, // the block of memory allocated for the vector
- cap: usize, // the capacity of the vector
+ buf: RawVec<T>,
ptr: *const T,
end: *const T
}
#[inline]
/// Drops all items that have not yet been moved and returns the empty vector.
#[unstable(feature = "iter_to_vec")]
+ #[deprecated(since = "1.3.0", reason = "replaced by drain()")]
pub fn into_inner(mut self) -> Vec<T> {
unsafe {
for _x in self.by_ref() { }
- let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
+ let buf = ptr::read(&self.buf);
mem::forget(self);
- Vec::from_raw_parts(allocation, 0, cap)
+ Vec { buf: buf, len: 0 }
}
}
}
impl<T> Drop for IntoIter<T> {
fn drop(&mut self) {
// destroy the remaining elements
- if self.cap != 0 {
- for _x in self.by_ref() {}
- unsafe {
- dealloc(self.allocation, self.cap);
- }
- }
+ for _x in self.by_ref() {}
+
+ // RawVec handles deallocation
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
-////////////////////////////////////////////////////////////////////////////////
-// Conversion from &[T] to &Vec<T>
-////////////////////////////////////////////////////////////////////////////////
-
-/// Wrapper type providing a `&Vec<T>` reference via `Deref`.
-#[unstable(feature = "collections")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-pub struct DerefVec<'a, T:'a> {
- x: Vec<T>,
- l: PhantomData<&'a T>,
-}
-
-#[unstable(feature = "collections")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-#[allow(deprecated)]
-impl<'a, T> Deref for DerefVec<'a, T> {
- type Target = Vec<T>;
-
- fn deref<'b>(&'b self) -> &'b Vec<T> {
- &self.x
- }
-}
-
-// Prevent the inner `Vec<T>` from attempting to deallocate memory.
-#[stable(feature = "rust1", since = "1.0.0")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-#[allow(deprecated)]
-impl<'a, T> Drop for DerefVec<'a, T> {
- fn drop(&mut self) {
- self.x.len = 0;
- self.x.cap = 0;
- }
-}
-
-/// Converts a slice to a wrapper type providing a `&Vec<T>` reference.
-///
-/// # Examples
-///
-/// ```
-/// # #![feature(collections)]
-/// use std::vec::as_vec;
-///
-/// // Let's pretend we have a function that requires `&Vec<i32>`
-/// fn vec_consumer(s: &Vec<i32>) {
-/// assert_eq!(s, &[1, 2, 3]);
-/// }
-///
-/// // Provide a `&Vec<i32>` from a `&[i32]` without allocating
-/// let values = [1, 2, 3];
-/// vec_consumer(&as_vec(&values));
-/// ```
-#[unstable(feature = "collections")]
-#[deprecated(since = "1.2.0",
- reason = "replaced with deref coercions or Borrow")]
-#[allow(deprecated)]
-pub fn as_vec<'a, T>(x: &'a [T]) -> DerefVec<'a, T> {
- unsafe {
- DerefVec {
- x: Vec::from_raw_parts(x.as_ptr() as *mut T, x.len(), x.len()),
- l: PhantomData,
- }
- }
-}
-
////////////////////////////////////////////////////////////////////////////////
// Partial vec, used for map_in_place
////////////////////////////////////////////////////////////////////////////////
use core::cmp::Ordering;
use core::fmt;
use core::iter::{self, repeat, FromIterator, RandomAccessIterator};
-use core::mem;
use core::ops::{Index, IndexMut};
-use core::ptr::{self, Unique};
+use core::ptr;
use core::slice;
use core::hash::{Hash, Hasher};
use core::cmp;
-use alloc::heap;
+use alloc::raw_vec::RawVec;
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
/// `VecDeque` is a growable ring buffer, which can be used as a
/// double-ended queue efficiently.
+///
+/// The "default" usage of this type as a queue is to use `push_back` to add to the queue, and
+/// `pop_front` to remove from the queue. `extend` and `append` push onto the back in this manner,
+/// and iterating over `VecDeque` goes front to back.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VecDeque<T> {
// tail and head are pointers into the buffer. Tail always points
// to the first element that could be read, Head always points
// to where data should be written.
- // If tail == head the buffer is empty. The length of the ringbuf
+ // If tail == head the buffer is empty. The length of the ringbuffer
// is defined as the distance between the two.
tail: usize,
head: usize,
- cap: usize,
- ptr: Unique<T>,
+ buf: RawVec<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for VecDeque<T> {
fn drop(&mut self) {
self.clear();
- unsafe {
- if mem::size_of::<T>() != 0 {
- heap::deallocate(*self.ptr as *mut u8,
- self.cap * mem::size_of::<T>(),
- mem::align_of::<T>())
- }
- }
+ // RawVec handles deallocation
}
}
}
impl<T> VecDeque<T> {
+ /// Marginally more convenient
+ #[inline]
+ fn ptr(&self) -> *mut T {
+ self.buf.ptr()
+ }
+
+ /// Marginally more convenient
+ #[inline]
+ fn cap(&self) -> usize {
+ self.buf.cap()
+ }
+
/// Turn ptr into a slice
#[inline]
unsafe fn buffer_as_slice(&self) -> &[T] {
- slice::from_raw_parts(*self.ptr, self.cap)
+ slice::from_raw_parts(self.ptr(), self.cap())
}
/// Turn ptr into a mut slice
#[inline]
unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
- slice::from_raw_parts_mut(*self.ptr, self.cap)
+ slice::from_raw_parts_mut(self.ptr(), self.cap())
}
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
- ptr::read(self.ptr.offset(off as isize))
+ ptr::read(self.ptr().offset(off as isize))
}
/// Writes an element into the buffer, moving it.
#[inline]
- unsafe fn buffer_write(&mut self, off: usize, t: T) {
- ptr::write(self.ptr.offset(off as isize), t);
+ unsafe fn buffer_write(&mut self, off: usize, value: T) {
+ ptr::write(self.ptr().offset(off as isize), value);
}
- /// Returns true iff the buffer is at capacity
+ /// Returns true if and only if the buffer is at capacity
#[inline]
- fn is_full(&self) -> bool { self.cap - self.len() == 1 }
+ fn is_full(&self) -> bool { self.cap() - self.len() == 1 }
/// Returns the index in the underlying buffer for a given logical element
/// index.
#[inline]
- fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) }
+ fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap()) }
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
- wrap_index(idx.wrapping_add(addend), self.cap)
+ wrap_index(idx.wrapping_add(addend), self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
- wrap_index(idx.wrapping_sub(subtrahend), self.cap)
+ wrap_index(idx.wrapping_sub(subtrahend), self.cap())
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
- debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
- self.cap);
- debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
- self.cap);
+ debug_assert!(dst + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
+ self.cap());
+ debug_assert!(src + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
+ self.cap());
ptr::copy(
- self.ptr.offset(src as isize),
- self.ptr.offset(dst as isize),
+ self.ptr().offset(src as isize),
+ self.ptr().offset(dst as isize),
len);
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
- debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
- self.cap);
- debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
- self.cap);
+ debug_assert!(dst + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
+ self.cap());
+ debug_assert!(src + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
+ self.cap());
ptr::copy_nonoverlapping(
- self.ptr.offset(src as isize),
- self.ptr.offset(dst as isize),
+ self.ptr().offset(src as isize),
+ self.ptr().offset(dst as isize),
len);
}
+
+ /// Frobs the head and tail sections around to handle the fact that we
+ /// just reallocated. Unsafe because it trusts old_cap.
+ #[inline]
+ unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
+ let new_cap = self.cap();
+
+ // Move the shortest contiguous section of the ring buffer
+ // T H
+ // [o o o o o o o . ]
+ // T H
+ // A [o o o o o o o . . . . . . . . . ]
+ // H T
+ // [o o . o o o o o ]
+ // T H
+ // B [. . . o o o o o o o . . . . . . ]
+ // H T
+ // [o o o o o . o o ]
+ // H T
+ // C [o o o o o . . . . . . . . . o o ]
+
+ if self.tail <= self.head { // A
+ // Nop
+ } else if self.head < old_cap - self.tail { // B
+ self.copy_nonoverlapping(old_cap, 0, self.head);
+ self.head += old_cap;
+ debug_assert!(self.head > self.tail);
+ } else { // C
+ let new_tail = new_cap - (old_cap - self.tail);
+ self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
+ self.tail = new_tail;
+ debug_assert!(self.head < self.tail);
+ }
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
+ }
}
impl<T> VecDeque<T> {
// +1 since the ringbuffer always leaves one space empty
let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
assert!(cap > n, "capacity overflow");
- let size = cap.checked_mul(mem::size_of::<T>())
- .expect("capacity overflow");
-
- let ptr = unsafe {
- if mem::size_of::<T>() != 0 {
- let ptr = heap::allocate(size, mem::align_of::<T>()) as *mut T;;
- if ptr.is_null() { ::alloc::oom() }
- Unique::new(ptr)
- } else {
- Unique::new(heap::EMPTY as *mut T)
- }
- };
VecDeque {
tail: 0,
head: 0,
- cap: cap,
- ptr: ptr,
+ buf: RawVec::with_capacity(cap),
}
}
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
- /// assert_eq!(buf.get(1).unwrap(), &4);
+ /// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn get(&self, i: usize) -> Option<&T> {
- if i < self.len() {
- let idx = self.wrap_add(self.tail, i);
- unsafe { Some(&*self.ptr.offset(idx as isize)) }
+ pub fn get(&self, index: usize) -> Option<&T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&*self.ptr().offset(idx as isize)) }
} else {
None
}
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn get_mut(&mut self, i: usize) -> Option<&mut T> {
- if i < self.len() {
- let idx = self.wrap_add(self.tail, i);
- unsafe { Some(&mut *self.ptr.offset(idx as isize)) }
+ pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
} else {
None
}
let ri = self.wrap_add(self.tail, i);
let rj = self.wrap_add(self.tail, j);
unsafe {
- ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize))
+ ptr::swap(self.ptr().offset(ri as isize), self.ptr().offset(rj as isize))
}
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn capacity(&self) -> usize { self.cap - 1 }
+ pub fn capacity(&self) -> usize { self.cap() - 1 }
/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
- /// `Ringbuf`. The collection may reserve more space to avoid frequent reallocations.
+ /// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
- let new_len = self.len() + additional;
- assert!(new_len + 1 > self.len(), "capacity overflow");
- if new_len > self.capacity() {
- let count = (new_len + 1).next_power_of_two();
- assert!(count >= new_len + 1);
-
- if mem::size_of::<T>() != 0 {
- let old = self.cap * mem::size_of::<T>();
- let new = count.checked_mul(mem::size_of::<T>())
- .expect("capacity overflow");
- unsafe {
- let ptr = heap::reallocate(*self.ptr as *mut u8,
- old,
- new,
- mem::align_of::<T>()) as *mut T;
- if ptr.is_null() { ::alloc::oom() }
- self.ptr = Unique::new(ptr);
- }
- }
-
- // Move the shortest contiguous section of the ring buffer
- // T H
- // [o o o o o o o . ]
- // T H
- // A [o o o o o o o . . . . . . . . . ]
- // H T
- // [o o . o o o o o ]
- // T H
- // B [. . . o o o o o o o . . . . . . ]
- // H T
- // [o o o o o . o o ]
- // H T
- // C [o o o o o . . . . . . . . . o o ]
-
- let oldcap = self.cap;
- self.cap = count;
-
- if self.tail <= self.head { // A
- // Nop
- } else if self.head < oldcap - self.tail { // B
- unsafe {
- self.copy_nonoverlapping(oldcap, 0, self.head);
- }
- self.head += oldcap;
- debug_assert!(self.head > self.tail);
- } else { // C
- let new_tail = count - (oldcap - self.tail);
- unsafe {
- self.copy_nonoverlapping(new_tail, self.tail, oldcap - self.tail);
- }
- self.tail = new_tail;
- debug_assert!(self.head < self.tail);
- }
- debug_assert!(self.head < self.cap);
- debug_assert!(self.tail < self.cap);
- debug_assert!(self.cap.count_ones() == 1);
+ let old_cap = self.cap();
+ let used_cap = self.len() + 1;
+ let new_cap = used_cap
+ .checked_add(additional)
+ .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+ .expect("capacity overflow");
+
+ if new_cap > self.capacity() {
+ self.buf.reserve_exact(used_cap, new_cap - used_cap);
+ unsafe { self.handle_cap_increase(old_cap); }
}
}
- /// Shrinks the capacity of the ringbuf as much as possible.
+ /// Shrinks the capacity of the `VecDeque` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
- /// ringbuf that there is space for a few more elements.
+ /// `VecDeque` that there is space for a few more elements.
///
/// # Examples
///
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// ```
pub fn shrink_to_fit(&mut self) {
// +1 since the ringbuffer always leaves one space empty
- // len + 1 can't overflow for an existing, well-formed ringbuf.
+ // len + 1 can't overflow for an existing, well-formed ringbuffer.
let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
- if target_cap < self.cap {
+ if target_cap < self.cap() {
// There are three cases of interest:
// All elements are out of desired bounds
// Elements are contiguous, and head is out of desired bounds
// H T
// [o o o o o . o o ]
debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
- let len = self.cap - self.tail;
+ let len = self.cap() - self.tail;
let new_tail = target_cap - len;
unsafe {
self.copy_nonoverlapping(new_tail, self.tail, len);
debug_assert!(self.head < self.tail);
}
- if mem::size_of::<T>() != 0 {
- let old = self.cap * mem::size_of::<T>();
- let new_size = target_cap * mem::size_of::<T>();
- unsafe {
- let ptr = heap::reallocate(*self.ptr as *mut u8,
- old,
- new_size,
- mem::align_of::<T>()) as *mut T;
- if ptr.is_null() { ::alloc::oom() }
- self.ptr = Unique::new(ptr);
- }
- }
- self.cap = target_cap;
- debug_assert!(self.head < self.cap);
- debug_assert!(self.tail < self.cap);
- debug_assert!(self.cap.count_ones() == 1);
+ self.buf.shrink_to_fit(target_cap);
+
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
}
}
- /// Shortens a ringbuf, dropping excess elements from the back.
+ /// Shortens a `VecDeque`, dropping excess elements from the back.
///
- /// If `len` is greater than the ringbuf's current length, this has no
+ /// If `len` is greater than the `VecDeque`'s current length, this has no
/// effect.
///
/// # Examples
///
/// ```
- /// # #![feature(deque_extras)]
+ /// #![feature(deque_extras)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn len(&self) -> usize { count(self.tail, self.head, self.cap) }
+ pub fn len(&self) -> usize { count(self.tail, self.head, self.cap()) }
/// Returns true if the buffer contains no elements
///
/// # Examples
///
/// ```
- /// # #![feature(drain)]
+ /// #![feature(drain)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn push_front(&mut self, t: T) {
+ pub fn push_front(&mut self, value: T) {
if self.is_full() {
- self.reserve(1);
+ let old_cap = self.cap();
+ self.buf.double();
+ unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
self.tail = self.wrap_sub(self.tail, 1);
let tail = self.tail;
- unsafe { self.buffer_write(tail, t); }
+ unsafe { self.buffer_write(tail, value); }
}
/// Appends an element to the back of a buffer
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn push_back(&mut self, t: T) {
+ pub fn push_back(&mut self, value: T) {
if self.is_full() {
- self.reserve(1);
+ let old_cap = self.cap();
+ self.buf.double();
+ unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
let head = self.head;
self.head = self.wrap_add(self.head, 1);
- unsafe { self.buffer_write(head, t) }
+ unsafe { self.buffer_write(head, value) }
}
/// Removes the last element from a buffer and returns it, or `None` if
self.tail <= self.head
}
- /// Removes an element from anywhere in the ringbuf and returns it, replacing it with the last
- /// element.
+ /// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
+ /// last element.
///
/// This does not preserve ordering, but is O(1).
///
/// # Examples
///
/// ```
- /// # #![feature(deque_extras)]
+ /// #![feature(deque_extras)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_back_remove(0), None);
- /// buf.push_back(5);
- /// buf.push_back(99);
- /// buf.push_back(15);
- /// buf.push_back(20);
- /// buf.push_back(10);
- /// assert_eq!(buf.swap_back_remove(1), Some(99));
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ ///
+ /// assert_eq!(buf.swap_back_remove(0), Some(1));
+ /// assert_eq!(buf.len(), 2);
+ /// assert_eq!(buf[0], 3);
+ /// assert_eq!(buf[1], 2);
/// ```
#[unstable(feature = "deque_extras",
reason = "the naming of this function may be altered")]
self.pop_back()
}
- /// Removes an element from anywhere in the ringbuf and returns it,
+ /// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
/// # Examples
///
/// ```
- /// # #![feature(deque_extras)]
+ /// #![feature(deque_extras)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_front_remove(0), None);
- /// buf.push_back(15);
- /// buf.push_back(5);
- /// buf.push_back(10);
- /// buf.push_back(99);
- /// buf.push_back(20);
- /// assert_eq!(buf.swap_front_remove(3), Some(99));
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ ///
+ /// assert_eq!(buf.swap_front_remove(2), Some(3));
+ /// assert_eq!(buf.len(), 2);
+ /// assert_eq!(buf[0], 2);
+ /// assert_eq!(buf[1], 1);
/// ```
#[unstable(feature = "deque_extras",
reason = "the naming of this function may be altered")]
self.pop_front()
}
- /// Inserts an element at position `i` within the ringbuf. Whichever
+ /// Inserts an element at `index` within the `VecDeque`. Whichever
/// end is closer to the insertion point will be moved to make room,
/// and all the affected elements will be moved to new positions.
///
/// # Panics
///
- /// Panics if `i` is greater than ringbuf's length
+ /// Panics if `index` is greater than `VecDeque`'s length
///
/// # Examples
/// ```
- /// # #![feature(collections)]
+ /// #![feature(collections)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(10);
/// buf.push_back(12);
- /// buf.insert(1,11);
+ /// buf.insert(1, 11);
/// assert_eq!(Some(&11), buf.get(1));
/// ```
- pub fn insert(&mut self, i: usize, t: T) {
- assert!(i <= self.len(), "index out of bounds");
+ pub fn insert(&mut self, index: usize, value: T) {
+ assert!(index <= self.len(), "index out of bounds");
if self.is_full() {
- self.reserve(1);
+ let old_cap = self.cap();
+ self.buf.double();
+ unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
// A - The element that should be after the insertion point
// M - Indicates element was moved
- let idx = self.wrap_add(self.tail, i);
+ let idx = self.wrap_add(self.tail, index);
- let distance_to_tail = i;
- let distance_to_head = self.len() - i;
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
let contiguous = self.is_contiguous();
match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
- (true, true, _) if i == 0 => {
+ (true, true, _) if index == 0 => {
// push_front
//
// T
let new_tail = self.wrap_sub(self.tail, 1);
self.copy(new_tail, self.tail, 1);
- // Already moved the tail, so we only copy `i - 1` elements.
- self.copy(self.tail, self.tail + 1, i - 1);
+ // Already moved the tail, so we only copy `index - 1` elements.
+ self.copy(self.tail, self.tail + 1, index - 1);
self.tail = new_tail;
},
// [o o o o o o . . . . o o I A o o]
// M M
- self.copy(self.tail - 1, self.tail, i);
+ self.copy(self.tail - 1, self.tail, index);
self.tail -= 1;
},
(false, false, true) => unsafe {
self.copy(1, 0, self.head);
// copy last element into empty spot at bottom of buffer
- self.copy(0, self.cap - 1, 1);
+ self.copy(0, self.cap() - 1, 1);
// move elements from idx to end forward not including ^ element
- self.copy(idx + 1, idx, self.cap - 1 - idx);
+ self.copy(idx + 1, idx, self.cap() - 1 - idx);
self.head += 1;
},
// M M M
// copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap - self.tail);
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
- self.copy(self.cap - 1, 0, 1);
+ self.copy(self.cap() - 1, 0, 1);
self.tail -= 1;
},
// M M M M M M
// copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap - self.tail);
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
- self.copy(self.cap - 1, 0, 1);
+ self.copy(self.cap() - 1, 0, 1);
// move elements from idx-1 to end forward not including ^ element
self.copy(0, 1, idx - 1);
}
// tail might've been changed so we need to recalculate
- let new_idx = self.wrap_add(self.tail, i);
+ let new_idx = self.wrap_add(self.tail, index);
unsafe {
- self.buffer_write(new_idx, t);
+ self.buffer_write(new_idx, value);
}
}
- /// Removes and returns the element at position `i` from the ringbuf.
+ /// Removes and returns the element at `index` from the `VecDeque`.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
- /// Returns `None` if `i` is out of bounds.
+ /// Returns `None` if `index` is out of bounds.
///
/// # Examples
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
- /// buf.push_back(5);
- /// buf.push_back(10);
- /// buf.push_back(12);
- /// buf.push_back(15);
- /// buf.remove(2);
- /// assert_eq!(Some(&15), buf.get(2));
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ ///
+ /// assert_eq!(buf.remove(1), Some(2));
+ /// assert_eq!(buf.get(1), Some(&3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn remove(&mut self, i: usize) -> Option<T> {
- if self.is_empty() || self.len() <= i {
+ pub fn remove(&mut self, index: usize) -> Option<T> {
+ if self.is_empty() || self.len() <= index {
return None;
}
// R - Indicates element that is being removed
// M - Indicates element was moved
- let idx = self.wrap_add(self.tail, i);
+ let idx = self.wrap_add(self.tail, index);
let elem = unsafe {
Some(self.buffer_read(idx))
};
- let distance_to_tail = i;
- let distance_to_head = self.len() - i;
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
let contiguous = self.is_contiguous();
// [. . . . o o o o o o . . . . . .]
// M M
- self.copy(self.tail + 1, self.tail, i);
+ self.copy(self.tail + 1, self.tail, index);
self.tail += 1;
},
(true, false, _) => unsafe {
// [o o o o o o . . . . . . o o o o]
// M M
- self.copy(self.tail + 1, self.tail, i);
+ self.copy(self.tail + 1, self.tail, index);
self.tail = self.wrap_add(self.tail, 1);
},
(false, false, false) => unsafe {
// M
// draw in elements in the tail section
- self.copy(idx, idx + 1, self.cap - idx - 1);
+ self.copy(idx, idx + 1, self.cap() - idx - 1);
// Prevents underflow.
if self.head != 0 {
// copy first element into empty spot
- self.copy(self.cap - 1, 0, 1);
+ self.copy(self.cap() - 1, 0, 1);
// move elements in the head section backwards
self.copy(0, 1, self.head - 1);
self.copy(1, 0, idx);
// copy last element into empty spot
- self.copy(0, self.cap - 1, 1);
+ self.copy(0, self.cap() - 1, 1);
// move elements from tail to end forward, excluding the last one
- self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1);
+ self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
self.tail = self.wrap_add(self.tail, 1);
}
/// # Examples
///
/// ```
- /// # #![feature(split_off)]
+ /// #![feature(split_off)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
let amount_in_first = first_len - at;
ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
- *other.ptr,
+ other.ptr(),
amount_in_first);
// just take all of the second half.
ptr::copy_nonoverlapping(second_half.as_ptr(),
- other.ptr.offset(amount_in_first as isize),
+ other.ptr().offset(amount_in_first as isize),
second_len);
} else {
// `at` lies in the second half, need to factor in the elements we skipped
let offset = at - first_len;
let amount_in_second = second_len - offset;
ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
- *other.ptr,
+ other.ptr(),
amount_in_second);
}
}
/// # Examples
///
/// ```
- /// # #![feature(append)]
+ /// #![feature(append)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// # Examples
///
/// ```
- /// # #![feature(vec_deque_retain)]
+ /// #![feature(vec_deque_retain)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
}
impl<T: Clone> VecDeque<T> {
- /// Modifies the ringbuf in-place so that `len()` is equal to new_len,
+ /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
/// either by removing excess elements or by appending copies of a value to the back.
///
/// # Examples
///
/// ```
- /// # #![feature(deque_extras)]
+ /// #![feature(deque_extras)]
+ ///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
type Output = A;
#[inline]
- fn index(&self, i: usize) -> &A {
- self.get(i).expect("Out of bounds access")
+ fn index(&self, index: usize) -> &A {
+ self.get(index).expect("Out of bounds access")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
#[inline]
- fn index_mut(&mut self, i: usize) -> &mut A {
- self.get_mut(i).expect("Out of bounds access")
+ fn index_mut(&mut self, index: usize) -> &mut A {
+ self.get_mut(index).expect("Out of bounds access")
}
}
assert_eq!(tester.swap_front_remove(idx), Some(len * 2 - 1 - i));
}
}
- assert!(tester.tail < tester.cap);
- assert!(tester.head < tester.cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
tester.insert(to_insert, to_insert);
- assert!(tester.tail < tester.cap);
- assert!(tester.head < tester.cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
tester.push_back(1234);
}
tester.remove(to_remove);
- assert!(tester.tail < tester.cap);
- assert!(tester.head < tester.cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
- assert!(tester.tail < tester.cap);
- assert!(tester.head < tester.cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
tester.push_back(i);
}
let result = tester.split_off(at);
- assert!(tester.tail < tester.cap);
- assert!(tester.head < tester.cap);
- assert!(result.tail < result.cap);
- assert!(result.head < result.cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert!(result.tail < result.cap());
+ assert!(result.head < result.cap());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
//! A simple map based on a vector for small integer keys. Space requirements
//! are O(highest integer key).
+#![deprecated(reason = "VecMap moved to crates.io as vec_map",
+ since = "1.3.0")]
+#![unstable(feature = "vecmap", reason = "deprecated")]
+#![allow(deprecated)]
+
#![allow(missing_docs)]
-#![unstable(feature = "vecmap",
- reason = "may not be stabilized in the standard library")]
use self::Entry::*;
/// # Examples
///
/// ```
-/// # #![feature(vecmap)]
+/// #![feature(vecmap)]
+///
/// use std::collections::VecMap;
///
/// let mut months = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
/// let mut map: VecMap<&str> = VecMap::new();
/// ```
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
/// let mut map: VecMap<&str> = VecMap::with_capacity(10);
/// ```
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
/// let map: VecMap<String> = VecMap::with_capacity(10);
/// assert!(map.capacity() >= 10);
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
/// let mut map: VecMap<&str> = VecMap::new();
/// map.reserve_len(10);
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
/// let mut map: VecMap<&str> = VecMap::new();
/// map.reserve_len_exact(10);
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap, append)]
+ /// #![feature(vecmap, append)]
+ ///
/// use std::collections::VecMap;
///
/// let mut a = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap, split_off)]
+ /// #![feature(vecmap, split_off)]
+ ///
/// use std::collections::VecMap;
///
/// let mut a = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap, drain)]
+ /// #![feature(vecmap, drain)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut a = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut a = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut a = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap, entry)]
+ /// #![feature(vecmap, entry)]
+ ///
/// use std::collections::VecMap;
///
/// let mut count: VecMap<u32> = VecMap::new();
/// # Examples
///
/// ```
- /// # #![feature(vecmap)]
+ /// #![feature(vecmap)]
+ ///
/// use std::collections::VecMap;
///
/// let mut map = VecMap::new();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![feature(ascii)]
#![feature(append)]
-#![feature(bit_vec_append_split_off)]
#![feature(bitset)]
#![feature(bitvec)]
#![feature(box_syntax)]
#![feature(rustc_private)]
#![feature(slice_bytes)]
#![feature(slice_chars)]
-#![feature(slice_extras)]
+#![feature(slice_splits)]
#![feature(slice_position_elem)]
#![feature(split_off)]
#![feature(step_by)]
#![feature(str_char)]
#![feature(str_escape)]
#![feature(str_match_indices)]
+#![feature(str_split_at)]
#![feature(str_utf16)]
+#![feature(box_str)]
#![feature(subslice_offset)]
#![feature(test)]
#![feature(unboxed_closures)]
#![feature(vec_deque_retain)]
#![feature(vec_from_raw_buf)]
#![feature(vec_push_all)]
-#![feature(vec_split_off)]
#![feature(vecmap)]
+#![allow(deprecated)]
+
#[macro_use] extern crate log;
extern crate collections;
}
#[test]
-fn test_tail() {
+fn test_split_first() {
let mut a = vec![11];
let b: &[i32] = &[];
- assert_eq!(a.tail(), b);
+ assert!(b.split_first().is_none());
+ assert_eq!(a.split_first(), Some((&11, b)));
a = vec![11, 12];
let b: &[i32] = &[12];
- assert_eq!(a.tail(), b);
+ assert_eq!(a.split_first(), Some((&11, b)));
}
#[test]
-fn test_tail_mut() {
+fn test_split_first_mut() {
let mut a = vec![11];
let b: &mut [i32] = &mut [];
- assert!(a.tail_mut() == b);
+ assert!(b.split_first_mut().is_none());
+ assert!(a.split_first_mut() == Some((&mut 11, b)));
a = vec![11, 12];
let b: &mut [_] = &mut [12];
- assert!(a.tail_mut() == b);
+ assert!(a.split_first_mut() == Some((&mut 11, b)));
}
#[test]
-#[should_panic]
-fn test_tail_empty() {
- let a = Vec::<i32>::new();
- a.tail();
-}
-
-#[test]
-#[should_panic]
-fn test_tail_mut_empty() {
- let mut a = Vec::<i32>::new();
- a.tail_mut();
-}
-
-#[test]
-fn test_init() {
+fn test_split_last() {
let mut a = vec![11];
let b: &[i32] = &[];
- assert_eq!(a.init(), b);
+ assert!(b.split_last().is_none());
+ assert_eq!(a.split_last(), Some((&11, b)));
a = vec![11, 12];
let b: &[_] = &[11];
- assert_eq!(a.init(), b);
+ assert_eq!(a.split_last(), Some((&12, b)));
}
#[test]
-fn test_init_mut() {
+fn test_split_last_mut() {
let mut a = vec![11];
let b: &mut [i32] = &mut [];
- assert!(a.init_mut() == b);
+ assert!(b.split_last_mut().is_none());
+ assert!(a.split_last_mut() == Some((&mut 11, b)));
+
a = vec![11, 12];
let b: &mut [_] = &mut [11];
- assert!(a.init_mut() == b);
-}
-
-#[test]
-#[should_panic]
-fn test_init_empty() {
- let a = Vec::<i32>::new();
- a.init();
-}
-
-#[test]
-#[should_panic]
-fn test_init_mut_empty() {
- let mut a = Vec::<i32>::new();
- a.init_mut();
+ assert!(a.split_last_mut() == Some((&mut 12, b)));
}
#[test]
assert_eq!(d, [1, 2, 3]);
let v: &[&[_]] = &[&[1], &[2, 3]];
- assert_eq!(v.connect(&0), [1, 0, 2, 3]);
+ assert_eq!(v.join(&0), [1, 0, 2, 3]);
let v: &[&[_]] = &[&[1], &[2], &[3]];
- assert_eq!(v.connect(&0), [1, 0, 2, 0, 3]);
+ assert_eq!(v.join(&0), [1, 0, 2, 0, 3]);
}
#[test]
-fn test_connect() {
+fn test_join() {
let v: [Vec<i32>; 0] = [];
- assert_eq!(v.connect(&0), []);
- assert_eq!([vec![1], vec![2, 3]].connect(&0), [1, 0, 2, 3]);
- assert_eq!([vec![1], vec![2], vec![3]].connect(&0), [1, 0, 2, 0, 3]);
+ assert_eq!(v.join(&0), []);
+ assert_eq!([vec![1], vec![2, 3]].join(&0), [1, 0, 2, 3]);
+ assert_eq!([vec![1], vec![2], vec![3]].join(&0), [1, 0, 2, 0, 3]);
let v: [&[_]; 2] = [&[1], &[2, 3]];
- assert_eq!(v.connect(&0), [1, 0, 2, 3]);
+ assert_eq!(v.join(&0), [1, 0, 2, 3]);
let v: [&[_]; 3] = [&[1], &[2], &[3]];
- assert_eq!(v.connect(&0), [1, 0, 2, 0, 3]);
+ assert_eq!(v.join(&0), [1, 0, 2, 0, 3]);
}
#[test]
assert_eq!(ys, [1, 2, 3]);
}
+#[test]
+fn test_box_slice_clone() {
+ let data = vec![vec![0, 1], vec![0], vec![1]];
+ let data2 = data.clone().into_boxed_slice().clone().to_vec();
+
+ assert_eq!(data, data2);
+}
+
+#[test]
+fn test_box_slice_clone_panics() {
+ use std::sync::Arc;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::thread::spawn;
+
+ struct Canary {
+ count: Arc<AtomicUsize>,
+ panics: bool
+ }
+
+ impl Drop for Canary {
+ fn drop(&mut self) {
+ self.count.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ impl Clone for Canary {
+ fn clone(&self) -> Self {
+ if self.panics { panic!() }
+
+ Canary {
+ count: self.count.clone(),
+ panics: self.panics
+ }
+ }
+ }
+
+ let drop_count = Arc::new(AtomicUsize::new(0));
+ let canary = Canary { count: drop_count.clone(), panics: false };
+ let panic = Canary { count: drop_count.clone(), panics: true };
+
+ spawn(move || {
+ // When xs is dropped, +5.
+ let xs = vec![canary.clone(), canary.clone(), canary.clone(),
+ panic, canary].into_boxed_slice();
+
+ // When panic is cloned, +3.
+ xs.clone();
+ }).join().unwrap_err();
+
+ // Total = 8
+ assert_eq!(drop_count.load(Ordering::SeqCst), 8);
+}
+
mod bench {
use std::iter::repeat;
use std::{mem, ptr};
#[bench]
fn mut_iterator(b: &mut Bencher) {
- let mut v: Vec<_> = repeat(0).take(100).collect();
+ let mut v = vec![0; 100];
b.iter(|| {
let mut i = 0;
}
#[bench]
- fn connect(b: &mut Bencher) {
+ fn join(b: &mut Bencher) {
let xss: Vec<Vec<i32>> =
(0..100).map(|i| (0..i).collect()).collect();
b.iter(|| {
- xss.connect(&0)
+ xss.join(&0)
});
}
#[bench]
fn zero_1kb_from_elem(b: &mut Bencher) {
b.iter(|| {
- repeat(0u8).take(1024).collect::<Vec<_>>()
+ vec![0u8; 1024]
});
}
fn random_inserts(b: &mut Bencher) {
let mut rng = thread_rng();
b.iter(|| {
- let mut v: Vec<_> = repeat((0, 0)).take(30).collect();
+ let mut v = vec![(0, 0); 30];
for _ in 0..100 {
let l = v.len();
v.insert(rng.gen::<usize>() % (l + 1),
fn random_removes(b: &mut Bencher) {
let mut rng = thread_rng();
b.iter(|| {
- let mut v: Vec<_> = repeat((0, 0)).take(130).collect();
+ let mut v = vec![(0, 0); 130];
for _ in 0..100 {
let l = v.len();
v.remove(rng.gen::<usize>() % l);
test_concat!("abc", ["", "a", "bc"]);
}
-macro_rules! test_connect {
+macro_rules! test_join {
($expected: expr, $string: expr, $delim: expr) => {
{
- let s = $string.connect($delim);
+ let s = $string.join($delim);
assert_eq!($expected, s);
}
}
}
#[test]
-fn test_connect_for_different_types() {
- test_connect!("a-b", ["a", "b"], "-");
+fn test_join_for_different_types() {
+ test_join!("a-b", ["a", "b"], "-");
let hyphen = "-".to_string();
- test_connect!("a-b", [s("a"), s("b")], &*hyphen);
- test_connect!("a-b", vec!["a", "b"], &*hyphen);
- test_connect!("a-b", &*vec!["a", "b"], "-");
- test_connect!("a-b", vec![s("a"), s("b")], "-");
+ test_join!("a-b", [s("a"), s("b")], &*hyphen);
+ test_join!("a-b", vec!["a", "b"], &*hyphen);
+ test_join!("a-b", &*vec!["a", "b"], "-");
+ test_join!("a-b", vec![s("a"), s("b")], "-");
}
#[test]
-fn test_connect_for_different_lengths() {
+fn test_join_for_different_lengths() {
let empty: &[&str] = &[];
- test_connect!("", empty, "-");
- test_connect!("a", ["a"], "-");
- test_connect!("a-b", ["a", "b"], "-");
- test_connect!("-a-bc", ["", "a", "bc"], "-");
+ test_join!("", empty, "-");
+ test_join!("a", ["a"], "-");
+ test_join!("a-b", ["a", "b"], "-");
+ test_join!("-a-bc", ["", "a", "bc"], "-");
}
#[test]
assert_eq!(b, "");
}
+#[test]
+fn test_split_at_mut() {
+ use std::ascii::AsciiExt;
+ let mut s = "Hello World".to_string();
+ {
+ let (a, b) = s.split_at_mut(5);
+ a.make_ascii_uppercase();
+ b.make_ascii_lowercase();
+ }
+ assert_eq!(s, "HELLO world");
+}
+
#[test]
#[should_panic]
fn test_split_at_boundscheck() {
let s = "ศไทย中华Việt Nam";
- let (a, b) = s.split_at(1);
+ s.split_at(1);
}
#[test]
assert_eq!("aéDžßfiᾀ".to_uppercase(), "AÉDŽSSFIἈΙ");
}
+#[test]
+fn test_into_string() {
+ // The only way to acquire a Box<str> in the first place is through a String, so just
+ // test that we can round-trip between Box<str> and String.
+ let string = String::from("Some text goes here");
+ assert_eq!(string.clone().into_boxed_slice().into_string(), string);
+}
+
+#[test]
+fn test_box_slice_clone() {
+ let data = String::from("hello HELLO hello HELLO yes YES 5 中ä华!!!");
+ let data2 = data.clone().into_boxed_slice().clone().into_string();
+
+ assert_eq!(data, data2);
+}
+
mod pattern {
use std::str::pattern::Pattern;
use std::str::pattern::{Searcher, ReverseSearcher};
Match (4, 6),
Reject(6, 7),
]);
+ make_test!(str_searcher_ascii_haystack_seq, "bb", "abbcbbbbd", [
+ Reject(0, 1),
+ Match (1, 3),
+ Reject(3, 4),
+ Match (4, 6),
+ Match (6, 8),
+ Reject(8, 9),
+ ]);
make_test!(str_searcher_empty_needle_ascii_haystack, "", "abbcbbd", [
Match (0, 0),
Reject(0, 1),
}
#[bench]
- fn bench_connect(b: &mut Bencher) {
+ fn bench_join(b: &mut Bencher) {
let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
let sep = "→";
let v = vec![s, s, s, s, s, s, s, s, s, s];
b.iter(|| {
- assert_eq!(v.connect(sep).len(), s.len() * 10 + sep.len() * 9);
+ assert_eq!(v.join(sep).len(), s.len() * 10 + sep.len() * 9);
})
}
use std::borrow::{IntoCow, Cow};
use std::iter::repeat;
-#[allow(deprecated)]
-use std::string::as_string;
use test::Bencher;
-#[test]
-#[allow(deprecated)]
-fn test_as_string() {
- let x = "foo";
- assert_eq!(x, &**as_string(x));
-}
-
#[test]
fn test_from_str() {
let owned: Option<::std::string::String> = "string".parse().ok();
assert_eq!(&a, "foobar");
}
+#[test]
+fn test_into_boxed_slice() {
+ let xs = String::from("hello my name is bob");
+ let ys = xs.into_boxed_slice();
+ assert_eq!(&*ys, "hello my name is bob");
+}
+
#[bench]
fn bench_with_capacity(b: &mut Bencher) {
b.iter(|| {
use std::iter::{FromIterator, repeat};
use std::mem::size_of;
-#[allow(deprecated)]
-use std::vec::as_vec;
use test::Bencher;
}
}
-#[test]
-#[allow(deprecated)]
-fn test_as_vec() {
- let xs = [1u8, 2u8, 3u8];
- assert_eq!(&**as_vec(&xs), xs);
-}
-
-#[test]
-#[allow(deprecated)]
-fn test_as_vec_dtor() {
- let (mut count_x, mut count_y) = (0, 0);
- {
- let xs = &[DropCounter { count: &mut count_x }, DropCounter { count: &mut count_y }];
- assert_eq!(as_vec(xs).len(), 2);
- }
- assert_eq!(count_x, 1);
- assert_eq!(count_y, 1);
-}
-
#[test]
fn test_small_vec_struct() {
assert!(size_of::<Vec<u8>>() == size_of::<usize>() * 3);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Traits for dynamic typing of any `'static` type (through runtime reflection)
-//!
//! This module implements the `Any` trait, which enables dynamic typing
//! of any `'static` type through runtime reflection.
//!
//! `Any` itself can be used to get a `TypeId`, and has more features when used
//! as a trait object. As `&Any` (a borrowed trait object), it has the `is` and
//! `as_ref` methods, to test if the contained value is of a given type, and to
-//! get a reference to the inner value as a type. As`&mut Any`, there is also
+//! get a reference to the inner value as a type. As `&mut Any`, there is also
//! the `as_mut` method, for getting a mutable reference to the inner value.
//! `Box<Any>` adds the `move` method, which will unwrap a `Box<T>` from the
//! object. See the extension traits (`*Ext`) for the full details.
//! Implementations of things like `Eq` for fixed-length arrays
//! up to a certain length. Eventually we should able to generalize
//! to all lengths.
+//!
+//! *[See also the array primitive type](../primitive.array.html).*
-#![doc(primitive = "array")]
#![unstable(feature = "fixed_size_array",
reason = "traits and impls are better expressed through generic \
integer constants")]
use self::Ordering::*;
-use marker::Sync;
+use marker::{Send, Sync};
use intrinsics;
use cell::UnsafeCell;
use default::Default;
+use fmt;
/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
}
}
+unsafe impl<T> Send for AtomicPtr<T> {}
unsafe impl<T> Sync for AtomicPtr<T> {}
/// Atomic memory orderings
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
-/// C++'s](http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync).
+/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub enum Ordering {
unsafe { atomic_swap(self.v.get(), val, order) > 0 }
}
- /// Stores a value into the bool if the current value is the same as the expected value.
+ /// Stores a value into the `bool` if the current value is the same as the `current` value.
///
- /// The return value is always the previous value. If it is equal to `old`, then the value was
- /// updated.
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
///
- /// `swap` also takes an `Ordering` argument which describes the memory ordering of this
- /// operation.
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
+ /// this operation.
///
/// # Examples
///
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
- let old = if old { UINT_TRUE } else { 0 };
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ let current = if current { UINT_TRUE } else { 0 };
let new = if new { UINT_TRUE } else { 0 };
- unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
}
/// Logical "and" with a boolean value.
unsafe { atomic_swap(self.v.get(), val, order) }
}
- /// Stores a value into the isize if the current value is the same as the expected value.
+ /// Stores a value into the `isize` if the current value is the same as the `current` value.
///
- /// The return value is always the previous value. If it is equal to `old`, then the value was
- /// updated.
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
///
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, old: isize, new: isize, order: Ordering) -> isize {
- unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
+ pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
}
/// Add an isize to the current value, returning the previous value.
unsafe { atomic_swap(self.v.get(), val, order) }
}
- /// Stores a value into the usize if the current value is the same as the expected value.
+ /// Stores a value into the `usize` if the current value is the same as the `current` value.
///
- /// The return value is always the previous value. If it is equal to `old`, then the value was
- /// updated.
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
///
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, old: usize, new: usize, order: Ordering) -> usize {
- unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
+ pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
}
/// Add to the current usize, returning the previous value.
unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
- /// Stores a value into the pointer if the current value is the same as the expected value.
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
///
- /// The return value is always the previous value. If it is equal to `old`, then the value was
- /// updated.
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
///
/// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
/// this operation.
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
+ pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe {
- atomic_compare_and_swap(self.p.get() as *mut usize, old as usize,
+ atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
new as usize, order) as *mut T
}
}
}
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
match order {
Acquire => intrinsics::atomic_load_acq(dst),
}
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xchg_acq(dst, val),
/// Returns the old value (like __sync_fetch_and_add).
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xadd_acq(dst, val),
/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xsub_acq(dst, val),
}
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
}
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_and_acq(dst, val),
}
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_nand_acq(dst, val),
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_or_acq(dst, val),
#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xor_acq(dst, val),
}
}
}
+
+macro_rules! impl_Debug {
+ ($($t:ident)*) => ($(
+ #[stable(feature = "atomic_debug", since = "1.3.0")]
+ impl fmt::Debug for $t {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
+ }
+ }
+ )*);
+}
+
+impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
+
+#[stable(feature = "atomic_debug", since = "1.3.0")]
+impl<T> fmt::Debug for AtomicPtr<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
+ }
+}
//! would otherwise be disallowed though, there are occasions when interior mutability might be
//! appropriate, or even *must* be used, e.g.
//!
-//! * Introducing inherited mutability roots to shared types.
+//! * Introducing mutability 'inside' of something immutable
//! * Implementation details of logically-immutable methods.
//! * Mutating implementations of `Clone`.
//!
-//! ## Introducing inherited mutability roots to shared types
+//! ## Introducing mutability 'inside' of something immutable
//!
-//! Shared smart pointer types, including `Rc<T>` and `Arc<T>`, provide containers that can be
+//! Many shared smart pointer types, including `Rc<T>` and `Arc<T>`, provide containers that can be
//! cloned and shared between multiple parties. Because the contained values may be
-//! multiply-aliased, they can only be borrowed as shared references, not mutable references.
-//! Without cells it would be impossible to mutate data inside of shared boxes at all!
+//! multiply-aliased, they can only be borrowed with `&`, not `&mut`. Without cells it would be
+//! impossible to mutate data inside of these smart pointers at all.
//!
//! It's very common then to put a `RefCell<T>` inside shared pointer types to reintroduce
//! mutability:
//! ```
//!
//! Note that this example uses `Rc<T>` and not `Arc<T>`. `RefCell<T>`s are for single-threaded
-//! scenarios. Consider using `Mutex<T>` if you need shared mutability in a multi-threaded
-//! situation.
+//! scenarios. Consider using `RwLock<T>` or `Mutex<T>` if you need shared mutability in a
+//! multi-threaded situation.
//!
//! ## Implementation details of logically-immutable methods
//!
/// # Examples
///
/// ```
- /// # #![feature(as_unsafe_cell)]
+ /// #![feature(as_unsafe_cell)]
+ ///
/// use std::cell::Cell;
///
/// let c = Cell::new(5);
/// # Example
///
/// ```
- /// # #![feature(cell_extras)]
+ /// #![feature(cell_extras)]
+ ///
/// use std::cell::{RefCell, Ref};
///
/// let c = RefCell::new((5, 'b'));
//! For more details, see ::rustc_unicode::char (a.k.a. std::char)
#![allow(non_snake_case)]
-#![doc(primitive = "char")]
#![stable(feature = "core_char", since = "1.2.0")]
use iter::Iterator;
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
None
} else {
- Some(unsafe { transmute(i) })
+ Some(unsafe { from_u32_unchecked(i) })
}
}
+/// Converts a `u32` to a `char`, not checking whether it is a valid Unicode
+/// code point.
+#[inline]
+#[unstable(feature = "char_from_unchecked", reason = "recently added API")]
+pub unsafe fn from_u32_unchecked(i: u32) -> char {
+ transmute(i)
+}
+
/// Converts a number to the character representing it.
///
/// # Return value
panic!("from_digit: radix is too high (maximum 36)");
}
if num < radix {
- unsafe {
- if num < 10 {
- Some(transmute('0' as u32 + num))
- } else {
- Some(transmute('a' as u32 + num - 10))
- }
+ let num = num as u8;
+ if num < 10 {
+ Some((b'0' + num) as char)
+ } else {
+ Some((b'a' + num - 10) as char)
}
} else {
None
Some('{')
}
EscapeUnicodeState::Value(offset) => {
- let v = match ((self.c as i32) >> (offset * 4)) & 0xf {
- i @ 0 ... 9 => '0' as i32 + i,
- i => 'a' as i32 + (i - 10)
- };
+ let c = from_digit(((self.c as u32) >> (offset * 4)) & 0xf, 16).unwrap();
if offset == 0 {
self.state = EscapeUnicodeState::RightBrace;
} else {
self.state = EscapeUnicodeState::Value(offset - 1);
}
- Some(unsafe { transmute(v) })
+ Some(c)
}
EscapeUnicodeState::RightBrace => {
self.state = EscapeUnicodeState::Done;
///
/// - total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true; and
/// - transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+///
+/// When this trait is `derive`d, it produces a lexicographic ordering.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Ord: Eq + PartialOrd<Self> {
/// This method returns an `Ordering` between `self` and `other`.
/// # Examples
///
/// ```
-/// # #![feature(cmp_partial)]
+/// #![feature(cmp_partial)]
+///
/// use std::cmp;
///
/// assert_eq!(Some(1), cmp::partial_min(1, 2));
/// When comparison is impossible:
///
/// ```
-/// # #![feature(cmp_partial)]
+/// #![feature(cmp_partial)]
+///
/// use std::cmp;
///
/// let result = cmp::partial_min(std::f64::NAN, 1.0);
/// ```
#[inline]
#[unstable(feature = "cmp_partial")]
+#[deprecated(since = "1.3.0", reason = "has not proven itself worthwhile")]
pub fn partial_min<T: PartialOrd>(v1: T, v2: T) -> Option<T> {
match v1.partial_cmp(&v2) {
Some(Less) | Some(Equal) => Some(v1),
/// # Examples
///
/// ```
-/// # #![feature(cmp_partial)]
+/// #![feature(cmp_partial)]
+///
/// use std::cmp;
///
/// assert_eq!(Some(2), cmp::partial_max(1, 2));
/// When comparison is impossible:
///
/// ```
-/// # #![feature(cmp_partial)]
+/// #![feature(cmp_partial)]
+///
/// use std::cmp;
///
/// let result = cmp::partial_max(std::f64::NAN, 1.0);
/// ```
#[inline]
#[unstable(feature = "cmp_partial")]
+#[deprecated(since = "1.3.0", reason = "has not proven itself worthwhile")]
pub fn partial_max<T: PartialOrd>(v1: T, v2: T) -> Option<T> {
match v1.partial_cmp(&v2) {
Some(Equal) | Some(Less) => Some(v2),
fn is_pretty(&self) -> bool {
self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0
}
+
+ /// Returns the wrapped `Formatter`.
+ #[unstable(feature = "debug_builder_formatter", reason = "recently added")]
+ pub fn formatter(&mut self) -> &mut fmt::Formatter<'b> {
+ &mut self.fmt
+ }
}
struct DebugInner<'a, 'b: 'a> {
}
}
-/// Format trait for the `:?` format. Useful for debugging, all types
-/// should implement this.
+/// Format trait for the `?` character.
+///
+/// `Debug` should format the output in a programmer-facing, debugging context.
///
/// Generally speaking, you should just `derive` a `Debug` implementation.
///
+/// When used with the alternate format specifier `#?`, the output is pretty-printed.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
/// # Examples
///
/// Deriving an implementation:
/// println!("The origin is: {:?}", origin);
/// ```
///
+/// This outputs:
+///
+/// ```text
+/// The origin is: Point { x: 0, y: 0 }
+/// ```
+///
/// There are a number of `debug_*` methods on `Formatter` to help you with manual
/// implementations, such as [`debug_struct`][debug_struct].
///
+/// `Debug` implementations using either `derive` or the debug builder API
+/// on `Formatter` support pretty printing using the alternate flag: `{:#?}`.
+///
/// [debug_struct]: ../std/fmt/struct.Formatter.html#method.debug_struct
+///
+/// Pretty printing with `#?`:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// println!("The origin is: {:#?}", origin);
+/// ```
+///
+/// This outputs:
+///
+/// ```text
+/// The origin is: Point {
+/// x: 0,
+/// y: 0
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented = "`{Self}` cannot be formatted using `:?`; if it is \
defined in your crate, add `#[derive(Debug)]` or \
fn fmt(&self, &mut Formatter) -> Result;
}
-/// When a value can be semantically expressed as a String, this trait may be
-/// used. It corresponds to the default format, `{}`.
+/// Format trait for an empty format, `{}`.
+///
+/// `Display` is similar to [`Debug`][debug], but `Display` is for user-facing
+/// output, and so cannot be derived.
+///
+/// [debug]: trait.Debug.html
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Implementing `Display` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl fmt::Display for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// write!(f, "({}, {})", self.x, self.y)
+/// }
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// println!("The origin is: {}", origin);
+/// ```
#[rustc_on_unimplemented = "`{Self}` cannot be formatted with the default \
formatter; try using `:?` instead if you are using \
a format string"]
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `o` character
+/// Format trait for the `o` character.
+///
+/// The `Octal` trait should format its output as a number in base-8.
+///
+/// The alternate flag, `#`, adds a `0o` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '52' in octal
+///
+/// assert_eq!(format!("{:o}", x), "52");
+/// assert_eq!(format!("{:#o}", x), "0o52");
+/// ```
+///
+/// Implementing `Octal` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Octal for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+///
+/// write!(f, "{:o}", val) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// println!("l as octal is: {:o}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Octal {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `b` character
+/// Format trait for the `b` character.
+///
+/// The `Binary` trait should format its output as a number in binary.
+///
+/// The alternate flag, `#`, adds a `0b` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '101010' in binary
+///
+/// assert_eq!(format!("{:b}", x), "101010");
+/// assert_eq!(format!("{:#b}", x), "0b101010");
+/// ```
+///
+/// Implementing `Binary` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Binary for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+///
+/// write!(f, "{:b}", val) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(107);
+///
+/// println!("l as binary is: {:b}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Binary {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `x` character
+/// Format trait for the `x` character.
+///
+/// The `LowerHex` trait should format its output as a number in hexadecimal, with `a` through `f`
+/// in lower case.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2a' in hex
+///
+/// assert_eq!(format!("{:x}", x), "2a");
+/// assert_eq!(format!("{:#x}", x), "0x2a");
+/// ```
+///
+/// Implementing `LowerHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+///
+/// write!(f, "{:x}", val) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// println!("l as hex is: {:x}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait LowerHex {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `X` character
+/// Format trait for the `X` character.
+///
+/// The `UpperHex` trait should format its output as a number in hexadecimal, with `A` through `F`
+/// in upper case.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2A' in hex
+///
+/// assert_eq!(format!("{:X}", x), "2A");
+/// assert_eq!(format!("{:#X}", x), "0x2A");
+/// ```
+///
+/// Implementing `UpperHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+///
+/// write!(f, "{:X}", val) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// println!("l as hex is: {:X}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait UpperHex {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `p` character
+/// Format trait for the `p` character.
+///
+/// The `Pointer` trait should format its output as a memory location. This is commonly presented
+/// as hexadecimal.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `&i32`:
+///
+/// ```
+/// let x = &42;
+///
+/// let address = format!("{:p}", x); // this produces something like '0x7f06092ac6d0'
+/// ```
+///
+/// Implementing `Pointer` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Pointer for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// // use `as` to convert to a `*const T`, which implements Pointer, which we can use
+///
+/// write!(f, "{:p}", self as *const Length)
+/// }
+/// }
+///
+/// let l = Length(42);
+///
+/// println!("l is in memory here: {:p}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Pointer {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `e` character
+/// Format trait for the `e` character.
+///
+/// The `LowerExp` trait should format its output in scientific notation with a lower-case `e`.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2e1' in scientific notation
+///
+/// assert_eq!(format!("{:e}", x), "4.2e1");
+/// ```
+///
+/// Implementing `LowerExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+/// write!(f, "{}e1", val / 10)
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// println!("l in scientific notation is: {:e}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait LowerExp {
/// Formats the value using the given formatter.
fn fmt(&self, &mut Formatter) -> Result;
}
-/// Format trait for the `E` character
+/// Format trait for the `E` character.
+///
+/// The `UpperExp` trait should format its output in scientific notation with an upper-case `E`.
+///
+/// For more information on formatters, see [the module-level documentation][module].
+///
+/// [module]: ../index.html
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2E1' in scientific notation
+///
+/// assert_eq!(format!("{:E}", x), "4.2E1");
+/// ```
+///
+/// Implementing `UpperExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// let val = self.0;
+/// write!(f, "{}E1", val / 10)
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// println!("l in scientific notation is: {:E}", l);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait UpperExp {
/// Formats the value using the given formatter.
fn fmt(&self, f: &mut Formatter) -> Result {
try!(write!(f, "\""));
for c in self.chars().flat_map(|c| c.escape_default()) {
- try!(write!(f, "{}", c));
+ try!(f.write_char(c))
}
write!(f, "\"")
}
use char::CharExt;
try!(write!(f, "'"));
for c in self.escape_default() {
- try!(write!(f, "{}", c));
+ try!(f.write_char(c))
}
write!(f, "'")
}
impl<$($name:Debug),*> Debug for ($($name,)*) {
#[allow(non_snake_case, unused_assignments)]
fn fmt(&self, f: &mut Formatter) -> Result {
- try!(write!(f, "("));
+ let mut builder = f.debug_tuple("");
let ($(ref $name,)*) = *self;
let mut n = 0;
$(
- if n > 0 {
- try!(write!(f, ", "));
- }
- try!(write!(f, "{:?}", *$name));
+ builder.field($name);
n += 1;
)*
+
if n == 1 {
- try!(write!(f, ","));
+ try!(write!(builder.formatter(), ","));
}
- write!(f, ")")
+
+ builder.finish()
}
}
peel! { $($name,)* }
// FIXME: #6220 Implement floating point formatting
-#![allow(unsigned_negation)]
-
use prelude::*;
use fmt;
use num::Zero;
use ops::{Div, Rem, Sub};
use str;
+use slice;
+use ptr;
+use mem;
#[doc(hidden)]
trait Int: Zero + PartialEq + PartialOrd + Div<Output=Self> + Rem<Output=Self> +
Sub<Output=Self> + Copy {
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
+ fn to_u32(&self) -> u32;
+ fn to_u64(&self) -> u64;
}
macro_rules! doit {
($($t:ident)*) => ($(impl Int for $t {
fn from_u8(u: u8) -> $t { u as $t }
fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u32(&self) -> u32 { *self as u32 }
+ fn to_u64(&self) -> u64 { *self as u64 }
})*)
}
doit! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
/// # Examples
///
/// ```
-/// # #![feature(fmt_radix)]
+/// #![feature(fmt_radix)]
+///
/// use std::fmt::radix;
/// assert_eq!(format!("{}", radix(55, 36)), "1j".to_string());
/// ```
}
}
}
+
macro_rules! int_base {
($Trait:ident for $T:ident as $U:ident -> $Radix:ident) => {
#[stable(feature = "rust1", since = "1.0.0")]
}
}
}
+
macro_rules! integer {
($Int:ident, $Uint:ident) => {
- int_base! { Display for $Int as $Int -> Decimal }
int_base! { Binary for $Int as $Uint -> Binary }
int_base! { Octal for $Int as $Uint -> Octal }
int_base! { LowerHex for $Int as $Uint -> LowerHex }
radix_fmt! { $Int as $Int, fmt_int }
debug! { $Int }
- int_base! { Display for $Uint as $Uint -> Decimal }
int_base! { Binary for $Uint as $Uint -> Binary }
int_base! { Octal for $Uint as $Uint -> Octal }
int_base! { LowerHex for $Uint as $Uint -> LowerHex }
integer! { i16, u16 }
integer! { i32, u32 }
integer! { i64, u64 }
+
+const DEC_DIGITS_LUT: &'static[u8] =
+ b"0001020304050607080910111213141516171819\
+ 2021222324252627282930313233343536373839\
+ 4041424344454647484950515253545556575859\
+ 6061626364656667686970717273747576777879\
+ 8081828384858687888990919293949596979899";
+
+macro_rules! impl_Display {
+ ($($t:ident),*: $conv_fn:ident) => ($(
+ impl fmt::Display for $t {
+ #[allow(unused_comparisons)]
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let is_positive = *self >= 0;
+ let mut n = if is_positive {
+ self.$conv_fn()
+ } else {
+ // convert the negative number to positive by adding 1 to its two's complement
+ (!self.$conv_fn()).wrapping_add(1)
+ };
+ let mut buf: [u8; 20] = unsafe { mem::uninitialized() };
+ let mut curr = buf.len() as isize;
+ let buf_ptr = buf.as_mut_ptr();
+ let lut_ptr = DEC_DIGITS_LUT.as_ptr();
+
+ unsafe {
+ // eagerly decode 4 characters at a time
+ if <$t>::max_value() as u64 >= 10000 {
+ while n >= 10000 {
+ let rem = (n % 10000) as isize;
+ n /= 10000;
+
+ let d1 = (rem / 100) << 1;
+ let d2 = (rem % 100) << 1;
+ curr -= 4;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
+ }
+ }
+
+ // if we reach here numbers are <= 9999, so at most 4 chars long
+ let mut n = n as isize; // possibly reduce 64bit math
+
+ // decode 2 more chars, if > 2 chars
+ if n >= 100 {
+ let d1 = (n % 100) << 1;
+ n /= 100;
+ curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ }
+
+ // decode last 1 or 2 chars
+ if n < 10 {
+ curr -= 1;
+ *buf_ptr.offset(curr) = (n as u8) + 48;
+ } else {
+ let d1 = n << 1;
+ curr -= 2;
+ ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
+ }
+ }
+
+ let buf_slice = unsafe {
+ str::from_utf8_unchecked(
+ slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize))
+ };
+ f.pad_integral(is_positive, "", buf_slice)
+ }
+ })*);
+}
+
+impl_Display!(i8, u8, i16, u16, i32, u32: to_u32);
+impl_Display!(i64, u64: to_u64);
+#[cfg(target_pointer_width = "32")]
+impl_Display!(isize, usize: to_u32);
+#[cfg(target_pointer_width = "64")]
+impl_Display!(isize, usize: to_u64);
//! # Examples
//!
//! ```rust
-//! # #![feature(hash_default)]
+//! #![feature(hash_default)]
+//!
//! use std::hash::{hash, Hash, SipHasher};
//!
//! #[derive(Hash)]
//! the trait `Hash`:
//!
//! ```rust
-//! # #![feature(hash_default)]
+//! #![feature(hash_default)]
+//!
//! use std::hash::{hash, Hash, Hasher, SipHasher};
//!
//! struct Person {
fn hash<H: Hasher>(&self, state: &mut H);
/// Feeds a slice of this type into the state provided.
- #[unstable(feature = "hash_slice",
- reason = "module was recently redesigned")]
+ #[stable(feature = "hash_slice", since = "1.3.0")]
fn hash_slice<H: Hasher>(data: &[Self], state: &mut H) where Self: Sized {
for piece in data {
piece.hash(state);
/// Write a single `u8` into this hasher
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_u8(&mut self, i: u8) { self.write(&[i]) }
/// Write a single `u16` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_u16(&mut self, i: u16) {
self.write(&unsafe { mem::transmute::<_, [u8; 2]>(i) })
}
/// Write a single `u32` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_u32(&mut self, i: u32) {
self.write(&unsafe { mem::transmute::<_, [u8; 4]>(i) })
}
/// Write a single `u64` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_u64(&mut self, i: u64) {
self.write(&unsafe { mem::transmute::<_, [u8; 8]>(i) })
}
/// Write a single `usize` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_usize(&mut self, i: usize) {
if cfg!(target_pointer_width = "32") {
self.write_u32(i as u32)
/// Write a single `i8` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_i8(&mut self, i: i8) { self.write_u8(i as u8) }
/// Write a single `i16` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_i16(&mut self, i: i16) { self.write_u16(i as u16) }
/// Write a single `i32` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_i32(&mut self, i: i32) { self.write_u32(i as u32) }
/// Write a single `i64` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_i64(&mut self, i: i64) { self.write_u64(i as u64) }
/// Write a single `isize` into this hasher.
#[inline]
- #[unstable(feature = "hasher_write", reason = "module was recently redesigned")]
+ #[stable(feature = "hasher_write", since = "1.3.0")]
fn write_isize(&mut self, i: isize) { self.write_usize(i as usize) }
}
#[unstable(feature = "hash_default",
reason = "not the most ergonomic interface unless `H` is defaulted \
to SipHasher, but perhaps not ready to commit to that")]
+#[deprecated(since = "1.3.0",
+ reason = "has yet to prove itself useful")]
pub fn hash<T: Hash, H: Hasher + Default>(value: &T) -> u64 {
let mut h: H = Default::default();
value.hash(&mut h);
//! An implementation of SipHash 2-4.
-#![allow(deprecated)] // until the next snapshot for inherent wrapping ops
-
+use ptr;
use prelude::*;
use super::Hasher;
k0: u64,
k1: u64,
length: usize, // how many bytes we've processed
+ // v0, v2 and v1, v3 show up in pairs in the algorithm,
+ // and simd implementations of SipHash will use vectors
+ // of v02 and v13. By placing them in this order in the struct,
+ // the compiler can pick up on just a few simd optimizations by itself.
v0: u64, // hash state
- v1: u64,
v2: u64,
+ v1: u64,
v3: u64,
tail: u64, // unprocessed bytes le
ntail: usize, // how many bytes in tail are valid
});
}
+/// Load a full u64 word from a byte stream, in LE order. Use
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load u64 from a possibly unaligned address.
+///
+/// Unsafe because: unchecked indexing at i..i+8
+#[inline]
+unsafe fn load_u64_le(buf: &[u8], i: usize) -> u64 {
+ debug_assert!(i + 8 <= buf.len());
+ let mut data = 0u64;
+ ptr::copy_nonoverlapping(buf.get_unchecked(i),
+ &mut data as *mut _ as *mut u8, 8);
+ data.to_le()
+}
+
macro_rules! rotl {
($x:expr, $b:expr) =>
(($x << $b) | ($x >> (64_i32.wrapping_sub($b))))
// Buffered tail is now flushed, process new input.
let len = length - needed;
- let end = len & (!0x7);
let left = len & 0x7;
let mut i = needed;
- while i < end {
- let mi = u8to64_le!(msg, i);
+ while i < len - left {
+ let mi = unsafe { load_u64_le(msg, i) };
self.v3 ^= mi;
compress!(self.v0, self.v1, self.v2, self.v3);
/// Moves a value to an uninitialized memory location.
///
/// Drop glue is not run on the destination.
- pub fn move_val_init<T>(dst: &mut T, src: T);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
pub fn min_align_of<T>() -> usize;
pub fn pref_align_of<T>() -> usize;
pub fn overflowing_mul<T>(a: T, b: T) -> T;
/// Performs an unchecked signed division, which results in undefined behavior,
- /// in cases where y == 0, or x == int::MIN and y == -1
+ /// in cases where y == 0, or x == isize::MIN and y == -1
pub fn unchecked_sdiv<T>(x: T, y: T) -> T;
/// Performs an unchecked unsigned division, which results in undefined behavior,
/// in cases where y == 0
pub fn unchecked_udiv<T>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked signed division, which results in
- /// undefined behavior, in cases where y == 0, or x == int::MIN and y == -1
- pub fn unchecked_urem<T>(x: T, y: T) -> T;
- /// Returns the remainder of an unchecked signed division, which results in
- /// undefined behavior, in cases where y == 0
+ /// undefined behavior, in cases where y == 0, or x == isize::MIN and y == -1
pub fn unchecked_srem<T>(x: T, y: T) -> T;
+ /// Returns the remainder of an unchecked unsigned division, which results in
+ /// undefined behavior, in cases where y == 0
+ pub fn unchecked_urem<T>(x: T, y: T) -> T;
/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
pub fn discriminant_value<T>(v: &T) -> u64;
+
+ /// Rust's "try catch" construct which invokes the function pointer `f` with
+ /// the data pointer `data`, returning the exception payload if an exception
+ /// is thrown (aka the thread panics).
+ pub fn try(f: fn(*mut u8), data: *mut u8) -> *mut u8;
}
#![stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
use self::MinMaxResult::*;
use clone::Clone;
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(deprecated)]
fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option<B>,
{
/// # Examples
///
/// ```
- /// # #![feature(iter_min_max)]
+ /// #![feature(iter_min_max)]
+ ///
/// use std::iter::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
#[unstable(feature = "iter_min_max",
reason = "return type may change or may wish to have a closure \
based version as well")]
+ #[deprecated(since = "1.3.0", reason = "has not proven itself")]
+ #[allow(deprecated)]
fn min_max(mut self) -> MinMaxResult<Self::Item> where Self: Sized, Self::Item: Ord
{
let (mut min, mut max) = match self.next() {
/// # Examples
///
/// ```
- /// # #![feature(iter_cmp)]
+ /// #![feature(iter_cmp)]
+ ///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(*a.iter().max_by(|x| x.abs()).unwrap(), -10);
/// ```
/// # Examples
///
/// ```
- /// # #![feature(iter_cmp)]
+ /// #![feature(iter_cmp)]
+ ///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(*a.iter().min_by(|x| x.abs()).unwrap(), 0);
/// ```
/// # Examples
///
/// ```
- /// # #![feature(iter_arith)]
+ /// #![feature(iter_arith)]
+ ///
/// let a = [1, 2, 3, 4, 5];
/// let it = a.iter();
/// assert_eq!(it.sum::<i32>(), 15);
/// # Examples
///
/// ```
- /// # #![feature(iter_arith)]
+ /// #![feature(iter_arith)]
+ ///
/// fn factorial(n: u32) -> u32 {
/// (1..).take_while(|&i| i <= n).product()
/// }
#[derive(Clone, PartialEq, Debug)]
#[unstable(feature = "iter_min_max",
reason = "unclear whether such a fine-grained result is widely useful")]
+#[deprecated(since = "1.3.0", reason = "has not proven itself")]
+#[allow(deprecated)]
pub enum MinMaxResult<T> {
/// Empty iterator
NoElements,
}
#[unstable(feature = "iter_min_max", reason = "type is unstable")]
+#[deprecated(since = "1.3.0", reason = "has not proven itself")]
+#[allow(deprecated)]
impl<T: Clone> MinMaxResult<T> {
/// `into_option` creates an `Option` of type `(T,T)`. The returned `Option`
/// has variant `None` if and only if the `MinMaxResult` has variant
/// # Examples
///
/// ```
- /// # #![feature(iter_min_max)]
+ /// #![feature(iter_min_max)]
+ ///
/// use std::iter::MinMaxResult::{self, NoElements, OneElement, MinMax};
///
/// let r: MinMaxResult<i32> = NoElements;
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
+#[allow(deprecated)]
pub struct Scan<I, St, F> {
iter: I,
f: F,
/// The current internal state to be passed to the closure next.
#[unstable(feature = "scan_state",
reason = "public fields are otherwise rare in the stdlib")]
+ #[deprecated(since = "1.3.0", reason = "unclear whether this is necessary")]
pub state: St,
}
type Item = B;
#[inline]
+ #[allow(deprecated)]
fn next(&mut self) -> Option<B> {
self.iter.next().and_then(|a| (self.f)(&mut self.state, a))
}
/// previously returned `None`.
#[inline]
#[unstable(feature = "iter_reset_fuse", reason = "seems marginal")]
+ #[deprecated(since = "1.3.0",
+ reason = "unusual for adaptors to have one-off methods")]
pub fn reset_fuse(&mut self) {
self.done = false
}
#[unstable(feature = "iter_unfold")]
#[derive(Clone)]
#[deprecated(since = "1.2.0",
- reason = "has gained enough traction to retain its position \
+ reason = "has not gained enough traction to retain its position \
in the standard library")]
#[allow(deprecated)]
pub struct Unfold<St, F> {
#[unstable(feature = "iter_unfold")]
#[deprecated(since = "1.2.0",
- reason = "has gained enough traction to retain its position \
+ reason = "has not gained enough traction to retain its position \
in the standard library")]
#[allow(deprecated)]
impl<A, St, F> Unfold<St, F> where F: FnMut(&mut St) -> Option<A> {
#[allow(trivial_numeric_casts)]
fn steps_between(start: &$t, end: &$t, by: &$t) -> Option<usize> {
if *by == 0 { return None; }
- let mut diff: usize;
- let mut by_u: usize;
+ let diff: usize;
+ let by_u: usize;
if *by > 0 {
if *start >= *end {
return Some(0);
/// # Examples
///
/// ```
- /// # #![feature(step_by)]
+ /// #![feature(step_by)]
+ ///
/// for i in (0..10).step_by(2) {
/// println!("{}", i);
/// }
/// from a given seed value.
#[unstable(feature = "iter_iterate")]
#[deprecated(since = "1.2.0",
- reason = "has gained enough traction to retain its position \
+ reason = "has not gained enough traction to retain its position \
in the standard library")]
#[allow(deprecated)]
pub type Iterate<T, F> = Unfold<IterateState<T, F>, fn(&mut IterateState<T, F>) -> Option<T>>;
/// repeated applications of the given function `f`.
#[unstable(feature = "iter_iterate")]
#[deprecated(since = "1.2.0",
- reason = "has gained enough traction to retain its position \
+ reason = "has not gained enough traction to retain its position \
in the standard library")]
#[allow(deprecated)]
pub fn iterate<T, F>(seed: T, f: F) -> Iterate<T, F> where
pub mod hash;
pub mod fmt;
-#[doc(primitive = "bool")]
-mod bool {
-}
-
// note: does not need to be public
mod tuple;
+// A curious inner-module that's not exported that contains the bindings of core
+// so that compiler-expanded references to `core::$foo` can be resolved within
+// core itself.
+//
+// Note that no crate-defined macros require this module due to the existence of
+// the `$crate` meta variable, only those expansions defined in the compiler
+// require this. This is because the compiler doesn't currently know that it's
+// compiling the core library when it's compiling this library, so it expands
+// all references to `::core::$foo`
#[doc(hidden)]
mod core {
- pub use intrinsics;
- pub use panicking;
- pub use fmt;
- pub use clone;
- pub use cmp;
- pub use hash;
- pub use marker;
- pub use option;
- pub use iter;
-}
-
-#[doc(hidden)]
-mod std {
- // range syntax
- pub use ops;
+ pub use intrinsics; // derive(PartialOrd)
+ pub use fmt; // format_args!
+ pub use clone; // derive(Clone)
+ pub use cmp; // derive(Ord)
+ pub use hash; // derive(Hash)
+ pub use marker; // derive(Copy)
+ pub use option; // iterator protocol
+ pub use iter; // iterator protocol
}
);
($msg:expr) => ({
static _MSG_FILE_LINE: (&'static str, &'static str, u32) = ($msg, file!(), line!());
- ::core::panicking::panic(&_MSG_FILE_LINE)
+ $crate::panicking::panic(&_MSG_FILE_LINE)
});
($fmt:expr, $($arg:tt)*) => ({
// The leading _'s are to avoid dead code warnings if this is
// insufficient, since the user may have
// `#[forbid(dead_code)]` and which cannot be overridden.
static _FILE_LINE: (&'static str, u32) = (file!(), line!());
- ::core::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
+ $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
});
}
/// Any types with interior mutability must also use the `std::cell::UnsafeCell`
/// wrapper around the value(s) which can be mutated when behind a `&`
/// reference; not doing this is undefined behaviour (for example,
-/// `transmute`-ing from `&T` to `&mut T` is illegal).
+/// `transmute`-ing from `&T` to `&mut T` is invalid).
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "sync"]
#[rustc_on_unimplemented = "`{Self}` cannot be shared between threads safely"]
dropped_impl()
}
-/// Creates an uninitialized value.
+/// Bypasses Rust's normal memory-initialization checks by pretending to
+/// produce a value of type T, while doing nothing at all.
///
-/// Care must be taken when using this function, if the type `T` has a destructor and the value
-/// falls out of scope (due to unwinding or returning) before being initialized, then the
-/// destructor will run on uninitialized data, likely leading to crashes.
+/// **This is incredibly dangerous, and should not be done lightly. Deeply
+/// consider initializing your memory with a default value instead.**
///
-/// This is useful for FFI functions sometimes, but should generally be avoided.
+/// This is useful for FFI functions and initializing arrays sometimes,
+/// but should generally be avoided.
+///
+/// # Undefined Behaviour
+///
+/// It is Undefined Behaviour to read uninitialized memory. Even just an
+/// uninitialized boolean. For instance, if you branch on the value of such
+/// a boolean your program may take one, both, or neither of the branches.
+///
+/// Note that this often also includes *writing* to the uninitialized value.
+/// Rust believes the value is initialized, and will therefore try to Drop
+/// the uninitialized value and its fields if you try to overwrite the memory
+/// in a normal manner. The only way to safely initialize an arbitrary
+/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or
+/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive
+/// or otherwise only contains types that don't implement Drop.
+///
+/// If this value *does* need some kind of Drop, it must be initialized before
+/// it goes out of scope (and therefore would be dropped). Note that this
+/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// # Examples
///
+/// Here's how to safely initialize an array of `Vec`s.
+///
/// ```
/// use std::mem;
+/// use std::ptr;
///
-/// let x: i32 = unsafe { mem::uninitialized() };
+/// // Only declare the array. This safely leaves it
+/// // uninitialized in a way that Rust will track for us.
+/// // However we can't initialize it element-by-element
+/// // safely, and we can't use the `[value; 1000]`
+/// // constructor because it only works with `Copy` data.
+/// let mut data: [Vec<u32>; 1000];
+///
+/// unsafe {
+/// // So we need to do this to initialize it.
+/// data = mem::uninitialized();
+///
+/// // DANGER ZONE: if anything panics or otherwise
+/// // incorrectly reads the array here, we will have
+/// // Undefined Behaviour.
+///
+/// // It's ok to mutably iterate the data, since this
+/// // doesn't involve reading it at all.
+/// // (ptr and len are statically known for arrays)
+/// for elem in &mut data[..] {
+/// // *elem = Vec::new() would try to drop the
+/// // uninitialized memory at `elem` -- bad!
+/// //
+/// // Vec::new doesn't allocate or do really
+/// // anything. It's only safe to call here
+/// // because we know it won't panic.
+/// ptr::write(elem, Vec::new());
+/// }
+///
+/// // SAFE ZONE: everything is initialized.
+/// }
+///
+/// println!("{:?}", &data[0]);
+/// ```
+///
+/// This example emphasizes exactly how delicate and dangerous doing this is.
+/// Note that the `vec!` macro *does* let you initialize every element with a
+/// value that is only `Clone`, so the following is semantically equivalent and
+/// vastly less dangerous, as long as you can live with an extra heap
+/// allocation:
+///
+/// ```
+/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
+/// println!("{:?}", &data[0]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// Disposes of a value.
///
-/// This function can be used to destroy any value by allowing `drop` to take ownership of its
-/// argument.
+/// While this does call the argument's implementation of `Drop`, it will not
+/// release any borrows, as borrows are based on lexical scope.
///
/// # Examples
///
+/// Basic usage:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+///
+/// drop(v); // explicitly drop the vector
+/// ```
+///
+/// Borrows are based on lexical scope, so this produces an error:
+///
+/// ```ignore
+/// let mut v = vec![1, 2, 3];
+/// let x = &v[0];
+///
+/// drop(x); // explicitly drop the reference, but the borrow still exists
+///
+/// v.push(4); // error: cannot borrow `v` as mutable because it is also
+/// // borrowed as immutable
+/// ```
+///
+/// An inner scope is needed to fix this:
+///
+/// ```
+/// let mut v = vec![1, 2, 3];
+///
+/// {
+/// let x = &v[0];
+///
+/// drop(x); // this is now redundant, as `x` is going out of scope anyway
+/// }
+///
+/// v.push(4); // no problems
+/// ```
+///
+/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can
+/// seemingly release a borrow of one:
+///
/// ```
/// use std::cell::RefCell;
///
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[unstable(feature = "filling_drop")]
+#[allow(missing_docs)]
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop")]
+#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop")]
+#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop")]
+#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[unstable(feature = "filling_drop")]
+#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
//! Operations and constants for 32-bits floats (`f32` type)
-#![doc(primitive = "f32")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
- unsafe { intrinsics::floorf32(self) }
+ return floorf(self);
+
+ // On MSVC LLVM will lower many math intrinsics to a call to the
+ // corresponding function. On MSVC, however, many of these functions
+ // aren't actually available as symbols to call, but rather they are all
+ // `static inline` functions in header files. This means that from a C
+ // perspective it's "compatible", but not so much from an ABI
+ // perspective (which we're worried about).
+ //
+ // The inline header functions always just cast to a f64 and do their
+ // operation, so we do that here as well, but only for MSVC targets.
+ //
+ // Note that there are many MSVC-specific float operations which
+ // redirect to this comment, so `floorf` is just one case of a missing
+ // function on MSVC, but there are many others elsewhere.
+ #[cfg(target_env = "msvc")]
+ fn floorf(f: f32) -> f32 { (f as f64).floor() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn floorf(f: f32) -> f32 { unsafe { intrinsics::floorf32(f) } }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
- unsafe { intrinsics::ceilf32(self) }
+ return ceilf(self);
+
+ // see notes above in `floor`
+ #[cfg(target_env = "msvc")]
+ fn ceilf(f: f32) -> f32 { (f as f64).ceil() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn ceilf(f: f32) -> f32 { unsafe { intrinsics::ceilf32(f) } }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn powf(self, n: f32) -> f32 {
- unsafe { intrinsics::powf32(self, n) }
+ return powf(self, n);
+
+ // see notes above in `floor`
+ #[cfg(target_env = "msvc")]
+ fn powf(f: f32, n: f32) -> f32 { (f as f64).powf(n as f64) as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn powf(f: f32, n: f32) -> f32 { unsafe { intrinsics::powf32(f, n) } }
}
#[inline]
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
- unsafe { intrinsics::expf32(self) }
+ return expf(self);
+
+ // see notes above in `floor`
+ #[cfg(target_env = "msvc")]
+ fn expf(f: f32) -> f32 { (f as f64).exp() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn expf(f: f32) -> f32 { unsafe { intrinsics::expf32(f) } }
}
/// Returns 2 raised to the power of the number.
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
- unsafe { intrinsics::logf32(self) }
+ return logf(self);
+
+ // see notes above in `floor`
+ #[cfg(target_env = "msvc")]
+ fn logf(f: f32) -> f32 { (f as f64).ln() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn logf(f: f32) -> f32 { unsafe { intrinsics::logf32(f) } }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
- unsafe { intrinsics::log10f32(self) }
+ return log10f(self);
+
+ // see notes above in `floor`
+ #[cfg(target_env = "msvc")]
+ fn log10f(f: f32) -> f32 { (f as f64).log10() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn log10f(f: f32) -> f32 { unsafe { intrinsics::log10f32(f) } }
}
/// Converts to degrees, assuming the number is in radians.
//! Operations and constants for 64-bits floats (`f64` type)
-#![doc(primitive = "f64")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
use num::{Float, ParseFloatError};
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 53;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const DIGITS: u32 = 15;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
/// Smallest finite f64 value
pub const MAX: f64 = 1.7976931348623157e+308_f64;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN_EXP: i32 = -1021;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX_EXP: i32 = 1024;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -307;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 308;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const NAN: f64 = 0.0_f64/0.0_f64;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const INFINITY: f64 = 1.0_f64/0.0_f64;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const NEG_INFINITY: f64 = -1.0_f64/0.0_f64;
/// Basic mathematial constants.
//! Operations and constants for signed 16-bits integers (`i16` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i16")]
int_module! { i16, 16 }
//! Operations and constants for signed 32-bits integers (`i32` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i32")]
int_module! { i32, 32 }
//! Operations and constants for signed 64-bits integers (`i64` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i64")]
int_module! { i64, 64 }
//! Operations and constants for signed 8-bits integers (`i8` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i8")]
int_module! { i8, 8 }
// calling the `mem::size_of` function.
#[unstable(feature = "num_bits_bytes",
reason = "may want to be an associated function")]
+#[allow(missing_docs)]
pub const BITS : usize = $bits;
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `mem::size_of` function.
#[unstable(feature = "num_bits_bytes",
reason = "may want to be an associated function")]
+#[allow(missing_docs)]
pub const BYTES : usize = ($bits / 8);
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `Bounded::min_value` function.
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN: $T = (-1 as $T) << (BITS - 1);
// FIXME(#9837): Compute MIN like this so the high bits that shouldn't exist are 0.
// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
// calling the `Bounded::max_value` function.
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX: $T = !MIN;
) }
//! Operations and constants for pointer-sized signed integers (`isize` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "isize")]
#[cfg(target_pointer_width = "32")]
int_module! { isize, 32 }
use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
use str::{FromStr, StrExt};
+use slice::SliceExt;
/// Provides intentionally-wrapped arithmetic on `T`.
///
}
}
- /// Wrapping (modular) division. Computes `floor(self / other)`,
+ /// Wrapping (modular) division. Computes `self / other`,
/// wrapping around at the boundary of the type.
///
/// The only case where such wrapping can occur is when one
/// negative minimal value for the type); this is equivalent
/// to `-MIN`, a positive value that is too large to represent
/// in the type. In such a case, this function returns `MIN`
- /// itself..
+ /// itself.
#[stable(feature = "num_wrapping", since = "1.2.0")]
#[inline(always)]
pub fn wrapping_div(self, rhs: Self) -> Self {
/// wrapping around at the boundary of the type.
///
/// Such wrap-around never actually occurs mathematically;
- /// implementation artifacts make `x % y` illegal for `MIN /
- /// -1` on a signed type illegal (where `MIN` is the negative
+ /// implementation artifacts make `x % y` invalid for `MIN /
+ /// -1` on a signed type (where `MIN` is the negative
/// minimal value). In such a case, this function returns `0`.
#[stable(feature = "num_wrapping", since = "1.2.0")]
#[inline(always)]
$mul_with_overflow:path) => {
/// Returns the smallest value that can be represented by this integer type.
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn min_value() -> Self { 0 }
/// Returns the largest value that can be represented by this integer type.
#[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
pub fn max_value() -> Self { !0 }
/// Converts a string slice in a given base to an integer.
}
}
- /// Wrapping (modular) division. Computes `floor(self / other)`,
+ /// Wrapping (modular) division. Computes `self / other`,
/// wrapping around at the boundary of the type.
///
/// The only case where such wrapping can occur is when one
/// negative minimal value for the type); this is equivalent
/// to `-MIN`, a positive value that is too large to represent
/// in the type. In such a case, this function returns `MIN`
- /// itself..
+ /// itself.
#[stable(feature = "num_wrapping", since = "1.2.0")]
#[inline(always)]
pub fn wrapping_div(self, rhs: Self) -> Self {
/// wrapping around at the boundary of the type.
///
/// Such wrap-around never actually occurs mathematically;
- /// implementation artifacts make `x % y` illegal for `MIN /
- /// -1` on a signed type illegal (where `MIN` is the negative
+ /// implementation artifacts make `x % y` invalid for `MIN /
+ /// -1` on a signed type (where `MIN` is the negative
/// minimal value). In such a case, this function returns `0`.
#[stable(feature = "num_wrapping", since = "1.2.0")]
#[inline(always)]
acc
}
- /// Returns `true` iff `self == 2^k` for some `k`.
+ /// Returns `true` if and only if `self == 2^k` for some `k`.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_power_of_two(self) -> bool {
-> Result<T, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
+
assert!(radix >= 2 && radix <= 36,
"from_str_radix_int: must lie in the range `[2, 36]` - found {}",
radix);
+ if src.is_empty() {
+ return Err(PIE { kind: Empty });
+ }
+
let is_signed_ty = T::from_u32(0) > T::min_value();
- match src.slice_shift_char() {
- Some(('-', "")) => Err(PIE { kind: Empty }),
- Some(('-', src)) if is_signed_ty => {
+ // all valid digits are ascii, so we will just iterate over the utf8 bytes
+ // and cast them to chars. .to_digit() will safely return None for anything
+    // other than a valid ascii digit for the given radix, including the first byte
+ // of multi-byte sequences
+ let src = src.as_bytes();
+
+ match (src[0], &src[1..]) {
+ (b'-', digits) if digits.is_empty() => Err(PIE { kind: Empty }),
+ (b'-', digits) if is_signed_ty => {
// The number is negative
let mut result = T::from_u32(0);
- for c in src.chars() {
- let x = match c.to_digit(radix) {
+ for &c in digits {
+ let x = match (c as char).to_digit(radix) {
Some(x) => x,
None => return Err(PIE { kind: InvalidDigit }),
};
}
Ok(result)
},
- Some((_, _)) => {
+ (c, digits) => {
// The number is signed
- let mut result = T::from_u32(0);
- for c in src.chars() {
- let x = match c.to_digit(radix) {
+ let mut result = match (c as char).to_digit(radix) {
+ Some(x) => T::from_u32(x),
+ None => return Err(PIE { kind: InvalidDigit }),
+ };
+ for &c in digits {
+ let x = match (c as char).to_digit(radix) {
Some(x) => x,
None => return Err(PIE { kind: InvalidDigit }),
};
};
}
Ok(result)
- },
- None => Err(ParseIntError { kind: Empty }),
+ }
}
}
//! Operations and constants for unsigned 16-bits integers (`u16` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u16")]
uint_module! { u16, i16, 16 }
//! Operations and constants for unsigned 32-bits integers (`u32` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u32")]
uint_module! { u32, i32, 32 }
//! Operations and constants for unsigned 64-bits integer (`u64` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u64")]
uint_module! { u64, i64, 64 }
//! Operations and constants for unsigned 8-bits integers (`u8` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u8")]
uint_module! { u8, i8, 8 }
#[unstable(feature = "num_bits_bytes",
reason = "may want to be an associated function")]
+#[allow(missing_docs)]
pub const BITS : usize = $bits;
#[unstable(feature = "num_bits_bytes",
reason = "may want to be an associated function")]
+#[allow(missing_docs)]
pub const BYTES : usize = ($bits / 8);
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MIN: $T = 0 as $T;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
pub const MAX: $T = !0 as $T;
) }
//! Operations and constants for pointer-sized unsigned integers (`usize` type)
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "usize")]
uint_module! { usize, isize, ::isize::BITS }
}
}
+ #[stable(feature = "wrapping_div", since = "1.3.0")]
+ impl Div for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline(always)]
+ fn div(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_div(other.0))
+ }
+ }
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Not for Wrapping<$t> {
type Output = Wrapping<$t>;
fn div(self, rhs: RHS) -> Self::Output;
}
-macro_rules! div_impl {
+macro_rules! div_impl_integer {
($($t:ty)*) => ($(
+ /// This operation rounds towards zero, truncating any
+ /// fractional part of the exact result.
#[stable(feature = "rust1", since = "1.0.0")]
impl Div for $t {
type Output = $t;
)*)
}
-div_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
+div_impl_integer! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
+
+macro_rules! div_impl_float {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Div for $t {
+ type Output = $t;
+
+ #[inline]
+ fn div(self, other: $t) -> $t { self / other }
+ }
+
+ forward_ref_binop! { impl Div, div for $t, $t }
+ )*)
+}
+
+div_impl_float! { f32 f64 }
/// The `Rem` trait is used to specify the functionality of `%`.
///
macro_rules! rem_impl {
($($t:ty)*) => ($(
+ /// This operation satisfies `n % d == n - (n / d) * d`. The
+ /// result has the same sign as the left operand.
#[stable(feature = "rust1", since = "1.0.0")]
impl Rem for $t {
type Output = $t;
)*)
}
-macro_rules! rem_float_impl {
- ($t:ty, $fmod:ident) => {
- #[stable(feature = "rust1", since = "1.0.0")]
- impl Rem for $t {
- type Output = $t;
+rem_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
- #[inline]
- fn rem(self, other: $t) -> $t {
- extern { fn $fmod(a: $t, b: $t) -> $t; }
- unsafe { $fmod(self, other) }
- }
- }
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Rem for f32 {
+ type Output = f32;
+
+ // see notes in `core::f32::Float::floor`
+ #[inline]
+ #[cfg(target_env = "msvc")]
+ fn rem(self, other: f32) -> f32 {
+ (self as f64).rem(other as f64) as f32
+ }
- forward_ref_binop! { impl Rem, rem for $t, $t }
+ #[inline]
+ #[cfg(not(target_env = "msvc"))]
+ fn rem(self, other: f32) -> f32 {
+ extern { fn fmodf(a: f32, b: f32) -> f32; }
+ unsafe { fmodf(self, other) }
}
}
-rem_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
-rem_float_impl! { f32, fmodf }
-rem_float_impl! { f64, fmod }
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Rem for f64 {
+ type Output = f64;
+
+ #[inline]
+ fn rem(self, other: f64) -> f64 {
+ extern { fn fmod(a: f64, b: f64) -> f64; }
+ unsafe { fmod(self, other) }
+ }
+}
+
+forward_ref_binop! { impl Rem, rem for f64, f64 }
+forward_ref_binop! { impl Rem, rem for f32, f32 }
/// The `Neg` trait is used to specify the functionality of unary `-`.
///
macro_rules! neg_impl_core {
($id:ident => $body:expr, $($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- #[allow(unsigned_negation)]
impl Neg for $t {
#[stable(feature = "rust1", since = "1.0.0")]
type Output = $t;
/// The `Deref` trait is used to specify the functionality of dereferencing
/// operations like `*v`.
///
+/// `Deref` also enables ['`Deref` coercions'][coercions].
+///
+/// [coercions]: ../../book/deref-coercions.html
+///
/// # Examples
///
/// A struct with a single field which is accessible via dereferencing the
/// The `DerefMut` trait is used to specify the functionality of dereferencing
/// mutably like `*v = 1;`
///
+/// `DerefMut` also enables ['`Deref` coercions'][coercions].
+///
+/// [coercions]: ../../book/deref-coercions.html
+///
/// # Examples
///
/// A struct with a single field which is modifiable via dereferencing the
// *const T -> *const U
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+
+/// Both `in (PLACE) EXPR` and `box EXPR` desugar into expressions
+/// that allocate an intermediate "place" that holds uninitialized
+/// state. The desugaring evaluates EXPR, and writes the result at
+/// the address returned by the `pointer` method of this trait.
+///
+/// A `Place` can be thought of as a special representation for a
+/// hypothetical `&uninit` reference (which Rust cannot currently
+/// express directly). That is, it represents a pointer to
+/// uninitialized storage.
+///
+/// The client is responsible for two steps: First, initializing the
+/// payload (it can access its address via `pointer`). Second,
+/// converting the agent to an instance of the owning pointer, via the
+/// appropriate `finalize` method (see the `InPlace` trait).
+///
+/// If evaluating EXPR fails, then it is up to the destructor for the
+/// implementation of Place to clean up any intermediate state
+/// (e.g. deallocate box storage, pop a stack, etc).
+#[unstable(feature = "placement_new_protocol")]
+pub trait Place<Data: ?Sized> {
+ /// Returns the address where the input value will be written.
+ /// Note that the data at this address is generally uninitialized,
+ /// and thus one should use `ptr::write` for initializing it.
+ fn pointer(&mut self) -> *mut Data;
+}
+
+/// Interface to implementations of `in (PLACE) EXPR`.
+///
+/// `in (PLACE) EXPR` effectively desugars into:
+///
+/// ```rust,ignore
+/// let p = PLACE;
+/// let mut place = Placer::make_place(p);
+/// let raw_place = Place::pointer(&mut place);
+/// let value = EXPR;
+/// unsafe {
+/// std::ptr::write(raw_place, value);
+/// InPlace::finalize(place)
+/// }
+/// ```
+///
+/// The type of `in (PLACE) EXPR` is derived from the type of `PLACE`;
+/// if the type of `PLACE` is `P`, then the final type of the whole
+/// expression is `P::Place::Owner` (see the `InPlace` and `Boxed`
+/// traits).
+///
+/// Values for types implementing this trait usually are transient
+/// intermediate values (e.g. the return value of `Vec::emplace_back`)
+/// or `Copy`, since the `make_place` method takes `self` by value.
+#[unstable(feature = "placement_new_protocol")]
+pub trait Placer<Data: ?Sized> {
+    /// `Place` is the intermediate agent guarding the
+ /// uninitialized state for `Data`.
+ type Place: InPlace<Data>;
+
+ /// Creates a fresh place from `self`.
+ fn make_place(self) -> Self::Place;
+}
+
+/// Specialization of `Place` trait supporting `in (PLACE) EXPR`.
+#[unstable(feature = "placement_new_protocol")]
+pub trait InPlace<Data: ?Sized>: Place<Data> {
+ /// `Owner` is the type of the end value of `in (PLACE) EXPR`
+ ///
+ /// Note that when `in (PLACE) EXPR` is solely used for
+ /// side-effecting an existing data-structure,
+ /// e.g. `Vec::emplace_back`, then `Owner` need not carry any
+ /// information at all (e.g. it can be the unit type `()` in that
+ /// case).
+ type Owner;
+
+ /// Converts self into the final value, shifting
+ /// deallocation/cleanup responsibilities (if any remain), over to
+ /// the returned instance of `Owner` and forgetting self.
+ unsafe fn finalize(self) -> Self::Owner;
+}
+
+/// Core trait for the `box EXPR` form.
+///
+/// `box EXPR` effectively desugars into:
+///
+/// ```rust,ignore
+/// let mut place = BoxPlace::make_place();
+/// let raw_place = Place::pointer(&mut place);
+/// let value = EXPR;
+/// unsafe {
+/// ::std::ptr::write(raw_place, value);
+/// Boxed::finalize(place)
+/// }
+/// ```
+///
+/// The type of `box EXPR` is supplied from its surrounding
+/// context; in the above expansion, the result type `T` is used
+/// to determine which implementation of `Boxed` to use, and that
+/// `<T as Boxed>` in turn dictates which
+/// implementation of `BoxPlace` to use, namely:
+/// `<<T as Boxed>::Place as BoxPlace>`.
+#[unstable(feature = "placement_new_protocol")]
+pub trait Boxed {
+ /// The kind of data that is stored in this kind of box.
+ type Data; /* (`Data` unused b/c cannot yet express below bound.) */
+ /// The place that will negotiate the storage of the data.
+ type Place: BoxPlace<Self::Data>;
+
+ /// Converts filled place into final owning value, shifting
+ /// deallocation/cleanup responsibilities (if any remain), over to
+ /// returned instance of `Self` and forgetting `filled`.
+ unsafe fn finalize(filled: Self::Place) -> Self;
+}
+
+/// Specialization of `Place` trait supporting `box EXPR`.
+#[unstable(feature = "placement_new_protocol")]
+pub trait BoxPlace<Data: ?Sized> : Place<Data> {
+ /// Creates a globally fresh place.
+ fn make_place() -> Self;
+}
//! // The division was valid
//! Some(x) => println!("Result: {}", x),
//! // The division was invalid
-//! None => println!("Cannot divide by 0")
+//! None => println!("Cannot divide by 0"),
//! }
//! ```
//!
//! fn check_optional(optional: &Option<Box<i32>>) {
//! match *optional {
//! Some(ref p) => println!("have value {}", p),
-//! None => println!("have no value")
+//! None => println!("have no value"),
//! }
//! }
//! ```
//! // Take a reference to the contained string
//! match msg {
//! Some(ref m) => println!("{}", *m),
-//! None => ()
+//! None => (),
//! }
//!
//! // Remove the contained string, destroying the Option
//! let unwrapped_msg = match msg {
//! Some(m) => m,
-//! None => "default message"
+//! None => "default message",
//! };
//! ```
//!
//!
//! match name_of_biggest_animal {
//! Some(name) => println!("the biggest animal is {}", name),
-//! None => println!("there are no animals :(")
+//! None => println!("there are no animals :("),
//! }
//! ```
pub fn is_some(&self) -> bool {
match *self {
Some(_) => true,
- None => false
+ None => false,
}
}
pub fn as_ref<'r>(&'r self) -> Option<&'r T> {
match *self {
Some(ref x) => Some(x),
- None => None
+ None => None,
}
}
pub fn as_mut<'r>(&'r mut self) -> Option<&'r mut T> {
match *self {
Some(ref mut x) => Some(x),
- None => None
+ None => None,
}
}
/// # Examples
///
/// ```
- /// # #![feature(as_slice)]
+ /// #![feature(as_slice)]
+ ///
/// let mut x = Some("Diamonds");
/// {
/// let v = x.as_mut_slice();
pub fn unwrap_or(self, def: T) -> T {
match self {
Some(x) => x,
- None => def
+ None => def,
}
}
pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
match self {
Some(x) => x,
- None => f()
+ None => f(),
}
}
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> {
match self {
Some(x) => Some(f(x)),
- None => None
+ None => None,
}
}
pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
match self {
Some(t) => f(t),
- None => default()
+ None => default(),
}
}
pub fn or(self, optb: Option<T>) -> Option<T> {
match self {
Some(_) => self,
- None => optb
+ None => optb,
}
}
pub fn or_else<F: FnOnce() -> Option<T>>(self, f: F) -> Option<T> {
match self {
Some(_) => self,
- None => f()
+ None => f(),
}
}
pub fn unwrap_or_default(self) -> T {
match self {
Some(x) => x,
- None => Default::default()
+ None => Default::default(),
}
}
}
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
-//! Operations on raw pointers, `*const T`, and `*mut T`.
+//! Raw, unsafe pointers, `*const T`, and `*mut T`
//!
-//! Working with raw pointers in Rust is uncommon,
-//! typically limited to a few patterns.
-//!
-//! Use the `null` function to create null pointers, and the `is_null` method
-//! of the `*const T` type to check for null. The `*const T` type also defines
-//! the `offset` method, for pointer math.
-//!
-//! # Common ways to create raw pointers
-//!
-//! ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
-//!
-//! ```
-//! let my_num: i32 = 10;
-//! let my_num_ptr: *const i32 = &my_num;
-//! let mut my_speed: i32 = 88;
-//! let my_speed_ptr: *mut i32 = &mut my_speed;
-//! ```
-//!
-//! To get a pointer to a boxed value, dereference the box:
-//!
-//! ```
-//! let my_num: Box<i32> = Box::new(10);
-//! let my_num_ptr: *const i32 = &*my_num;
-//! let mut my_speed: Box<i32> = Box::new(88);
-//! let my_speed_ptr: *mut i32 = &mut *my_speed;
-//! ```
-//!
-//! This does not take ownership of the original allocation
-//! and requires no resource management later,
-//! but you must not use the pointer after its lifetime.
-//!
-//! ## 2. Consume a box (`Box<T>`).
-//!
-//! The `into_raw` function consumes a box and returns
-//! the raw pointer. It doesn't destroy `T` or deallocate any memory.
-//!
-//! ```
-//! # #![feature(box_raw)]
-//! use std::boxed;
-//!
-//! unsafe {
-//! let my_speed: Box<i32> = Box::new(88);
-//! let my_speed: *mut i32 = boxed::into_raw(my_speed);
-//!
-//! // By taking ownership of the original `Box<T>` though
-//! // we are obligated to put it together later to be destroyed.
-//! drop(Box::from_raw(my_speed));
-//! }
-//! ```
-//!
-//! Note that here the call to `drop` is for clarity - it indicates
-//! that we are done with the given value and it should be destroyed.
-//!
-//! ## 3. Get it from C.
-//!
-//! ```
-//! # #![feature(libc)]
-//! extern crate libc;
-//!
-//! use std::mem;
-//!
-//! fn main() {
-//! unsafe {
-//! let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>() as libc::size_t) as *mut i32;
-//! if my_num.is_null() {
-//! panic!("failed to allocate memory");
-//! }
-//! libc::free(my_num as *mut libc::c_void);
-//! }
-//! }
-//! ```
-//!
-//! Usually you wouldn't literally use `malloc` and `free` from Rust,
-//! but C APIs hand out a lot of pointers generally, so are a common source
-//! of raw pointers in Rust.
+//! *[See also the pointer primitive types](../primitive.pointer.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "pointer")]
use mem;
use clone::Clone;
use intrinsics;
use ops::Deref;
-use core::fmt;
+use fmt;
use option::Option::{self, Some, None};
use marker::{PhantomData, Send, Sized, Sync};
use nonzero::NonZero;
#[inline(always)]
#[unstable(feature = "read_and_zero",
reason = "may play a larger role in std::ptr future extensions")]
+#[deprecated(since = "1.3.0",
+ reason = "a \"zero value\" will soon not actually exist for all \
+ types once dynamic drop has been implemented")]
pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
/// # Examples
///
/// ```
-/// # #![feature(raw)]
+/// #![feature(raw)]
+///
/// use std::raw::{self, Repr};
///
/// let slice: &[u16] = &[1, 2, 3, 4];
/// # Examples
///
/// ```
-/// # #![feature(raw)]
+/// #![feature(raw)]
+///
/// use std::mem;
/// use std::raw;
///
/// Converts from `Result<T, E>` to `&mut [T]` (without copying)
///
/// ```
- /// # #![feature(as_slice)]
+ /// #![feature(as_slice)]
+ ///
/// let mut x: Result<&str, u32> = Ok("Gold");
/// {
/// let v = x.as_mut_slice();
/// x.expect("Testing expect"); // panics with `Testing expect: emergency failure`
/// ```
#[inline]
- #[unstable(feature = "result_expect", reason = "newly introduced")]
+ #[unstable(feature = "result_expect", reason = "newly introduced", issue = "27277")]
pub fn expect(self, msg: &str) -> T {
match self {
Ok(t) => t,
//! provided beyond this module.
//!
//! ```rust
-//! # #![feature(core_simd)]
+//! #![feature(core_simd)]
+//!
//! fn main() {
//! use std::simd::f32x4;
//! let a = f32x4(40.0, 41.0, 42.0, 43.0);
//! For more details `std::slice`.
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "slice")]
// How this module is organized.
//
fn first<'a>(&'a self) -> Option<&'a Self::Item>;
fn tail<'a>(&'a self) -> &'a [Self::Item];
fn init<'a>(&'a self) -> &'a [Self::Item];
+ fn split_first<'a>(&'a self) -> Option<(&'a Self::Item, &'a [Self::Item])>;
+ fn split_last<'a>(&'a self) -> Option<(&'a Self::Item, &'a [Self::Item])>;
fn last<'a>(&'a self) -> Option<&'a Self::Item>;
unsafe fn get_unchecked<'a>(&'a self, index: usize) -> &'a Self::Item;
fn as_ptr(&self) -> *const Self::Item;
fn first_mut<'a>(&'a mut self) -> Option<&'a mut Self::Item>;
fn tail_mut<'a>(&'a mut self) -> &'a mut [Self::Item];
fn init_mut<'a>(&'a mut self) -> &'a mut [Self::Item];
+ fn split_first_mut<'a>(&'a mut self) -> Option<(&'a mut Self::Item, &'a mut [Self::Item])>;
+ fn split_last_mut<'a>(&'a mut self) -> Option<(&'a mut Self::Item, &'a mut [Self::Item])>;
fn last_mut<'a>(&'a mut self) -> Option<&'a mut Self::Item>;
fn split_mut<'a, P>(&'a mut self, pred: P) -> SplitMut<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn tail(&self) -> &[T] { &self[1..] }
#[inline]
- fn init(&self) -> &[T] {
- &self[..self.len() - 1]
+ fn split_first(&self) -> Option<(&T, &[T])> {
+ if self.is_empty() { None } else { Some((&self[0], &self[1..])) }
+ }
+
+ #[inline]
+ fn init(&self) -> &[T] { &self[..self.len() - 1] }
+
+ #[inline]
+ fn split_last(&self) -> Option<(&T, &[T])> {
+ let len = self.len();
+ if len == 0 { None } else { Some((&self[len - 1], &self[..(len - 1)])) }
}
#[inline]
#[inline]
fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+ assert!(mid <= len);
unsafe {
- let self2: &mut [T] = mem::transmute_copy(&self);
-
- (ops::IndexMut::index_mut(self, ops::RangeTo { end: mid } ),
- ops::IndexMut::index_mut(self2, ops::RangeFrom { start: mid } ))
+ (from_raw_parts_mut(ptr, mid),
+ from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
}
}
}
#[inline]
- fn tail_mut(&mut self) -> &mut [T] {
- &mut self[1 ..]
+ fn tail_mut(&mut self) -> &mut [T] { &mut self[1 ..] }
+
+ #[inline]
+ fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ if self.is_empty() { None } else {
+ let split = self.split_at_mut(1);
+ Some((&mut split.0[0], split.1))
+ }
}
#[inline]
&mut self[.. (len - 1)]
}
+ #[inline]
+ fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ let len = self.len();
+ if len == 0 { None } else {
+ let split = self.split_at_mut(len - 1);
+ Some((&mut split.1[0], split.0))
+ }
+ }
+
#[inline]
fn split_mut<'a, P>(&'a mut self, pred: P) -> SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
SplitMut { v: self, pred: pred, finished: false }
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
+/// # Unsafety
+///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `len` elements, nor whether the lifetime inferred is a suitable
/// lifetime for the returned slice.
///
+/// `p` must be non-null, even for zero-length slices.
+///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
fn eq(&self, other: &[B]) -> bool {
- self.len() == other.len() &&
- order::eq(self.iter(), other.iter())
+ if self.len() != other.len() {
+ return false;
+ }
+
+ for i in 0..self.len() {
+ if !self[i].eq(&other[i]) {
+ return false;
+ }
+ }
+
+ true
}
fn ne(&self, other: &[B]) -> bool {
- self.len() != other.len() ||
- order::ne(self.iter(), other.iter())
+ if self.len() != other.len() {
+ return true;
+ }
+
+ for i in 0..self.len() {
+ if self[i].ne(&other[i]) {
+ return true;
+ }
+ }
+
+ false
}
}
//!
//! For more details, see std::str
-#![doc(primitive = "str")]
#![stable(feature = "rust1", since = "1.0.0")]
-use self::OldSearcher::{TwoWay, TwoWayLong};
use self::pattern::Pattern;
use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
use char::CharExt;
use clone::Clone;
-use cmp::{self, Eq};
+use cmp::Eq;
use convert::AsRef;
use default::Default;
use fmt;
use raw::{Repr, Slice};
use result::Result::{self, Ok, Err};
use slice::{self, SliceExt};
-use usize;
pub mod pattern;
generate_pattern_iterators! {
forward:
- #[doc="Created with the method `.split()`."]
+ /// Created with the method `.split()`.
struct Split;
reverse:
- #[doc="Created with the method `.rsplit()`."]
+ /// Created with the method `.rsplit()`.
struct RSplit;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- #[doc="Created with the method `.split_terminator()`."]
+ /// Created with the method `.split_terminator()`.
struct SplitTerminator;
reverse:
- #[doc="Created with the method `.rsplit_terminator()`."]
+ /// Created with the method `.rsplit_terminator()`.
struct RSplitTerminator;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- #[doc="Created with the method `.splitn()`."]
+ /// Created with the method `.splitn()`.
struct SplitN;
reverse:
- #[doc="Created with the method `.rsplitn()`."]
+ /// Created with the method `.rsplitn()`.
struct RSplitN;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- #[doc="Created with the method `.match_indices()`."]
+ /// Created with the method `.match_indices()`.
struct MatchIndices;
reverse:
- #[doc="Created with the method `.rmatch_indices()`."]
+ /// Created with the method `.rmatch_indices()`.
struct RMatchIndices;
stability:
#[unstable(feature = "str_match_indices",
generate_pattern_iterators! {
forward:
- #[doc="Created with the method `.matches()`."]
+ /// Created with the method `.matches()`.
struct Matches;
reverse:
- #[doc="Created with the method `.rmatches()`."]
+ /// Created with the method `.rmatches()`.
struct RMatches;
stability:
#[stable(feature = "str_matches", since = "1.2.0")]
}
}
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using two-way search
-#[derive(Clone)]
-struct TwoWaySearcher {
- // constants
- crit_pos: usize,
- period: usize,
- byteset: u64,
-
- // variables
- position: usize,
- memory: usize
-}
-
-/*
- This is the Two-Way search algorithm, which was introduced in the paper:
- Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
-
- Here's some background information.
-
- A *word* is a string of symbols. The *length* of a word should be a familiar
- notion, and here we denote it for any word x by |x|.
- (We also allow for the possibility of the *empty word*, a word of length zero).
-
- If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
- *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
- For example, both 1 and 2 are periods for the string "aa". As another example,
- the only period of the string "abcd" is 4.
-
- We denote by period(x) the *smallest* period of x (provided that x is non-empty).
- This is always well-defined since every non-empty word x has at least one period,
- |x|. We sometimes call this *the period* of x.
-
- If u, v and x are words such that x = uv, where uv is the concatenation of u and
- v, then we say that (u, v) is a *factorization* of x.
-
- Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
- that both of the following hold
-
- - either w is a suffix of u or u is a suffix of w
- - either w is a prefix of v or v is a prefix of w
-
- then w is said to be a *repetition* for the factorization (u, v).
-
- Just to unpack this, there are four possibilities here. Let w = "abc". Then we
- might have:
-
- - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
- - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
- - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
- - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
-
- Note that the word vu is a repetition for any factorization (u,v) of x = uv,
- so every factorization has at least one repetition.
-
- If x is a string and (u, v) is a factorization for x, then a *local period* for
- (u, v) is an integer r such that there is some word w such that |w| = r and w is
- a repetition for (u, v).
-
- We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
- call this *the local period* of (u, v). Provided that x = uv is non-empty, this
- is well-defined (because each non-empty word has at least one factorization, as
- noted above).
-
- It can be proven that the following is an equivalent definition of a local period
- for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
- all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
- defined. (i.e. i > 0 and i + r < |x|).
-
- Using the above reformulation, it is easy to prove that
-
- 1 <= local_period(u, v) <= period(uv)
-
- A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
- *critical factorization*.
-
- The algorithm hinges on the following theorem, which is stated without proof:
-
- **Critical Factorization Theorem** Any word x has at least one critical
- factorization (u, v) such that |u| < period(x).
-
- The purpose of maximal_suffix is to find such a critical factorization.
-
-*/
-impl TwoWaySearcher {
- #[allow(dead_code)]
- fn new(needle: &[u8]) -> TwoWaySearcher {
- let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
- let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);
-
- let (crit_pos, period) =
- if crit_pos_false > crit_pos_true {
- (crit_pos_false, period_false)
- } else {
- (crit_pos_true, period_true)
- };
-
- // This isn't in the original algorithm, as far as I'm aware.
- let byteset = needle.iter()
- .fold(0, |a, &b| (1 << ((b & 0x3f) as usize)) | a);
-
- // A particularly readable explanation of what's going on here can be found
- // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
- // see the code for "Algorithm CP" on p. 323.
- //
- // What's going on is we have some critical factorization (u, v) of the
- // needle, and we want to determine whether u is a suffix of
- // &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
- // "Algorithm CP2", which is optimized for when the period of the needle
- // is large.
- if &needle[..crit_pos] == &needle[period.. period + crit_pos] {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: period,
- byteset: byteset,
-
- position: 0,
- memory: 0
- }
- } else {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
- byteset: byteset,
-
- position: 0,
- memory: usize::MAX // Dummy value to signify that the period is long
- }
- }
- }
-
- // One of the main ideas of Two-Way is that we factorize the needle into
- // two halves, (u, v), and begin trying to find v in the haystack by scanning
- // left to right. If v matches, we try to match u by scanning right to left.
- // How far we can jump when we encounter a mismatch is all based on the fact
- // that (u, v) is a critical factorization for the needle.
- #[inline]
- fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool)
- -> Option<(usize, usize)> {
- 'search: loop {
- // Check that we have room to search in
- if self.position + needle.len() > haystack.len() {
- return None;
- }
-
- // Quickly skip by large portions unrelated to our substring
- if (self.byteset >>
- ((haystack[self.position + needle.len() - 1] & 0x3f)
- as usize)) & 1 == 0 {
- self.position += needle.len();
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
-
- // See if the right part of the needle matches
- let start = if long_period { self.crit_pos }
- else { cmp::max(self.crit_pos, self.memory) };
- for i in start..needle.len() {
- if needle[i] != haystack[self.position + i] {
- self.position += i - self.crit_pos + 1;
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
- }
-
- // See if the left part of the needle matches
- let start = if long_period { 0 } else { self.memory };
- for i in (start..self.crit_pos).rev() {
- if needle[i] != haystack[self.position + i] {
- self.position += self.period;
- if !long_period {
- self.memory = needle.len() - self.period;
- }
- continue 'search;
- }
- }
-
- // We have found a match!
- let match_pos = self.position;
- self.position += needle.len(); // add self.period for all matches
- if !long_period {
- self.memory = 0; // set to needle.len() - self.period for all matches
- }
- return Some((match_pos, match_pos + needle.len()));
- }
- }
-
- // Computes a critical factorization (u, v) of `arr`.
- // Specifically, returns (i, p), where i is the starting index of v in some
- // critical factorization (u, v) and p = period(v)
- #[inline]
- #[allow(dead_code)]
- #[allow(deprecated)]
- fn maximal_suffix(arr: &[u8], reversed: bool) -> (usize, usize) {
- let mut left: usize = !0; // Corresponds to i in the paper
- let mut right = 0; // Corresponds to j in the paper
- let mut offset = 1; // Corresponds to k in the paper
- let mut period = 1; // Corresponds to p in the paper
-
- while right + offset < arr.len() {
- let a;
- let b;
- if reversed {
- a = arr[left.wrapping_add(offset)];
- b = arr[right + offset];
- } else {
- a = arr[right + offset];
- b = arr[left.wrapping_add(offset)];
- }
- if a < b {
- // Suffix is smaller, period is entire prefix so far.
- right += offset;
- offset = 1;
- period = right.wrapping_sub(left);
- } else if a == b {
- // Advance through repetition of the current period.
- if offset == period {
- right += offset;
- offset = 1;
- } else {
- offset += 1;
- }
- } else {
- // Suffix is larger, start over from current location.
- left = right;
- right += 1;
- offset = 1;
- period = 1;
- }
- }
- (left.wrapping_add(1), period)
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using a dynamically chosen search algorithm
-#[derive(Clone)]
-// NB: This is kept around for convenience because
-// it is planned to be used again in the future
-enum OldSearcher {
- TwoWay(TwoWaySearcher),
- TwoWayLong(TwoWaySearcher),
-}
-
-impl OldSearcher {
- #[allow(dead_code)]
- fn new(haystack: &[u8], needle: &[u8]) -> OldSearcher {
- if needle.is_empty() {
- // Handle specially
- unimplemented!()
- // FIXME: Tune this.
- // FIXME(#16715): This unsigned integer addition will probably not
- // overflow because that would mean that the memory almost solely
- // consists of the needle. Needs #16715 to be formally fixed.
- } else if needle.len() + 20 > haystack.len() {
- // Use naive searcher
- unimplemented!()
- } else {
- let searcher = TwoWaySearcher::new(needle);
- if searcher.memory == usize::MAX { // If the period is long
- TwoWayLong(searcher)
- } else {
- TwoWay(searcher)
- }
- }
- }
-}
-
-#[derive(Clone)]
-// NB: This is kept around for convenience because
-// it is planned to be used again in the future
-struct OldMatchIndices<'a, 'b> {
- // constants
- haystack: &'a str,
- needle: &'b str,
- searcher: OldSearcher
-}
-
-impl<'a, 'b> OldMatchIndices<'a, 'b> {
- #[inline]
- #[allow(dead_code)]
- fn next(&mut self) -> Option<(usize, usize)> {
- match self.searcher {
- TwoWay(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
- TwoWayLong(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true),
- }
- }
-}
-
/*
Section: Comparing strings
*/
-// share the implementation of the lang-item vs. non-lang-item
-// eq_slice.
+/// Bytewise slice equality
/// NOTE: This function is (ab)used in rustc::middle::trans::_match
/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
+#[lang = "str_eq"]
#[inline]
-fn eq_slice_(a: &str, b: &str) -> bool {
+fn eq_slice(a: &str, b: &str) -> bool {
// NOTE: In theory n should be libc::size_t and not usize, but libc is not available here
#[allow(improper_ctypes)]
extern { fn memcmp(s1: *const i8, s2: *const i8, n: usize) -> i32; }
}
}
-/// Bytewise slice equality
-/// NOTE: This function is (ab)used in rustc::middle::trans::_match
-/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
-#[lang = "str_eq"]
-#[inline]
-fn eq_slice(a: &str, b: &str) -> bool {
- eq_slice_(a, b)
-}
-
/*
Section: Misc
*/
}
}
+ /// Returns a mutable slice of the given string from the byte range
+ /// [`begin`..`end`).
+ #[stable(feature = "derefmut_for_string", since = "1.2.0")]
+ impl ops::IndexMut<ops::Range<usize>> for str {
+ #[inline]
+ fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if index.start <= index.end &&
+ self.is_char_boundary(index.start) &&
+ self.is_char_boundary(index.end) {
+ unsafe { self.slice_mut_unchecked(index.start, index.end) }
+ } else {
+ super::slice_error_fail(self, index.start, index.end)
+ }
+ }
+ }
+
/// Returns a slice of the string from the beginning to byte
/// `end`.
///
}
}
+ /// Returns a mutable slice of the string from the beginning to byte
+ /// `end`.
+ #[stable(feature = "derefmut_for_string", since = "1.2.0")]
+ impl ops::IndexMut<ops::RangeTo<usize>> for str {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(index.end) {
+ unsafe { self.slice_mut_unchecked(0, index.end) }
+ } else {
+ super::slice_error_fail(self, 0, index.end)
+ }
+ }
+ }
+
/// Returns a slice of the string from `begin` to its end.
///
/// Equivalent to `self[begin .. self.len()]`.
}
}
+ /// Returns a slice of the string from `begin` to its end.
+ #[stable(feature = "derefmut_for_string", since = "1.2.0")]
+ impl ops::IndexMut<ops::RangeFrom<usize>> for str {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(index.start) {
+ let len = self.len();
+ unsafe { self.slice_mut_unchecked(index.start, len) }
+ } else {
+ super::slice_error_fail(self, index.start, self.len())
+ }
+ }
+ }
+
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFull> for str {
type Output = str;
self
}
}
+
+ #[stable(feature = "derefmut_for_string", since = "1.2.0")]
+ impl ops::IndexMut<ops::RangeFull> for str {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
+ self
+ }
+ }
}
/// Methods for string slices
fn char_len(&self) -> usize;
fn slice_chars<'a>(&'a self, begin: usize, end: usize) -> &'a str;
unsafe fn slice_unchecked<'a>(&'a self, begin: usize, end: usize) -> &'a str;
+ unsafe fn slice_mut_unchecked<'a>(&'a mut self, begin: usize, end: usize) -> &'a mut str;
fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool;
fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool
where P::Searcher: ReverseSearcher<'a>;
where P::Searcher: ReverseSearcher<'a>;
fn find_str<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize>;
fn split_at(&self, mid: usize) -> (&str, &str);
+ fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str);
fn slice_shift_char<'a>(&'a self) -> Option<(char, &'a str)>;
fn subslice_offset(&self, inner: &str) -> usize;
fn as_ptr(&self) -> *const u8;
})
}
+ #[inline]
+ unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
+ mem::transmute(Slice {
+ data: self.as_ptr().offset(begin as isize),
+ len: end - begin,
+ })
+ }
+
#[inline]
fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool {
pat.is_prefix_of(self)
}
}
+ fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(mid) {
+ let len = self.len();
+ unsafe {
+ let self2: &mut str = mem::transmute_copy(&self);
+ (self.slice_mut_unchecked(0, mid),
+ self2.slice_mut_unchecked(mid, len))
+ }
+ } else {
+ slice_error_fail(self, 0, mid)
+ }
+ }
+
#[inline]
fn slice_shift_char(&self) -> Option<(char, &str)> {
if self.is_empty() {
reason = "API not fully fleshed out and ready to be stabilized")]
use prelude::*;
+use cmp;
+use usize;
// Pattern
impl<'a, C: CharEq> DoubleEndedSearcher<'a> for CharEqSearcher<'a, C> {}
-/////////////////////////////////////////////////////////////////////////////
-// Impl for &str
-/////////////////////////////////////////////////////////////////////////////
-
-// Todo: Optimize the naive implementation here
-
-/// Associated type for `<&str as Pattern<'a>>::Searcher`.
-#[derive(Clone)]
-pub struct StrSearcher<'a, 'b> {
- haystack: &'a str,
- needle: &'b str,
- start: usize,
- end: usize,
- state: State,
-}
-
-#[derive(Clone, PartialEq)]
-enum State { Done, NotDone, Reject(usize, usize) }
-impl State {
- #[inline] fn done(&self) -> bool { *self == State::Done }
- #[inline] fn take(&mut self) -> State { ::mem::replace(self, State::NotDone) }
-}
-
-/// Non-allocating substring search.
-///
-/// Will handle the pattern `""` as returning empty matches at each utf8
-/// boundary.
-impl<'a, 'b> Pattern<'a> for &'b str {
- type Searcher = StrSearcher<'a, 'b>;
-
- #[inline]
- fn into_searcher(self, haystack: &'a str) -> StrSearcher<'a, 'b> {
- StrSearcher {
- haystack: haystack,
- needle: self,
- start: 0,
- end: haystack.len(),
- state: State::NotDone,
- }
- }
-}
-
-unsafe impl<'a, 'b> Searcher<'a> for StrSearcher<'a, 'b> {
- #[inline]
- fn haystack(&self) -> &'a str {
- self.haystack
- }
-
- #[inline]
- fn next(&mut self) -> SearchStep {
- str_search_step(self,
- |m: &mut StrSearcher| {
- // Forward step for empty needle
- let current_start = m.start;
- if !m.state.done() {
- m.start = m.haystack.char_range_at(current_start).next;
- m.state = State::Reject(current_start, m.start);
- }
- SearchStep::Match(current_start, current_start)
- },
- |m: &mut StrSearcher| {
- // Forward step for nonempty needle
- let current_start = m.start;
- // Compare byte window because this might break utf8 boundaries
- let possible_match = &m.haystack.as_bytes()[m.start .. m.start + m.needle.len()];
- if possible_match == m.needle.as_bytes() {
- m.start += m.needle.len();
- SearchStep::Match(current_start, m.start)
- } else {
- // Skip a char
- let haystack_suffix = &m.haystack[m.start..];
- m.start += haystack_suffix.chars().next().unwrap().len_utf8();
- SearchStep::Reject(current_start, m.start)
- }
- })
- }
-}
-
-unsafe impl<'a, 'b> ReverseSearcher<'a> for StrSearcher<'a, 'b> {
- #[inline]
- fn next_back(&mut self) -> SearchStep {
- str_search_step(self,
- |m: &mut StrSearcher| {
- // Backward step for empty needle
- let current_end = m.end;
- if !m.state.done() {
- m.end = m.haystack.char_range_at_reverse(current_end).next;
- m.state = State::Reject(m.end, current_end);
- }
- SearchStep::Match(current_end, current_end)
- },
- |m: &mut StrSearcher| {
- // Backward step for nonempty needle
- let current_end = m.end;
- // Compare byte window because this might break utf8 boundaries
- let possible_match = &m.haystack.as_bytes()[m.end - m.needle.len() .. m.end];
- if possible_match == m.needle.as_bytes() {
- m.end -= m.needle.len();
- SearchStep::Match(m.end, current_end)
- } else {
- // Skip a char
- let haystack_prefix = &m.haystack[..m.end];
- m.end -= haystack_prefix.chars().rev().next().unwrap().len_utf8();
- SearchStep::Reject(m.end, current_end)
- }
- })
- }
-}
-
-// Helper function for encapsulating the common control flow
-// of doing a search step from the front or doing a search step from the back
-fn str_search_step<F, G>(mut m: &mut StrSearcher,
- empty_needle_step: F,
- nonempty_needle_step: G) -> SearchStep
- where F: FnOnce(&mut StrSearcher) -> SearchStep,
- G: FnOnce(&mut StrSearcher) -> SearchStep
-{
- if m.state.done() {
- SearchStep::Done
- } else if m.needle.is_empty() && m.start <= m.end {
- // Case for needle == ""
- if let State::Reject(a, b) = m.state.take() {
- SearchStep::Reject(a, b)
- } else {
- if m.start == m.end {
- m.state = State::Done;
- }
- empty_needle_step(&mut m)
- }
- } else if m.start + m.needle.len() <= m.end {
- // Case for needle != ""
- nonempty_needle_step(&mut m)
- } else if m.start < m.end {
- // Remaining slice shorter than needle, reject it
- m.state = State::Done;
- SearchStep::Reject(m.start, m.end)
- } else {
- m.state = State::Done;
- SearchStep::Done
- }
-}
-
/////////////////////////////////////////////////////////////////////////////
macro_rules! pattern_methods {
impl<'a, 'b> Pattern<'a> for &'b &'b str {
pattern_methods!(StrSearcher<'a, 'b>, |&s| s, |s| s);
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &str
+/////////////////////////////////////////////////////////////////////////////
+
+/// Non-allocating substring search.
+///
+/// Will handle the pattern `""` as returning empty matches at each character
+/// boundary.
+impl<'a, 'b> Pattern<'a> for &'b str {
+ type Searcher = StrSearcher<'a, 'b>;
+
+ #[inline]
+ fn into_searcher(self, haystack: &'a str) -> StrSearcher<'a, 'b> {
+ StrSearcher::new(haystack, self)
+ }
+
+ /// Checks whether the pattern matches at the front of the haystack
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ haystack.is_char_boundary(self.len()) &&
+ self == &haystack[..self.len()]
+ }
+
+ /// Checks whether the pattern matches at the back of the haystack
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool {
+ self.len() <= haystack.len() &&
+ haystack.is_char_boundary(haystack.len() - self.len()) &&
+ self == &haystack[haystack.len() - self.len()..]
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Two Way substring searcher
+/////////////////////////////////////////////////////////////////////////////
+
+#[derive(Clone, Debug)]
+/// Associated type for `<&str as Pattern<'a>>::Searcher`.
+pub struct StrSearcher<'a, 'b> {
+ haystack: &'a str,
+ needle: &'b str,
+
+ searcher: StrSearcherImpl,
+}
+
+#[derive(Clone, Debug)]
+enum StrSearcherImpl {
+ Empty(EmptyNeedle),
+ TwoWay(TwoWaySearcher),
+}
+
+#[derive(Clone, Debug)]
+struct EmptyNeedle {
+ position: usize,
+ end: usize,
+ is_match_fw: bool,
+ is_match_bw: bool,
+}
+
+impl<'a, 'b> StrSearcher<'a, 'b> {
+ fn new(haystack: &'a str, needle: &'b str) -> StrSearcher<'a, 'b> {
+ if needle.is_empty() {
+ StrSearcher {
+ haystack: haystack,
+ needle: needle,
+ searcher: StrSearcherImpl::Empty(EmptyNeedle {
+ position: 0,
+ end: haystack.len(),
+ is_match_fw: true,
+ is_match_bw: true,
+ }),
+ }
+ } else {
+ StrSearcher {
+ haystack: haystack,
+ needle: needle,
+ searcher: StrSearcherImpl::TwoWay(
+ TwoWaySearcher::new(needle.as_bytes(), haystack.len())
+ ),
+ }
+ }
+ }
+}
+
+unsafe impl<'a, 'b> Searcher<'a> for StrSearcher<'a, 'b> {
+ fn haystack(&self) -> &'a str { self.haystack }
+
+ #[inline]
+ fn next(&mut self) -> SearchStep {
+ match self.searcher {
+ StrSearcherImpl::Empty(ref mut searcher) => {
+ // empty needle rejects every char and matches every empty string between them
+ let is_match = searcher.is_match_fw;
+ searcher.is_match_fw = !searcher.is_match_fw;
+ let pos = searcher.position;
+ match self.haystack[pos..].chars().next() {
+ _ if is_match => SearchStep::Match(pos, pos),
+ None => SearchStep::Done,
+ Some(ch) => {
+ searcher.position += ch.len_utf8();
+ SearchStep::Reject(pos, searcher.position)
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ // TwoWaySearcher produces valid *Match* indices that split at char boundaries
+ // as long as it does correct matching and that haystack and needle are
+ // valid UTF-8
+ // *Rejects* from the algorithm can fall on any indices, but we will walk them
+ // manually to the next character boundary, so that they are utf-8 safe.
+ if searcher.position == self.haystack.len() {
+ return SearchStep::Done;
+ }
+ let is_long = searcher.memory == usize::MAX;
+ match searcher.next::<RejectAndMatch>(self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ is_long)
+ {
+ SearchStep::Reject(a, mut b) => {
+ // skip to next char boundary
+ while !self.haystack.is_char_boundary(b) {
+ b += 1;
+ }
+ searcher.position = cmp::max(b, searcher.position);
+ SearchStep::Reject(a, b)
+ }
+ otherwise => otherwise,
+ }
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn next_match(&mut self) -> Option<(usize, usize)> {
+ match self.searcher {
+ StrSearcherImpl::Empty(..) => {
+ loop {
+ match self.next() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ SearchStep::Reject(..) => { }
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ let is_long = searcher.memory == usize::MAX;
+ if is_long {
+ searcher.next::<MatchOnly>(self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ true)
+ } else {
+ searcher.next::<MatchOnly>(self.haystack.as_bytes(),
+ self.needle.as_bytes(),
+ false)
+ }
+ }
+ }
+ }
+
+}
+unsafe impl<'a, 'b> ReverseSearcher<'a> for StrSearcher<'a, 'b> {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ match self.searcher {
+ StrSearcherImpl::Empty(ref mut searcher) => {
+ let is_match = searcher.is_match_bw;
+ searcher.is_match_bw = !searcher.is_match_bw;
+ let end = searcher.end;
+ match self.haystack[..end].chars().next_back() {
+ _ if is_match => SearchStep::Match(end, end),
+ None => SearchStep::Done,
+ Some(ch) => {
+ searcher.end -= ch.len_utf8();
+ SearchStep::Reject(searcher.end, end)
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ if searcher.end == 0 {
+ return SearchStep::Done;
+ }
+ match searcher.next_back::<RejectAndMatch>(self.haystack.as_bytes(),
+ self.needle.as_bytes())
+ {
+ SearchStep::Reject(mut a, b) => {
+ // skip to next char boundary
+ while !self.haystack.is_char_boundary(a) {
+ a -= 1;
+ }
+ searcher.end = cmp::min(a, searcher.end);
+ SearchStep::Reject(a, b)
+ }
+ otherwise => otherwise,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn next_match_back(&mut self) -> Option<(usize, usize)> {
+ match self.searcher {
+ StrSearcherImpl::Empty(..) => {
+ loop {
+ match self.next_back() {
+ SearchStep::Match(a, b) => return Some((a, b)),
+ SearchStep::Done => return None,
+ SearchStep::Reject(..) => { }
+ }
+ }
+ }
+ StrSearcherImpl::TwoWay(ref mut searcher) => {
+ searcher.next_back::<MatchOnly>(self.haystack.as_bytes(),
+ self.needle.as_bytes())
+ }
+ }
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using two-way search
+#[derive(Clone, Debug)]
+struct TwoWaySearcher {
+ // constants
+ crit_pos: usize,
+ period: usize,
+ byteset: u64,
+
+ // variables
+ position: usize,
+ end: usize,
+ memory: usize
+}
+
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because each non-empty word has at least one factorization, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
+    defined. (i.e. 0 <= i and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+*/
+impl TwoWaySearcher {
+ fn new(needle: &[u8], end: usize) -> TwoWaySearcher {
+ let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
+ let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);
+
+ let (crit_pos, period) =
+ if crit_pos_false > crit_pos_true {
+ (crit_pos_false, period_false)
+ } else {
+ (crit_pos_true, period_true)
+ };
+
+ // This isn't in the original algorithm, as far as I'm aware.
+ let byteset = needle.iter()
+ .fold(0, |a, &b| (1 << ((b & 0x3f) as usize)) | a);
+
+ // A particularly readable explanation of what's going on here can be found
+ // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
+ // see the code for "Algorithm CP" on p. 323.
+ //
+ // What's going on is we have some critical factorization (u, v) of the
+ // needle, and we want to determine whether u is a suffix of
+ // &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
+ // "Algorithm CP2", which is optimized for when the period of the needle
+ // is large.
+ if &needle[..crit_pos] == &needle[period.. period + crit_pos] {
+ // short period case
+ TwoWaySearcher {
+ crit_pos: crit_pos,
+ period: period,
+ byteset: byteset,
+
+ position: 0,
+ end: end,
+ memory: 0
+ }
+ } else {
+ // long period case
+ // we have an approximation to the actual period, and don't use memory.
+ TwoWaySearcher {
+ crit_pos: crit_pos,
+ period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
+ byteset: byteset,
+
+ position: 0,
+ end: end,
+ memory: usize::MAX // Dummy value to signify that the period is long
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn byteset_contains(&self, byte: u8) -> bool {
+ (self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
+ }
+
+ // One of the main ideas of Two-Way is that we factorize the needle into
+ // two halves, (u, v), and begin trying to find v in the haystack by scanning
+ // left to right. If v matches, we try to match u by scanning right to left.
+ // How far we can jump when we encounter a mismatch is all based on the fact
+ // that (u, v) is a critical factorization for the needle.
+ #[inline(always)]
+ fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool)
+ -> S::Output
+ where S: TwoWayStrategy
+ {
+ // `next()` uses `self.position` as its cursor
+ let old_pos = self.position;
+ 'search: loop {
+ // Check that we have room to search in
+ if needle.len() > haystack.len() - self.position {
+ self.position = haystack.len();
+ return S::rejecting(old_pos, self.position);
+ }
+
+ if S::use_early_reject() && old_pos != self.position {
+ return S::rejecting(old_pos, self.position);
+ }
+
+ // Quickly skip by large portions unrelated to our substring
+ if !self.byteset_contains(haystack[self.position + needle.len() - 1]) {
+ self.position += needle.len();
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+
+ // See if the right part of the needle matches
+ let start = if long_period { self.crit_pos }
+ else { cmp::max(self.crit_pos, self.memory) };
+ for i in start..needle.len() {
+ if needle[i] != haystack[self.position + i] {
+ self.position += i - self.crit_pos + 1;
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+ }
+
+ // See if the left part of the needle matches
+ let start = if long_period { 0 } else { self.memory };
+ for i in (start..self.crit_pos).rev() {
+ if needle[i] != haystack[self.position + i] {
+ self.position += self.period;
+ if !long_period {
+ self.memory = needle.len() - self.period;
+ }
+ continue 'search;
+ }
+ }
+
+ // We have found a match!
+ let match_pos = self.position;
+
+ // Note: add self.period instead of needle.len() to have overlapping matches
+ self.position += needle.len();
+ if !long_period {
+ self.memory = 0; // set to needle.len() - self.period for overlapping matches
+ }
+
+ return S::matching(match_pos, match_pos + needle.len());
+ }
+ }
+
+ // Follows the ideas in `next()`.
+ //
+ // All the definitions are completely symmetrical, with period(x) = period(reverse(x))
+ // and local_period(u, v) = local_period(reverse(v), reverse(u)), so if (u, v)
+ // is a critical factorization, so is (reverse(v), reverse(u)). Similarly,
+ // the "period" stored in self.period is the real period if long_period is
+ // false, and so is still valid for a reversed needle, and if long_period is
+ // true, all the algorithm requires is that self.period is less than or
+ // equal to the real period, which must be true for the forward case anyway.
+ //
+ // To search in reverse through the haystack, we search forward through
+ // a reversed haystack with a reversed needle, and the above paragraph shows
+ // that the precomputed parameters can be left alone.
+ #[inline]
+ fn next_back<S>(&mut self, haystack: &[u8], needle: &[u8])
+ -> S::Output
+ where S: TwoWayStrategy
+ {
+ // `next_back()` uses `self.end` as its cursor -- so that `next()` and `next_back()`
+ // are independent.
+ let old_end = self.end;
+ 'search: loop {
+ // Check that we have room to search in
+ if needle.len() > self.end {
+ self.end = 0;
+ return S::rejecting(0, old_end);
+ }
+
+ if S::use_early_reject() && old_end != self.end {
+ return S::rejecting(self.end, old_end);
+ }
+
+ // Quickly skip by large portions unrelated to our substring
+ if !self.byteset_contains(haystack[self.end - needle.len()]) {
+ self.end -= needle.len();
+ continue 'search;
+ }
+
+ // See if the left part of the needle matches
+ for i in (0..self.crit_pos).rev() {
+ if needle[i] != haystack[self.end - needle.len() + i] {
+ self.end -= self.crit_pos - i;
+ continue 'search;
+ }
+ }
+
+ // See if the right part of the needle matches
+ for i in self.crit_pos..needle.len() {
+ if needle[i] != haystack[self.end - needle.len() + i] {
+ self.end -= self.period;
+ continue 'search;
+ }
+ }
+
+ // We have found a match!
+ let match_pos = self.end - needle.len();
+ // Note: sub self.period instead of needle.len() to have overlapping matches
+ self.end -= needle.len();
+
+ return S::matching(match_pos, match_pos + needle.len());
+ }
+ }
+
+ // Computes a critical factorization (u, v) of `arr`.
+ // Specifically, returns (i, p), where i is the starting index of v in some
+ // critical factorization (u, v) and p = period(v)
+ #[inline]
+ fn maximal_suffix(arr: &[u8], reversed: bool) -> (usize, usize) {
+ let mut left: usize = !0; // Corresponds to i in the paper
+ let mut right = 0; // Corresponds to j in the paper
+ let mut offset = 1; // Corresponds to k in the paper
+ let mut period = 1; // Corresponds to p in the paper
+
+ while right + offset < arr.len() {
+ let a;
+ let b;
+ if reversed {
+ a = arr[left.wrapping_add(offset)];
+ b = arr[right + offset];
+ } else {
+ a = arr[right + offset];
+ b = arr[left.wrapping_add(offset)];
+ }
+ if a < b {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset;
+ offset = 1;
+ period = right.wrapping_sub(left);
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset == period {
+ right += offset;
+ offset = 1;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 1;
+ period = 1;
+ }
+ }
+ (left.wrapping_add(1), period)
+ }
+}
+
+// TwoWayStrategy allows the algorithm to either skip non-matches as quickly
+// as possible, or to work in a mode where it emits Rejects relatively quickly.
+trait TwoWayStrategy {
+ type Output;
+ fn use_early_reject() -> bool;
+ fn rejecting(usize, usize) -> Self::Output;
+ fn matching(usize, usize) -> Self::Output;
+}
+
+/// Skip to match intervals as quickly as possible
+enum MatchOnly { }
+
+impl TwoWayStrategy for MatchOnly {
+ type Output = Option<(usize, usize)>;
+
+ #[inline]
+ fn use_early_reject() -> bool { false }
+ #[inline]
+ fn rejecting(_a: usize, _b: usize) -> Self::Output { None }
+ #[inline]
+ fn matching(a: usize, b: usize) -> Self::Output { Some((a, b)) }
+}
+
+/// Emit Rejects regularly
+enum RejectAndMatch { }
+
+impl TwoWayStrategy for RejectAndMatch {
+ type Output = SearchStep;
+
+ #[inline]
+ fn use_early_reject() -> bool { true }
+ #[inline]
+ fn rejecting(a: usize, b: usize) -> Self::Output { SearchStep::Reject(a, b) }
+ #[inline]
+ fn matching(a: usize, b: usize) -> Self::Output { SearchStep::Match(a, b) }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations on tuples
+//! A finite heterogeneous sequence, `(T, U, ..)`
//!
//! To access a single element of a tuple one can use the `.0`
//! field access syntax.
//! * `Default`
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "tuple")]
use clone::Clone;
use cmp::*;
assert_eq!(upper('ᾀ'), ['Ἀ', 'Ι']);
}
-#[test]
-fn test_to_titlecase() {
- fn title(c: char) -> Vec<char> {
- c.to_titlecase().collect()
- }
- assert_eq!(title('a'), ['A']);
- assert_eq!(title('ö'), ['Ö']);
- assert_eq!(title('ß'), ['S', 's']); // not ẞ: Latin capital letter sharp s
- assert_eq!(title('ü'), ['Ü']);
- assert_eq!(title('💩'), ['💩']);
-
- assert_eq!(title('σ'), ['Σ']);
- assert_eq!(title('τ'), ['Τ']);
- assert_eq!(title('ι'), ['Ι']);
- assert_eq!(title('γ'), ['Γ']);
- assert_eq!(title('μ'), ['Μ']);
- assert_eq!(title('α'), ['Α']);
- assert_eq!(title('ς'), ['Σ']);
- assert_eq!(title('DŽ'), ['Dž']);
- assert_eq!(title('fi'), ['F', 'i']);
- assert_eq!(title('ᾀ'), ['ᾈ']);
-}
-
#[test]
fn test_is_control() {
assert!('\u{0}'.is_control());
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(unsigned_negation)]
-
use core::fmt::radix;
#[test]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+mod sip;
+
use std::mem;
use std::hash::{Hash, Hasher};
use std::default::Default;
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use test::Bencher;
-use std::prelude::*;
-use std::fmt;
+use test::{Bencher, black_box};
-use str::Str;
-use string::String;
-use slice::{AsSlice, SlicePrelude};
-use vec::Vec;
-
-use core::hash::{Hash, Writer};
-use core::hash::sip::{SipState, hash, hash_with_keys};
+use core::hash::{Hash, Hasher};
+use core::hash::SipHasher;
// Hash just the bytes of the slice, without length prefix
struct Bytes<'a>(&'a [u8]);
-impl<'a, S: Writer> Hash<S> for Bytes<'a> {
+impl<'a> Hash for Bytes<'a> {
#[allow(unused_must_use)]
- fn hash(&self, state: &mut S) {
+ fn hash<H: Hasher>(&self, state: &mut H) {
let Bytes(v) = *self;
state.write(v);
}
}
+macro_rules! u8to64_le {
+ ($buf:expr, $i:expr) =>
+ ($buf[0+$i] as u64 |
+ ($buf[1+$i] as u64) << 8 |
+ ($buf[2+$i] as u64) << 16 |
+ ($buf[3+$i] as u64) << 24 |
+ ($buf[4+$i] as u64) << 32 |
+ ($buf[5+$i] as u64) << 40 |
+ ($buf[6+$i] as u64) << 48 |
+ ($buf[7+$i] as u64) << 56);
+ ($buf:expr, $i:expr, $len:expr) =>
+ ({
+ let mut t = 0;
+ let mut out = 0;
+ while t < $len {
+ out |= ($buf[t+$i] as u64) << t*8;
+ t += 1;
+ }
+ out
+ });
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut st = SipHasher::new();
+ x.hash(&mut st);
+ st.finish()
+}
+
+fn hash_with_keys<T: Hash>(k1: u64, k2: u64, x: &T) -> u64 {
+ let mut st = SipHasher::new_with_keys(k1, k2);
+ x.hash(&mut st);
+ st.finish()
+}
+
+fn hash_bytes(x: &[u8]) -> u64 {
+ let mut s = SipHasher::default();
+ Hasher::write(&mut s, x);
+ s.finish()
+}
+
#[test]
#[allow(unused_must_use)]
fn test_siphash() {
let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
let mut buf = Vec::new();
let mut t = 0;
- let mut state_inc = SipState::new_with_keys(k0, k1);
- let mut state_full = SipState::new_with_keys(k0, k1);
-
- fn to_hex_str(r: &[u8; 8]) -> String {
- let mut s = String::new();
- for b in r {
- s.push_str(format!("{}", fmt::radix(*b, 16)));
- }
- s
- }
-
- fn result_bytes(h: u64) -> Vec<u8> {
- vec![(h >> 0) as u8,
- (h >> 8) as u8,
- (h >> 16) as u8,
- (h >> 24) as u8,
- (h >> 32) as u8,
- (h >> 40) as u8,
- (h >> 48) as u8,
- (h >> 56) as u8,
- ]
- }
-
- fn result_str(h: u64) -> String {
- let r = result_bytes(h);
- let mut s = String::new();
- for b in &r {
- s.push_str(format!("{}", fmt::radix(*b, 16)));
- }
- s
- }
+ let mut state_inc = SipHasher::new_with_keys(k0, k1);
while t < 64 {
- debug!("siphash test {}: {}", t, buf);
let vec = u8to64_le!(vecs[t], 0);
- let out = hash_with_keys(k0, k1, &Bytes(buf));
- debug!("got {}, expected {}", out, vec);
+ let out = hash_with_keys(k0, k1, &Bytes(&buf));
assert_eq!(vec, out);
- state_full.reset();
- state_full.write(buf);
- let f = result_str(state_full.result());
- let i = result_str(state_inc.result());
- let v = to_hex_str(&vecs[t]);
- debug!("{}: ({}) => inc={} full={}", t, v, i, f);
+ let full = hash_with_keys(k0, k1, &Bytes(&buf));
+ let i = state_inc.finish();
- assert_eq!(f, i);
- assert_eq!(f, v);
+ assert_eq!(full, i);
+ assert_eq!(full, vec);
buf.push(t as u8);
- state_inc.write(&[t as u8]);
+ Hasher::write(&mut state_inc, &[t as u8]);
t += 1;
}
}
#[test] #[cfg(target_arch = "arm")]
-fn test_hash_uint() {
+fn test_hash_usize() {
let val = 0xdeadbeef_deadbeef_u64;
- assert!(hash(&(val as u64)) != hash(&(val as uint)));
- assert_eq!(hash(&(val as u32)), hash(&(val as uint)));
+ assert!(hash(&(val as u64)) != hash(&(val as usize)));
+ assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
}
#[test] #[cfg(target_arch = "x86_64")]
-fn test_hash_uint() {
+fn test_hash_usize() {
let val = 0xdeadbeef_deadbeef_u64;
- assert_eq!(hash(&(val as u64)), hash(&(val as uint)));
- assert!(hash(&(val as u32)) != hash(&(val as uint)));
+ assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
+ assert!(hash(&(val as u32)) != hash(&(val as usize)));
}
#[test] #[cfg(target_arch = "x86")]
-fn test_hash_uint() {
+fn test_hash_usize() {
let val = 0xdeadbeef_deadbeef_u64;
- assert!(hash(&(val as u64)) != hash(&(val as uint)));
- assert_eq!(hash(&(val as u32)), hash(&(val as uint)));
+ assert!(hash(&(val as u64)) != hash(&(val as usize)));
+ assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
}
#[test]
assert!(hash(&val) != hash(&zero_byte(val, 6)));
assert!(hash(&val) != hash(&zero_byte(val, 7)));
- fn zero_byte(val: u64, byte: uint) -> u64 {
+ fn zero_byte(val: u64, byte: usize) -> u64 {
assert!(byte < 8);
val & !(0xff << (byte * 8))
}
assert!(hash(&val) != hash(&zero_byte(val, 2)));
assert!(hash(&val) != hash(&zero_byte(val, 3)));
- fn zero_byte(val: u32, byte: uint) -> u32 {
+ fn zero_byte(val: u32, byte: usize) -> u32 {
assert!(byte < 4);
val & !(0xff << (byte * 8))
}
assert!(s != t && t != u);
assert!(hash(&s) != hash(&t) && hash(&s) != hash(&u));
- let v: (&[u8], &[u8], &[u8]) = (&[1], &[0, 0], &[0]);
- let w: (&[u8], &[u8], &[u8]) = (&[1, 0, 0, 0], &[], &[]);
+ let u = [1, 0, 0, 0];
+ let v = (&u[..1], &u[1..3], &u[3..]);
+ let w = (&u[..], &u[4..4], &u[4..4]);
assert!(v != w);
assert!(hash(&v) != hash(&w));
})
}
+#[bench]
+fn bench_u32(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ b.iter(|| {
+ hash(&u)
+ });
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_u32_keyed(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ let k1 = black_box(0x1);
+ let k2 = black_box(0x2);
+ b.iter(|| {
+ hash_with_keys(k1, k2, &u)
+ });
+ b.bytes = 8;
+}
+
#[bench]
fn bench_u64(b: &mut Bencher) {
let u = 16262950014981195938u64;
+ let u = black_box(u);
b.iter(|| {
- assert_eq!(hash(&u), 5254097107239593357);
- })
+ hash(&u)
+ });
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_4(b: &mut Bencher) {
+ let data = black_box([b' '; 4]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 4;
+}
+
+#[bench]
+fn bench_bytes_7(b: &mut Bencher) {
+ let data = black_box([b' '; 7]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 7;
+}
+
+#[bench]
+fn bench_bytes_8(b: &mut Bencher) {
+ let data = black_box([b' '; 8]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_a_16(b: &mut Bencher) {
+ let data = black_box([b' '; 16]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 16;
+}
+
+#[bench]
+fn bench_bytes_b_32(b: &mut Bencher) {
+ let data = black_box([b' '; 32]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 32;
+}
+
+#[bench]
+fn bench_bytes_c_128(b: &mut Bencher) {
+ let data = black_box([b' '; 128]);
+ b.iter(|| {
+ hash_bytes(&data)
+ });
+ b.bytes = 128;
}
}
#[test]
- fn test_int_from_minus_sign() {
- assert_eq!("-".parse::<i32>().ok(), None);
+ fn test_invalid() {
+ assert_eq!("--129".parse::<i8>().ok(), None);
+ assert_eq!("Съешь".parse::<u8>().ok(), None);
+ }
+
+ #[test]
+ fn test_empty() {
+ assert_eq!("-".parse::<i8>().ok(), None);
+ assert_eq!("".parse::<u8>().ok(), None);
}
}
use core::ptr::*;
use core::mem;
-use std::iter::repeat;
#[test]
fn test() {
#[test]
fn test_ptr_addition() {
unsafe {
- let xs = repeat(5).take(16).collect::<Vec<_>>();
+ let xs = vec![5; 16];
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
m_ptr = m_ptr.offset(1);
}
- assert!(xs_mut == repeat(10).take(16).collect::<Vec<_>>());
+ assert!(xs_mut == vec![10; 16]);
}
}
}
Some(..) | None => { return &self.input[..0]; }
};
- let mut end;
+ let end;
loop {
match self.cur.clone().next() {
Some((_, c)) if c.is_xid_continue() => {
//! optopt("o", "", "set output file name", "NAME"),
//! optflag("h", "help", "print this help menu")
//! ];
-//! let matches = match getopts(args.tail(), opts) {
+//! let matches = match getopts(&args[1..], opts) {
//! Ok(m) => { m }
//! Err(f) => { panic!(f.to_string()) }
//! };
// FIXME: #5516 should be graphemes not codepoints
// wrapped description
- row.push_str(&desc_rows.connect(&desc_sep[..]));
+ row.push_str(&desc_rows.join(&desc_sep[..]));
row
});
format!("{}\n\nOptions:\n{}\n", brief,
- rows.collect::<Vec<String>>().connect("\n"))
+ rows.collect::<Vec<String>>().join("\n"))
}
fn format_option(opt: &OptGroup) -> String {
line.push_str(&opts.iter()
.map(format_option)
.collect::<Vec<String>>()
- .connect(" ")[..]);
+ .join(" ")[..]);
line
}
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
//! which is cyclic.
//!
//! ```rust
-//! # #![feature(rustc_private, core, into_cow)]
+//! #![feature(rustc_private, core, into_cow)]
+//!
//! use std::borrow::IntoCow;
//! use std::io::Write;
//! use graphviz as dot;
//! entity `&sube`).
//!
//! ```rust
-//! # #![feature(rustc_private, core, into_cow)]
+//! #![feature(rustc_private, core, into_cow)]
+//!
//! use std::borrow::IntoCow;
//! use std::io::Write;
//! use graphviz as dot;
//! Hasse-diagram for the subsets of the set `{x, y}`.
//!
//! ```rust
-//! # #![feature(rustc_private, core, into_cow)]
+//! #![feature(rustc_private, core, into_cow)]
+//!
//! use std::borrow::IntoCow;
//! use std::io::Write;
//! use graphviz as dot;
EscStr(Cow<'a, str>),
}
+/// The style for a node or edge.
+/// See http://www.graphviz.org/doc/info/attrs.html#k:style for descriptions.
+/// Note that some of these are not valid for edges.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Style {
+ None,
+ Solid,
+ Dashed,
+ Dotted,
+ Bold,
+ Rounded,
+ Diagonals,
+ Filled,
+ Striped,
+ Wedged,
+}
+
+impl Style {
+ pub fn as_slice(self) -> &'static str {
+ match self {
+ Style::None => "",
+ Style::Solid => "solid",
+ Style::Dashed => "dashed",
+ Style::Dotted => "dotted",
+ Style::Bold => "bold",
+ Style::Rounded => "rounded",
+ Style::Diagonals => "diagonals",
+ Style::Filled => "filled",
+ Style::Striped => "striped",
+ Style::Wedged => "wedged",
+ }
+ }
+}
+
// There is a tension in the design of the labelling API.
//
// For example, I considered making a `Labeller<T>` trait that
let _ignored = e;
LabelStr("".into_cow())
}
+
+ /// Maps `n` to a style that will be used in the rendered output.
+ fn node_style(&'a self, _n: &N) -> Style {
+ Style::None
+ }
+
+ /// Maps `e` to a style that will be used in the rendered output.
+ fn edge_style(&'a self, _e: &E) -> Style {
+ Style::None
+ }
}
impl<'a> LabelText<'a> {
pub enum RenderOption {
NoEdgeLabels,
NoNodeLabels,
+ NoEdgeStyles,
+ NoNodeStyles,
}
/// Returns vec holding all the default render options.
for n in g.nodes().iter() {
try!(indent(w));
let id = g.node_id(n);
- if options.contains(&RenderOption::NoNodeLabels) {
- try!(writeln(w, &[id.as_slice(), ";"]));
- } else {
- let escaped = g.node_label(n).escape();
- try!(writeln(w, &[id.as_slice(),
- "[label=\"", &escaped, "\"];"]));
+
+ let escaped = &g.node_label(n).escape();
+
+ let mut text = vec![id.as_slice()];
+
+ if !options.contains(&RenderOption::NoNodeLabels) {
+ text.push("[label=\"");
+ text.push(escaped);
+ text.push("\"]");
}
+
+ let style = g.node_style(n);
+ if !options.contains(&RenderOption::NoNodeStyles) && style != Style::None {
+ text.push("[style=\"");
+ text.push(style.as_slice());
+ text.push("\"]");
+ }
+
+ text.push(";");
+ try!(writeln(w, &text));
}
for e in g.edges().iter() {
- let escaped_label = g.edge_label(e).escape();
+ let escaped_label = &g.edge_label(e).escape();
try!(indent(w));
let source = g.source(e);
let target = g.target(e);
let source_id = g.node_id(&source);
let target_id = g.node_id(&target);
- if options.contains(&RenderOption::NoEdgeLabels) {
- try!(writeln(w, &[source_id.as_slice(),
- " -> ", target_id.as_slice(), ";"]));
- } else {
- try!(writeln(w, &[source_id.as_slice(),
- " -> ", target_id.as_slice(),
- "[label=\"", &escaped_label, "\"];"]));
+
+ let mut text = vec![source_id.as_slice(), " -> ", target_id.as_slice()];
+
+ if !options.contains(&RenderOption::NoEdgeLabels) {
+ text.push("[label=\"");
+ text.push(escaped_label);
+ text.push("\"]");
+ }
+
+ let style = g.edge_style(e);
+ if !options.contains(&RenderOption::NoEdgeStyles) && style != Style::None {
+ text.push("[style=\"");
+ text.push(style.as_slice());
+ text.push("\"]");
}
+
+ text.push(";");
+ try!(writeln(w, &text));
}
writeln(w, &["}"])
#[cfg(test)]
mod tests {
use self::NodeLabels::*;
- use super::{Id, Labeller, Nodes, Edges, GraphWalk, render};
+ use super::{Id, Labeller, Nodes, Edges, GraphWalk, render, Style};
use super::LabelText::{self, LabelStr, EscStr};
use std::io;
use std::io::prelude::*;
use std::borrow::IntoCow;
- use std::iter::repeat;
/// each node is an index in a vector in the graph.
type Node = usize;
struct Edge {
- from: usize, to: usize, label: &'static str
+ from: usize,
+ to: usize,
+ label: &'static str,
+ style: Style,
}
- fn edge(from: usize, to: usize, label: &'static str) -> Edge {
- Edge { from: from, to: to, label: label }
+ fn edge(from: usize, to: usize, label: &'static str, style: Style) -> Edge {
+ Edge { from: from, to: to, label: label, style: style }
}
struct LabelledGraph {
/// text.
node_labels: Vec<Option<&'static str>>,
+ node_styles: Vec<Style>,
+
/// Each edge relates a from-index to a to-index along with a
/// label; `edges` collects them.
edges: Vec<Edge>,
fn to_opt_strs(self) -> Vec<Option<&'static str>> {
match self {
UnlabelledNodes(len)
- => repeat(None).take(len).collect(),
+ => vec![None; len],
AllNodesLabelled(lbls)
=> lbls.into_iter().map(
|l|Some(l)).collect(),
=> lbls.into_iter().collect(),
}
}
+
+ fn len(&self) -> usize {
+ match self {
+ &UnlabelledNodes(len) => len,
+ &AllNodesLabelled(ref lbls) => lbls.len(),
+ &SomeNodesLabelled(ref lbls) => lbls.len(),
+ }
+ }
}
impl LabelledGraph {
fn new(name: &'static str,
node_labels: Trivial,
- edges: Vec<Edge>) -> LabelledGraph {
+ edges: Vec<Edge>,
+ node_styles: Option<Vec<Style>>) -> LabelledGraph {
+ let count = node_labels.len();
LabelledGraph {
name: name,
node_labels: node_labels.to_opt_strs(),
- edges: edges
+ edges: edges,
+ node_styles: match node_styles {
+ Some(nodes) => nodes,
+ None => vec![Style::None; count],
+ }
}
}
}
node_labels: Trivial,
edges: Vec<Edge>) -> LabelledGraphWithEscStrs {
LabelledGraphWithEscStrs {
- graph: LabelledGraph::new(name, node_labels, edges)
+ graph: LabelledGraph::new(name,
+ node_labels,
+ edges,
+ None)
}
}
}
fn edge_label(&'a self, e: & &'a Edge) -> LabelText<'a> {
LabelStr(e.label.into_cow())
}
+ fn node_style(&'a self, n: &Node) -> Style {
+ self.node_styles[*n]
+ }
+ fn edge_style(&'a self, e: & &'a Edge) -> Style {
+ e.style
+ }
}
impl<'a> Labeller<'a, Node, &'a Edge> for LabelledGraphWithEscStrs {
#[test]
fn empty_graph() {
let labels : Trivial = UnlabelledNodes(0);
- let r = test_input(LabelledGraph::new("empty_graph", labels, vec!()));
+ let r = test_input(LabelledGraph::new("empty_graph", labels, vec![], None));
assert_eq!(r.unwrap(),
r#"digraph empty_graph {
}
#[test]
fn single_node() {
let labels : Trivial = UnlabelledNodes(1);
- let r = test_input(LabelledGraph::new("single_node", labels, vec!()));
+ let r = test_input(LabelledGraph::new("single_node", labels, vec![], None));
assert_eq!(r.unwrap(),
r#"digraph single_node {
N0[label="N0"];
"#);
}
+ #[test]
+ fn single_node_with_style() {
+ let labels : Trivial = UnlabelledNodes(1);
+ let styles = Some(vec![Style::Dashed]);
+ let r = test_input(LabelledGraph::new("single_node", labels, vec![], styles));
+ assert_eq!(r.unwrap(),
+r#"digraph single_node {
+ N0[label="N0"][style="dashed"];
+}
+"#);
+ }
+
#[test]
fn single_edge() {
let labels : Trivial = UnlabelledNodes(2);
let result = test_input(LabelledGraph::new("single_edge", labels,
- vec!(edge(0, 1, "E"))));
+ vec![edge(0, 1, "E", Style::None)], None));
assert_eq!(result.unwrap(),
r#"digraph single_edge {
N0[label="N0"];
"#);
}
+ #[test]
+ fn single_edge_with_style() {
+ let labels : Trivial = UnlabelledNodes(2);
+ let result = test_input(LabelledGraph::new("single_edge", labels,
+ vec![edge(0, 1, "E", Style::Bold)], None));
+ assert_eq!(result.unwrap(),
+r#"digraph single_edge {
+ N0[label="N0"];
+ N1[label="N1"];
+ N0 -> N1[label="E"][style="bold"];
+}
+"#);
+ }
+
#[test]
fn test_some_labelled() {
let labels : Trivial = SomeNodesLabelled(vec![Some("A"), None]);
+ let styles = Some(vec![Style::None, Style::Dotted]);
let result = test_input(LabelledGraph::new("test_some_labelled", labels,
- vec![edge(0, 1, "A-1")]));
+ vec![edge(0, 1, "A-1", Style::None)], styles));
assert_eq!(result.unwrap(),
r#"digraph test_some_labelled {
N0[label="A"];
- N1[label="N1"];
+ N1[label="N1"][style="dotted"];
N0 -> N1[label="A-1"];
}
"#);
fn single_cyclic_node() {
let labels : Trivial = UnlabelledNodes(1);
let r = test_input(LabelledGraph::new("single_cyclic_node", labels,
- vec!(edge(0, 0, "E"))));
+ vec![edge(0, 0, "E", Style::None)], None));
assert_eq!(r.unwrap(),
r#"digraph single_cyclic_node {
N0[label="N0"];
let labels = AllNodesLabelled(vec!("{x,y}", "{x}", "{y}", "{}"));
let r = test_input(LabelledGraph::new(
"hasse_diagram", labels,
- vec!(edge(0, 1, ""), edge(0, 2, ""),
- edge(1, 3, ""), edge(2, 3, ""))));
+ vec![edge(0, 1, "", Style::None), edge(0, 2, "", Style::None),
+ edge(1, 3, "", Style::None), edge(2, 3, "", Style::None)],
+ None));
assert_eq!(r.unwrap(),
r#"digraph hasse_diagram {
N0[label="{x,y}"];
let g = LabelledGraphWithEscStrs::new(
"syntax_tree", labels,
- vec!(edge(0, 1, "then"), edge(0, 2, "else"),
- edge(1, 3, ";"), edge(2, 3, ";" )));
+ vec![edge(0, 1, "then", Style::None), edge(0, 2, "else", Style::None),
+ edge(1, 3, ";", Style::None), edge(2, 3, ";", Style::None)]);
render(&g, &mut writer).unwrap();
let mut r = String::new();
pub type intmax_t = i64;
pub type uintmax_t = u64;
}
- #[cfg(any(target_arch = "x86",
- target_arch = "mips",
+ #[cfg(any(target_arch = "mips",
target_arch = "mipsel",
target_arch = "powerpc",
target_arch = "le32",
- all(target_arch = "arm", not(target_os = "android"))))]
+ all(any(target_arch = "arm", target_arch = "x86"),
+ not(target_os = "android"))))]
pub mod posix88 {
pub type off_t = i32;
pub type dev_t = u64;
pub type mode_t = u32;
pub type ssize_t = i32;
}
- #[cfg(all(target_arch = "arm", target_os = "android"))]
+ #[cfg(all(any(target_arch = "arm", target_arch = "x86"),
+ target_os = "android"))]
pub mod posix88 {
pub type off_t = i32;
pub type dev_t = u32;
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
- all(target_arch = "arm", not(target_os = "android"))))]
+ all(any(target_arch = "arm", target_arch = "x86"),
+ not(target_os = "android"))))]
pub mod posix01 {
use types::os::arch::c95::{c_short, c_long, time_t};
use types::os::arch::posix88::{dev_t, gid_t, ino_t};
pub __size: [u32; 9]
}
}
- #[cfg(all(target_arch = "arm", target_os = "android"))]
+ #[cfg(all(any(target_arch = "arm", target_arch = "x86"),
+ target_os = "android"))]
pub mod posix01 {
use types::os::arch::c95::{c_uchar, c_uint, c_ulong, time_t};
use types::os::arch::c99::{c_longlong, c_ulonglong};
}
}
+ #[cfg(target_arch = "x86")]
+ pub mod arch {
+ pub mod c95 {
+ pub type c_char = i8;
+ pub type c_schar = i8;
+ pub type c_uchar = u8;
+ pub type c_short = i16;
+ pub type c_ushort = u16;
+ pub type c_int = i32;
+ pub type c_uint = u32;
+ pub type c_long = i32;
+ pub type c_ulong = u32;
+ pub type c_float = f32;
+ pub type c_double = f64;
+ pub type size_t = u32;
+ pub type ptrdiff_t = i32;
+ pub type clock_t = i32;
+ pub type time_t = i32;
+ pub type suseconds_t = i32;
+ pub type wchar_t = i32;
+ }
+ pub mod c99 {
+ pub type c_longlong = i64;
+ pub type c_ulonglong = u64;
+ pub type intptr_t = i32;
+ pub type uintptr_t = u32;
+ pub type intmax_t = i64;
+ pub type uintmax_t = u64;
+ }
+ pub mod posix88 {
+ pub type off_t = i64;
+ pub type dev_t = u32;
+ pub type ino_t = u32;
+ pub type pid_t = i32;
+ pub type uid_t = u32;
+ pub type gid_t = u32;
+ pub type useconds_t = u32;
+ pub type mode_t = u16;
+ pub type ssize_t = i32;
+ }
+ pub mod posix01 {
+ use types::common::c95::{c_void};
+ use types::common::c99::{uint32_t, int32_t};
+ use types::os::arch::c95::{c_long, time_t};
+ use types::os::arch::posix88::{dev_t, gid_t, ino_t};
+ use types::os::arch::posix88::{mode_t, off_t};
+ use types::os::arch::posix88::{uid_t};
+
+ pub type nlink_t = u16;
+ pub type blksize_t = u32;
+ pub type blkcnt_t = i64;
+ pub type fflags_t = u32;
+ #[repr(C)]
+ #[derive(Copy, Clone)] pub struct stat {
+ pub st_dev: dev_t,
+ pub st_ino: ino_t,
+ pub st_mode: mode_t,
+ pub st_nlink: nlink_t,
+ pub st_uid: uid_t,
+ pub st_gid: gid_t,
+ pub st_rdev: dev_t,
+ pub st_atime: time_t,
+ pub st_atime_nsec: c_long,
+ pub st_mtime: time_t,
+ pub st_mtime_nsec: c_long,
+ pub st_ctime: time_t,
+ pub st_ctime_nsec: c_long,
+ pub st_size: off_t,
+ pub st_blocks: blkcnt_t,
+ pub st_blksize: blksize_t,
+ pub st_flags: fflags_t,
+ pub st_gen: uint32_t,
+ pub st_lspare: int32_t,
+ pub st_birthtime: time_t,
+ pub st_birthtime_nsec: c_long,
+ pub __unused: [u8; 8],
+ }
+
+ #[repr(C)]
+ #[derive(Copy, Clone)] pub struct utimbuf {
+ pub actime: time_t,
+ pub modtime: time_t,
+ }
+
+ pub type pthread_attr_t = *mut c_void;
+ }
+ pub mod posix08 {
+ }
+ pub mod bsd44 {
+ }
+ pub mod extra {
+ }
+ }
+
#[cfg(target_arch = "x86_64")]
pub mod arch {
pub mod c95 {
}
pub mod posix01 {
use types::common::c95::{c_void};
- use types::common::c99::{uint8_t, uint32_t, int32_t};
+ use types::common::c99::{uint32_t, int32_t};
use types::os::arch::c95::{c_long, time_t};
use types::os::arch::posix88::{dev_t, gid_t, ino_t};
use types::os::arch::posix88::{mode_t, off_t};
use types::os::arch::posix88::{uid_t};
pub type nlink_t = u16;
- pub type blksize_t = i64;
+ pub type blksize_t = u32;
pub type blkcnt_t = i64;
pub type fflags_t = u32;
#[repr(C)]
pub st_lspare: int32_t,
pub st_birthtime: time_t,
pub st_birthtime_nsec: c_long,
- pub __unused: [uint8_t; 2],
}
#[repr(C)]
}
}
- #[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
+ #[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os ="openbsd"))]
pub mod os {
pub mod common {
pub mod posix01 {
pub __unused7: *mut c_void,
}
- #[cfg(target_os = "openbsd")]
+ #[cfg(any(target_os = "netbsd", target_os="openbsd"))]
#[repr(C)]
#[derive(Copy, Clone)] pub struct glob_t {
pub gl_pathc: c_int,
pub const S_IFDIR : c_int = 16384;
pub const S_IFREG : c_int = 32768;
pub const S_IFLNK : c_int = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : c_int = 61440;
pub const S_IEXEC : c_int = 64;
pub const S_IWRITE : c_int = 128;
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const SHUT_RD: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
+
+ pub const LOCK_SH: c_int = 1;
+ pub const LOCK_EX: c_int = 2;
+ pub const LOCK_NB: c_int = 4;
+ pub const LOCK_UN: c_int = 8;
}
#[cfg(any(target_arch = "mips",
target_arch = "mipsel"))]
pub const SHUT_RD: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
+
+ pub const LOCK_SH: c_int = 1;
+ pub const LOCK_EX: c_int = 2;
+ pub const LOCK_NB: c_int = 4;
+ pub const LOCK_UN: c_int = 8;
}
#[cfg(any(target_arch = "x86",
target_arch = "x86_64",
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const SHUT_RD: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
+
+ pub const LOCK_SH: c_int = 1;
+ pub const LOCK_EX: c_int = 2;
+ pub const LOCK_NB: c_int = 4;
+ pub const LOCK_UN: c_int = 8;
}
pub mod extra {
use types::os::arch::c95::c_int;
}
}
- #[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
+ #[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
pub mod os {
pub mod c95 {
use types::os::arch::c95::{c_int, c_uint};
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const SHUT_RD: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
+
+ pub const LOCK_SH: c_int = 1;
+ pub const LOCK_EX: c_int = 2;
+ pub const LOCK_NB: c_int = 4;
+ pub const LOCK_UN: c_int = 8;
}
pub mod extra {
use types::os::arch::c95::c_int;
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
+ pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const F_GETFL : c_int = 3;
pub const F_SETFL : c_int = 4;
+ pub const O_ACCMODE : c_int = 3;
+
pub const SIGTRAP : c_int = 5;
pub const SIG_IGN: size_t = 1;
pub const SHUT_RD: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
+
+ pub const LOCK_SH: c_int = 1;
+ pub const LOCK_EX: c_int = 2;
+ pub const LOCK_NB: c_int = 4;
+ pub const LOCK_UN: c_int = 8;
}
pub mod extra {
use types::os::arch::c95::c_int;
pub const O_DSYNC : c_int = 4194304;
pub const O_SYNC : c_int = 128;
pub const O_NONBLOCK : c_int = 4;
+ pub const F_GETPATH : c_int = 50;
pub const F_FULLFSYNC : c_int = 51;
pub const MAP_COPY : c_int = 0x0002;
pub const SO_DONTTRUNC: c_int = 0x2000;
pub const SO_WANTMORE: c_int = 0x4000;
pub const SO_WANTOOBFLAG: c_int = 0x8000;
+
+ pub const PATH_MAX: c_int = 1024;
}
pub mod sysconf {
use types::os::arch::c95::c_int;
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "nacl"))]
pub mod posix88 {
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "nacl"))]
pub mod posix01 {
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "ios",
use types::os::arch::c95::c_int;
use types::os::common::posix01::sighandler_t;
- #[cfg(not(all(target_os = "android", target_arch = "arm")))]
+ #[cfg(not(all(target_os = "android", any(target_arch = "arm",
+ target_arch = "x86"))))]
extern {
pub fn signal(signum: c_int,
handler: sighandler_t) -> sighandler_t;
}
- #[cfg(all(target_os = "android", target_arch = "arm"))]
+ #[cfg(all(target_os = "android", any(target_arch = "arm",
+ target_arch = "x86")))]
extern {
#[link_name = "bsd_signal"]
pub fn signal(signum: c_int,
}
- #[cfg(any(target_os = "windows",
- target_os = "linux",
- target_os = "android",
- target_os = "macos",
+ #[cfg(any(target_os = "android",
+ target_os = "bitrig",
+ target_os = "dragonfly",
target_os = "ios",
target_os = "freebsd",
- target_os = "dragonfly",
- target_os = "bitrig",
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "nacl",
+ target_os = "netbsd",
target_os = "openbsd",
- target_os = "nacl"))]
+ target_os = "windows"))]
pub mod posix08 {
pub mod unistd {
}
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub mod bsd44 {
use types::common::c95::{c_void};
use types::os::arch::c95::{c_char, c_uchar, c_int, c_uint, c_ulong, size_t};
extern {
- pub fn ioctl(d: c_int, request: c_ulong, ...) -> c_int;
+ pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int;
pub fn sysctl(name: *mut c_int,
namelen: c_uint,
oldp: *mut c_void,
-> c_int;
pub fn realpath(pathname: *const c_char, resolved: *mut c_char)
-> *mut c_char;
+ pub fn flock(fd: c_int, operation: c_int) -> c_int;
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod bsd44 {
use types::common::c95::{c_void};
- use types::os::arch::c95::{c_uchar, c_int, size_t};
+ use types::os::arch::c95::{c_uchar, c_int, c_ulong, size_t};
extern {
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
pub fn getdtablesize() -> c_int;
- pub fn ioctl(d: c_int, request: c_int, ...) -> c_int;
+ pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int;
pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int)
-> c_int;
pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_uchar)
-> c_int;
+ pub fn flock(fd: c_int, operation: c_int) -> c_int;
}
}
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub mod extra {
}
}
}
-#[doc(hidden)]
-pub fn issue_14344_workaround() {} // FIXME #14344 force linkage to happen correctly
-
#[test] fn work_on_windows() { } // FIXME #10872 needed for a happy windows
/// The helper trait for types that have a sensible way to sample
/// uniformly between two values. This should not be used directly,
/// and is only to facilitate `Range`.
+#[doc(hidden)]
pub trait SampleRange {
/// Construct the `Range` object that `sample_range`
/// requires. This should not ever be called directly, only via
/// Refills the output buffer (`self.rsl`)
#[inline]
- #[allow(unsigned_negation)]
fn isaac(&mut self) {
self.c = self.c + w(1);
// abbreviations
mod rand_impls;
/// A type that can be randomly generated using an `Rng`.
+#[doc(hidden)]
pub trait Rand : Sized {
/// Generates a random instance of this type using the specified source of
/// randomness.
const FILL_BYTES_V_LEN: usize = 13579;
#[test]
fn test_rng_fill_bytes() {
- let mut v = repeat(0).take(FILL_BYTES_V_LEN).collect::<Vec<_>>();
+ let mut v = vec![0; FILL_BYTES_V_LEN];
::test::rng().fill_bytes(&mut v);
// Sanity test: if we've gotten here, `fill_bytes` has not infinitely
impl fmt::Display for PathElem {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let slot = token::get_name(self.name());
- write!(f, "{}", slot)
+ write!(f, "{}", self.name())
}
}
NodeStructCtor(&'ast StructDef),
NodeLifetime(&'ast Lifetime),
+ NodeTyParam(&'ast TyParam)
}
-/// Represents an entry and its parent Node ID
+/// Represents an entry and its parent NodeID.
/// The odd layout is to bring down the total size.
#[derive(Copy, Debug)]
enum MapEntry<'ast> {
EntryBlock(NodeId, &'ast Block),
EntryStructCtor(NodeId, &'ast StructDef),
EntryLifetime(NodeId, &'ast Lifetime),
+ EntryTyParam(NodeId, &'ast TyParam),
/// Roots for node trees.
RootCrate,
NodePat(n) => EntryPat(p, n),
NodeBlock(n) => EntryBlock(p, n),
NodeStructCtor(n) => EntryStructCtor(p, n),
- NodeLifetime(n) => EntryLifetime(p, n)
+ NodeLifetime(n) => EntryLifetime(p, n),
+ NodeTyParam(n) => EntryTyParam(p, n),
}
}
- fn parent(self) -> Option<NodeId> {
+ fn parent_node(self) -> Option<NodeId> {
Some(match self {
EntryItem(id, _) => id,
EntryForeignItem(id, _) => id,
EntryBlock(id, _) => id,
EntryStructCtor(id, _) => id,
EntryLifetime(id, _) => id,
+ EntryTyParam(id, _) => id,
_ => return None
})
}
EntryBlock(_, n) => NodeBlock(n),
EntryStructCtor(_, n) => NodeStructCtor(n),
EntryLifetime(_, n) => NodeLifetime(n),
+ EntryTyParam(_, n) => NodeTyParam(n),
_ => return None
})
}
self.find_entry(id).and_then(|x| x.to_node())
}
- /// Retrieve the parent NodeId for `id`, or `id` itself if no
- /// parent is registered in this map.
+ /// Similar to get_parent, returns the parent node id or id if there is no
+ /// parent.
+ /// This function returns the immediate parent in the AST, whereas get_parent
+ /// returns the enclosing item. Note that this might not be the actual parent
+ /// node in the AST - some kinds of nodes are not in the map and these will
+ /// never appear as the parent_node. So you can always walk the parent_nodes
+ /// from a node to the root of the ast (unless you get the same id back here
+ /// that can happen if the id is not in the map itself or is just weird).
+ pub fn get_parent_node(&self, id: NodeId) -> NodeId {
+ self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id)
+ }
+
+ /// If there is some error when walking the parents (e.g., a node does not
+ /// have a parent in the map or a node can't be found), then we return the
+ /// last good node id we found. Note that reaching the crate root (id == 0),
+ /// is not an error, since items in the crate module have the crate root as
+ /// parent.
+ fn walk_parent_nodes<F>(&self, start_id: NodeId, found: F) -> Result<NodeId, NodeId>
+ where F: Fn(&Node<'ast>) -> bool
+ {
+ let mut id = start_id;
+ loop {
+ let parent_node = self.get_parent_node(id);
+ if parent_node == 0 {
+ return Ok(0);
+ }
+ if parent_node == id {
+ return Err(id);
+ }
+
+ let node = self.find_entry(parent_node);
+ if node.is_none() {
+ return Err(id);
+ }
+ let node = node.unwrap().to_node();
+ match node {
+ Some(ref node) => {
+ if found(node) {
+ return Ok(parent_node);
+ }
+ }
+ None => {
+ return Err(parent_node);
+ }
+ }
+ id = parent_node;
+ }
+ }
+
+ /// Retrieve the NodeId for `id`'s parent item, or `id` itself if no
+ /// parent item is in this map. The "parent item" is the closest parent node
+ /// in the AST which is recorded by the map and is an item, either an item
+ /// in a module, trait, or impl.
pub fn get_parent(&self, id: NodeId) -> NodeId {
- self.find_entry(id).and_then(|x| x.parent()).unwrap_or(id)
+ match self.walk_parent_nodes(id, |node| match *node {
+ NodeItem(_) |
+ NodeForeignItem(_) |
+ NodeTraitItem(_) |
+ NodeImplItem(_) => true,
+ _ => false,
+ }) {
+ Ok(id) => id,
+ Err(id) => id,
+ }
+ }
+
+ /// Returns the nearest enclosing scope. A scope is an item or block.
+ /// FIXME it is not clear to me that all items qualify as scopes - statics
+ /// and associated types probably shouldn't, for example. Behaviour in this
+ /// regard should be expected to be highly unstable.
+ pub fn get_enclosing_scope(&self, id: NodeId) -> Option<NodeId> {
+ match self.walk_parent_nodes(id, |node| match *node {
+ NodeItem(_) |
+ NodeForeignItem(_) |
+ NodeTraitItem(_) |
+ NodeImplItem(_) |
+ NodeBlock(_) => true,
+ _ => false,
+ }) {
+ Ok(id) => Some(id),
+ Err(_) => None,
+ }
}
pub fn get_parent_did(&self, id: NodeId) -> DefId {
}
}
+ pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem {
+ match self.find(id) {
+ Some(NodeTraitItem(item)) => item,
+ _ => panic!("expected trait item, found {}", self.node_to_string(id))
+ }
+ }
+
pub fn expect_struct(&self, id: NodeId) -> &'ast StructDef {
match self.find(id) {
Some(NodeItem(i)) => {
{
let parent = self.get_parent(id);
let parent = match self.find_entry(id) {
- Some(EntryForeignItem(..)) | Some(EntryVariant(..)) => {
- // Anonymous extern items, enum variants and struct ctors
- // go in the parent scope.
+ Some(EntryForeignItem(..)) => {
+ // Anonymous extern items go in the parent scope.
self.get_parent(parent)
}
// But tuple struct ctors don't have names, so use the path of its
Some(NodePat(pat)) => pat.span,
Some(NodeBlock(block)) => block.span,
Some(NodeStructCtor(_)) => self.expect_item(self.get_parent(id)).span,
+ Some(NodeTyParam(ty_param)) => ty_param.span,
_ => return None,
};
Some(sp)
return None;
}
self.idx += 1;
- let (p, name) = match self.map.find_entry(idx) {
- Some(EntryItem(p, n)) => (p, n.name()),
- Some(EntryForeignItem(p, n))=> (p, n.name()),
- Some(EntryTraitItem(p, n)) => (p, n.name()),
- Some(EntryImplItem(p, n)) => (p, n.name()),
- Some(EntryVariant(p, n)) => (p, n.name()),
+ let name = match self.map.find_entry(idx) {
+ Some(EntryItem(_, n)) => n.name(),
+ Some(EntryForeignItem(_, n))=> n.name(),
+ Some(EntryTraitItem(_, n)) => n.name(),
+ Some(EntryImplItem(_, n)) => n.name(),
+ Some(EntryVariant(_, n)) => n.name(),
_ => continue,
};
- if self.matches_names(p, name) {
+ if self.matches_names(self.map.get_parent(idx), name) {
return Some(idx)
}
}
/// A Visitor that walks over an AST and collects Node's into an AST Map.
struct NodeCollector<'ast> {
map: Vec<MapEntry<'ast>>,
- /// The node in which we are currently mapping (an item or a method).
- parent: NodeId
+ parent_node: NodeId,
}
impl<'ast> NodeCollector<'ast> {
}
fn insert(&mut self, id: NodeId, node: Node<'ast>) {
- let entry = MapEntry::from_node(self.parent, node);
+ let entry = MapEntry::from_node(self.parent_node, node);
self.insert_entry(id, entry);
}
impl<'ast> Visitor<'ast> for NodeCollector<'ast> {
fn visit_item(&mut self, i: &'ast Item) {
self.insert(i.id, NodeItem(i));
- let parent = self.parent;
- self.parent = i.id;
+
+ let parent_node = self.parent_node;
+ self.parent_node = i.id;
+
match i.node {
ItemImpl(_, _, _, _, _, ref impl_items) => {
for ii in impl_items {
_ => {}
}
visit::walk_item(self, i);
- self.parent = parent;
+ self.parent_node = parent_node;
+ }
+
+ fn visit_generics(&mut self, generics: &'ast Generics) {
+ for ty_param in generics.ty_params.iter() {
+ self.insert(ty_param.id, NodeTyParam(ty_param));
+ }
+
+ visit::walk_generics(self, generics);
}
fn visit_trait_item(&mut self, ti: &'ast TraitItem) {
- let parent = self.parent;
- self.parent = ti.id;
+ let parent_node = self.parent_node;
+ self.parent_node = ti.id;
visit::walk_trait_item(self, ti);
- self.parent = parent;
+ self.parent_node = parent_node;
}
fn visit_impl_item(&mut self, ii: &'ast ImplItem) {
- let parent = self.parent;
- self.parent = ii.id;
+ let parent_node = self.parent_node;
+ self.parent_node = ii.id;
+
visit::walk_impl_item(self, ii);
- self.parent = parent;
+
+ self.parent_node = parent_node;
}
fn visit_pat(&mut self, pat: &'ast Pat) {
PatIdent(..) => NodeLocal(pat),
_ => NodePat(pat)
});
+
+ let parent_node = self.parent_node;
+ self.parent_node = pat.id;
visit::walk_pat(self, pat);
+ self.parent_node = parent_node;
}
fn visit_expr(&mut self, expr: &'ast Expr) {
self.insert(expr.id, NodeExpr(expr));
+ let parent_node = self.parent_node;
+ self.parent_node = expr.id;
visit::walk_expr(self, expr);
+ self.parent_node = parent_node;
}
fn visit_stmt(&mut self, stmt: &'ast Stmt) {
- self.insert(ast_util::stmt_id(stmt), NodeStmt(stmt));
+ let id = ast_util::stmt_id(stmt);
+ self.insert(id, NodeStmt(stmt));
+ let parent_node = self.parent_node;
+ self.parent_node = id;
visit::walk_stmt(self, stmt);
+ self.parent_node = parent_node;
}
fn visit_fn(&mut self, fk: visit::FnKind<'ast>, fd: &'ast FnDecl,
- b: &'ast Block, s: Span, _: NodeId) {
+ b: &'ast Block, s: Span, id: NodeId) {
+ let parent_node = self.parent_node;
+ self.parent_node = id;
self.visit_fn_decl(fd);
visit::walk_fn(self, fk, fd, b, s);
+ self.parent_node = parent_node;
}
fn visit_ty(&mut self, ty: &'ast Ty) {
+ let parent_node = self.parent_node;
+ self.parent_node = ty.id;
match ty.node {
TyBareFn(ref fd) => {
self.visit_fn_decl(&*fd.decl);
_ => {}
}
visit::walk_ty(self, ty);
+ self.parent_node = parent_node;
}
fn visit_block(&mut self, block: &'ast Block) {
self.insert(block.id, NodeBlock(block));
+ let parent_node = self.parent_node;
+ self.parent_node = block.id;
visit::walk_block(self, block);
+ self.parent_node = parent_node;
}
fn visit_lifetime_ref(&mut self, lifetime: &'ast Lifetime) {
let mut collector = NodeCollector {
map: vec![],
- parent: CRATE_NODE_ID
+ parent_node: CRATE_NODE_ID,
};
collector.insert_entry(CRATE_NODE_ID, RootCrate);
visit::walk_crate(&mut collector, &forest.krate);
ii: ii
});
+ let ii_parent_id = fld.new_id(DUMMY_NODE_ID);
let mut collector = NodeCollector {
map: mem::replace(&mut *map.map.borrow_mut(), vec![]),
- parent: fld.new_id(DUMMY_NODE_ID)
+ parent_node: ii_parent_id,
};
- let ii_parent_id = collector.parent;
collector.insert_entry(ii_parent_id, RootInlinedParent(ii_parent));
visit::walk_inlined_item(&mut collector, &ii_parent.ii);
NodePat(a) => self.print_pat(&*a),
NodeBlock(a) => self.print_block(&*a),
NodeLifetime(a) => self.print_lifetime(&*a),
-
+ NodeTyParam(_) => panic!("cannot print TyParam"),
// these cases do not carry enough information in the
// ast_map to reconstruct their full structure for pretty
// printing.
match ii.node {
ConstImplItem(..) => {
format!("assoc const {} in {}{}",
- token::get_ident(ii.ident),
+ ii.ident,
map.path_to_string(id),
id_str)
}
MethodImplItem(..) => {
format!("method {} in {}{}",
- token::get_ident(ii.ident),
+ ii.ident,
map.path_to_string(id), id_str)
}
TypeImplItem(_) => {
format!("assoc type {} in {}{}",
- token::get_ident(ii.ident),
+ ii.ident,
map.path_to_string(id),
id_str)
}
format!("{} {} in {}{}",
kind,
- token::get_ident(ti.ident),
+ ti.ident,
map.path_to_string(id),
id_str)
}
Some(NodeVariant(ref variant)) => {
format!("variant {} in {}{}",
- token::get_ident(variant.node.name),
+ variant.node.name,
map.path_to_string(id), id_str)
}
Some(NodeExpr(ref expr)) => {
format!("lifetime {}{}",
pprust::lifetime_to_string(&**l), id_str)
}
+ Some(NodeTyParam(ref ty_param)) => {
+ format!("typaram {:?}{}", ty_param, id_str)
+ }
None => {
format!("unknown node{}", id_str)
}
This means that perhaps some of the preceding patterns are too general, this one
is too specific or the ordering is incorrect.
+
+For example, the following `match` block has too many arms:
+
+```
+match foo {
+ Some(bar) => {/* ... */}
+ None => {/* ... */}
+ _ => {/* ... */} // All possible cases have already been handled
+}
+```
+
+`match` blocks have their patterns matched in order, so, for example, putting
+a wildcard arm above a more specific arm will make the latter arm irrelevant.
+
+Ensure the ordering of the match arm is correct and remove any superfluous
+arms.
"##,
E0002: r##"
-This error indicates that an empty match expression is illegal because the type
+This error indicates that an empty match expression is invalid because the type
it is matching on is non-empty (there exist values of this type). In safe code
it is impossible to create an instance of an empty type, so empty match
expressions are almost never desired. This error is typically fixed by adding
one or more cases to the match expression.
-An example of an empty type is `enum Empty { }`.
+An example of an empty type is `enum Empty { }`. So, the following will work:
+
+```
+fn foo(x: Empty) {
+ match x {
+ // empty
+ }
+}
+```
+
+However, this won't:
+
+```
+fn foo(x: Option<String>) {
+ match x {
+ // empty
+ }
+}
+```
"##,
E0003: r##"
Not-a-Number (NaN) values cannot be compared for equality and hence can never
-match the input to a match expression. To match against NaN values, you should
-instead use the `is_nan` method in a guard, as in: `x if x.is_nan() => ...`
+match the input to a match expression. So, the following will not compile:
+
+```
+const NAN: f32 = 0.0 / 0.0;
+
+match number {
+ NAN => { /* ... */ },
+ // ...
+}
+```
+
+To match against NaN values, you should instead use the `is_nan()` method in a
+guard, like so:
+
+```
+match number {
+ // ...
+ x if x.is_nan() => { /* ... */ }
+ // ...
+}
+```
"##,
E0004: r##"
"anything else".
"##,
-// FIXME: Remove duplication here?
E0005: r##"
Patterns used to bind names must be irrefutable, that is, they must guarantee
that a name will be extracted in all cases. If you encounter this error you
failure.
"##,
-E0006: r##"
-Patterns used to bind names must be irrefutable, that is, they must guarantee
-that a name will be extracted in all cases. If you encounter this error you
-probably need to use a `match` or `if let` to deal with the possibility of
-failure.
-"##,
-
E0007: r##"
This error indicates that the bindings in a match arm would require a value to
be moved into more than one location, thus violating unique ownership. Code like
```
"##,
+E0017: r##"
+References in statics and constants may only refer to immutable values. Example:
+
+```
+static X: i32 = 1;
+const C: i32 = 2;
+
+// these three are not allowed:
+const CR: &'static mut i32 = &mut C;
+static STATIC_REF: &'static mut i32 = &mut X;
+static CONST_REF: &'static mut i32 = &mut C;
+```
+
+Statics are shared everywhere, and if they refer to mutable data one might
+violate memory safety since holding multiple mutable references to shared data
+is not allowed.
+
+If you really want global mutable state, try using `static mut` or a global
+`UnsafeCell`.
+
+"##,
+
E0018: r##"
The value of static and const variables must be known at compile time. You
can't cast a pointer as an integer because we can't know what value the
```
Remember: you can't use a function call inside a const's initialization
-expression! However, you can totally use it elsewhere you want:
+expression! However, you can totally use it anywhere else:
```
fn main() {
remainder of a zero divisor) in a static or constant expression.
"##,
+E0022: r##"
+Constant functions are not allowed to mutate anything. Thus, binding to an
+argument with a mutable pattern is not allowed. For example,
+
+```
+const fn foo(mut x: u8) {
+ // do stuff
+}
+```
+
+is bad because the function body may not mutate `x`.
+
+Remove any mutable bindings from the argument list to fix this error. In case
+you need to mutate the argument, try lazily initializing a global variable
+instead of using a `const fn`, or refactoring the code to a functional style to
+avoid mutation if possible.
+"##,
+
+E0030: r##"
+When matching against a range, the compiler verifies that the range is
+non-empty. Range patterns include both end-points, so this is equivalent to
+requiring the start of the range to be less than or equal to the end of the
+range.
+
+For example:
+
+```
+match 5u32 {
+ // This range is ok, albeit pointless.
+ 1 ... 1 => ...
+ // This range is empty, and the compiler can tell.
+ 1000 ... 5 => ...
+}
+```
+"##,
+
+E0038: r####"
+Trait objects like `Box<Trait>` can only be constructed when certain
+requirements are satisfied by the trait in question.
+
+Trait objects are a form of dynamic dispatch and use a dynamically sized type
+for the inner type. So, for a given trait `Trait`, when `Trait` is treated as a
+type, as in `Box<Trait>`, the inner type is 'unsized'. In such cases the boxed
+pointer is a 'fat pointer' that contains an extra pointer to a table of methods
+(among other things) for dynamic dispatch. This design mandates some
+restrictions on the types of traits that are allowed to be used in trait
+objects, which are collectively termed as 'object safety' rules.
+
+Attempting to create a trait object for a non object-safe trait will trigger
+this error.
+
+There are various rules:
+
+### The trait cannot require `Self: Sized`
+
+When `Trait` is treated as a type, the type does not implement the special
+`Sized` trait, because the type does not have a known size at compile time and
+can only be accessed behind a pointer. Thus, if we have a trait like the
+following:
+
+```
+trait Foo where Self: Sized {
+
+}
+```
+
+we cannot create an object of type `Box<Foo>` or `&Foo` since in this case
+`Self` would not be `Sized`.
+
+Generally, `Self : Sized` is used to indicate that the trait should not be used
+as a trait object. If the trait comes from your own crate, consider removing
+this restriction.
+
+### Method references the `Self` type in its arguments or return type
+
+This happens when a trait has a method like the following:
+
+```
+trait Trait {
+ fn foo(&self) -> Self;
+}
+
+impl Trait for String {
+ fn foo(&self) -> Self {
+ "hi".to_owned()
+ }
+}
+
+impl Trait for u8 {
+ fn foo(&self) -> Self {
+ 1
+ }
+}
+```
+
+(Note that `&self` and `&mut self` are okay, it's additional `Self` types which
+cause this problem)
+
+In such a case, the compiler cannot predict the return type of `foo()` in a
+situation like the following:
+
+```
+fn call_foo(x: Box<Trait>) {
+ let y = x.foo(); // What type is y?
+ // ...
+}
+```
+
+If only some methods aren't object-safe, you can add a `where Self: Sized` bound
+on them to mark them as explicitly unavailable to trait objects. The
+functionality will still be available to all other implementers, including
+`Box<Trait>` which is itself sized (assuming you `impl Trait for Box<Trait>`).
+
+```
+trait Trait {
+ fn foo(&self) -> Self where Self: Sized;
+ // more functions
+}
+```
+
+Now, `foo()` can no longer be called on a trait object, but you will now be
+allowed to make a trait object, and that will be able to call any object-safe
+methods. With such a bound, one can still call `foo()` on types implementing
+that trait that aren't behind trait objects.
+
+### Method has generic type parameters
+
+As mentioned before, trait objects contain pointers to method tables. So, if we
+have:
+
+```
+trait Trait {
+ fn foo(&self);
+}
+impl Trait for String {
+ fn foo(&self) {
+ // implementation 1
+ }
+}
+impl Trait for u8 {
+ fn foo(&self) {
+ // implementation 2
+ }
+}
+// ...
+```
+
+At compile time each implementation of `Trait` will produce a table containing
+the various methods (and other items) related to the implementation.
+
+This works fine, but when the method gains generic parameters, we can have a
+problem.
+
+Usually, generic parameters get _monomorphized_. For example, if I have
+
+```
+fn foo<T>(x: T) {
+ // ...
+}
+```
+
+the machine code for `foo::<u8>()`, `foo::<bool>()`, `foo::<String>()`, or any
+other type substitution is different. Hence the compiler generates the
+implementation on-demand. If you call `foo()` with a `bool` parameter, the
+compiler will only generate code for `foo::<bool>()`. When we have additional
+type parameters, the number of monomorphized implementations the compiler
+generates does not grow drastically, since the compiler will only generate an
+implementation if the function is called with unparametrized substitutions
+(i.e., substitutions where none of the substituted types are themselves
+parametrized).
+
+However, with trait objects we have to make a table containing _every_ object
+that implements the trait. Now, if it has type parameters, we need to add
+implementations for every type that implements the trait, and there could
+theoretically be an infinite number of types.
+
+For example, with:
+
+```
+trait Trait {
+ fn foo<T>(&self, on: T);
+ // more methods
+}
+impl Trait for String {
+ fn foo<T>(&self, on: T) {
+ // implementation 1
+ }
+}
+impl Trait for u8 {
+ fn foo<T>(&self, on: T) {
+ // implementation 2
+ }
+}
+// 8 more implementations
+```
+
+Now, if we have the following code:
+
+```
+fn call_foo(thing: Box<Trait>) {
+ thing.foo(true); // this could be any one of the 10 types above
+ thing.foo(1);
+ thing.foo("hello");
+}
+```
+
+we don't just need to create a table of all implementations of all methods of
+`Trait`, we need to create such a table, for each different type fed to
+`foo()`. In this case this turns out to be (10 types implementing `Trait`)*(3
+types being fed to `foo()`) = 30 implementations!
+
+With real world traits these numbers can grow drastically.
+
+To fix this, it is suggested to use a `where Self: Sized` bound similar to the
+fix for the sub-error above if you do not intend to call the method with type
+parameters:
+
+```
+trait Trait {
+ fn foo<T>(&self, on: T) where Self: Sized;
+ // more methods
+}
+```
+
+If this is not an option, consider replacing the type parameter with another
+trait object (e.g. if `T: OtherTrait`, use `on: Box<OtherTrait>`). If the number
+of types you intend to feed to this method is limited, consider manually listing
+out the methods of different types.
+
+### Method has no receiver
+
+Methods that do not take a `self` parameter can't be called since there won't be
+a way to get a pointer to the method table for them.
+
+```
+trait Foo {
+ fn foo() -> u8;
+}
+```
+
+This could be called as `<Foo as Foo>::foo()`, which would not be able to pick
+an implementation.
+
+Adding a `Self: Sized` bound to these methods will generally make this compile.
+
+```
+trait Foo {
+ fn foo() -> u8 where Self: Sized;
+}
+```
+
+### The trait cannot use `Self` as a type parameter in the supertrait listing
+
+This is similar to the second sub-error, but subtler. It happens in situations
+like the following:
+
+```
+trait Super<A> {}
+
+trait Trait: Super<Self> {
+}
+
+struct Foo;
+
+impl Super<Foo> for Foo{}
+
+impl Trait for Foo {}
+```
+
+Here, the supertrait might have methods as follows:
+
+```
+trait Super<A> {
+ fn get_a(&self) -> A; // note that this is object safe!
+}
+```
+
+If the trait `Trait` was deriving from something like `Super<String>` or
+`Super<T>` (where `Trait` itself is `Trait<T>`), this is okay, because given a
+type `get_a()` will definitely return an object of that type.
+
+However, if it derives from `Super<Self>`, even though `Super` is object safe,
+the method `get_a()` would return an object of unknown type when called on a
+trait object. `Self` type parameters can make otherwise object-safe traits no
+longer safe, so they are forbidden when specifying supertraits.
+
+There's no easy fix for this, generally code will need to be refactored so that
+you no longer need to derive from `Super<Self>`.
+"####,
+
E0079: r##"
Enum variants which contain no data can be given a custom integer
-representation. This error indicates that the value provided is not an
-integer literal and is therefore invalid.
+representation. This error indicates that the value provided is not an integer
+literal and is therefore invalid.
+
+For example, in the following code,
+
+```
+enum Foo {
+ Q = "32"
+}
+```
+
+we try to set the representation to a string.
+
+There's no general fix for this; if you can work with an integer then just set
+it to one:
+
+```
+enum Foo {
+ Q = 32
+}
+```
+
+however if you actually wanted a mapping between variants and non-integer
+objects, it may be preferable to use a method with a match instead:
+
+```
+enum Foo { Q }
+impl Foo {
+ fn get_str(&self) -> &'static str {
+ match *self {
+ Foo::Q => "32",
+ }
+ }
+}
+```
"##,
E0080: r##"
See the FFI section of the Reference for more information about using a custom
integer type:
-http://doc.rust-lang.org/reference.html#ffi-attributes
+https://doc.rust-lang.org/reference.html#ffi-attributes
+"##,
+
+E0109: r##"
+You tried to give a type parameter to a type which doesn't need it. Erroneous
+code example:
+
+```
+type X = u32<i32>; // error: type parameters are not allowed on this type
+```
+
+Please check that you used the correct type and recheck its definition. Perhaps
+it doesn't need the type parameter.
+
+Example:
+
+```
+type X = u32; // this compiles
+```
+
+Note that type parameters for enum-variant constructors go after the variant,
+not after the enum (Option::None::<u32>, not Option::<u32>::None).
+"##,
+
+E0110: r##"
+You tried to give a lifetime parameter to a type which doesn't need it.
+Erroneous code example:
+
+```
+type X = u32<'static>; // error: lifetime parameters are not allowed on
+ // this type
+```
+
+Please check that the correct type was used and recheck its definition; perhaps
+it doesn't need the lifetime parameter. Example:
+
+```
+type X = u32; // ok!
+```
"##,
E0133: r##"
}
```
-See also http://doc.rust-lang.org/book/unsafe.html
+See also https://doc.rust-lang.org/book/unsafe.html
+"##,
+
+// This shouldn't really ever trigger since the repeated value error comes first
+E0136: r##"
+A binary can only have one entry point, and by default that entry point is the
+function `main()`. If there are multiple such functions, please rename one.
"##,
E0137: r##"
point into a Rust program.
"##,
+E0138: r##"
+This error indicates that the compiler found multiple functions with the
+`#[start]` attribute. This is an error because there must be a unique entry
+point into a Rust program.
+"##,
+
+// FIXME link this to the relevant turpl chapters for instilling fear of the
+// transmute gods in the user
+E0139: r##"
+There are various restrictions on transmuting between types in Rust; for example
+types being transmuted must have the same size. To apply all these restrictions,
+the compiler must know the exact types that may be transmuted. When type
+parameters are involved, this cannot always be done.
+
+So, for example, the following is not allowed:
+
+```
+struct Foo<T>(Vec<T>);
+
+fn foo<T>(x: Vec<T>) {
+ // we are transmuting between Vec<T> and Foo<T> here
+ let y: Foo<T> = unsafe { transmute(x) };
+ // do something with y
+}
+```
+
+In this specific case there's a good chance that the transmute is harmless (but
+this is not guaranteed by Rust). However, when alignment and enum optimizations
+come into the picture, it's quite likely that the sizes may or may not match
+with different type parameter substitutions. It's not possible to check this for
+_all_ possible types, so `transmute()` simply only accepts types without any
+unsubstituted type parameters.
+
+If you need this, there's a good chance you're doing something wrong. Keep in
+mind that Rust doesn't guarantee much about the layout of different structs
+(even two structs with identical declarations may have different layouts). If
+there is a solution that avoids the transmute entirely, try it instead.
+
+If it's possible, hand-monomorphize the code by writing the function for each
+possible type substitution. It's possible to use traits to do this cleanly,
+for example:
+
+```
+trait MyTransmutableType {
+ fn transmute(x: Vec<Self>) -> Foo<Self>;
+}
+
+impl MyTransmutableType for u8 {
+ fn transmute(x: Vec<u8>) -> Foo<u8> {
+ transmute(x)
+ }
+}
+impl MyTransmutableType for String {
+ fn transmute(x: Vec<String>) -> Foo<String> {
+ transmute(x)
+ }
+}
+// ... more impls for the types you intend to transmute
+
+fn foo<T: MyTransmutableType>(x: Vec<T>) {
+ let y: Foo<T> = <T as MyTransmutableType>::transmute(x);
+ // do something with y
+}
+```
+
+Each impl will be checked for a size match in the transmute as usual, and since
+there are no unbound type parameters involved, this should compile unless there
+is a size mismatch in one of the impls.
+
+It is also possible to manually transmute:
+
+```
+let mut result: SomeType = unsafe { mem::uninitialized() };
+unsafe { copy_nonoverlapping(&v, &mut result, 1) };
+result // `v` transmuted to type `SomeType`
+```
+"##,
+
E0152: r##"
Lang items are already implemented in the standard library. Unless you are
writing a free-standing application (e.g. a kernel), you do not need to provide
use Method::*;
enum Method { GET, POST }
```
+
+If you want others to be able to import variants from your module directly, use
+`pub use`:
+
+```
+pub use Method::*;
+enum Method { GET, POST }
+```
"##,
E0261: r##"
the lifetime of the entire program, this is an error:
```
-// error, illegal lifetime parameter name `'static`
+// error, invalid lifetime parameter name `'static`
fn foo<'static>(x: &'static str) { }
```
"##,
E0267: r##"
This error indicates the use of a loop keyword (`break` or `continue`) inside a
-closure but outside of any loop. Break and continue can be used as normal inside
-closures as long as they are also contained within a loop. To halt the execution
-of a closure you should instead use a return statement.
+closure but outside of any loop. Erroneous code example:
+
+```
+let w = || { break; }; // error: `break` inside of a closure
+```
+
+`break` and `continue` keywords can be used as normal inside closures as long as
+they are also contained within a loop. To halt the execution of a closure you
+should instead use a return statement. Example:
+
+```
+let w = || {
+ for _ in 0..10 {
+ break;
+ }
+};
+
+w();
+```
"##,
E0268: r##"
This error indicates the use of a loop keyword (`break` or `continue`) outside
of a loop. Without a loop to break out of or continue in, no sensible action can
-be taken.
+be taken. Erroneous code example:
+
+```
+fn some_func() {
+ break; // error: `break` outside of loop
+}
+```
+
+Please verify that you are using `break` and `continue` only in loops. Example:
+
+```
+fn some_func() {
+ for _ in 0..10 {
+ break; // ok!
+ }
+}
+```
+"##,
+
+E0269: r##"
+Functions must eventually return a value of their return type. For example, in
+the following function
+
+```
+fn foo(x: u8) -> u8 {
+ if x > 0 {
+ x // alternatively, `return x`
+ }
+ // nothing here
+}
+```
+
+if the condition is true, the value `x` is returned, but if the condition is
+false, control exits the `if` block and reaches a place where nothing is being
+returned. All possible control paths must eventually return a `u8`, which is not
+happening here.
+
+An easy fix for this in a complicated function is to specify a default return
+value, if possible:
+
+```
+fn foo(x: u8) -> u8 {
+ if x > 0 {
+ return x;
+ }
+ // lots of other if branches
+ 0 // return 0 if all else fails
+}
+```
+
+It is advisable to find out what the unhandled cases are and check for them,
+returning an appropriate value or panicking if necessary.
+"##,
+
+E0270: r##"
+Rust lets you define functions which are known to never return, i.e. are
+'diverging', by marking its return type as `!`.
+
+For example, the following functions never return:
+
+```
+fn foo() -> ! {
+ loop {}
+}
+
+fn bar() -> ! {
+ foo() // foo() is diverging, so this will diverge too
+}
+
+fn baz() -> ! {
+ panic!(); // this macro internally expands to a call to a diverging function
+}
+
+```
+
+Such functions can be used in a place where a value is expected without
+returning a value of that type, for instance:
+
+```
+let y = match x {
+ 1 => 1,
+ 2 => 4,
+ _ => foo() // diverging function called here
+};
+println!("{}", y)
+```
+
+If the third arm of the match block is reached, since `foo()` doesn't ever
+return control to the match block, it is fine to use it in a place where an
+integer was expected. The `match` block will never finish executing, and any
+point where `y` (like the print statement) is needed will not be reached.
+
+However, if we had a diverging function that actually does finish execution
+
+```
+fn foo() -> ! {
+ loop {break;}
+}
+```
+
+then we would have an unknown value for `y` in the following code:
+
+```
+let y = match x {
+ 1 => 1,
+ 2 => 4,
+ _ => foo()
+};
+println!("{}", y);
+```
+
+In the previous example, the print statement was never reached when the wildcard
+match arm was hit, so we were okay with `foo()` not returning an integer that we
+could set to `y`. But in this example, `foo()` actually does return control, so
+the print statement will be executed with an uninitialized value.
+
+Obviously we cannot have functions which are allowed to be used in such
+positions and yet can return control. So, if you are defining a function that
+returns `!`, make sure that there is no way for it to actually finish executing.
"##,
E0271: r##"
```
"##,
+E0272: r##"
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```
+fn foo<T: Index<u8>>(x: T){}
+
+#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
+trait Index<Idx> { ... }
+
+foo(true); // `bool` does not implement `Index<u8>`
+```
+
+there will be an error about `bool` not implementing `Index<u8>`, followed by a
+note saying "the type `bool` cannot be indexed by `u8`".
+
+As you can see, you can specify type parameters in curly braces for substitution
+with the actual types (using the regular format string syntax) in a given
+situation. Furthermore, `{Self}` will substitute to the type (in this case,
+`bool`) that we tried to use.
+
+This error appears when the curly braces contain an identifier which doesn't
+match with any of the type parameters or the string `Self`. This might happen if
+you misspelled a type parameter, or if you intended to use literal curly braces.
+If it is the latter, escape the curly braces with a second curly brace of the
+same type; e.g. a literal `{` is `{{`.
+"##,
+
+E0273: r##"
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```
+fn foo<T: Index<u8>>(x: T){}
+
+#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
+trait Index<Idx> { ... }
+
+foo(true); // `bool` does not implement `Index<u8>`
+```
+
+there will be an error about `bool` not implementing `Index<u8>`, followed by a
+note saying "the type `bool` cannot be indexed by `u8`".
+
+As you can see, you can specify type parameters in curly braces for substitution
+with the actual types (using the regular format string syntax) in a given
+situation. Furthermore, `{Self}` will substitute to the type (in this case,
+`bool`) that we tried to use.
+
+This error appears when the curly braces do not contain an identifier. Please
+add one of the same name as a type parameter. If you intended to use literal
+braces, use `{{` and `}}` to escape them.
+"##,
+
+E0274: r##"
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```
+fn foo<T: Index<u8>>(x: T){}
+
+#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
+trait Index<Idx> { ... }
+
+foo(true); // `bool` does not implement `Index<u8>`
+```
+
+there will be an error about `bool` not implementing `Index<u8>`, followed by a
+note saying "the type `bool` cannot be indexed by `u8`".
+
+For this to work, some note must be specified. An empty attribute will not do
+anything, please remove the attribute or add some helpful note for users of the
+trait.
+"##,
+
+E0275: r##"
+This error occurs when there was a recursive trait requirement that overflowed
+before it could be evaluated. Often this means that there is unbounded recursion
+in resolving some type bounds.
+
+For example, in the following code
+
+```
+trait Foo {}
+
+struct Bar<T>(T);
+
+impl<T> Foo for T where Bar<T>: Foo {}
+```
+
+to determine if a `T` is `Foo`, we need to check if `Bar<T>` is `Foo`. However,
+to do this check, we need to determine that `Bar<Bar<T>>` is `Foo`. To determine
+this, we check if `Bar<Bar<Bar<T>>>` is `Foo`, and so on. This is clearly a
+recursive requirement that can't be resolved directly.
+
+Consider changing your trait bounds so that they're less self-referential.
+"##,
+
+E0276: r##"
+This error occurs when a bound in an implementation of a trait does not match
+the bounds specified in the original trait. For example:
+
+```
+trait Foo {
+ fn foo<T>(x: T);
+}
+
+impl Foo for bool {
+ fn foo<T>(x: T) where T: Copy {}
+}
+```
+
+Here, all types implementing `Foo` must have a method `foo<T>(x: T)` which can
+take any type `T`. However, in the `impl` for `bool`, we have added an extra
+bound that `T` is `Copy`, which isn't compatible with the original trait.
+
+Consider removing the bound from the method or adding the bound to the original
+method definition in the trait.
+"##,
+
+E0277: r##"
+You tried to use a type which doesn't implement some trait in a place which
+expected that trait. Erroneous code example:
+
+```
+// here we declare the Foo trait with a bar method
+trait Foo {
+ fn bar(&self);
+}
+
+// we now declare a function which takes an object implementing the Foo trait
+fn some_func<T: Foo>(foo: T) {
+ foo.bar();
+}
+
+fn main() {
+ // we now call the method with the i32 type, which doesn't implement
+ // the Foo trait
+ some_func(5i32); // error: the trait `Foo` is not implemented for the
+ // type `i32`
+}
+```
+
+In order to fix this error, verify that the type you're using does implement
+the trait. Example:
+
+```
+trait Foo {
+ fn bar(&self);
+}
+
+fn some_func<T: Foo>(foo: T) {
+ foo.bar(); // we can now use this method since i32 implements the
+ // Foo trait
+}
+
+// we implement the trait on the i32 type
+impl Foo for i32 {
+ fn bar(&self) {}
+}
+
+fn main() {
+ some_func(5i32); // ok!
+}
+```
+"##,
+
E0282: r##"
This error indicates that type inference did not result in one unique possible
type, and extra information is required. In most cases this can be provided
restriction, but for now patterns must be rewritten without sub-bindings.
```
-// Code like this...
-match Some(5) {
- ref op_num @ Some(num) => ...
+// Before.
+match Some("hi".to_string()) {
+ ref op_string_ref @ Some(ref s) => ...
None => ...
}
// After.
match Some("hi".to_string()) {
Some(ref s) => {
- let op_string_ref = &Some(&s);
+ let op_string_ref = &Some(s);
...
}
None => ...
E0308: r##"
This error occurs when the compiler was unable to infer the concrete type of a
-variable. This error can occur for several cases, the most common of which is a
+variable. It can occur for several cases, the most common of which is a
mismatch in the expected type that the compiler inferred for a variable's
initializing expression, and the actual type explicitly assigned to the
variable.
E0394: r##"
From [RFC 246]:
- > It is illegal for a static to reference another static by value. It is
+ > It is invalid for a static to reference another static by value. It is
> required that all references be borrowed.
[RFC 246]: https://github.com/rust-lang/rfcs/pull/246
"##,
+E0395: r##"
+The value assigned to a constant expression must be known at compile time,
+which is not the case when comparing raw pointers. Erroneous code example:
+
+```
+static foo: i32 = 42;
+static bar: i32 = 43;
+
+static baz: bool = { (&foo as *const i32) == (&bar as *const i32) };
+// error: raw pointers cannot be compared in statics!
+```
+
+Please check that the result of the comparison can be determined at compile time
+or isn't assigned to a constant expression. Example:
+
+```
+static foo: i32 = 42;
+static bar: i32 = 43;
+
+let baz: bool = { (&foo as *const i32) == (&bar as *const i32) };
+// baz isn't a constant expression so it's ok
+```
+"##,
+
+E0396: r##"
+The value assigned to a constant expression must be known at compile time,
+which is not the case when dereferencing raw pointers. Erroneous code
+example:
+
+```
+const foo: i32 = 42;
+const baz: *const i32 = (&foo as *const i32);
+
+const deref: i32 = *baz;
+// error: raw pointers cannot be dereferenced in constants
+```
+
+To fix this error, please do not assign this value to a constant expression.
+Example:
+
+```
+const foo: i32 = 42;
+const baz: *const i32 = (&foo as *const i32);
+
+unsafe { let deref: i32 = *baz; }
+// baz isn't a constant expression so it's ok
+```
+
+You'll also note that this assignment must be done in an unsafe block!
+"##,
+
E0397: r##"
It is not allowed for a mutable static to allocate or have destructors. For
example:
register_diagnostics! {
- E0017,
- E0022,
- E0038,
- E0109,
- E0110,
- E0134,
- E0135,
- E0136,
- E0138,
- E0139,
+ // E0006 // merged with E0005
+// E0134,
+// E0135,
E0264, // unknown external lang item
- E0266, // expected item
- E0269, // not all control paths return a value
- E0270, // computation may converge in a function marked as diverging
- E0272, // rustc_on_unimplemented attribute refers to non-existent type parameter
- E0273, // rustc_on_unimplemented must have named format arguments
- E0274, // rustc_on_unimplemented must have a value
- E0275, // overflow evaluating requirement
- E0276, // requirement appears on impl method but not on corresponding trait method
- E0277, // trait is not implemented for type
E0278, // requirement is not satisfied
E0279, // requirement is not satisfied
E0280, // requirement is not satisfied
E0315, // cannot invoke closure outside of its lifetime
E0316, // nested quantification of lifetimes
E0370, // discriminant overflow
- E0395, // pointer comparison in const-expr
- E0396 // pointer dereference in const-expr
+ E0400 // overloaded derefs are not allowed in constants
}
#![feature(clone_from_slice)]
#![feature(collections)]
#![feature(const_fn)]
-#![feature(duration)]
#![feature(duration_span)]
#![feature(dynamic_lib)]
#![feature(enumset)]
#![feature(fs_canonicalize)]
-#![feature(hash_default)]
#![feature(hashmap_hasher)]
#![feature(into_cow)]
#![feature(iter_cmp)]
#![feature(iter_arith)]
#![feature(libc)]
-#![feature(map_in_place)]
#![feature(num_bits_bytes)]
#![feature(path_ext)]
#![feature(quote)]
#![feature(rustc_private)]
#![feature(scoped_tls)]
#![feature(slice_bytes)]
-#![feature(slice_extras)]
+#![feature(slice_splits)]
#![feature(slice_patterns)]
-#![feature(slice_position_elem)]
#![feature(staged_api)]
#![feature(str_char)]
#![feature(str_match_indices)]
#![feature(vec_push_all)]
#![feature(wrapping)]
+#![feature(cell_extras)]
+#![feature(page_size)]
#![cfg_attr(test, feature(test))]
#![allow(trivial_casts)]
pub mod back {
pub use rustc_back::abi;
- pub use rustc_back::archive;
- pub use rustc_back::arm;
- pub use rustc_back::mips;
- pub use rustc_back::mipsel;
pub use rustc_back::rpath;
pub use rustc_back::svh;
- pub use rustc_back::target_strs;
- pub use rustc_back::x86;
- pub use rustc_back::x86_64;
}
pub mod ast_map;
use util::nodemap::FnvHashMap;
use std::cell::RefCell;
+use std::cmp;
use std::mem;
use syntax::ast_util::IdVisitingOperation;
use syntax::attr::AttrMetaMethods;
/// Map of registered lint groups to what lints they expand to. The bool
/// is true if the lint group was added by a plugin.
lint_groups: FnvHashMap<&'static str, (Vec<LintId>, bool)>,
+
+ /// Maximum level a lint can be
+ lint_cap: Option<Level>,
}
/// The targed of the `by_name` map, which accounts for renaming/deprecation.
/// Temporary renaming, used for easing migration pain; see #16545
Renamed(String, LintId),
+
+ /// Lint with this name existed previously, but has been removed/deprecated.
+ /// The string argument is the reason for removal.
+ Removed(String),
+}
+
+enum FindLintError {
+ NotFound,
+ Removed
}
impl LintStore {
}
}
- fn set_level(&mut self, lint: LintId, lvlsrc: LevelSource) {
+ fn set_level(&mut self, lint: LintId, mut lvlsrc: LevelSource) {
+ if let Some(cap) = self.lint_cap {
+ lvlsrc.0 = cmp::min(lvlsrc.0, cap);
+ }
if lvlsrc.0 == Allow {
self.levels.remove(&lint);
} else {
by_name: FnvHashMap(),
levels: FnvHashMap(),
lint_groups: FnvHashMap(),
+ lint_cap: None,
}
}
self.by_name.insert(old_name.to_string(), Renamed(new_name.to_string(), target));
}
+ pub fn register_removed(&mut self, name: &str, reason: &str) {
+ self.by_name.insert(name.into(), Removed(reason.into()));
+ }
+
#[allow(unused_variables)]
fn find_lint(&self, lint_name: &str, sess: &Session, span: Option<Span>)
- -> Option<LintId>
+ -> Result<LintId, FindLintError>
{
match self.by_name.get(lint_name) {
- Some(&Id(lint_id)) => Some(lint_id),
+ Some(&Id(lint_id)) => Ok(lint_id),
Some(&Renamed(ref new_name, lint_id)) => {
let warning = format!("lint {} has been renamed to {}",
lint_name, new_name);
Some(span) => sess.span_warn(span, &warning[..]),
None => sess.warn(&warning[..]),
};
- Some(lint_id)
- }
- None => None
+ Ok(lint_id)
+ },
+ Some(&Removed(ref reason)) => {
+ let warning = format!("lint {} has been removed: {}", lint_name, reason);
+ match span {
+ Some(span) => sess.span_warn(span, &warning[..]),
+ None => sess.warn(&warning[..])
+ }
+ Err(FindLintError::Removed)
+ },
+ None => Err(FindLintError::NotFound)
}
}
pub fn process_command_line(&mut self, sess: &Session) {
for &(ref lint_name, level) in &sess.opts.lint_opts {
match self.find_lint(&lint_name[..], sess, None) {
- Some(lint_id) => self.set_level(lint_id, (level, CommandLine)),
- None => {
+ Ok(lint_id) => self.set_level(lint_id, (level, CommandLine)),
+ Err(_) => {
match self.lint_groups.iter().map(|(&x, pair)| (x, pair.0.clone()))
.collect::<FnvHashMap<&'static str,
Vec<LintId>>>()
}
}
}
+
+ self.lint_cap = sess.opts.lint_cap;
+ if let Some(cap) = self.lint_cap {
+ for level in self.levels.iter_mut().map(|p| &mut (p.1).0) {
+ *level = cmp::min(*level, cap);
+ }
+ }
}
}
}
Ok((lint_name, level, span)) => {
match self.lints.find_lint(&lint_name, &self.tcx.sess, Some(span)) {
- Some(lint_id) => vec![(lint_id, level, span)],
- None => {
+ Ok(lint_id) => vec![(lint_id, level, span)],
+ Err(FindLintError::NotFound) => {
match self.lints.lint_groups.get(&lint_name[..]) {
Some(&(ref v, _)) => v.iter()
.map(|lint_id: &LintId|
continue;
}
}
- }
+ },
+ Err(FindLintError::Removed) => { continue; }
}
}
};
use syntax::codemap::{self, Span, mk_sp, Pos};
use syntax::parse;
use syntax::parse::token::InternedString;
-use syntax::parse::token;
use syntax::visit;
use log;
fn extract_crate_info(&self, i: &ast::Item) -> Option<CrateInfo> {
match i.node {
ast::ItemExternCrate(ref path_opt) => {
- let ident = token::get_ident(i.ident);
debug!("resolving extern crate stmt. ident: {} path_opt: {:?}",
- ident, path_opt);
+ i.ident, path_opt);
let name = match *path_opt {
Some(name) => {
- validate_crate_name(Some(self.sess), name.as_str(),
+ validate_crate_name(Some(self.sess), &name.as_str(),
Some(i.span));
- name.as_str().to_string()
+ name.to_string()
}
- None => ident.to_string(),
+ None => i.ident.to_string(),
};
Some(CrateInfo {
- ident: ident.to_string(),
+ ident: i.ident.to_string(),
name: name,
id: i.id,
should_link: should_link(i),
// `CodeMap::new_imported_filemap()` will then translate those
// coordinates to their new global frame of reference when the
// offset of the FileMap is known.
- let lines = lines.into_inner().map_in_place(|pos| pos - start_pos);
- let multibyte_chars = multibyte_chars
- .into_inner()
- .map_in_place(|mbc|
- codemap::MultiByteChar {
- pos: mbc.pos - start_pos,
- bytes: mbc.bytes
- });
+ let mut lines = lines.into_inner();
+ for pos in &mut lines {
+ *pos = *pos - start_pos;
+ }
+ let mut multibyte_chars = multibyte_chars.into_inner();
+ for mbc in &mut multibyte_chars {
+ mbc.pos = mbc.pos - start_pos;
+ }
let local_version = local_codemap.new_imported_filemap(name,
source_length,
pub fn get_struct_fields(cstore: &cstore::CStore,
def: ast::DefId)
- -> Vec<ty::field_ty> {
+ -> Vec<ty::FieldTy> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_struct_fields(cstore.intr.clone(), &*cdata, def.node)
}
}))
.collect::<Vec<_>>();
libs.sort_by(|&(a, _), &(b, _)| {
- ordering.position_elem(&a).cmp(&ordering.position_elem(&b))
+ let a = ordering.iter().position(|x| *x == a);
+ let b = ordering.iter().position(|x| *x == b);
+ a.cmp(&b)
});
libs
}
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
-use std::hash::{self, Hash, SipHasher};
+use std::hash::{Hash, SipHasher, Hasher};
use std::io::prelude::*;
use std::io;
use std::rc::Rc;
fn eq_item(bytes: &[u8], item_id: ast::NodeId) -> bool {
u32_from_be_bytes(bytes) == item_id
}
- lookup_hash(items,
- |a| eq_item(a, item_id),
- hash::hash::<i64, SipHasher>(&(item_id as i64)))
+ let mut s = SipHasher::new_with_keys(0, 0);
+ (item_id as i64).hash(&mut s);
+ lookup_hash(items, |a| eq_item(a, item_id), s.finish())
}
fn find_item<'a>(item_id: ast::NodeId, items: rbml::Doc<'a>) -> rbml::Doc<'a> {
Constant => {
// Check whether we have an associated const item.
if item_sort(item) == Some('C') {
- // Check whether the associated const is from a trait or impl.
- // See the comment for methods below.
- let provenance = if reader::maybe_get_doc(
- item, tag_item_trait_parent_sort).is_some() {
- def::FromTrait(item_require_parent_item(cdata, item))
- } else {
- def::FromImpl(item_require_parent_item(cdata, item))
- };
- DlDef(def::DefAssociatedConst(did, provenance))
+ DlDef(def::DefAssociatedConst(did))
} else {
// Regular const item.
DlDef(def::DefConst(did))
Fn => DlDef(def::DefFn(did, false)),
CtorFn => DlDef(def::DefFn(did, true)),
Method | StaticMethod => {
- // def_static_method carries an optional field of its enclosing
- // trait or enclosing impl (if this is an inherent static method).
- // So we need to detect whether this is in a trait or not, which
- // we do through the mildly hacky way of checking whether there is
- // a trait_parent_sort.
- let provenance = if reader::maybe_get_doc(
- item, tag_item_trait_parent_sort).is_some() {
- def::FromTrait(item_require_parent_item(cdata, item))
- } else {
- def::FromImpl(item_require_parent_item(cdata, item))
- };
- DlDef(def::DefMethod(did, provenance))
+ DlDef(def::DefMethod(did))
}
Type => {
if item_sort(item) == Some('t') {
-> csearch::FoundAst<'tcx> {
debug!("Looking up item: {}", id);
let item_doc = lookup_item(id, cdata.data());
- let path = item_path(item_doc).init().to_vec();
+ let path = item_path(item_doc).split_last().unwrap().1.to_vec();
match decode_inlined_item(cdata, tcx, path, item_doc) {
Ok(ii) => csearch::FoundAst::Found(ii),
Err(path) => {
}
pub fn get_struct_fields(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::NodeId)
- -> Vec<ty::field_ty> {
+ -> Vec<ty::FieldTy> {
let data = cdata.data();
let item = lookup_item(id, data);
reader::tagged_docs(item, tag_item_field).filter_map(|an_item| {
let did = item_def_id(an_item, cdata);
let tagdoc = reader::get_doc(an_item, tag_item_field_origin);
let origin_id = translated_def_id(cdata, tagdoc);
- Some(ty::field_ty {
+ Some(ty::FieldTy {
name: name,
id: did,
vis: struct_field_family_to_visibility(f),
let tagdoc = reader::get_doc(an_item, tag_item_field_origin);
let f = item_family(an_item);
let origin_id = translated_def_id(cdata, tagdoc);
- ty::field_ty {
+ ty::FieldTy {
name: special_idents::unnamed_field.name,
id: did,
vis: struct_field_family_to_visibility(f),
use metadata::decoder;
use metadata::tyencode;
use middle::def;
-use middle::ty::lookup_item_type;
use middle::ty::{self, Ty};
use middle::stability;
use util::nodemap::{FnvHashMap, NodeMap, NodeSet};
use syntax::attr::AttrMetaMethods;
use syntax::diagnostic::SpanHandler;
use syntax::parse::token::special_idents;
-use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::visit::Visitor;
}
fn encode_name(rbml_w: &mut Encoder, name: ast::Name) {
- rbml_w.wr_tagged_str(tag_paths_data_name, &token::get_name(name));
+ rbml_w.wr_tagged_str(tag_paths_data_name, &name.as_str());
}
fn encode_impl_type_basename(rbml_w: &mut Encoder, name: ast::Name) {
- rbml_w.wr_tagged_str(tag_item_impl_type_basename, &token::get_name(name));
+ rbml_w.wr_tagged_str(tag_item_impl_type_basename, &name.as_str());
}
fn encode_def_id(rbml_w: &mut Encoder, id: DefId) {
fn encode_item_variances(rbml_w: &mut Encoder,
ecx: &EncodeContext,
id: NodeId) {
- let v = ty::item_variances(ecx.tcx, ast_util::local_def(id));
+ let v = ecx.tcx.item_variances(ast_util::local_def(id));
rbml_w.start_tag(tag_item_variances);
v.encode(rbml_w);
rbml_w.end_tag();
id: ast::NodeId) {
encode_bounds_and_type(rbml_w,
ecx,
- &ty::lookup_item_type(ecx.tcx, local_def(id)),
- &ty::lookup_predicates(ecx.tcx, local_def(id)));
+ &ecx.tcx.lookup_item_type(local_def(id)),
+ &ecx.tcx.lookup_predicates(local_def(id)));
}
fn encode_bounds_and_type<'a, 'tcx>(rbml_w: &mut Encoder,
}
fn encode_struct_fields(rbml_w: &mut Encoder,
- fields: &[ty::field_ty],
+ fields: &[ty::FieldTy],
origin: DefId) {
for f in fields {
if f.name == special_idents::unnamed_field.name {
let mut disr_val = 0;
let mut i = 0;
- let vi = ty::enum_variants(ecx.tcx,
- DefId { krate: ast::LOCAL_CRATE, node: id });
+ let vi = ecx.tcx.enum_variants(local_def(id));
for variant in variants {
let def_id = local_def(variant.node.id);
index.push(entry {
match variant.node.kind {
ast::TupleVariantKind(_) => {},
ast::StructVariantKind(_) => {
- let fields = ty::lookup_struct_fields(ecx.tcx, def_id);
+ let fields = ecx.tcx.lookup_struct_fields(def_id);
let idx = encode_info_for_struct(ecx,
rbml_w,
&fields[..],
encode_index(rbml_w, idx, write_i64);
}
}
- if (*vi)[i].disr_val != disr_val {
- encode_disr_val(ecx, rbml_w, (*vi)[i].disr_val);
- disr_val = (*vi)[i].disr_val;
+ let specified_disr_val = vi[i].disr_val;
+ if specified_disr_val != disr_val {
+ encode_disr_val(ecx, rbml_w, specified_disr_val);
+ disr_val = specified_disr_val;
}
encode_bounds_and_type_for_item(rbml_w, ecx, def_id.local_id());
ast_map::PathMod(_) => tag_path_elem_mod,
ast_map::PathName(_) => tag_path_elem_name
};
- rbml_w.wr_tagged_str(tag, &token::get_name(pe.name()));
+ rbml_w.wr_tagged_str(tag, &pe.name().as_str());
}
rbml_w.end_tag();
}
method_def_id: DefId,
method_name: ast::Name) {
debug!("(encode reexported static method) {}::{}",
- exp.name, token::get_name(method_name));
+ exp.name, method_name);
rbml_w.start_tag(tag_items_data_item_reexport);
rbml_w.wr_tagged_u64(tag_items_data_item_reexport_def_id,
def_to_u64(method_def_id));
rbml_w.wr_tagged_str(tag_items_data_item_reexport_name,
&format!("{}::{}", exp.name,
- token::get_name(method_name)));
+ method_name));
rbml_w.end_tag();
}
Some(implementations) => {
for base_impl_did in implementations.iter() {
for &method_did in impl_items.get(base_impl_did).unwrap() {
- let impl_item = ty::impl_or_trait_item(
- ecx.tcx,
- method_did.def_id());
+ let impl_item = ecx.tcx.impl_or_trait_item(method_did.def_id());
if let ty::MethodTraitItem(ref m) = impl_item {
encode_reexported_static_method(rbml_w,
exp,
rbml_w.wr_tagged_u64(tag_items_data_item_reexport_def_id,
def_to_u64(exp.def_id));
rbml_w.wr_tagged_str(tag_items_data_item_reexport_name,
- exp.name.as_str());
+ &exp.name.as_str());
rbml_w.end_tag();
encode_reexported_static_methods(ecx, rbml_w, path.clone(), exp);
}
- }
- None => {
- debug!("(encoding info for module) found no reexports for {}",
- id);
- }
+ },
+ None => debug!("(encoding info for module) found no reexports for {}", id),
}
}
if let ast::ItemImpl(..) = item.node {
let (ident, did) = (item.ident, item.id);
debug!("(encoding info for module) ... encoding impl {} ({}/{})",
- token::get_ident(ident),
+ ident,
did, ecx.tcx.map.node_to_string(did));
rbml_w.wr_tagged_u64(tag_mod_impl, def_to_u64(local_def(did)));
/* Returns an index of items in this class */
fn encode_info_for_struct(ecx: &EncodeContext,
rbml_w: &mut Encoder,
- fields: &[ty::field_ty],
+ fields: &[ty::FieldTy],
global_index: &mut Vec<entry<i64>>)
-> Vec<entry<i64>> {
/* Each class has its own index, since different classes
});
rbml_w.start_tag(tag_items_data_item);
debug!("encode_info_for_struct: doing {} {}",
- token::get_name(nm), id);
+ nm, id);
encode_struct_field_family(rbml_w, field.vis);
encode_name(rbml_w, nm);
encode_bounds_and_type_for_item(rbml_w, ecx, id);
impl_item_opt: Option<&ast::ImplItem>) {
debug!("encode_info_for_associated_const({:?},{:?})",
associated_const.def_id,
- token::get_name(associated_const.name));
+ associated_const.name);
rbml_w.start_tag(tag_items_data_item);
impl_item_opt: Option<&ast::ImplItem>) {
debug!("encode_info_for_method: {:?} {:?}", m.def_id,
- token::get_name(m.name));
+ m.name);
rbml_w.start_tag(tag_items_data_item);
encode_method_ty_fields(ecx, rbml_w, m);
if let Some(impl_item) = impl_item_opt {
if let ast::MethodImplItem(ref sig, _) = impl_item.node {
encode_attributes(rbml_w, &impl_item.attrs);
- let scheme = ty::lookup_item_type(ecx.tcx, m.def_id);
+ let scheme = ecx.tcx.lookup_item_type(m.def_id);
let any_types = !scheme.generics.types.is_empty();
let needs_inline = any_types || is_default_impl ||
attr::requests_inline(&impl_item.attrs);
impl_item_opt: Option<&ast::ImplItem>) {
debug!("encode_info_for_associated_type({:?},{:?})",
associated_type.def_id,
- token::get_name(associated_type.name));
+ associated_type.name);
rbml_w.start_tag(tag_items_data_item);
encode_attributes(rbml_w, &ii.attrs);
} else {
encode_predicates(rbml_w, ecx,
- &ty::lookup_predicates(ecx.tcx, associated_type.def_id),
+ &ecx.tcx.lookup_predicates(associated_type.def_id),
tag_item_generics);
}
for arg in &decl.inputs {
let tag = tag_method_argument_name;
if let ast::PatIdent(_, ref path1, _) = arg.pat.node {
- let name = token::get_name(path1.node.name);
+ let name = path1.node.name.as_str();
rbml_w.wr_tagged_bytes(tag, name.as_bytes());
} else {
rbml_w.wr_tagged_bytes(tag, &[]);
rbml_w: &mut Encoder,
trait_def_id: DefId) {
assert!(ast_util::is_local(trait_def_id));
- let def = ty::lookup_trait_def(ecx.tcx, trait_def_id);
+ let def = ecx.tcx.lookup_trait_def(trait_def_id);
def.for_each_impl(ecx.tcx, |impl_def_id| {
rbml_w.start_tag(tag_items_data_item_extension_impl);
index);
}
ast::ItemStruct(ref struct_def, _) => {
- let fields = ty::lookup_struct_fields(tcx, def_id);
+ let fields = tcx.lookup_struct_fields(def_id);
/* First, encode the fields
These come first because we need to write them to make
encode_name(rbml_w, item.ident.name);
encode_unsafety(rbml_w, unsafety);
- let trait_ref = ty::impl_trait_ref(tcx, local_def(item.id)).unwrap();
+ let trait_ref = tcx.impl_trait_ref(local_def(item.id)).unwrap();
encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref);
rbml_w.end_tag();
}
}
rbml_w.end_tag();
}
- if let Some(trait_ref) = ty::impl_trait_ref(tcx, local_def(item.id)) {
+ if let Some(trait_ref) = tcx.impl_trait_ref(local_def(item.id)) {
encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref);
}
encode_path(rbml_w, path.clone());
pos: rbml_w.mark_stable_position(),
});
- match ty::impl_or_trait_item(tcx, trait_item_def_id.def_id()) {
+ match tcx.impl_or_trait_item(trait_item_def_id.def_id()) {
ty::ConstTraitItem(ref associated_const) => {
encode_info_for_associated_const(ecx,
rbml_w,
encode_def_id(rbml_w, def_id);
encode_family(rbml_w, 'I');
encode_item_variances(rbml_w, ecx, item.id);
- let trait_def = ty::lookup_trait_def(tcx, def_id);
- let trait_predicates = ty::lookup_predicates(tcx, def_id);
+ let trait_def = tcx.lookup_trait_def(def_id);
+ let trait_predicates = tcx.lookup_predicates(def_id);
encode_unsafety(rbml_w, trait_def.unsafety);
encode_paren_sugar(rbml_w, trait_def.paren_sugar);
- encode_defaulted(rbml_w, ty::trait_has_default_impl(tcx, def_id));
+ encode_defaulted(rbml_w, tcx.trait_has_default_impl(def_id));
encode_associated_type_names(rbml_w, &trait_def.associated_type_names);
encode_generics(rbml_w, ecx, &trait_def.generics, &trait_predicates,
tag_item_generics);
- encode_predicates(rbml_w, ecx, &ty::lookup_super_predicates(tcx, def_id),
+ encode_predicates(rbml_w, ecx, &tcx.lookup_super_predicates(def_id),
tag_item_super_predicates);
encode_trait_ref(rbml_w, ecx, trait_def.trait_ref, tag_item_trait_ref);
encode_name(rbml_w, item.ident.name);
encode_attributes(rbml_w, &item.attrs);
encode_visibility(rbml_w, vis);
encode_stability(rbml_w, stab);
- for &method_def_id in ty::trait_item_def_ids(tcx, def_id).iter() {
+ for &method_def_id in tcx.trait_item_def_ids(def_id).iter() {
rbml_w.start_tag(tag_item_trait_item);
match method_def_id {
ty::ConstTraitItemId(const_def_id) => {
rbml_w.end_tag();
// Now output the trait item info for each trait item.
- let r = ty::trait_item_def_ids(tcx, def_id);
+ let r = tcx.trait_item_def_ids(def_id);
for (i, &item_def_id) in r.iter().enumerate() {
assert_eq!(item_def_id.def_id().krate, ast::LOCAL_CRATE);
encode_stability(rbml_w, stab);
let trait_item_type =
- ty::impl_or_trait_item(tcx, item_def_id.def_id());
+ tcx.impl_or_trait_item(item_def_id.def_id());
let is_nonstatic_method;
match trait_item_type {
ty::ConstTraitItem(associated_const) => {
encode_item_sort(rbml_w, 't');
encode_family(rbml_w, 'y');
+ if let Some(ty) = associated_type.ty {
+ encode_type(ecx, rbml_w, ty);
+ }
+
is_nonstatic_method = false;
}
}
index: &mut Vec<entry<i64>>) {
debug!("writing foreign item {}::{}",
ecx.tcx.map.path_to_string(ni.id),
- token::get_ident(ni.ident));
+ ni.ident);
let abi = ecx.tcx.map.get_foreign_abi(ni.id);
ecx.tcx.map.with_path(ni.id, |path| {
fn encode_associated_type_names(rbml_w: &mut Encoder, names: &[ast::Name]) {
rbml_w.start_tag(tag_associated_type_names);
for &name in names {
- rbml_w.wr_tagged_str(tag_associated_type_name, &token::get_name(name));
+ rbml_w.wr_tagged_str(tag_associated_type_name, &name.as_str());
}
rbml_w.end_tag();
}
cstore::RequireStatic => "s",
})).to_string())
}).collect::<Vec<String>>();
- rbml_w.wr_tagged_str(tag, &s.connect(","));
+ rbml_w.wr_tagged_str(tag, &s.join(","));
}
None => {
rbml_w.wr_tagged_str(tag, "");
let mut rbml_w = Encoder::new(wr);
encode_crate_name(&mut rbml_w, &ecx.link_meta.crate_name);
- encode_crate_triple(&mut rbml_w,
- &tcx.sess
- .opts
- .target_triple
- );
+ encode_crate_triple(&mut rbml_w, &tcx.sess.opts.target_triple);
encode_hash(&mut rbml_w, &ecx.link_meta.crate_hash);
encode_dylib_dependency_formats(&mut rbml_w, &ecx);
//! no means all of the necessary details. Take a look at the rest of
//! metadata::loader or metadata::creader for all the juicy details!
-use back::archive::METADATA_FILENAME;
use back::svh::Svh;
use session::Session;
use session::search_paths::PathKind;
pub rlib: Option<PathBuf>
}
+pub const METADATA_FILENAME: &'static str = "rust.metadata.bin";
+
impl CratePaths {
fn paths(&self) -> Vec<PathBuf> {
match (&self.dylib, &self.rlib) {
let dur = Duration::span(|| {
ret = Some(get_metadata_section_imp(target, filename));
});
- info!("reading {:?} => {}", filename.file_name().unwrap(), dur);
+ info!("reading {:?} => {:?}", filename.file_name().unwrap(), dur);
return ret.unwrap();;
}
let mut seen = HashSet::new();
for mut def in macros {
- let name = token::get_ident(def.ident);
- seen.insert(name.clone());
+ let name = def.ident.name.as_str();
def.use_locally = match import.as_ref() {
None => true,
"allow_internal_unstable");
debug!("load_macros: loaded: {:?}", def);
self.macros.push(def);
+ seen.insert(name);
}
if let Some(sel) = import.as_ref() {
for (name, span) in sel {
- if !seen.contains(name) {
+ if !seen.contains(&name) {
self.sess.span_err(*span, "imported macro not found");
}
}
}
for (name, span) in &reexport {
- if !seen.contains(name) {
+ if !seen.contains(&name) {
self.sess.span_err(*span, "reexported macro not found");
}
}
use middle::region;
use middle::subst;
use middle::subst::VecPerParamSpace;
-use middle::ty::{self, AsPredicate, Ty};
+use middle::ty::{self, ToPredicate, Ty, HasTypeFlags};
use std::str;
use syntax::abi;
let def = parse_def_(st, NominalType, conv);
let substs = parse_substs_(st, conv);
assert_eq!(next(st), ']');
- return ty::mk_enum(tcx, def, st.tcx.mk_substs(substs));
+ return tcx.mk_enum(def, st.tcx.mk_substs(substs));
}
'x' => {
assert_eq!(next(st), '[');
let trait_ref = ty::Binder(parse_trait_ref_(st, conv));
let bounds = parse_existential_bounds_(st, conv);
assert_eq!(next(st), ']');
- return ty::mk_trait(tcx, trait_ref, bounds);
+ return tcx.mk_trait(trait_ref, bounds);
}
'p' => {
assert_eq!(next(st), '[');
let space = parse_param_space(st);
assert_eq!(next(st), '|');
let name = token::intern(&parse_str(st, ']'));
- return ty::mk_param(tcx, space, index, name);
+ return tcx.mk_param(space, index, name);
}
- '~' => return ty::mk_uniq(tcx, parse_ty_(st, conv)),
- '*' => return ty::mk_ptr(tcx, parse_mt_(st, conv)),
+ '~' => return tcx.mk_box(parse_ty_(st, conv)),
+ '*' => return tcx.mk_ptr(parse_mt_(st, conv)),
'&' => {
let r = parse_region_(st, conv);
let mt = parse_mt_(st, conv);
- return ty::mk_rptr(tcx, tcx.mk_region(r), mt);
+ return tcx.mk_ref(tcx.mk_region(r), mt);
}
'V' => {
let t = parse_ty_(st, conv);
- let sz = parse_size(st);
- return ty::mk_vec(tcx, t, sz);
+ return match parse_size(st) {
+ Some(n) => tcx.mk_array(t, n),
+ None => tcx.mk_slice(t)
+ };
}
'v' => {
- return ty::mk_str(tcx);
+ return tcx.mk_str();
}
'T' => {
assert_eq!(next(st), '[');
let mut params = Vec::new();
while peek(st) != ']' { params.push(parse_ty_(st, conv)); }
st.pos = st.pos + 1;
- return ty::mk_tup(tcx, params);
+ return tcx.mk_tup(params);
}
'F' => {
let def_id = parse_def_(st, NominalType, conv);
- return ty::mk_bare_fn(tcx, Some(def_id),
- tcx.mk_bare_fn(parse_bare_fn_ty_(st, conv)));
+ return tcx.mk_fn(Some(def_id), tcx.mk_bare_fn(parse_bare_fn_ty_(st, conv)));
}
'G' => {
- return ty::mk_bare_fn(tcx, None,
- tcx.mk_bare_fn(parse_bare_fn_ty_(st, conv)));
+ return tcx.mk_fn(None, tcx.mk_bare_fn(parse_bare_fn_ty_(st, conv)));
}
'#' => {
let pos = parse_hex(st);
assert_eq!(next(st), ':');
let len = parse_hex(st);
assert_eq!(next(st), '#');
- let key = ty::creader_cache_key {cnum: st.krate,
+ let key = ty::CReaderCacheKey {cnum: st.krate,
pos: pos,
len: len };
// If there is a closure buried in the type some where, then we
// need to re-convert any def ids (see case 'k', below). That means
// we can't reuse the cached version.
- if !ty::type_has_ty_closure(tt) {
+ if !tt.has_closure_types() {
return tt;
}
}
let did = parse_def_(st, NominalType, conv);
let substs = parse_substs_(st, conv);
assert_eq!(next(st), ']');
- return ty::mk_struct(st.tcx, did, st.tcx.mk_substs(substs));
+ return st.tcx.mk_struct(did, st.tcx.mk_substs(substs));
}
'k' => {
assert_eq!(next(st), '[');
let did = parse_def_(st, ClosureSource, conv);
let substs = parse_substs_(st, conv);
+ let mut tys = vec![];
+ while peek(st) != '.' {
+ tys.push(parse_ty_(st, conv));
+ }
+ assert_eq!(next(st), '.');
assert_eq!(next(st), ']');
- return ty::mk_closure(st.tcx, did, st.tcx.mk_substs(substs));
+ return st.tcx.mk_closure(did, st.tcx.mk_substs(substs), tys);
}
'P' => {
assert_eq!(next(st), '[');
let trait_ref = parse_trait_ref_(st, conv);
let name = token::intern(&parse_str(st, ']'));
- return ty::mk_projection(tcx, trait_ref, name);
+ return tcx.mk_projection(trait_ref, name);
}
'e' => {
return tcx.types.err;
}
}
-fn parse_mt_<'a, 'tcx, F>(st: &mut PState<'a, 'tcx>, conv: &mut F) -> ty::mt<'tcx> where
+fn parse_mt_<'a, 'tcx, F>(st: &mut PState<'a, 'tcx>, conv: &mut F) -> ty::TypeAndMut<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
let m = parse_mutability(st);
- ty::mt { ty: parse_ty_(st, conv), mutbl: m }
+ ty::TypeAndMut { ty: parse_ty_(st, conv), mutbl: m }
}
fn parse_def_<F>(st: &mut PState, source: DefIdSource, conv: &mut F) -> ast::DefId where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
match next(st) {
- 't' => ty::Binder(parse_trait_ref_(st, conv)).as_predicate(),
+ 't' => ty::Binder(parse_trait_ref_(st, conv)).to_predicate(),
'e' => ty::Binder(ty::EquatePredicate(parse_ty_(st, conv),
- parse_ty_(st, conv))).as_predicate(),
+ parse_ty_(st, conv))).to_predicate(),
'r' => ty::Binder(ty::OutlivesPredicate(parse_region_(st, conv),
- parse_region_(st, conv))).as_predicate(),
+ parse_region_(st, conv))).to_predicate(),
'o' => ty::Binder(ty::OutlivesPredicate(parse_ty_(st, conv),
- parse_region_(st, conv))).as_predicate(),
- 'p' => ty::Binder(parse_projection_predicate_(st, conv)).as_predicate(),
+ parse_region_(st, conv))).to_predicate(),
+ 'p' => ty::Binder(parse_projection_predicate_(st, conv)).to_predicate(),
c => panic!("Encountered invalid character in metadata: {}", c)
}
}
assert_eq!(next(st), '|');
let index = parse_u32(st);
assert_eq!(next(st), '|');
+ let default_def_id = parse_def_(st, NominalType, conv);
let default = parse_opt(st, |st| parse_ty_(st, conv));
let object_lifetime_default = parse_object_lifetime_default(st, conv);
def_id: def_id,
space: space,
index: index,
+ default_def_id: default_def_id,
default: default,
object_lifetime_default: object_lifetime_default,
}
}
}
- let region_bound_will_change = match next(st) {
- 'y' => true,
- 'n' => false,
- c => panic!("parse_ty: expected y/n not '{}'", c)
- };
-
return ty::ExistentialBounds { region_bound: region_bound,
builtin_bounds: builtin_bounds,
- projection_bounds: projection_bounds,
- region_bound_will_change: region_bound_will_change };
+ projection_bounds: projection_bounds };
}
fn parse_builtin_bounds<F>(st: &mut PState, mut _conv: F) -> ty::BuiltinBounds where
use syntax::abi::Abi;
use syntax::ast;
use syntax::diagnostic::SpanHandler;
-use syntax::parse::token;
use rbml::writer::Encoder;
cx.diag.handler().bug("cannot encode inference variable types");
}
ty::TyParam(ParamTy {space, idx, name}) => {
- mywrite!(w, "p[{}|{}|{}]", idx, space.to_uint(), token::get_name(name))
+ mywrite!(w, "p[{}|{}|{}]", idx, space.to_uint(), name)
}
ty::TyStruct(def, substs) => {
mywrite!(w, "a[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
- ty::TyClosure(def, substs) => {
+ ty::TyClosure(def, ref substs) => {
mywrite!(w, "k[{}|", (cx.ds)(def));
- enc_substs(w, cx, substs);
+ enc_substs(w, cx, &substs.func_substs);
+ for ty in &substs.upvar_tys {
+ enc_ty(w, cx, ty);
+ }
+ mywrite!(w, ".");
mywrite!(w, "]");
}
ty::TyProjection(ref data) => {
mywrite!(w, "P[");
enc_trait_ref(w, cx, data.trait_ref);
- mywrite!(w, "{}]", token::get_name(data.item_name));
+ mywrite!(w, "{}]", data.item_name);
}
ty::TyError => {
mywrite!(w, "e");
}
fn enc_mt<'a, 'tcx>(w: &mut Encoder, cx: &ctxt<'a, 'tcx>,
- mt: ty::mt<'tcx>) {
+ mt: ty::TypeAndMut<'tcx>) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
data.param_id,
data.space.to_uint(),
data.index,
- token::get_name(data.name));
+ data.name);
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[");
ty::BrNamed(d, name) => {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
- token::get_name(name));
+ name);
}
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
mywrite!(w, ".");
-
- mywrite!(w, "{}", if bs.region_bound_will_change {'y'} else {'n'});
}
pub fn enc_region_bounds<'a, 'tcx>(w: &mut Encoder,
pub fn enc_type_param_def<'a, 'tcx>(w: &mut Encoder, cx: &ctxt<'a, 'tcx>,
v: &ty::TypeParameterDef<'tcx>) {
- mywrite!(w, "{}:{}|{}|{}|",
- token::get_name(v.name), (cx.ds)(v.def_id),
- v.space.to_uint(), v.index);
+ mywrite!(w, "{}:{}|{}|{}|{}|",
+ v.name, (cx.ds)(v.def_id),
+ v.space.to_uint(), v.index, (cx.ds)(v.default_def_id));
enc_opt(w, v.default, |w, t| enc_ty(w, cx, t));
enc_object_lifetime_default(w, cx, v.object_lifetime_default);
}
cx: &ctxt<'a, 'tcx>,
data: &ty::ProjectionPredicate<'tcx>) {
enc_trait_ref(w, cx, data.projection_ty.trait_ref);
- mywrite!(w, "{}|", token::get_name(data.projection_ty.item_name));
+ mywrite!(w, "{}|", data.projection_ty.item_name);
enc_ty(w, cx, data.ty);
}
match nty {
ast::TyBool => tcx.types.bool,
ast::TyChar => tcx.types.char,
- ast::TyInt(it) => ty::mk_mach_int(tcx, it),
- ast::TyUint(uit) => ty::mk_mach_uint(tcx, uit),
- ast::TyFloat(ft) => ty::mk_mach_float(tcx, ft),
- ast::TyStr => ty::mk_str(tcx)
+ ast::TyInt(it) => tcx.mk_mach_int(it),
+ ast::TyUint(uit) => tcx.mk_mach_uint(uit),
+ ast::TyFloat(ft) => tcx.mk_mach_float(ft),
+ ast::TyStr => tcx.mk_str()
}
}
use metadata::tyencode;
use middle::cast;
use middle::check_const::ConstQualif;
-use middle::mem_categorization::Typer;
use middle::privacy::{AllPublic, LastMod};
use middle::subst;
use middle::subst::VecPerParamSpace;
-use middle::ty::{self, Ty, MethodCall, MethodCallee, MethodOrigin};
+use middle::ty::{self, Ty};
use syntax::{ast, ast_util, codemap, fold};
use syntax::codemap::Span;
use syntax::fold::Folder;
-use syntax::parse::token;
use syntax::ptr::P;
use syntax;
ast::IITraitItem(_, ref ti) => ti.ident,
ast::IIImplItem(_, ref ii) => ii.ident
};
- debug!("Fn named: {}", token::get_ident(ident));
+ debug!("Fn named: {}", ident);
debug!("< Decoded inlined fn: {}::{}",
path_as_str.unwrap(),
- token::get_ident(ident));
+ ident);
region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii);
decode_side_tables(dcx, ast_doc);
match *ii {
fn tr(&self, dcx: &DecodeContext) -> def::Def {
match *self {
def::DefFn(did, is_ctor) => def::DefFn(did.tr(dcx), is_ctor),
- def::DefMethod(did, p) => {
- def::DefMethod(did.tr(dcx), p.map(|did2| did2.tr(dcx)))
- }
+ def::DefMethod(did) => def::DefMethod(did.tr(dcx)),
def::DefSelfTy(opt_did, impl_ids) => { def::DefSelfTy(opt_did.map(|did| did.tr(dcx)),
impl_ids.map(|(nid1, nid2)| {
(dcx.tr_id(nid1),
def::DefForeignMod(did) => { def::DefForeignMod(did.tr(dcx)) }
def::DefStatic(did, m) => { def::DefStatic(did.tr(dcx), m) }
def::DefConst(did) => { def::DefConst(did.tr(dcx)) }
- def::DefAssociatedConst(did, p) => {
- def::DefAssociatedConst(did.tr(dcx), p.map(|did2| did2.tr(dcx)))
- }
+ def::DefAssociatedConst(did) => def::DefAssociatedConst(did.tr(dcx)),
def::DefLocal(nid) => { def::DefLocal(dcx.tr_id(nid)) }
def::DefVariant(e_did, v_did, is_s) => {
def::DefVariant(e_did.tr(dcx), v_did.tr(dcx), is_s)
trait read_method_callee_helper<'tcx> {
fn read_method_callee<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
- -> (u32, MethodCallee<'tcx>);
+ -> (u32, ty::MethodCallee<'tcx>);
}
fn encode_method_callee<'a, 'tcx>(ecx: &e::EncodeContext<'a, 'tcx>,
rbml_w: &mut Encoder,
autoderef: u32,
- method: &MethodCallee<'tcx>) {
+ method: &ty::MethodCallee<'tcx>) {
use serialize::Encoder;
rbml_w.emit_struct("MethodCallee", 4, |rbml_w| {
rbml_w.emit_struct_field("autoderef", 0, |rbml_w| {
autoderef.encode(rbml_w)
});
- rbml_w.emit_struct_field("origin", 1, |rbml_w| {
- Ok(rbml_w.emit_method_origin(ecx, &method.origin))
+ rbml_w.emit_struct_field("def_id", 1, |rbml_w| {
+ Ok(rbml_w.emit_def_id(method.def_id))
});
rbml_w.emit_struct_field("ty", 2, |rbml_w| {
Ok(rbml_w.emit_ty(ecx, method.ty))
impl<'a, 'tcx> read_method_callee_helper<'tcx> for reader::Decoder<'a> {
fn read_method_callee<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>)
- -> (u32, MethodCallee<'tcx>) {
+ -> (u32, ty::MethodCallee<'tcx>) {
self.read_struct("MethodCallee", 4, |this| {
- let autoderef = this.read_struct_field("autoderef", 0, |this| {
- Decodable::decode(this)
- }).unwrap();
- Ok((autoderef, MethodCallee {
- origin: this.read_struct_field("origin", 1, |this| {
- Ok(this.read_method_origin(dcx))
+ let autoderef = this.read_struct_field("autoderef", 0,
+ Decodable::decode).unwrap();
+ Ok((autoderef, ty::MethodCallee {
+ def_id: this.read_struct_field("def_id", 1, |this| {
+ Ok(this.read_def_id(dcx))
}).unwrap(),
ty: this.read_struct_field("ty", 2, |this| {
Ok(this.read_ty(dcx))
}).unwrap(),
substs: this.read_struct_field("substs", 3, |this| {
- Ok(this.read_substs(dcx))
+ Ok(dcx.tcx.mk_substs(this.read_substs(dcx)))
}).unwrap()
}))
}).unwrap()
trait rbml_writer_helpers<'tcx> {
fn emit_closure_type<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>,
closure_type: &ty::ClosureTy<'tcx>);
- fn emit_method_origin<'a>(&mut self,
- ecx: &e::EncodeContext<'a, 'tcx>,
- method_origin: &ty::MethodOrigin<'tcx>);
fn emit_ty<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, ty: Ty<'tcx>);
fn emit_tys<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, tys: &[Ty<'tcx>]);
fn emit_type_param_def<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>,
});
}
- fn emit_method_origin<'b>(&mut self,
- ecx: &e::EncodeContext<'b, 'tcx>,
- method_origin: &ty::MethodOrigin<'tcx>)
- {
- use serialize::Encoder;
-
- self.emit_enum("MethodOrigin", |this| {
- match *method_origin {
- ty::MethodStatic(def_id) => {
- this.emit_enum_variant("MethodStatic", 0, 1, |this| {
- Ok(this.emit_def_id(def_id))
- })
- }
-
- ty::MethodStaticClosure(def_id) => {
- this.emit_enum_variant("MethodStaticClosure", 1, 1, |this| {
- Ok(this.emit_def_id(def_id))
- })
- }
-
- ty::MethodTypeParam(ref p) => {
- this.emit_enum_variant("MethodTypeParam", 2, 1, |this| {
- this.emit_struct("MethodParam", 2, |this| {
- try!(this.emit_struct_field("trait_ref", 0, |this| {
- Ok(this.emit_trait_ref(ecx, &p.trait_ref))
- }));
- try!(this.emit_struct_field("method_num", 0, |this| {
- this.emit_uint(p.method_num)
- }));
- try!(this.emit_struct_field("impl_def_id", 0, |this| {
- this.emit_option(|this| {
- match p.impl_def_id {
- None => this.emit_option_none(),
- Some(did) => this.emit_option_some(|this| {
- Ok(this.emit_def_id(did))
- })
- }
- })
- }));
- Ok(())
- })
- })
- }
-
- ty::MethodTraitObject(ref o) => {
- this.emit_enum_variant("MethodTraitObject", 3, 1, |this| {
- this.emit_struct("MethodObject", 2, |this| {
- try!(this.emit_struct_field("trait_ref", 0, |this| {
- Ok(this.emit_trait_ref(ecx, &o.trait_ref))
- }));
- try!(this.emit_struct_field("object_trait_id", 0, |this| {
- Ok(this.emit_def_id(o.object_trait_id))
- }));
- try!(this.emit_struct_field("method_num", 0, |this| {
- this.emit_uint(o.method_num)
- }));
- try!(this.emit_struct_field("vtable_index", 0, |this| {
- this.emit_uint(o.vtable_index)
- }));
- Ok(())
- })
- })
- }
- }
- });
- }
-
fn emit_ty<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, ty: Ty<'tcx>) {
self.emit_opaque(|this| Ok(e::write_type(ecx, this, ty)));
}
})
}
- if let Some(item_substs) = tcx.item_substs.borrow().get(&id) {
+ if let Some(item_substs) = tcx.tables.borrow().item_substs.get(&id) {
rbml_w.tag(c::tag_table_item_subst, |rbml_w| {
rbml_w.id(id);
rbml_w.emit_substs(ecx, &item_substs.substs);
var_id: var_id,
closure_expr_id: id
};
- let upvar_capture = tcx.upvar_capture_map.borrow().get(&upvar_id).unwrap().clone();
+ let upvar_capture = tcx.tables
+ .borrow()
+ .upvar_capture_map
+ .get(&upvar_id)
+ .unwrap()
+ .clone();
var_id.encode(rbml_w);
upvar_capture.encode(rbml_w);
})
})
}
- let method_call = MethodCall::expr(id);
- if let Some(method) = tcx.method_map.borrow().get(&method_call) {
+ let method_call = ty::MethodCall::expr(id);
+ if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) {
rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
encode_method_callee(ecx, rbml_w, method_call.autoderef, method)
})
}
- if let Some(adjustment) = tcx.adjustments.borrow().get(&id) {
+ if let Some(adjustment) = tcx.tables.borrow().adjustments.get(&id) {
match *adjustment {
ty::AdjustDerefRef(ref adj) => {
for autoderef in 0..adj.autoderefs {
- let method_call = MethodCall::autoderef(id, autoderef as u32);
- if let Some(method) = tcx.method_map.borrow().get(&method_call) {
+ let method_call = ty::MethodCall::autoderef(id, autoderef as u32);
+ if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) {
rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
encode_method_callee(ecx, rbml_w,
})
}
- if let Some(closure_type) = tcx.closure_tys.borrow().get(&ast_util::local_def(id)) {
+ if let Some(closure_type) = tcx.tables.borrow().closure_tys.get(&ast_util::local_def(id)) {
rbml_w.tag(c::tag_table_closure_tys, |rbml_w| {
rbml_w.id(id);
rbml_w.emit_closure_type(ecx, closure_type);
})
}
- if let Some(closure_kind) = tcx.closure_kinds.borrow().get(&ast_util::local_def(id)) {
+ if let Some(closure_kind) = tcx.tables.borrow().closure_kinds.get(&ast_util::local_def(id)) {
rbml_w.tag(c::tag_table_closure_kinds, |rbml_w| {
rbml_w.id(id);
encode_closure_kind(rbml_w, *closure_kind)
}
trait rbml_decoder_decoder_helpers<'tcx> {
- fn read_method_origin<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
- -> ty::MethodOrigin<'tcx>;
fn read_ty<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Ty<'tcx>;
fn read_tys<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Vec<Ty<'tcx>>;
fn read_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
}).unwrap()
}
- fn read_method_origin<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>)
- -> ty::MethodOrigin<'tcx>
- {
- self.read_enum("MethodOrigin", |this| {
- let variants = &["MethodStatic", "MethodStaticClosure",
- "MethodTypeParam", "MethodTraitObject"];
- this.read_enum_variant(variants, |this, i| {
- Ok(match i {
- 0 => {
- let def_id = this.read_def_id(dcx);
- ty::MethodStatic(def_id)
- }
-
- 1 => {
- let def_id = this.read_def_id(dcx);
- ty::MethodStaticClosure(def_id)
- }
-
- 2 => {
- this.read_struct("MethodTypeParam", 2, |this| {
- Ok(ty::MethodTypeParam(
- ty::MethodParam {
- trait_ref: {
- this.read_struct_field("trait_ref", 0, |this| {
- Ok(this.read_trait_ref(dcx))
- }).unwrap()
- },
- method_num: {
- this.read_struct_field("method_num", 1, |this| {
- this.read_uint()
- }).unwrap()
- },
- impl_def_id: {
- this.read_struct_field("impl_def_id", 2, |this| {
- this.read_option(|this, b| {
- if b {
- Ok(Some(this.read_def_id(dcx)))
- } else {
- Ok(None)
- }
- })
- }).unwrap()
- }
- }))
- }).unwrap()
- }
-
- 3 => {
- this.read_struct("MethodTraitObject", 2, |this| {
- Ok(ty::MethodTraitObject(
- ty::MethodObject {
- trait_ref: {
- this.read_struct_field("trait_ref", 0, |this| {
- Ok(this.read_trait_ref(dcx))
- }).unwrap()
- },
- object_trait_id: {
- this.read_struct_field("object_trait_id", 1, |this| {
- Ok(this.read_def_id(dcx))
- }).unwrap()
- },
- method_num: {
- this.read_struct_field("method_num", 2, |this| {
- this.read_uint()
- }).unwrap()
- },
- vtable_index: {
- this.read_struct_field("vtable_index", 3, |this| {
- this.read_uint()
- }).unwrap()
- },
- }))
- }).unwrap()
- }
-
- _ => panic!("..")
- })
- })
- }).unwrap()
- }
-
-
fn read_ty<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) -> Ty<'tcx> {
// Note: regions types embed local node ids. In principle, we
// should translate these node ids into the new decode
let item_substs = ty::ItemSubsts {
substs: val_dsr.read_substs(dcx)
};
- dcx.tcx.item_substs.borrow_mut().insert(
+ dcx.tcx.tables.borrow_mut().item_substs.insert(
id, item_substs);
}
c::tag_table_freevars => {
closure_expr_id: id
};
let ub: ty::UpvarCapture = Decodable::decode(val_dsr).unwrap();
- dcx.tcx.upvar_capture_map.borrow_mut().insert(upvar_id, ub.tr(dcx));
+ dcx.tcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, ub.tr(dcx));
}
c::tag_table_tcache => {
let type_scheme = val_dsr.read_type_scheme(dcx);
let lid = ast::DefId { krate: ast::LOCAL_CRATE, node: id };
- dcx.tcx.tcache.borrow_mut().insert(lid, type_scheme);
+ dcx.tcx.register_item_type(lid, type_scheme);
}
c::tag_table_param_defs => {
let bounds = val_dsr.read_type_param_def(dcx);
}
c::tag_table_method_map => {
let (autoderef, method) = val_dsr.read_method_callee(dcx);
- let method_call = MethodCall {
+ let method_call = ty::MethodCall {
expr_id: id,
autoderef: autoderef
};
- dcx.tcx.method_map.borrow_mut().insert(method_call, method);
+ dcx.tcx.tables.borrow_mut().method_map.insert(method_call, method);
}
c::tag_table_adjustments => {
let adj: ty::AutoAdjustment = val_dsr.read_auto_adjustment(dcx);
- dcx.tcx.adjustments.borrow_mut().insert(id, adj);
+ dcx.tcx.tables.borrow_mut().adjustments.insert(id, adj);
}
c::tag_table_closure_tys => {
let closure_ty =
val_dsr.read_closure_ty(dcx);
- dcx.tcx.closure_tys.borrow_mut().insert(ast_util::local_def(id),
+ dcx.tcx.tables.borrow_mut().closure_tys.insert(ast_util::local_def(id),
closure_ty);
}
c::tag_table_closure_kinds => {
let closure_kind =
val_dsr.read_closure_kind(dcx);
- dcx.tcx.closure_kinds.borrow_mut().insert(ast_util::local_def(id),
+ dcx.tcx.tables.borrow_mut().closure_kinds.insert(ast_util::local_def(id),
closure_kind);
}
c::tag_table_cast_kinds => {
/// Function Pointers
FnPtr,
/// Raw pointers
- Ptr(&'tcx ty::mt<'tcx>),
+ Ptr(&'tcx ty::TypeAndMut<'tcx>),
/// References
- RPtr(&'tcx ty::mt<'tcx>),
+ RPtr(&'tcx ty::TypeAndMut<'tcx>),
}
/// Cast Kind. See RFC 401 (or librustc_typeck/check/cast.rs)
ty::TyInt(_) => Some(CastTy::Int(IntTy::I)),
ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))),
ty::TyFloat(_) => Some(CastTy::Float),
- ty::TyEnum(..) if ty::type_is_c_like_enum(
- tcx, t) => Some(CastTy::Int(IntTy::CEnum)),
+ ty::TyEnum(..) if t.is_c_like_enum(tcx) =>
+ Some(CastTy::Int(IntTy::CEnum)),
ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)),
ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)),
ty::TyBareFn(..) => Some(CastTy::FnPtr),
}
ast::ExprIndex(ref l, ref r) |
- ast::ExprBinary(_, ref l, ref r) if self.is_method_call(expr) => {
+ ast::ExprBinary(_, ref l, ref r) if self.tcx.is_method_call(expr.id) => {
self.call(expr, pred, &**l, Some(&**r).into_iter())
}
self.straightline(expr, pred, fields)
}
- ast::ExprUnary(_, ref e) if self.is_method_call(expr) => {
+ ast::ExprUnary(_, ref e) if self.tcx.is_method_call(expr.id) => {
self.call(expr, pred, &**e, None::<ast::Expr>.iter())
}
func_or_rcvr: &ast::Expr,
args: I) -> CFGIndex {
let method_call = ty::MethodCall::expr(call_expr.id);
- let return_ty = ty::ty_fn_ret(match self.tcx.method_map.borrow().get(&method_call) {
+ let fn_ty = match self.tcx.tables.borrow().method_map.get(&method_call) {
Some(method) => method.ty,
- None => ty::expr_ty_adjusted(self.tcx, func_or_rcvr)
- });
+ None => self.tcx.expr_ty_adjusted(func_or_rcvr)
+ };
let func_or_rcvr_exit = self.expr(func_or_rcvr, pred);
let ret = self.straightline(call_expr, func_or_rcvr_exit, args);
- if return_ty.diverges() {
+ if fn_ty.fn_ret().diverges() {
self.add_unreachable_node()
} else {
ret
}
}
}
-
- fn is_method_call(&self, expr: &ast::Expr) -> bool {
- let method_call = ty::MethodCall::expr(expr.id);
- self.tcx.method_map.borrow().contains_key(&method_call)
- }
}
use middle::cast::{CastKind};
use middle::const_eval;
+use middle::const_eval::EvalHint::ExprTypeChecked;
use middle::def;
use middle::expr_use_visitor as euv;
use middle::infer;
use syntax::visit::{self, Visitor};
use std::collections::hash_map::Entry;
+use std::cmp::Ordering;
// Const qualification, from partial to completely promotable.
bitflags! {
}
fn with_euv<'b, F, R>(&'b mut self, item_id: Option<ast::NodeId>, f: F) -> R where
- F: for<'t> FnOnce(&mut euv::ExprUseVisitor<'b, 't, 'tcx,
- ty::ParameterEnvironment<'a, 'tcx>>) -> R,
+ F: for<'t> FnOnce(&mut euv::ExprUseVisitor<'b, 't, 'b, 'tcx>) -> R,
{
let param_env = match item_id {
Some(item_id) => ty::ParameterEnvironment::for_item(self.tcx, item_id),
- None => ty::empty_parameter_environment(self.tcx)
+ None => self.tcx.empty_parameter_environment()
};
- f(&mut euv::ExprUseVisitor::new(self, ¶m_env))
+
+ let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env), false);
+
+ f(&mut euv::ExprUseVisitor::new(self, &infcx))
}
fn global_expr(&mut self, mode: Mode, expr: &ast::Expr) -> ConstQualif {
fn_like.id());
self.add_qualif(qualif);
- if ty::type_contents(self.tcx, ret_ty).interior_unsafe() {
+ if ret_ty.type_contents(self.tcx).interior_unsafe() {
self.add_qualif(ConstQualif::MUTABLE_MEM);
}
}
fn check_static_mut_type(&self, e: &ast::Expr) {
- let node_ty = ty::node_id_to_type(self.tcx, e.id);
- let tcontents = ty::type_contents(self.tcx, node_ty);
+ let node_ty = self.tcx.node_id_to_type(e.id);
+ let tcontents = node_ty.type_contents(self.tcx);
let suffix = if tcontents.has_dtor() {
"destructors"
}
fn check_static_type(&self, e: &ast::Expr) {
- let ty = ty::node_id_to_type(self.tcx, e.id);
- let infcx = infer::new_infer_ctxt(self.tcx);
- let mut fulfill_cx = traits::FulfillmentContext::new(false);
+ let ty = self.tcx.node_id_to_type(e.id);
+ let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None, false);
let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
fulfill_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause);
- let env = ty::empty_parameter_environment(self.tcx);
- match fulfill_cx.select_all_or_error(&infcx, &env) {
+ match fulfill_cx.select_all_or_error(&infcx) {
Ok(()) => { },
Err(ref errors) => {
traits::report_fulfillment_errors(&infcx, errors);
ast::PatRange(ref start, ref end) => {
self.global_expr(Mode::Const, &**start);
self.global_expr(Mode::Const, &**end);
+
+ match const_eval::compare_lit_exprs(self.tcx, start, end) {
+ Some(Ordering::Less) |
+ Some(Ordering::Equal) => {}
+ Some(Ordering::Greater) => {
+ span_err!(self.tcx.sess, start.span, E0030,
+ "lower range bound must be less than or equal to upper");
+ }
+ None => {
+ self.tcx.sess.span_bug(
+ start.span, "literals of different types in range pat");
+ }
+ }
}
_ => visit::walk_pat(self, p)
}
let mut outer = self.qualif;
self.qualif = ConstQualif::empty();
- let node_ty = ty::node_id_to_type(self.tcx, ex.id);
+ let node_ty = self.tcx.node_id_to_type(ex.id);
check_expr(self, ex, node_ty);
+ check_adjustments(self, ex);
// Special-case some expressions to avoid certain flags bubbling up.
match ex.node {
match node_ty.sty {
ty::TyUint(_) | ty::TyInt(_) if div_or_rem => {
if !self.qualif.intersects(ConstQualif::NOT_CONST) {
- match const_eval::eval_const_expr_partial(self.tcx, ex, None) {
+ match const_eval::eval_const_expr_partial(
+ self.tcx, ex, ExprTypeChecked) {
Ok(_) => {}
Err(msg) => {
span_err!(self.tcx.sess, msg.span, E0020,
// initializer values (very bad).
// If the type doesn't have interior mutability, then `ConstQualif::MUTABLE_MEM` has
// propagated from another error, so erroring again would be just noise.
- let tc = ty::type_contents(self.tcx, node_ty);
+ let tc = node_ty.type_contents(self.tcx);
if self.qualif.intersects(ConstQualif::MUTABLE_MEM) && tc.interior_unsafe() {
outer = outer | ConstQualif::NOT_CONST;
if self.mode != Mode::Var {
e: &ast::Expr, node_ty: Ty<'tcx>) {
match node_ty.sty {
ty::TyStruct(did, _) |
- ty::TyEnum(did, _) if ty::has_dtor(v.tcx, did) => {
+ ty::TyEnum(did, _) if v.tcx.has_dtor(did) => {
v.add_qualif(ConstQualif::NEEDS_DROP);
if v.mode != Mode::Var {
v.tcx.sess.span_err(e.span,
match e.node {
ast::ExprUnary(..) |
ast::ExprBinary(..) |
- ast::ExprIndex(..) if v.tcx.method_map.borrow().contains_key(&method_call) => {
+ ast::ExprIndex(..) if v.tcx.tables.borrow().method_map.contains_key(&method_call) => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0011,
}
}
ast::ExprUnary(op, ref inner) => {
- match ty::node_id_to_type(v.tcx, inner.id).sty {
+ match v.tcx.node_id_to_type(inner.id).sty {
ty::TyRawPtr(_) => {
assert!(op == ast::UnDeref);
}
}
ast::ExprBinary(op, ref lhs, _) => {
- match ty::node_id_to_type(v.tcx, lhs.id).sty {
+ match v.tcx.node_id_to_type(lhs.id).sty {
ty::TyRawPtr(_) => {
assert!(op.node == ast::BiEq || op.node == ast::BiNe ||
op.node == ast::BiLe || op.node == ast::BiLt ||
}
}
Some(def::DefConst(did)) |
- Some(def::DefAssociatedConst(did, _)) => {
+ Some(def::DefAssociatedConst(did)) => {
if let Some(expr) = const_eval::lookup_const_by_id(v.tcx, did,
Some(e.id)) {
let inner = v.global_expr(Mode::Const, expr);
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
true
}
- Some(def::DefMethod(did, def::FromImpl(_))) |
Some(def::DefFn(did, _)) => {
v.handle_const_fn_call(e, did, node_ty)
}
+ Some(def::DefMethod(did)) => {
+ match v.tcx.impl_or_trait_item(did).container() {
+ ty::ImplContainer(_) => {
+ v.handle_const_fn_call(e, did, node_ty)
+ }
+ ty::TraitContainer(_) => false
+ }
+ }
_ => false
};
if !is_const {
}
}
ast::ExprMethodCall(..) => {
- let method_did = match v.tcx.method_map.borrow()[&method_call].origin {
- ty::MethodStatic(did) => Some(did),
- _ => None
- };
- let is_const = match method_did {
- Some(did) => v.handle_const_fn_call(e, did, node_ty),
- None => false
+ let method = v.tcx.tables.borrow().method_map[&method_call];
+ let is_const = match v.tcx.impl_or_trait_item(method.def_id).container() {
+ ty::ImplContainer(_) => v.handle_const_fn_call(e, method.def_id, node_ty),
+ ty::TraitContainer(_) => false
};
if !is_const {
v.add_qualif(ConstQualif::NOT_CONST);
ast::ExprClosure(..) => {
// Paths in constant contexts cannot refer to local variables,
// as there are none, and thus closures can't have upvars there.
- if ty::with_freevars(v.tcx, e.id, |fv| !fv.is_empty()) {
+ if v.tcx.with_freevars(e.id, |fv| !fv.is_empty()) {
assert!(v.mode == Mode::Var,
"global closures can't capture anything");
v.add_qualif(ConstQualif::NOT_CONST);
}
}
+/// Check the adjustments of an expression
+fn check_adjustments<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &ast::Expr) {
+ match v.tcx.tables.borrow().adjustments.get(&e.id) {
+ None | Some(&ty::AdjustReifyFnPointer) | Some(&ty::AdjustUnsafeFnPointer) => {}
+ Some(&ty::AdjustDerefRef(ty::AutoDerefRef { autoderefs, .. })) => {
+ if (0..autoderefs as u32).any(|autoderef| {
+ v.tcx.is_overloaded_autoderef(e.id, autoderef)
+ }) {
+ v.add_qualif(ConstQualif::NOT_CONST);
+ if v.mode != Mode::Var {
+ span_err!(v.tcx.sess, e.span, E0400,
+ "user-defined dereference operators are not allowed in {}s",
+ v.msg());
+ }
+ }
+ }
+ }
+}
+
pub fn check_crate(tcx: &ty::ctxt) {
visit::walk_crate(&mut CheckCrateVisitor {
tcx: tcx,
use middle::const_eval::{compare_const_vals, ConstVal};
use middle::const_eval::{eval_const_expr, eval_const_expr_partial};
use middle::const_eval::{const_expr_to_pat, lookup_const_by_id};
+use middle::const_eval::EvalHint::ExprTypeChecked;
use middle::def::*;
use middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor, Init};
use middle::expr_use_visitor::{JustWrite, LoanCause, MutateMode};
use middle::expr_use_visitor::WriteAndRead;
use middle::expr_use_visitor as euv;
-use middle::mem_categorization::cmt;
+use middle::infer;
+use middle::mem_categorization::{cmt};
use middle::pat_util::*;
use middle::ty::*;
use middle::ty;
use syntax::codemap::{Span, Spanned, DUMMY_SP};
use syntax::fold::{Folder, noop_fold_pat};
use syntax::print::pprust::pat_to_string;
-use syntax::parse::token;
use syntax::ptr::P;
use syntax::visit::{self, Visitor, FnKind};
use util::nodemap::FnvHashMap;
}
}
+//NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv
pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
pub tcx: &'a ty::ctxt<'tcx>,
pub param_env: ParameterEnvironment<'a, 'tcx>,
pub fn check_crate(tcx: &ty::ctxt) {
visit::walk_crate(&mut MatchCheckCtxt {
tcx: tcx,
- param_env: ty::empty_parameter_environment(tcx),
+ param_env: tcx.empty_parameter_environment(),
}, tcx.map.krate());
tcx.sess.abort_if_errors();
}
// Finally, check if the whole match expression is exhaustive.
// Check for empty enum, because is_useful only works on inhabited types.
- let pat_ty = node_id_to_type(cx.tcx, scrut.id);
+ let pat_ty = cx.tcx.node_id_to_type(scrut.id);
if inlined_arms.is_empty() {
- if !type_is_empty(cx.tcx, pat_ty) {
+ if !pat_ty.is_empty(cx.tcx) {
// We know the type is inhabited, so this must be wrong
span_err!(cx.tcx.sess, ex.span, E0002,
"non-exhaustive patterns: type {} is non-empty",
ast_util::walk_pat(pat, |p| {
match p.node {
ast::PatIdent(ast::BindByValue(ast::MutImmutable), ident, None) => {
- let pat_ty = ty::pat_ty(cx.tcx, p);
+ let pat_ty = cx.tcx.pat_ty(p);
if let ty::TyEnum(def_id, _) = pat_ty.sty {
let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
if let Some(DefLocal(_)) = def {
- if ty::enum_variants(cx.tcx, def_id).iter().any(|variant|
- token::get_name(variant.name) == token::get_name(ident.node.name)
+ if cx.tcx.enum_variants(def_id).iter().any(|variant|
+ variant.name == ident.node.name
&& variant.args.is_empty()
) {
span_warn!(cx.tcx.sess, p.span, E0170,
"pattern binding `{}` is named the same as one \
of the variants of the type `{}`",
- &token::get_ident(ident.node), pat_ty);
+ ident.node, pat_ty);
fileline_help!(cx.tcx.sess, p.span,
"if you meant to match on a variant, \
consider making the path in the pattern qualified: `{}::{}`",
- pat_ty, &token::get_ident(ident.node));
+ pat_ty, ident.node);
}
}
}
fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) {
ast_util::walk_pat(pat, |p| {
if let ast::PatLit(ref expr) = p.node {
- match eval_const_expr_partial(cx.tcx, &**expr, None) {
+ match eval_const_expr_partial(cx.tcx, &**expr, ExprTypeChecked) {
Ok(ConstVal::Float(f)) if f.is_nan() => {
span_warn!(cx.tcx.sess, p.span, E0003,
"unmatchable NaN in pattern, \
ast::PatIdent(..) | ast::PatEnum(..) | ast::PatQPath(..) => {
let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def());
match def {
- Some(DefAssociatedConst(did, _)) |
+ Some(DefAssociatedConst(did)) |
Some(DefConst(did)) => match lookup_const_by_id(self.tcx, did, Some(pat.id)) {
Some(const_expr) => {
const_expr_to_pat(self.tcx, const_expr, pat.span).map(|new_pat| {
ty::TyEnum(cid, _) | ty::TyStruct(cid, _) => {
let (vid, is_structure) = match ctor {
&Variant(vid) =>
- (vid, ty::enum_variant_with_id(cx.tcx, cid, vid).arg_names.is_some()),
+ (vid, cx.tcx.enum_variant_with_id(cid, vid).arg_names.is_some()),
_ =>
- (cid, !ty::is_tuple_struct(cx.tcx, cid))
+ (cid, !cx.tcx.is_tuple_struct(cid))
};
if is_structure {
- let fields = ty::lookup_struct_fields(cx.tcx, vid);
+ let fields = cx.tcx.lookup_struct_fields(vid);
let field_pats: Vec<_> = fields.into_iter()
.zip(pats)
.filter(|&(_, ref pat)| pat.node != ast::PatWild(ast::PatWildSingle))
}
}
- ty::TyRef(_, ty::mt { ty, mutbl }) => {
+ ty::TyRef(_, ty::TypeAndMut { ty, mutbl }) => {
match ty.sty {
ty::TyArray(_, n) => match ctor {
&Single => {
ty::TyBool =>
[true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(),
- ty::TyRef(_, ty::mt { ty, .. }) => match ty.sty {
+ ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
ty::TySlice(_) =>
range_inclusive(0, max_slice_length).map(|length| Slice(length)).collect(),
_ => vec!(Single)
},
ty::TyEnum(eid, _) =>
- ty::enum_variants(cx.tcx, eid)
+ cx.tcx.enum_variants(eid)
.iter()
.map(|va| Variant(va.id))
.collect(),
None => v[0]
};
let left_ty = if real_pat.id == DUMMY_NODE_ID {
- ty::mk_nil(cx.tcx)
+ cx.tcx.mk_nil()
} else {
- let left_ty = ty::pat_ty(cx.tcx, &*real_pat);
+ let left_ty = cx.tcx.pat_ty(&*real_pat);
match real_pat.node {
- ast::PatIdent(ast::BindByRef(..), _, _) => ty::deref(left_ty, false).unwrap().ty,
+ ast::PatIdent(ast::BindByRef(..), _, _) => {
+ left_ty.builtin_deref(false).unwrap().ty
+ }
_ => left_ty,
}
};
Some(constructor) => {
let matrix = rows.iter().filter_map(|r| {
if pat_is_binding_or_wild(&cx.tcx.def_map, raw_pat(r[0])) {
- Some(r.tail().to_vec())
+ Some(r[1..].to_vec())
} else {
None
}
}).collect();
- match is_useful(cx, &matrix, v.tail(), witness) {
+ match is_useful(cx, &matrix, &v[1..], witness) {
UsefulWithWitness(pats) => {
let arity = constructor_arity(cx, &constructor, left_ty);
- let wild_pats: Vec<_> = repeat(DUMMY_WILD_PAT).take(arity).collect();
+ let wild_pats = vec![DUMMY_WILD_PAT; arity];
let enum_pat = construct_witness(cx, &constructor, wild_pats, left_ty);
let mut new_pats = vec![enum_pat];
new_pats.extend(pats);
match ty.sty {
ty::TyTuple(ref fs) => fs.len(),
ty::TyBox(_) => 1,
- ty::TyRef(_, ty::mt { ty, .. }) => match ty.sty {
+ ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty {
ty::TySlice(_) => match *ctor {
Slice(length) => length,
ConstantValue(_) => 0,
},
ty::TyEnum(eid, _) => {
match *ctor {
- Variant(id) => enum_variant_with_id(cx.tcx, eid, id).args.len(),
+ Variant(id) => cx.tcx.enum_variant_with_id(eid, id).args.len(),
_ => unreachable!()
}
}
- ty::TyStruct(cid, _) => ty::lookup_struct_fields(cx.tcx, cid).len(),
+ ty::TyStruct(cid, _) => cx.tcx.lookup_struct_fields(cid).len(),
ty::TyArray(_, n) => n,
_ => 0
}
} = raw_pat(r[col]);
let head: Option<Vec<&Pat>> = match *node {
ast::PatWild(_) =>
- Some(repeat(DUMMY_WILD_PAT).take(arity).collect()),
+ Some(vec![DUMMY_WILD_PAT; arity]),
ast::PatIdent(_, _, _) => {
let opt_def = cx.tcx.def_map.borrow().get(&pat_id).map(|d| d.full_def());
} else {
None
},
- _ => Some(repeat(DUMMY_WILD_PAT).take(arity).collect())
+ _ => Some(vec![DUMMY_WILD_PAT; arity])
}
}
DefVariant(..) | DefStruct(..) => {
Some(match args {
&Some(ref args) => args.iter().map(|p| &**p).collect(),
- &None => repeat(DUMMY_WILD_PAT).take(arity).collect(),
+ &None => vec![DUMMY_WILD_PAT; arity],
})
}
_ => None
},
_ => {
// Assume this is a struct.
- match ty::ty_to_def_id(node_id_to_type(cx.tcx, pat_id)) {
+ match cx.tcx.node_id_to_type(pat_id).ty_to_def_id() {
None => {
cx.tcx.sess.span_bug(pat_span,
"struct pattern wasn't of a \
}
};
class_id.map(|variant_id| {
- let struct_fields = ty::lookup_struct_fields(cx.tcx, variant_id);
+ let struct_fields = cx.tcx.lookup_struct_fields(variant_id);
let args = struct_fields.iter().map(|sf| {
match pattern_fields.iter().find(|f| f.node.ident.name == sf.name) {
Some(ref f) => &*f.node.pat,
fn check_local(cx: &mut MatchCheckCtxt, loc: &ast::Local) {
visit::walk_local(cx, loc);
- let name = match loc.source {
- ast::LocalLet => "local",
- ast::LocalFor => "`for` loop"
- };
-
- let mut static_inliner = StaticInliner::new(cx.tcx, None);
- is_refutable(cx, &*static_inliner.fold_pat(loc.pat.clone()), |pat| {
- span_err!(cx.tcx.sess, loc.pat.span, E0005,
- "refutable pattern in {} binding: `{}` not covered",
- name, pat_to_string(pat)
- );
- });
+ let pat = StaticInliner::new(cx.tcx, None).fold_pat(loc.pat.clone());
+ check_irrefutable(cx, &pat, false);
// Check legality of move bindings and `@` patterns.
check_legality_of_move_bindings(cx, false, slice::ref_slice(&loc.pat));
visit::walk_fn(cx, kind, decl, body, sp);
for input in &decl.inputs {
- is_refutable(cx, &*input.pat, |pat| {
- span_err!(cx.tcx.sess, input.pat.span, E0006,
- "refutable pattern in function argument: `{}` not covered",
- pat_to_string(pat)
- );
- });
+ check_irrefutable(cx, &input.pat, true);
check_legality_of_move_bindings(cx, false, slice::ref_slice(&input.pat));
check_legality_of_bindings_in_at_patterns(cx, &*input.pat);
}
}
+fn check_irrefutable(cx: &MatchCheckCtxt, pat: &Pat, is_fn_arg: bool) {
+ let origin = if is_fn_arg {
+ "function argument"
+ } else {
+ "local binding"
+ };
+
+ is_refutable(cx, pat, |uncovered_pat| {
+ span_err!(cx.tcx.sess, pat.span, E0005,
+ "refutable pattern in {}: `{}` not covered",
+ origin,
+ pat_to_string(uncovered_pat),
+ );
+ });
+}
+
fn is_refutable<A, F>(cx: &MatchCheckCtxt, pat: &Pat, refutable: F) -> Option<A> where
F: FnOnce(&Pat) -> A,
{
if pat_is_binding(def_map, &*p) {
match p.node {
ast::PatIdent(ast::BindByValue(_), _, ref sub) => {
- let pat_ty = ty::node_id_to_type(tcx, p.id);
- if ty::type_moves_by_default(&cx.param_env, pat.span, pat_ty) {
+ let pat_ty = tcx.node_id_to_type(p.id);
+ //FIXME: (@jroesch) this code should be floated up as well
+ let infcx = infer::new_infer_ctxt(cx.tcx,
+ &cx.tcx.tables,
+ Some(cx.param_env.clone()),
+ false);
+ if infcx.type_moves_by_default(pat_ty, pat.span) {
check_move(p, sub.as_ref().map(|p| &**p));
}
}
let mut checker = MutationChecker {
cx: cx,
};
- let mut visitor = ExprUseVisitor::new(&mut checker,
- &checker.cx.param_env);
+
+ let infcx = infer::new_infer_ctxt(cx.tcx,
+ &cx.tcx.tables,
+ Some(checker.cx.param_env.clone()),
+ false);
+
+ let mut visitor = ExprUseVisitor::new(&mut checker, &infcx);
visitor.walk_expr(guard);
}
// is the public starting point.
use middle::expr_use_visitor as euv;
+use middle::infer;
use middle::mem_categorization as mc;
use middle::ty::ParameterEnvironment;
use middle::ty;
s: Span,
fn_id: ast::NodeId) {
{
+ // FIXME (@jroesch) change this to be an inference context
let param_env = ParameterEnvironment::for_item(self.tcx, fn_id);
+ let infcx = infer::new_infer_ctxt(self.tcx,
+ &self.tcx.tables,
+ Some(param_env.clone()),
+ false);
let mut delegate = RvalueContextDelegate { tcx: self.tcx, param_env: ¶m_env };
- let mut euv = euv::ExprUseVisitor::new(&mut delegate, ¶m_env);
+ let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx);
euv.walk_fn(fd, b);
}
visit::walk_fn(self, fk, fd, b, s)
cmt: mc::cmt<'tcx>,
_: euv::ConsumeMode) {
debug!("consume; cmt: {:?}; type: {:?}", *cmt, cmt.ty);
- if !ty::type_is_sized(Some(self.param_env), self.tcx, span, cmt.ty) {
+ if !cmt.ty.is_sized(self.param_env, span) {
span_err!(self.tcx.sess, span, E0161,
"cannot move a value of type {0}: the size of {0} cannot be statically determined",
cmt.ty);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// This compiler pass detects static items that refer to themselves
+// This compiler pass detects constants that refer to themselves
// recursively.
use ast_map;
use session::Session;
-use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
+use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefVariant, DefMap};
+use util::nodemap::NodeMap;
use syntax::{ast, ast_util};
use syntax::codemap::Span;
+use syntax::feature_gate::emit_feature_err;
use syntax::visit::Visitor;
use syntax::visit;
+use std::cell::RefCell;
+
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
- ast_map: &'a ast_map::Map<'ast>
+ ast_map: &'a ast_map::Map<'ast>,
+ // `discriminant_map` is a cache that associates the `NodeId`s of local
+ // variant definitions with the discriminant expression that applies to
+ // each one. If the variant uses the default values (starting from `0`),
+ // then `None` is stored.
+ discriminant_map: RefCell<NodeMap<Option<&'ast ast::Expr>>>,
}
-impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
- fn visit_item(&mut self, it: &ast::Item) {
+impl<'a, 'ast: 'a> Visitor<'ast> for CheckCrateVisitor<'a, 'ast> {
+ fn visit_item(&mut self, it: &'ast ast::Item) {
match it.node {
- ast::ItemStatic(_, _, ref expr) |
- ast::ItemConst(_, ref expr) => {
+ ast::ItemStatic(..) |
+ ast::ItemConst(..) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
- visit::walk_expr(self, &*expr)
},
- _ => visit::walk_item(self, it)
+ ast::ItemEnum(ref enum_def, ref generics) => {
+ // We could process the whole enum, but handling the variants
+ // with discriminant expressions one by one gives more specific,
+ // less redundant output.
+ for variant in &enum_def.variants {
+ if let Some(_) = variant.node.disr_expr {
+ let mut recursion_visitor =
+ CheckItemRecursionVisitor::new(self, &variant.span);
+ recursion_visitor.populate_enum_discriminants(enum_def);
+ recursion_visitor.visit_variant(variant, generics);
+ }
+ }
+ }
+ _ => {}
}
+ visit::walk_item(self, it)
}
- fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
+ fn visit_trait_item(&mut self, ti: &'ast ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
- if let Some(ref expr) = *default {
+ if let Some(_) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
- visit::walk_expr(self, &*expr)
}
}
- _ => visit::walk_trait_item(self, ti)
+ _ => {}
}
+ visit::walk_trait_item(self, ti)
}
- fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'ast ast::ImplItem) {
match ii.node {
- ast::ConstImplItem(_, ref expr) => {
+ ast::ConstImplItem(..) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
- visit::walk_expr(self, &*expr)
}
- _ => visit::walk_impl_item(self, ii)
+ _ => {}
}
+ visit::walk_impl_item(self, ii)
}
}
pub fn check_crate<'ast>(sess: &Session,
- krate: &ast::Crate,
+ krate: &'ast ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
- ast_map: ast_map
+ ast_map: ast_map,
+ discriminant_map: RefCell::new(NodeMap()),
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
- idstack: Vec<ast::NodeId>
+ discriminant_map: &'a RefCell<NodeMap<Option<&'ast ast::Expr>>>,
+ idstack: Vec<ast::NodeId>,
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
- fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
+ fn new(v: &'a CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
- idstack: Vec::new()
+ discriminant_map: &v.discriminant_map,
+ idstack: Vec::new(),
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) {
- if self.idstack.iter().any(|x| x == &(id)) {
- span_err!(self.sess, *self.root_span, E0265, "recursive constant");
+ if self.idstack.iter().any(|&x| x == id) {
+ let any_static = self.idstack.iter().any(|&x| {
+ if let ast_map::NodeItem(item) = self.ast_map.get(x) {
+ if let ast::ItemStatic(..) = item.node {
+ true
+ } else {
+ false
+ }
+ } else {
+ false
+ }
+ });
+ if any_static {
+ if !self.sess.features.borrow().static_recursion {
+ emit_feature_err(&self.sess.parse_sess.span_diagnostic,
+ "static_recursion",
+ *self.root_span, "recursive static");
+ }
+ } else {
+ span_err!(self.sess, *self.root_span, E0265, "recursive constant");
+ }
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
}
+ // If a variant has an expression specifying its discriminant, then it needs
+ // to be checked just like a static or constant. However, if there are more
+ // variants with no explicitly specified discriminant, those variants will
+ // increment the same expression to get their values.
+ //
+ // So for every variant, we need to track whether there is an expression
+ // somewhere in the enum definition that controls its discriminant. We do
+ // this by starting from the end and searching backward.
+ fn populate_enum_discriminants(&self, enum_definition: &'ast ast::EnumDef) {
+ // Get the map, and return if we already processed this enum or if it
+ // has no variants.
+ let mut discriminant_map = self.discriminant_map.borrow_mut();
+ match enum_definition.variants.first() {
+ None => { return; }
+ Some(variant) if discriminant_map.contains_key(&variant.node.id) => {
+ return;
+ }
+ _ => {}
+ }
+
+ // Go through all the variants.
+ let mut variant_stack: Vec<ast::NodeId> = Vec::new();
+ for variant in enum_definition.variants.iter().rev() {
+ variant_stack.push(variant.node.id);
+ // When we find an expression, every variant currently on the stack
+ // is affected by that expression.
+ if let Some(ref expr) = variant.node.disr_expr {
+ for id in &variant_stack {
+ discriminant_map.insert(*id, Some(expr));
+ }
+ variant_stack.clear()
+ }
+ }
+ // If we are at the top, that always starts at 0, so any variant on the
+ // stack has a default value and does not need to be checked.
+ for id in &variant_stack {
+ discriminant_map.insert(*id, None);
+ }
+ }
}
-impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
- fn visit_item(&mut self, it: &ast::Item) {
+impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> {
+ fn visit_item(&mut self, it: &'ast ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
- fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
+ fn visit_enum_def(&mut self, enum_definition: &'ast ast::EnumDef,
+ generics: &'ast ast::Generics) {
+ self.populate_enum_discriminants(enum_definition);
+ visit::walk_enum_def(self, enum_definition, generics);
+ }
+
+ fn visit_variant(&mut self, variant: &'ast ast::Variant,
+ _: &'ast ast::Generics) {
+ let variant_id = variant.node.id;
+ let maybe_expr;
+ if let Some(get_expr) = self.discriminant_map.borrow().get(&variant_id) {
+ // This is necessary because we need to let the `discriminant_map`
+ // borrow fall out of scope, so that we can reborrow farther down.
+ maybe_expr = (*get_expr).clone();
+ } else {
+ self.sess.span_bug(variant.span,
+ "`check_static_recursion` attempted to visit \
+ variant with unknown discriminant")
+ }
+ // If `maybe_expr` is `None`, that's because no discriminant is
+ // specified that affects this variant. Thus, no risk of recursion.
+ if let Some(expr) = maybe_expr {
+ self.with_item_id_pushed(expr.id, |v| visit::walk_expr(v, expr));
+ }
+ }
+
+ fn visit_trait_item(&mut self, ti: &'ast ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
- fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'ast ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
- fn visit_expr(&mut self, e: &ast::Expr) {
+ fn visit_expr(&mut self, e: &'ast ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
- Some(DefAssociatedConst(def_id, _)) |
- Some(DefConst(def_id)) if
- ast_util::is_local(def_id) => {
+ Some(DefAssociatedConst(def_id)) |
+ Some(DefConst(def_id))
+ if ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
- span_err!(self.sess, e.span, E0266,
- "expected item, found {}",
- self.ast_map.node_to_string(def_id.node));
- return;
- },
+ self.sess.span_bug(
+ e.span,
+ &format!("expected item, found {}",
+ self.ast_map.node_to_string(def_id.node)));
+ }
+ }
+ }
+ // For variants, we only want to check expressions that
+ // affect the specific variant used, but we need to check
+ // the whole enum definition to see what expression that
+ // might be (if any).
+ Some(DefVariant(enum_id, variant_id, false))
+ if ast_util::is_local(enum_id) => {
+ if let ast::ItemEnum(ref enum_def, ref generics) =
+ self.ast_map.expect_item(enum_id.local_id()).node {
+ self.populate_enum_discriminants(enum_def);
+ let variant = self.ast_map.expect_variant(variant_id.local_id());
+ self.visit_variant(variant, generics);
+ } else {
+ self.sess.span_bug(e.span,
+ "`check_static_recursion` found \
+ non-enum in DefVariant");
}
}
_ => ()
// except according to those terms.
#![allow(non_camel_case_types)]
-#![allow(unsigned_negation)]
use self::ConstVal::*;
-
use self::ErrKind::*;
+use self::EvalHint::*;
use ast_map;
use ast_map::blocks::FnLikeNode;
use syntax::ast::{self, Expr};
use syntax::ast_util;
use syntax::codemap::Span;
-use syntax::feature_gate;
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::{codemap, visit};
let opt_def = tcx.def_map.borrow().get(&e.id).map(|d| d.full_def());
match opt_def {
Some(def::DefConst(def_id)) |
- Some(def::DefAssociatedConst(def_id, _)) => {
+ Some(def::DefAssociatedConst(def_id)) => {
lookup_const_by_id(tcx, def_id, Some(e.id))
}
Some(def::DefVariant(enum_def, variant_def, _)) => {
// `resolve_trait_associated_const` will select an impl
// or the default.
Some(ref_id) => {
- let trait_id = ty::trait_of_item(tcx, def_id)
+ let trait_id = tcx.trait_of_item(def_id)
.unwrap();
- let substs = ty::node_id_item_substs(tcx, ref_id)
+ let substs = tcx.node_id_item_substs(ref_id)
.substs;
resolve_trait_associated_const(tcx, ti, trait_id,
substs)
// a trait-associated const if the caller gives us
// the expression that refers to it.
Some(ref_id) => {
- let substs = ty::node_id_item_substs(tcx, ref_id)
+ let substs = tcx.node_id_item_substs(ref_id)
.substs;
resolve_trait_associated_const(tcx, ti, trait_id,
substs).map(|e| e.id)
Tuple(ast::NodeId),
}
+impl ConstVal {
+ pub fn description(&self) -> &'static str {
+ match *self {
+ Float(_) => "float",
+ Int(i) if i < 0 => "negative integer",
+ Int(_) => "positive integer",
+ Uint(_) => "unsigned integer",
+ Str(_) => "string literal",
+ Binary(_) => "binary array",
+ Bool(_) => "boolean",
+ Struct(_) => "struct",
+ Tuple(_) => "tuple",
+ }
+ }
+}
+
pub fn const_expr_to_pat(tcx: &ty::ctxt, expr: &Expr, span: Span) -> P<ast::Pat> {
let pat = match expr.node {
ast::ExprTup(ref exprs) =>
}
pub fn eval_const_expr(tcx: &ty::ctxt, e: &Expr) -> ConstVal {
- match eval_const_expr_partial(tcx, e, None) {
+ match eval_const_expr_partial(tcx, e, ExprTypeChecked) {
Ok(r) => r,
Err(s) => tcx.sess.span_fatal(s.span, &s.description())
}
InvalidOpForFloats(ast::BinOp_),
InvalidOpForIntUint(ast::BinOp_),
InvalidOpForUintInt(ast::BinOp_),
- NegateOnString,
- NegateOnBoolean,
- NegateOnBinary,
- NegateOnStruct,
- NegateOnTuple,
- NotOnFloat,
- NotOnString,
- NotOnBinary,
- NotOnStruct,
- NotOnTuple,
+ NegateOn(ConstVal),
+ NotOn(ConstVal),
NegateWithOverflow(i64),
AddiWithOverflow(i64, i64),
InvalidOpForFloats(_) => "can't do this op on floats".into_cow(),
InvalidOpForIntUint(..) => "can't do this op on an isize and usize".into_cow(),
InvalidOpForUintInt(..) => "can't do this op on a usize and isize".into_cow(),
- NegateOnString => "negate on string".into_cow(),
- NegateOnBoolean => "negate on boolean".into_cow(),
- NegateOnBinary => "negate on binary literal".into_cow(),
- NegateOnStruct => "negate on struct".into_cow(),
- NegateOnTuple => "negate on tuple".into_cow(),
- NotOnFloat => "not on float or string".into_cow(),
- NotOnString => "not on float or string".into_cow(),
- NotOnBinary => "not on binary literal".into_cow(),
- NotOnStruct => "not on struct".into_cow(),
- NotOnTuple => "not on tuple".into_cow(),
+ NegateOn(ref const_val) => format!("negate on {}", const_val.description()).into_cow(),
+ NotOn(ref const_val) => format!("not on {}", const_val.description()).into_cow(),
NegateWithOverflow(..) => "attempted to negate with overflow".into_cow(),
AddiWithOverflow(..) => "attempted to add with overflow".into_cow(),
pub type EvalResult = Result<ConstVal, ConstEvalErr>;
pub type CastResult = Result<ConstVal, ErrKind>;
+// FIXME: Long-term, this enum should go away: trying to evaluate
+// an expression which hasn't been type-checked is a recipe for
+// disaster. That said, it's not clear how to fix ast_ty_to_ty
+// to avoid the ordering issue.
+
+/// Hint to determine how to evaluate constant expressions which
+/// might not be type-checked.
+#[derive(Copy, Clone, Debug)]
+pub enum EvalHint<'tcx> {
+ /// We have a type-checked expression.
+ ExprTypeChecked,
+ /// We have an expression which hasn't been type-checked, but we have
+ /// an idea of what the type will be because of the context. For example,
+ /// the length of an array is always `usize`. (This is referred to as
+ /// a hint because it isn't guaranteed to be consistent with what
+ /// type-checking would compute.)
+ UncheckedExprHint(Ty<'tcx>),
+ /// We have an expression which has not yet been type-checked, and
+ /// we have no clue what the type will be.
+ UncheckedExprNoHint,
+}
+
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum IntTy { I8, I16, I32, I64 }
#[derive(Copy, Clone, PartialEq, Debug)]
uint_shift_body overflowing_shr Uint ShiftRightWithOverflow
}}
-// After type checking, `eval_const_expr_partial` should always suffice. The
-// reason for providing `eval_const_expr_with_substs` is to allow
-// trait-associated consts to be evaluated *during* type checking, when the
-// substs for each expression have not been written into `tcx` yet.
+/// Evaluate a constant expression in a context where the expression isn't
+/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked,
+/// but a few places need to evaluate constants during type-checking, like
+/// computing the length of an array. (See also the FIXME above EvalHint.)
pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
e: &Expr,
- ty_hint: Option<Ty<'tcx>>) -> EvalResult {
- eval_const_expr_with_substs(tcx, e, ty_hint, |id| {
- ty::node_id_item_substs(tcx, id).substs
- })
-}
-
-pub fn eval_const_expr_with_substs<'tcx, S>(tcx: &ty::ctxt<'tcx>,
- e: &Expr,
- ty_hint: Option<Ty<'tcx>>,
- get_substs: S) -> EvalResult
- where S: Fn(ast::NodeId) -> subst::Substs<'tcx> {
+ ty_hint: EvalHint<'tcx>) -> EvalResult {
fn fromb(b: bool) -> ConstVal { Int(b as i64) }
- let ety = ty_hint.or_else(|| ty::expr_ty_opt(tcx, e));
+ // Try to compute the type of the expression based on the EvalHint.
+ // (See also the definition of EvalHint, and the FIXME above EvalHint.)
+ let ety = match ty_hint {
+ ExprTypeChecked => {
+ // After type-checking, expr_ty is guaranteed to succeed.
+ Some(tcx.expr_ty(e))
+ }
+ UncheckedExprHint(ty) => {
+ // Use the type hint; it's not guaranteed to be right, but it's
+ // usually good enough.
+ Some(ty)
+ }
+ UncheckedExprNoHint => {
+ // This expression might not be type-checked, and we have no hint.
+ // Try to query the context for a type anyway; we might get lucky
+ // (for example, if the expression was imported from another crate).
+ tcx.expr_ty_opt(e)
+ }
+ };
// If type of expression itself is int or uint, normalize in these
// bindings so that isize/usize is mapped to a type with an
let result = match e.node {
ast::ExprUnary(ast::UnNeg, ref inner) => {
- match try!(eval_const_expr_partial(tcx, &**inner, ety)) {
+ match try!(eval_const_expr_partial(tcx, &**inner, ty_hint)) {
Float(f) => Float(-f),
Int(n) => try!(const_int_checked_neg(n, e, expr_int_type)),
Uint(i) => {
- if !tcx.sess.features.borrow().negate_unsigned {
- feature_gate::emit_feature_err(
- &tcx.sess.parse_sess.span_diagnostic,
- "negate_unsigned",
- e.span,
- "unary negation of unsigned integers may be removed in the future");
- }
try!(const_uint_checked_neg(i, e, expr_uint_type))
}
- Str(_) => signal!(e, NegateOnString),
- Bool(_) => signal!(e, NegateOnBoolean),
- Binary(_) => signal!(e, NegateOnBinary),
- Tuple(_) => signal!(e, NegateOnTuple),
- Struct(..) => signal!(e, NegateOnStruct),
+ const_val => signal!(e, NegateOn(const_val)),
}
}
ast::ExprUnary(ast::UnNot, ref inner) => {
- match try!(eval_const_expr_partial(tcx, &**inner, ety)) {
+ match try!(eval_const_expr_partial(tcx, &**inner, ty_hint)) {
Int(i) => Int(!i),
Uint(i) => const_uint_not(i, expr_uint_type),
Bool(b) => Bool(!b),
- Str(_) => signal!(e, NotOnString),
- Float(_) => signal!(e, NotOnFloat),
- Binary(_) => signal!(e, NotOnBinary),
- Tuple(_) => signal!(e, NotOnTuple),
- Struct(..) => signal!(e, NotOnStruct),
+ const_val => signal!(e, NotOn(const_val)),
}
}
ast::ExprBinary(op, ref a, ref b) => {
let b_ty = match op.node {
- ast::BiShl | ast::BiShr => Some(tcx.types.usize),
- _ => ety
+ ast::BiShl | ast::BiShr => {
+ if let ExprTypeChecked = ty_hint {
+ ExprTypeChecked
+ } else {
+ UncheckedExprHint(tcx.types.usize)
+ }
+ }
+ _ => ty_hint
};
- match (try!(eval_const_expr_partial(tcx, &**a, ety)),
+ match (try!(eval_const_expr_partial(tcx, &**a, ty_hint)),
try!(eval_const_expr_partial(tcx, &**b, b_ty))) {
(Float(a), Float(b)) => {
match op.node {
}
}
ast::ExprCast(ref base, ref target_ty) => {
- // This tends to get called w/o the type actually having been
- // populated in the ctxt, which was causing things to blow up
- // (#5900). Fall back to doing a limited lookup to get past it.
let ety = ety.or_else(|| ast_ty_to_prim_ty(tcx, &**target_ty))
.unwrap_or_else(|| {
tcx.sess.span_fatal(target_ty.span,
"target type not found for const cast")
});
- // Prefer known type to noop, but always have a type hint.
- //
- // FIXME (#23833): the type-hint can cause problems,
- // e.g. `(i8::MAX + 1_i8) as u32` feeds in `u32` as result
- // type to the sum, and thus no overflow is signaled.
- let base_hint = ty::expr_ty_opt(tcx, &**base).unwrap_or(ety);
- let val = try!(eval_const_expr_partial(tcx, &**base, Some(base_hint)));
+ let base_hint = if let ExprTypeChecked = ty_hint {
+ ExprTypeChecked
+ } else {
+ // FIXME (#23833): the type-hint can cause problems,
+ // e.g. `(i8::MAX + 1_i8) as u32` feeds in `u32` as result
+ // type to the sum, and thus no overflow is signaled.
+ match tcx.expr_ty_opt(&base) {
+ Some(t) => UncheckedExprHint(t),
+ None => ty_hint
+ }
+ };
+
+ let val = try!(eval_const_expr_partial(tcx, &**base, base_hint));
match cast_const(tcx, val, ety) {
Ok(val) => val,
Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }),
(lookup_const_by_id(tcx, def_id, Some(e.id)), None)
}
}
- Some(def::DefAssociatedConst(def_id, provenance)) => {
+ Some(def::DefAssociatedConst(def_id)) => {
if ast_util::is_local(def_id) {
- match provenance {
- def::FromTrait(trait_id) => match tcx.map.find(def_id.node) {
+ match tcx.impl_or_trait_item(def_id).container() {
+ ty::TraitContainer(trait_id) => match tcx.map.find(def_id.node) {
Some(ast_map::NodeTraitItem(ti)) => match ti.node {
ast::ConstTraitItem(ref ty, _) => {
- let substs = get_substs(e.id);
- (resolve_trait_associated_const(tcx,
- ti,
- trait_id,
- substs),
- Some(&**ty))
+ if let ExprTypeChecked = ty_hint {
+ let substs = tcx.node_id_item_substs(e.id).substs;
+ (resolve_trait_associated_const(tcx,
+ ti,
+ trait_id,
+ substs),
+ Some(&**ty))
+ } else {
+ (None, None)
+ }
}
_ => (None, None)
},
_ => (None, None)
},
- def::FromImpl(_) => match tcx.map.find(def_id.node) {
+ ty::ImplContainer(_) => match tcx.map.find(def_id.node) {
Some(ast_map::NodeImplItem(ii)) => match ii.node {
ast::ConstImplItem(ref ty, ref expr) => {
(Some(&**expr), Some(&**ty))
Some(def::DefVariant(enum_def, variant_def, _)) => {
(lookup_variant_by_id(tcx, enum_def, variant_def), None)
}
+ Some(def::DefStruct(_)) => {
+ return Ok(ConstVal::Struct(e.id))
+ }
_ => (None, None)
};
let const_expr = match const_expr {
Some(actual_e) => actual_e,
None => signal!(e, NonConstPath)
};
- let ety = ety.or_else(|| const_ty.and_then(|ty| ast_ty_to_prim_ty(tcx, ty)));
- try!(eval_const_expr_partial(tcx, const_expr, ety))
+ let item_hint = if let UncheckedExprNoHint = ty_hint {
+ match const_ty {
+ Some(ty) => match ast_ty_to_prim_ty(tcx, ty) {
+ Some(ty) => UncheckedExprHint(ty),
+ None => UncheckedExprNoHint
+ },
+ None => UncheckedExprNoHint
+ }
+ } else {
+ ty_hint
+ };
+ try!(eval_const_expr_partial(tcx, const_expr, item_hint))
}
ast::ExprLit(ref lit) => {
lit_to_const(&**lit, ety)
}
- ast::ExprParen(ref e) => try!(eval_const_expr_partial(tcx, &**e, ety)),
+ ast::ExprParen(ref e) => try!(eval_const_expr_partial(tcx, &**e, ty_hint)),
ast::ExprBlock(ref block) => {
match block.expr {
- Some(ref expr) => try!(eval_const_expr_partial(tcx, &**expr, ety)),
+ Some(ref expr) => try!(eval_const_expr_partial(tcx, &**expr, ty_hint)),
None => Int(0)
}
}
ast::ExprTup(_) => Tuple(e.id),
ast::ExprStruct(..) => Struct(e.id),
ast::ExprTupField(ref base, index) => {
- if let Ok(c) = eval_const_expr_partial(tcx, base, None) {
+ let base_hint = if let ExprTypeChecked = ty_hint {
+ ExprTypeChecked
+ } else {
+ UncheckedExprNoHint
+ };
+ if let Ok(c) = eval_const_expr_partial(tcx, base, base_hint) {
if let Tuple(tup_id) = c {
if let ast::ExprTup(ref fields) = tcx.map.expect_expr(tup_id).node {
if index.node < fields.len() {
- return eval_const_expr_partial(tcx, &fields[index.node], None)
+ return eval_const_expr_partial(tcx, &fields[index.node], base_hint)
} else {
signal!(e, TupleIndexOutOfBounds);
}
}
ast::ExprField(ref base, field_name) => {
// Get the base expression if it is a struct and it is constant
- if let Ok(c) = eval_const_expr_partial(tcx, base, None) {
+ let base_hint = if let ExprTypeChecked = ty_hint {
+ ExprTypeChecked
+ } else {
+ UncheckedExprNoHint
+ };
+ if let Ok(c) = eval_const_expr_partial(tcx, base, base_hint) {
if let Struct(struct_id) = c {
if let ast::ExprStruct(_, ref fields, _) = tcx.map.expect_expr(struct_id).node {
// Check that the given field exists and evaluate it
- if let Some(f) = fields.iter().find(|f| f.ident.node.as_str()
- == field_name.node.as_str()) {
- return eval_const_expr_partial(tcx, &*f.expr, None)
+ // (if the idents are compared instead, run-pass/issue-19244 fails)
+ if let Some(f) = fields.iter().find(|f| f.ident.node.name
+ == field_name.node.name) {
+ return eval_const_expr_partial(tcx, &*f.expr, base_hint)
} else {
signal!(e, MissingStructField);
}
let trait_ref = ty::Binder(ty::TraitRef { def_id: trait_id,
substs: trait_substs });
- ty::populate_implementations_for_trait_if_necessary(tcx, trait_ref.def_id());
- let infcx = infer::new_infer_ctxt(tcx);
+ tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id());
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false);
- let param_env = ty::empty_parameter_environment(tcx);
- let mut selcx = traits::SelectionContext::new(&infcx, ¶m_env);
+ let mut selcx = traits::SelectionContext::new(&infcx);
let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
trait_ref.to_poly_trait_predicate());
let selection = match selcx.select(&obligation) {
match selection {
traits::VtableImpl(ref impl_data) => {
- match ty::associated_consts(tcx, impl_data.impl_def_id)
+ match tcx.associated_consts(impl_data.impl_def_id)
.iter().find(|ic| ic.name == ti.ident.name) {
Some(ic) => lookup_const_by_id(tcx, ic.def_id, None),
None => match ti.node {
})
}
-pub fn compare_lit_exprs<'tcx, S>(tcx: &ty::ctxt<'tcx>,
- a: &Expr,
- b: &Expr,
- ty_hint: Option<Ty<'tcx>>,
- get_substs: S) -> Option<Ordering>
- where S: Fn(ast::NodeId) -> subst::Substs<'tcx> {
- let a = match eval_const_expr_with_substs(tcx, a, ty_hint,
- |id| {get_substs(id)}) {
+pub fn compare_lit_exprs<'tcx>(tcx: &ty::ctxt<'tcx>,
+ a: &Expr,
+ b: &Expr) -> Option<Ordering> {
+ let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked) {
Ok(a) => a,
Err(e) => {
tcx.sess.span_err(a.span, &e.description());
return None;
}
};
- let b = match eval_const_expr_with_substs(tcx, b, ty_hint, get_substs) {
+ let b = match eval_const_expr_partial(tcx, b, ExprTypeChecked) {
Ok(b) => b,
Err(e) => {
tcx.sess.span_err(b.span, &e.description());
use middle::ty;
use std::io;
use std::usize;
-use std::iter::repeat;
use syntax::ast;
use syntax::ast_util::IdRange;
use syntax::visit;
let entry = if oper.initial_value() { usize::MAX } else {0};
- let zeroes: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
- let gens: Vec<_> = zeroes.clone();
- let kills1: Vec<_> = zeroes.clone();
- let kills2: Vec<_> = zeroes;
- let on_entry: Vec<_> = repeat(entry).take(num_nodes * words_per_id).collect();
+ let zeroes = vec![0; num_nodes * words_per_id];
+ let gens = zeroes.clone();
+ let kills1 = zeroes.clone();
+ let kills2 = zeroes;
+ let on_entry = vec![entry; num_nodes * words_per_id];
let nodeid_to_index = build_nodeid_to_index(decl, cfg);
changed: true
};
- let mut temp: Vec<_> = repeat(0).take(words_per_id).collect();
+ let mut temp = vec![0; words_per_id];
while propcx.changed {
propcx.changed = false;
propcx.reset(&mut temp);
});
}
- fn lookup_and_handle_method(&mut self, id: ast::NodeId,
- span: codemap::Span) {
+ fn lookup_and_handle_method(&mut self, id: ast::NodeId) {
let method_call = ty::MethodCall::expr(id);
- match self.tcx.method_map.borrow().get(&method_call) {
- Some(method) => {
- match method.origin {
- ty::MethodStatic(def_id) => {
- match ty::provided_source(self.tcx, def_id) {
- Some(p_did) => self.check_def_id(p_did),
- None => self.check_def_id(def_id)
- }
- }
- ty::MethodStaticClosure(_) => {}
- ty::MethodTypeParam(ty::MethodParam {
- ref trait_ref,
- method_num: index,
- ..
- }) |
- ty::MethodTraitObject(ty::MethodObject {
- ref trait_ref,
- method_num: index,
- ..
- }) => {
- let trait_item = ty::trait_item(self.tcx,
- trait_ref.def_id,
- index);
- self.check_def_id(trait_item.def_id());
- }
- }
- }
- None => {
- self.tcx.sess.span_bug(span,
- "method call expression not \
- in method map?!")
- }
- }
+ let method = self.tcx.tables.borrow().method_map[&method_call];
+ self.check_def_id(method.def_id);
}
fn handle_field_access(&mut self, lhs: &ast::Expr, name: ast::Name) {
- match ty::expr_ty_adjusted(self.tcx, lhs).sty {
+ match self.tcx.expr_ty_adjusted(lhs).sty {
ty::TyStruct(id, _) => {
- let fields = ty::lookup_struct_fields(self.tcx, id);
+ let fields = self.tcx.lookup_struct_fields(id);
let field_id = fields.iter()
.find(|field| field.name == name).unwrap().id;
self.live_symbols.insert(field_id.node);
}
fn handle_tup_field_access(&mut self, lhs: &ast::Expr, idx: usize) {
- match ty::expr_ty_adjusted(self.tcx, lhs).sty {
+ match self.tcx.expr_ty_adjusted(lhs).sty {
ty::TyStruct(id, _) => {
- let fields = ty::lookup_struct_fields(self.tcx, id);
+ let fields = self.tcx.lookup_struct_fields(id);
let field_id = fields[idx].id;
self.live_symbols.insert(field_id.node);
},
let id = match self.tcx.def_map.borrow().get(&lhs.id).unwrap().full_def() {
def::DefVariant(_, id, _) => id,
_ => {
- match ty::ty_to_def_id(ty::node_id_to_type(self.tcx,
- lhs.id)) {
+ match self.tcx.node_id_to_type(lhs.id).ty_to_def_id() {
None => {
self.tcx.sess.span_bug(lhs.span,
"struct pattern wasn't of a \
}
}
};
- let fields = ty::lookup_struct_fields(self.tcx, id);
+ let fields = self.tcx.lookup_struct_fields(id);
for pat in pats {
if let ast::PatWild(ast::PatWildSingle) = pat.node.pat.node {
continue;
fn visit_expr(&mut self, expr: &ast::Expr) {
match expr.node {
ast::ExprMethodCall(..) => {
- self.lookup_and_handle_method(expr.id, expr.span);
+ self.lookup_and_handle_method(expr.id);
}
ast::ExprField(ref lhs, ref ident) => {
self.handle_field_access(&**lhs, ident.node.name);
fn should_warn_about_field(&mut self, node: &ast::StructField_) -> bool {
let is_named = node.ident().is_some();
- let field_type = ty::node_id_to_type(self.tcx, node.id);
- let is_marker_field = match ty::ty_to_def_id(field_type) {
+ let field_type = self.tcx.node_id_to_type(node.id);
+ let is_marker_field = match field_type.ty_to_def_id() {
Some(def_id) => self.tcx.lang_items.items().any(|(_, item)| *item == Some(def_id)),
_ => false
};
// except according to those terms.
pub use self::Def::*;
-pub use self::MethodProvenance::*;
use middle::privacy::LastPrivate;
use middle::subst::ParamSpace;
DefForeignMod(ast::DefId),
DefStatic(ast::DefId, bool /* is_mutbl */),
DefConst(ast::DefId),
- DefAssociatedConst(ast::DefId /* const */, MethodProvenance),
+ DefAssociatedConst(ast::DefId),
DefLocal(ast::NodeId),
DefVariant(ast::DefId /* enum */, ast::DefId /* variant */, bool /* is_structure */),
DefTy(ast::DefId, bool /* is_enum */),
DefStruct(ast::DefId),
DefRegion(ast::NodeId),
DefLabel(ast::NodeId),
- DefMethod(ast::DefId /* method */, MethodProvenance),
+ DefMethod(ast::DefId),
}
/// The result of resolving a path.
pub def_id: ast::DefId, // The definition of the target.
}
-#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum MethodProvenance {
- FromTrait(ast::DefId),
- FromImpl(ast::DefId),
-}
-
-impl MethodProvenance {
- pub fn map<F>(self, f: F) -> MethodProvenance where
- F: FnOnce(ast::DefId) -> ast::DefId,
- {
- match self {
- FromTrait(did) => FromTrait(f(did)),
- FromImpl(did) => FromImpl(f(did))
- }
- }
-}
-
impl Def {
pub fn local_node_id(&self) -> ast::NodeId {
let def_id = self.def_id();
DefFn(id, _) | DefMod(id) | DefForeignMod(id) | DefStatic(id, _) |
DefVariant(_, id, _) | DefTy(id, _) | DefAssociatedTy(_, id) |
DefTyParam(_, _, id, _) | DefUse(id) | DefStruct(id) | DefTrait(id) |
- DefMethod(id, _) | DefConst(id) | DefAssociatedConst(id, _) |
+ DefMethod(id) | DefConst(id) | DefAssociatedConst(id) |
DefSelfTy(Some(id), None)=> {
id
}
//! Enforces the Rust effect system. Currently there is just one effect,
//! `unsafe`.
-use self::UnsafeContext::*;
+use self::RootUnsafeContext::*;
use middle::def;
use middle::ty::{self, Ty};
use syntax::visit;
use syntax::visit::Visitor;
+#[derive(Copy, Clone)]
+struct UnsafeContext {
+ push_unsafe_count: usize,
+ root: RootUnsafeContext,
+}
+
+impl UnsafeContext {
+ fn new(root: RootUnsafeContext) -> UnsafeContext {
+ UnsafeContext { root: root, push_unsafe_count: 0 }
+ }
+}
+
#[derive(Copy, Clone, PartialEq)]
-enum UnsafeContext {
+enum RootUnsafeContext {
SafeContext,
UnsafeFn,
UnsafeBlock(ast::NodeId),
impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> {
fn require_unsafe(&mut self, span: Span, description: &str) {
- match self.unsafe_context {
+ if self.unsafe_context.push_unsafe_count > 0 { return; }
+ match self.unsafe_context.root {
SafeContext => {
// Report an error.
span_err!(self.tcx.sess, span, E0133,
UnsafeFn => {}
}
}
-
- fn check_str_index(&mut self, e: &ast::Expr) {
- let base_type = match e.node {
- ast::ExprIndex(ref base, _) => ty::node_id_to_type(self.tcx, base.id),
- _ => return
- };
- debug!("effect: checking index with base type {:?}",
- base_type);
- match base_type.sty {
- ty::TyBox(ty) | ty::TyRef(_, ty::mt{ty, ..}) => if ty::TyStr == ty.sty {
- span_err!(self.tcx.sess, e.span, E0134,
- "modification of string types is not allowed");
- },
- ty::TyStr => {
- span_err!(self.tcx.sess, e.span, E0135,
- "modification of string types is not allowed");
- }
- _ => {}
- }
- }
}
impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> {
let old_unsafe_context = self.unsafe_context;
if is_unsafe_fn {
- self.unsafe_context = UnsafeFn
+ self.unsafe_context = UnsafeContext::new(UnsafeFn)
} else if is_item_fn {
- self.unsafe_context = SafeContext
+ self.unsafe_context = UnsafeContext::new(SafeContext)
}
visit::walk_fn(self, fn_kind, fn_decl, block, span);
// external blocks (e.g. `unsafe { println("") }`,
// expands to `unsafe { ... unsafe { ... } }` where
// the inner one is compiler generated).
- if self.unsafe_context == SafeContext || source == ast::CompilerGenerated {
- self.unsafe_context = UnsafeBlock(block.id)
+ if self.unsafe_context.root == SafeContext || source == ast::CompilerGenerated {
+ self.unsafe_context.root = UnsafeBlock(block.id)
}
}
+ ast::PushUnsafeBlock(..) => {
+ self.unsafe_context.push_unsafe_count =
+ self.unsafe_context.push_unsafe_count.checked_add(1).unwrap();
+ }
+ ast::PopUnsafeBlock(..) => {
+ self.unsafe_context.push_unsafe_count =
+ self.unsafe_context.push_unsafe_count.checked_sub(1).unwrap();
+ }
}
visit::walk_block(self, block);
match expr.node {
ast::ExprMethodCall(_, _, _) => {
let method_call = MethodCall::expr(expr.id);
- let base_type = self.tcx.method_map.borrow().get(&method_call).unwrap().ty;
+ let base_type = self.tcx.tables.borrow().method_map[&method_call].ty;
debug!("effect: method call case, base type is {:?}",
base_type);
if type_is_unsafe_function(base_type) {
}
}
ast::ExprCall(ref base, _) => {
- let base_type = ty::node_id_to_type(self.tcx, base.id);
+ let base_type = self.tcx.node_id_to_type(base.id);
debug!("effect: call case, base type is {:?}",
base_type);
if type_is_unsafe_function(base_type) {
}
}
ast::ExprUnary(ast::UnDeref, ref base) => {
- let base_type = ty::node_id_to_type(self.tcx, base.id);
+ let base_type = self.tcx.node_id_to_type(base.id);
debug!("effect: unary case, base type is {:?}",
base_type);
if let ty::TyRawPtr(_) = base_type.sty {
self.require_unsafe(expr.span, "dereference of raw pointer")
}
}
- ast::ExprAssign(ref base, _) | ast::ExprAssignOp(_, ref base, _) => {
- self.check_str_index(&**base);
- }
- ast::ExprAddrOf(ast::MutMutable, ref base) => {
- self.check_str_index(&**base);
- }
ast::ExprInlineAsm(..) => {
self.require_unsafe(expr.span, "use of inline assembly");
}
ast::ExprPath(..) => {
- if let def::DefStatic(_, true) = ty::resolve_expr(self.tcx, expr) {
+ if let def::DefStatic(_, true) = self.tcx.resolve_expr(expr) {
self.require_unsafe(expr.span, "use of mutable static");
}
}
pub fn check_crate(tcx: &ty::ctxt) {
let mut visitor = EffectCheckVisitor {
tcx: tcx,
- unsafe_context: SafeContext,
+ unsafe_context: UnsafeContext::new(SafeContext),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
use self::OverloadedCallType::*;
use middle::{def, region, pat_util};
+use middle::infer;
use middle::mem_categorization as mc;
-use middle::mem_categorization::Typer;
-use middle::ty::{self};
-use middle::ty::{MethodCall, MethodObject, MethodTraitObject};
-use middle::ty::{MethodOrigin, MethodParam, MethodTypeParam};
-use middle::ty::{MethodStatic, MethodStaticClosure};
+use middle::ty;
use syntax::{ast, ast_util};
use syntax::ptr::P;
fn from_method_id(tcx: &ty::ctxt, method_id: ast::DefId)
-> OverloadedCallType {
- let method_descriptor = match ty::impl_or_trait_item(tcx, method_id) {
- ty::MethodTraitItem(ref method_descriptor) => {
- (*method_descriptor).clone()
- }
- _ => {
- tcx.sess.bug("overloaded call method wasn't in method map")
- }
- };
- let impl_id = match method_descriptor.container {
- ty::TraitContainer(_) => {
- tcx.sess.bug("statically resolved overloaded call method \
- belonged to a trait?!")
- }
- ty::ImplContainer(impl_id) => impl_id,
- };
- let trait_ref = match ty::impl_trait_ref(tcx, impl_id) {
- None => {
- tcx.sess.bug("statically resolved overloaded call impl \
- didn't implement a trait?!")
- }
- Some(ref trait_ref) => (*trait_ref).clone(),
- };
- OverloadedCallType::from_trait_id(tcx, trait_ref.def_id)
- }
-
- fn from_closure(tcx: &ty::ctxt, closure_did: ast::DefId)
- -> OverloadedCallType {
- let trait_did =
- tcx.closure_kinds
- .borrow()
- .get(&closure_did)
- .expect("OverloadedCallType::from_closure: didn't find closure id")
- .trait_did(tcx);
- OverloadedCallType::from_trait_id(tcx, trait_did)
- }
-
- fn from_method_origin(tcx: &ty::ctxt, origin: &MethodOrigin)
- -> OverloadedCallType {
- match *origin {
- MethodStatic(def_id) => {
- OverloadedCallType::from_method_id(tcx, def_id)
- }
- MethodStaticClosure(def_id) => {
- OverloadedCallType::from_closure(tcx, def_id)
- }
- MethodTypeParam(MethodParam { ref trait_ref, .. }) |
- MethodTraitObject(MethodObject { ref trait_ref, .. }) => {
- OverloadedCallType::from_trait_id(tcx, trait_ref.def_id)
- }
- }
+ let method = tcx.impl_or_trait_item(method_id);
+ OverloadedCallType::from_trait_id(tcx, method.container().id())
}
}
// supplies types from the tree. After type checking is complete, you
// can just use the tcx as the typer.
-pub struct ExprUseVisitor<'d,'t,'tcx:'t,TYPER:'t> {
- typer: &'t TYPER,
- mc: mc::MemCategorizationContext<'t,TYPER>,
+pub struct ExprUseVisitor<'d,'t,'a: 't, 'tcx:'a> {
+ typer: &'t infer::InferCtxt<'a, 'tcx>,
+ mc: mc::MemCategorizationContext<'t, 'a, 'tcx>,
delegate: &'d mut (Delegate<'tcx>+'d),
}
($inp: expr) => (
match $inp {
Ok(v) => v,
- Err(()) => return
+ Err(()) => {
+ debug!("mc reported err");
+ return
+ }
}
)
}
ByRef,
}
-impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
+impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
pub fn new(delegate: &'d mut Delegate<'tcx>,
- typer: &'t TYPER)
- -> ExprUseVisitor<'d,'t,'tcx,TYPER> {
+ typer: &'t infer::InferCtxt<'a, 'tcx>)
+ -> ExprUseVisitor<'d,'t,'a, 'tcx> {
ExprUseVisitor {
typer: typer,
mc: mc::MemCategorizationContext::new(typer),
}
fn tcx(&self) -> &'t ty::ctxt<'tcx> {
- self.typer.tcx()
+ self.typer.tcx
}
fn delegate_consume(&mut self,
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
let expr_ty = return_if_err!(self.typer.node_ty(expr.id));
- let r = ty::ty_region(self.tcx(), expr.span, expr_ty);
- let bk = ty::BorrowKind::from_mutbl(m);
- self.borrow_expr(&**base, r, bk, AddrOf);
+ if let ty::TyRef(&r, _) = expr_ty.sty {
+ let bk = ty::BorrowKind::from_mutbl(m);
+ self.borrow_expr(&**base, r, bk, AddrOf);
+ }
}
ast::ExprInlineAsm(ref ia) => {
None => {}
}
self.consume_expr(&**base);
+ if place.is_some() {
+ self.tcx().sess.span_bug(
+ expr.span,
+ "box with explicit place remains after expansion");
+ }
}
ast::ExprMac(..) => {
ty::TyError => { }
_ => {
let overloaded_call_type =
- match self.typer.node_method_origin(MethodCall::expr(call.id)) {
- Some(method_origin) => {
- OverloadedCallType::from_method_origin(
- self.tcx(),
- &method_origin)
+ match self.typer.node_method_id(ty::MethodCall::expr(call.id)) {
+ Some(method_id) => {
+ OverloadedCallType::from_method_id(self.tcx(), method_id)
}
None => {
self.tcx().sess.span_bug(
match local.init {
None => {
let delegate = &mut self.delegate;
- pat_util::pat_bindings(&self.typer.tcx().def_map, &*local.pat,
+ pat_util::pat_bindings(&self.typer.tcx.def_map, &*local.pat,
|_, id, span, _| {
delegate.decl_without_init(id, span);
})
// expression that will actually be used
let with_fields = match with_cmt.ty.sty {
ty::TyStruct(did, substs) => {
- ty::struct_fields(self.tcx(), did, substs)
+ self.tcx().struct_fields(did, substs)
}
_ => {
// the base expression should always evaluate to a
// are properly handled.
self.walk_expr(with_expr);
- fn contains_field_named(field: &ty::field,
+ fn contains_field_named(field: &ty::Field,
fields: &Vec<ast::Field>)
-> bool
{
// process.
fn walk_adjustment(&mut self, expr: &ast::Expr) {
let typer = self.typer;
- if let Some(adjustment) = typer.adjustments().borrow().get(&expr.id) {
- match *adjustment {
+ //NOTE(@jroesch): mixed RefCell borrow causes crash
+ let adj = typer.adjustments().get(&expr.id).map(|x| x.clone());
+ if let Some(adjustment) = adj {
+ match adjustment {
ty::AdjustReifyFnPointer |
ty::AdjustUnsafeFnPointer => {
// Creating a closure/fn-pointer or unsizing consumes
// the method call infrastructure should have
// replaced all late-bound regions with variables:
- let self_ty = ty::ty_fn_sig(method_ty).input(0);
- let self_ty = ty::no_late_bound_regions(self.tcx(), &self_ty).unwrap();
+ let self_ty = method_ty.fn_sig().input(0);
+ let self_ty = self.tcx().no_late_bound_regions(&self_ty).unwrap();
let (m, r) = match self_ty.sty {
ty::TyRef(r, ref m) => (m.mutbl, r),
// This is always an rvalue, since we are producing a new
// (temporary) indirection.
- let adj_ty =
- ty::adjust_ty_for_autoref(self.tcx(),
- cmt_base_ty,
- opt_autoref);
+ let adj_ty = cmt_base_ty.adjust_for_autoref(self.tcx(), opt_autoref);
self.mc.cat_rvalue_node(expr.id, expr.span, adj_ty)
}
let delegate = &mut self.delegate;
return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
if pat_util::pat_is_binding(def_map, pat) {
- let tcx = typer.tcx();
+ let tcx = typer.tcx;
debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}",
cmt_pat,
// It is also a borrow or copy/move of the value being matched.
match pat.node {
ast::PatIdent(ast::BindByRef(m), _, _) => {
- let (r, bk) = {
- (ty::ty_region(tcx, pat.span, pat_ty),
- ty::BorrowKind::from_mutbl(m))
- };
- delegate.borrow(pat.id, pat.span, cmt_pat,
- r, bk, RefBinding);
+ if let ty::TyRef(&r, _) = pat_ty.sty {
+ let bk = ty::BorrowKind::from_mutbl(m);
+ delegate.borrow(pat.id, pat.span, cmt_pat,
+ r, bk, RefBinding);
+ }
}
ast::PatIdent(ast::BindByValue(_), _, _) => {
let mode = copy_or_move(typer, &cmt_pat, PatBindingMove);
// the leaves of the pattern tree structure.
return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
let def_map = def_map.borrow();
- let tcx = typer.tcx();
+ let tcx = typer.tcx;
match pat.node {
ast::PatEnum(_, _) | ast::PatQPath(..) |
Some(def::DefVariant(enum_did, variant_did, _is_struct)) => {
let downcast_cmt =
- if ty::enum_is_univariant(tcx, enum_did) {
+ if tcx.enum_is_univariant(enum_did) {
cmt_pat
} else {
let cmt_pat_ty = cmt_pat.ty;
fn walk_captures(&mut self, closure_expr: &ast::Expr) {
debug!("walk_captures({:?})", closure_expr);
- ty::with_freevars(self.tcx(), closure_expr.id, |freevars| {
+ self.tcx().with_freevars(closure_expr.id, |freevars| {
for freevar in freevars {
let id_var = freevar.def.def_id().node;
let upvar_id = ty::UpvarId { var_id: id_var,
}
}
-fn copy_or_move<'tcx>(typer: &mc::Typer<'tcx>,
+fn copy_or_move<'a, 'tcx>(typer: &infer::InferCtxt<'a, 'tcx>,
cmt: &mc::cmt<'tcx>,
move_reason: MoveReason)
-> ConsumeMode
{
- if typer.type_moves_by_default(cmt.span, cmt.ty) {
+ if typer.type_moves_by_default(cmt.ty, cmt.span) {
Move(move_reason)
} else {
Copy
}
ty::TyBox(_) => {
// treat like we would treat `Box`
- let def_id = tcx.lang_items.owned_box().unwrap();
- Some(StructSimplifiedType(def_id))
+ match tcx.lang_items.require_owned_box() {
+ Ok(def_id) => Some(StructSimplifiedType(def_id)),
+ Err(msg) => tcx.sess.fatal(&msg),
+ }
}
ty::TyClosure(def_id, _) => {
Some(ClosureSimplifiedType(def_id))
use middle::implicator::Implication;
use middle::ty::{self, FreeRegion};
use util::common::can_reach;
-use util::nodemap::FnvHashMap;
+use util::nodemap::{FnvHashMap, FnvHashSet};
#[derive(Clone)]
pub struct FreeRegionMap {
- /// `free_region_map` maps from a free region `a` to a list of
+ /// `map` maps from a free region `a` to a list of
/// free regions `bs` such that `a <= b for all b in bs`
map: FnvHashMap<FreeRegion, Vec<FreeRegion>>,
+ /// regions that are required to outlive (and therefore be
+ /// equal to) 'static.
+ statics: FnvHashSet<FreeRegion>
}
impl FreeRegionMap {
pub fn new() -> FreeRegionMap {
- FreeRegionMap { map: FnvHashMap() }
+ FreeRegionMap { map: FnvHashMap(), statics: FnvHashSet() }
}
pub fn relate_free_regions_from_implications<'tcx>(&mut self,
self.relate_free_regions(free_a, free_b);
}
Implication::RegionSubRegion(..) |
- Implication::RegionSubClosure(..) |
Implication::RegionSubGeneric(..) |
Implication::Predicate(..) => {
}
}
ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(r_a, r_b))) => {
match (r_a, r_b) {
+ (ty::ReStatic, ty::ReFree(_)) => {},
+ (ty::ReFree(fr_a), ty::ReStatic) => self.relate_to_static(fr_a),
(ty::ReFree(fr_a), ty::ReFree(fr_b)) => {
// Record that `'a:'b`. Or, put another way, `'b <= 'a`.
self.relate_free_regions(fr_b, fr_a);
}
}
- pub fn relate_free_regions(&mut self, sub: FreeRegion, sup: FreeRegion) {
- let mut sups = self.map.entry(sub).or_insert(Vec::new());
+ fn relate_to_static(&mut self, sup: FreeRegion) {
+ self.statics.insert(sup);
+ }
+
+ fn relate_free_regions(&mut self, sub: FreeRegion, sup: FreeRegion) {
+ let mut sups = self.map.entry(sub).or_insert(Vec::new());
if !sups.contains(&sup) {
sups.push(sup);
}
/// it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
/// (that is, the user can give two different names to the same lifetime).
pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
- can_reach(&self.map, sub, sup)
+ can_reach(&self.map, sub, sup) || self.is_static(&sup)
}
/// Determines whether one region is a subregion of another. This is intended to run *after
(ty::ReFree(sub_fr), ty::ReFree(super_fr)) =>
self.sub_free_region(sub_fr, super_fr),
+ (ty::ReStatic, ty::ReFree(ref sup_fr)) => self.is_static(sup_fr),
+
_ =>
false,
}
}
}
-}
+ /// Determines whether this free-region is required to be 'static
+ pub fn is_static(&self, super_region: &ty::FreeRegion) -> bool {
+ debug!("is_static(super_region={:?})", super_region);
+ self.statics.iter().any(|s| can_reach(&self.map, *s, *super_region))
+ }
+}
use middle::infer::{InferCtxt, GenericKind};
use middle::subst::Substs;
use middle::traits;
-use middle::ty::{self, RegionEscape, ToPolyTraitRef, AsPredicate, Ty};
+use middle::ty::{self, RegionEscape, ToPolyTraitRef, ToPredicate, Ty};
use middle::ty_fold::{TypeFoldable, TypeFolder};
use syntax::ast;
pub enum Implication<'tcx> {
RegionSubRegion(Option<Ty<'tcx>>, ty::Region, ty::Region),
RegionSubGeneric(Option<Ty<'tcx>>, ty::Region, GenericKind<'tcx>),
- RegionSubClosure(Option<Ty<'tcx>>, ty::Region, ast::DefId, &'tcx Substs<'tcx>),
Predicate(ast::DefId, ty::Predicate<'tcx>),
}
struct Implicator<'a, 'tcx: 'a> {
infcx: &'a InferCtxt<'a,'tcx>,
- closure_typer: &'a (ty::ClosureTyper<'tcx>+'a),
body_id: ast::NodeId,
stack: Vec<(ty::Region, Option<Ty<'tcx>>)>,
span: Span,
/// appear in a context with lifetime `outer_region`
pub fn implications<'a,'tcx>(
infcx: &'a InferCtxt<'a,'tcx>,
- closure_typer: &ty::ClosureTyper<'tcx>,
body_id: ast::NodeId,
ty: Ty<'tcx>,
outer_region: ty::Region,
let mut stack = Vec::new();
stack.push((outer_region, None));
- let mut wf = Implicator { closure_typer: closure_typer,
- infcx: infcx,
+ let mut wf = Implicator { infcx: infcx,
body_id: body_id,
span: span,
stack: stack,
// No borrowed content reachable here.
}
- ty::TyClosure(def_id, substs) => {
- let &(r_a, opt_ty) = self.stack.last().unwrap();
- self.out.push(Implication::RegionSubClosure(opt_ty, r_a, def_id, substs));
+ ty::TyClosure(_, ref substs) => {
+ // FIXME(#27086). We do not accumulate from substs, since they
+ // don't represent reachable data. This means that, in
+ // practice, some of the lifetime parameters might not
+ // be in scope when the body runs, so long as there is
+ // no reachable data with that lifetime. For better or
+ // worse, this is consistent with fn types, however,
+ // which can also encapsulate data in this fashion
+ // (though it's somewhat harder, and typically
+ // requires virtual dispatch).
+ //
+ // Note that changing this (in a naive way, at least)
+ // causes regressions for what appears to be perfectly
+ // reasonable code like this:
+ //
+ // ```
+ // fn foo<'a>(p: &Data<'a>) {
+ // bar(|q: &mut Parser| q.read_addr())
+ // }
+ // fn bar(p: Box<FnMut(&mut Parser)+'static>) {
+ // }
+ // ```
+ //
+ // Note that `p` (and `'a`) are not used in the
+ // closure at all, but to meet the requirement that
+ // the closure type `C: 'static` (so it can be coerced
+ // to the object type), we get the requirement that
+ // `'a: 'static` since `'a` appears in the closure
+ // type `C`.
+ //
+ // A smarter fix might "prune" unused `func_substs` --
+ // this would avoid breaking simple examples like
+ // this, but would still break others (which might
+ // indeed be invalid, depending on your POV). Pruning
+ // would be a subtle process, since we have to see
+ // what func/type parameters are used and unused,
+ // taking into consideration UFCS and so forth.
+
+ for &upvar_ty in &substs.upvar_tys {
+ self.accumulate_from_ty(upvar_ty);
+ }
}
ty::TyTrait(ref t) => {
ty::TyEnum(def_id, substs) |
ty::TyStruct(def_id, substs) => {
- let item_scheme = ty::lookup_item_type(self.tcx(), def_id);
+ let item_scheme = self.tcx().lookup_item_type(def_id);
self.accumulate_from_adt(ty, def_id, &item_scheme.generics, substs)
}
ty::TyArray(t, _) |
ty::TySlice(t) |
- ty::TyRawPtr(ty::mt { ty: t, .. }) |
+ ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) |
ty::TyBox(t) => {
self.accumulate_from_ty(t)
}
substs: &Substs<'tcx>)
{
let predicates =
- ty::lookup_predicates(self.tcx(), def_id).instantiate(self.tcx(), substs);
+ self.tcx().lookup_predicates(def_id).instantiate(self.tcx(), substs);
let predicates = match self.fully_normalize(&predicates) {
Ok(predicates) => predicates,
Err(ErrorReported) => { return; }
ty::Predicate::Equate(..) => { }
ty::Predicate::Projection(..) => { }
ty::Predicate::RegionOutlives(ref data) => {
- match ty::no_late_bound_regions(self.tcx(), data) {
+ match self.tcx().no_late_bound_regions(data) {
None => { }
Some(ty::OutlivesPredicate(r_a, r_b)) => {
self.push_sub_region_constraint(Some(ty), r_b, r_a);
}
}
ty::Predicate::TypeOutlives(ref data) => {
- match ty::no_late_bound_regions(self.tcx(), data) {
+ match self.tcx().no_late_bound_regions(data) {
None => { }
Some(ty::OutlivesPredicate(ty_a, r_b)) => {
self.stack.push((r_b, Some(ty)));
.map(|pred| Implication::Predicate(def_id, pred));
self.out.extend(obligations);
- let variances = ty::item_variances(self.tcx(), def_id);
+ let variances = self.tcx().item_variances(def_id);
+ self.accumulate_from_substs(substs, Some(&variances));
+ }
+
+ fn accumulate_from_substs(&mut self,
+ substs: &Substs<'tcx>,
+ variances: Option<&ty::ItemVariances>)
+ {
+ let mut tmp_variances = None;
+ let variances = variances.unwrap_or_else(|| {
+ tmp_variances = Some(ty::ItemVariances {
+ types: substs.types.map(|_| ty::Variance::Invariant),
+ regions: substs.regions().map(|_| ty::Variance::Invariant),
+ });
+ tmp_variances.as_ref().unwrap()
+ });
for (&region, &variance) in substs.regions().iter().zip(&variances.regions) {
match variance {
data);
for poly_trait_ref in traits::supertraits(self.tcx(), data.to_poly_trait_ref()) {
- match ty::no_late_bound_regions(self.tcx(), &poly_trait_ref) {
+ match self.tcx().no_late_bound_regions(&poly_trait_ref) {
Some(trait_ref) => { self.accumulate_from_assoc_types(trait_ref); }
None => { }
}
trait_ref);
let trait_def_id = trait_ref.def_id;
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
let assoc_type_projections: Vec<_> =
trait_def.associated_type_names
.iter()
- .map(|&name| ty::mk_projection(self.tcx(), trait_ref.clone(), name))
+ .map(|&name| self.tcx().mk_projection(trait_ref.clone(), name))
.collect();
debug!("accumulate_from_assoc_types: assoc_type_projections={:?}",
assoc_type_projections);
}
fn fully_normalize<T>(&self, value: &T) -> Result<T,ErrorReported>
- where T : TypeFoldable<'tcx> + ty::HasProjectionTypes
+ where T : TypeFoldable<'tcx> + ty::HasTypeFlags
{
let value =
traits::fully_normalize(self.infcx,
- self.closure_typer,
traits::ObligationCause::misc(self.span, self.body_id),
value);
match value {
// Since we don't actually *know* the self type for an object,
// this "open(err)" serves as a kind of dummy standin -- basically
// a skolemized type.
- let open_ty = ty::mk_infer(tcx, ty::FreshTy(0));
+ let open_ty = tcx.mk_infer(ty::FreshTy(0));
// Note that we preserve the overall binding levels here.
assert!(!open_ty.has_escaping_regions());
let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs)));
let mut predicates = others.to_predicates(tcx, open_ty);
- predicates.extend(trait_refs.iter().map(|t| t.as_predicate()));
+ predicates.extend(trait_refs.iter().map(|t| t.to_predicate()));
- ty::required_region_bounds(tcx, open_ty, predicates)
+ tcx.required_region_bounds(open_ty, predicates)
}
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn will_change(&mut self, _: bool, _: bool) -> bool {
- // since we are not comparing regions, we don't care
- false
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
- let a1 = ty::erase_late_bound_regions(self.tcx(), a);
- let b1 = ty::erase_late_bound_regions(self.tcx(), b);
+ let a1 = self.tcx().erase_late_bound_regions(a);
+ let b1 = self.tcx().erase_late_bound_regions(b);
let c = try!(self.relate(&a1, &b1));
Ok(ty::Binder(c))
}
use middle::ty::{TyVar};
use middle::ty::{IntType, UintType};
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, TypeError};
use middle::ty_fold;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use middle::ty_relate::{self, Relate, RelateResult, TypeRelation};
// All other cases of inference are errors
(&ty::TyInfer(_), _) |
(_, &ty::TyInfer(_)) => {
- Err(ty::terr_sorts(ty_relate::expected_found(relation, &a, &b)))
+ Err(TypeError::Sorts(ty_relate::expected_found(relation, &a, &b)))
}
.unify_var_value(vid, val)
.map_err(|e| int_unification_error(vid_is_expected, e)));
match val {
- IntType(v) => Ok(ty::mk_mach_int(infcx.tcx, v)),
- UintType(v) => Ok(ty::mk_mach_uint(infcx.tcx, v)),
+ IntType(v) => Ok(infcx.tcx.mk_mach_int(v)),
+ UintType(v) => Ok(infcx.tcx.mk_mach_uint(v)),
}
}
.borrow_mut()
.unify_var_value(vid, val)
.map_err(|e| float_unification_error(vid_is_expected, e)));
- Ok(ty::mk_mach_float(infcx.tcx, val))
+ Ok(infcx.tcx.mk_mach_float(val))
}
impl<'a, 'tcx> CombineFields<'a, 'tcx> {
};
let u = ty.fold_with(&mut generalize);
if generalize.cycle_detected {
- Err(ty::terr_cyclic_ty)
+ Err(TypeError::CyclicTy)
} else {
Ok(u)
}
pub trait RelateResultCompare<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
- F: FnOnce() -> ty::type_err<'tcx>;
+ F: FnOnce() -> ty::TypeError<'tcx>;
}
impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
- F: FnOnce() -> ty::type_err<'tcx>,
+ F: FnOnce() -> ty::TypeError<'tcx>,
{
self.clone().and_then(|s| {
if s == t {
}
fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
- -> ty::type_err<'tcx>
+ -> ty::TypeError<'tcx>
{
let (a, b) = v;
- ty::terr_int_mismatch(ty_relate::expected_found_bool(a_is_expected, &a, &b))
+ TypeError::IntMismatch(ty_relate::expected_found_bool(a_is_expected, &a, &b))
}
fn float_unification_error<'tcx>(a_is_expected: bool,
v: (ast::FloatTy, ast::FloatTy))
- -> ty::type_err<'tcx>
+ -> ty::TypeError<'tcx>
{
let (a, b) = v;
- ty::terr_float_mismatch(ty_relate::expected_found_bool(a_is_expected, &a, &b))
+ TypeError::FloatMismatch(ty_relate::expected_found_bool(a_is_expected, &a, &b))
}
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn will_change(&mut self, a: bool, b: bool) -> bool {
- // if either side changed from what it was, that could cause equality to fail
- a || b
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
use middle::infer;
use middle::region;
use middle::subst;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, TypeError, HasTypeFlags};
use middle::ty::{Region, ReFree};
use std::cell::{Cell, RefCell};
use syntax::print::pprust;
use syntax::ptr::P;
-pub fn note_and_explain_region(tcx: &ty::ctxt,
- prefix: &str,
- region: ty::Region,
- suffix: &str) {
- fn item_scope_tag(item: &ast::Item) -> &'static str {
- match item.node {
- ast::ItemImpl(..) => "impl",
- ast::ItemStruct(..) => "struct",
- ast::ItemEnum(..) => "enum",
- ast::ItemTrait(..) => "trait",
- ast::ItemFn(..) => "function body",
- _ => "item"
+impl<'tcx> ty::ctxt<'tcx> {
+ pub fn note_and_explain_region(&self,
+ prefix: &str,
+ region: ty::Region,
+ suffix: &str) {
+ fn item_scope_tag(item: &ast::Item) -> &'static str {
+ match item.node {
+ ast::ItemImpl(..) => "impl",
+ ast::ItemStruct(..) => "struct",
+ ast::ItemEnum(..) => "enum",
+ ast::ItemTrait(..) => "trait",
+ ast::ItemFn(..) => "function body",
+ _ => "item"
+ }
}
- }
-
- fn explain_span(tcx: &ty::ctxt, heading: &str, span: Span)
- -> (String, Option<Span>) {
- let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo);
- (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()),
- Some(span))
- }
- let (description, span) = match region {
- ty::ReScope(scope) => {
- let new_string;
- let unknown_scope = || {
- format!("{}unknown scope: {:?}{}. Please report a bug.",
- prefix, scope, suffix)
- };
- let span = match scope.span(&tcx.map) {
- Some(s) => s,
- None => return tcx.sess.note(&unknown_scope())
- };
- let tag = match tcx.map.find(scope.node_id()) {
- Some(ast_map::NodeBlock(_)) => "block",
- Some(ast_map::NodeExpr(expr)) => match expr.node {
- ast::ExprCall(..) => "call",
- ast::ExprMethodCall(..) => "method call",
- ast::ExprMatch(_, _, ast::MatchSource::IfLetDesugar { .. }) => "if let",
- ast::ExprMatch(_, _, ast::MatchSource::WhileLetDesugar) => "while let",
- ast::ExprMatch(_, _, ast::MatchSource::ForLoopDesugar) => "for",
- ast::ExprMatch(..) => "match",
- _ => "expression",
- },
- Some(ast_map::NodeStmt(_)) => "statement",
- Some(ast_map::NodeItem(it)) => item_scope_tag(&*it),
- Some(_) | None => {
- return tcx.sess.span_note(span, &unknown_scope());
- }
- };
- let scope_decorated_tag = match scope {
- region::CodeExtent::Misc(_) => tag,
- region::CodeExtent::ParameterScope { .. } => {
- "scope of parameters for function"
- }
- region::CodeExtent::DestructionScope(_) => {
- new_string = format!("destruction scope surrounding {}", tag);
- &new_string[..]
- }
- region::CodeExtent::Remainder(r) => {
- new_string = format!("block suffix following statement {}",
- r.first_statement_index);
- &new_string[..]
- }
- };
- explain_span(tcx, scope_decorated_tag, span)
+ fn explain_span(tcx: &ty::ctxt, heading: &str, span: Span)
+ -> (String, Option<Span>) {
+ let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo);
+ (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()),
+ Some(span))
}
- ty::ReFree(ref fr) => {
- let prefix = match fr.bound_region {
- ty::BrAnon(idx) => {
- format!("the anonymous lifetime #{} defined on", idx + 1)
- }
- ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(),
- _ => {
- format!("the lifetime {} as defined on",
- fr.bound_region)
- }
- };
+ let (description, span) = match region {
+ ty::ReScope(scope) => {
+ let new_string;
+ let unknown_scope = || {
+ format!("{}unknown scope: {:?}{}. Please report a bug.",
+ prefix, scope, suffix)
+ };
+ let span = match scope.span(&self.map) {
+ Some(s) => s,
+ None => return self.sess.note(&unknown_scope())
+ };
+ let tag = match self.map.find(scope.node_id()) {
+ Some(ast_map::NodeBlock(_)) => "block",
+ Some(ast_map::NodeExpr(expr)) => match expr.node {
+ ast::ExprCall(..) => "call",
+ ast::ExprMethodCall(..) => "method call",
+ ast::ExprMatch(_, _, ast::MatchSource::IfLetDesugar { .. }) => "if let",
+ ast::ExprMatch(_, _, ast::MatchSource::WhileLetDesugar) => "while let",
+ ast::ExprMatch(_, _, ast::MatchSource::ForLoopDesugar) => "for",
+ ast::ExprMatch(..) => "match",
+ _ => "expression",
+ },
+ Some(ast_map::NodeStmt(_)) => "statement",
+ Some(ast_map::NodeItem(it)) => item_scope_tag(&*it),
+ Some(_) | None => {
+ return self.sess.span_note(span, &unknown_scope());
+ }
+ };
+ let scope_decorated_tag = match scope {
+ region::CodeExtent::Misc(_) => tag,
+ region::CodeExtent::ParameterScope { .. } => {
+ "scope of parameters for function"
+ }
+ region::CodeExtent::DestructionScope(_) => {
+ new_string = format!("destruction scope surrounding {}", tag);
+ &new_string[..]
+ }
+ region::CodeExtent::Remainder(r) => {
+ new_string = format!("block suffix following statement {}",
+ r.first_statement_index);
+ &new_string[..]
+ }
+ };
+ explain_span(self, scope_decorated_tag, span)
+ }
- match tcx.map.find(fr.scope.node_id) {
- Some(ast_map::NodeBlock(ref blk)) => {
- let (msg, opt_span) = explain_span(tcx, "block", blk.span);
- (format!("{} {}", prefix, msg), opt_span)
- }
- Some(ast_map::NodeItem(it)) => {
- let tag = item_scope_tag(&*it);
- let (msg, opt_span) = explain_span(tcx, tag, it.span);
- (format!("{} {}", prefix, msg), opt_span)
- }
- Some(_) | None => {
- // this really should not happen
- (format!("{} unknown free region bounded by scope {:?}",
- prefix, fr.scope), None)
+ ty::ReFree(ref fr) => {
+ let prefix = match fr.bound_region {
+ ty::BrAnon(idx) => {
+ format!("the anonymous lifetime #{} defined on", idx + 1)
+ }
+ ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(),
+ _ => {
+ format!("the lifetime {} as defined on",
+ fr.bound_region)
+ }
+ };
+
+ match self.map.find(fr.scope.node_id) {
+ Some(ast_map::NodeBlock(ref blk)) => {
+ let (msg, opt_span) = explain_span(self, "block", blk.span);
+ (format!("{} {}", prefix, msg), opt_span)
+ }
+ Some(ast_map::NodeItem(it)) => {
+ let tag = item_scope_tag(&*it);
+ let (msg, opt_span) = explain_span(self, tag, it.span);
+ (format!("{} {}", prefix, msg), opt_span)
+ }
+ Some(_) | None => {
+ // this really should not happen
+ (format!("{} unknown free region bounded by scope {:?}",
+ prefix, fr.scope), None)
+ }
}
}
- }
- ty::ReStatic => ("the static lifetime".to_owned(), None),
+ ty::ReStatic => ("the static lifetime".to_owned(), None),
- ty::ReEmpty => ("the empty lifetime".to_owned(), None),
+ ty::ReEmpty => ("the empty lifetime".to_owned(), None),
- ty::ReEarlyBound(ref data) => {
- (format!("{}", token::get_name(data.name)), None)
- }
+ ty::ReEarlyBound(ref data) => (data.name.to_string(), None),
- // I believe these cases should not occur (except when debugging,
- // perhaps)
- ty::ReInfer(_) | ty::ReLateBound(..) => {
- (format!("lifetime {:?}", region), None)
+ // I believe these cases should not occur (except when debugging,
+ // perhaps)
+ ty::ReInfer(_) | ty::ReLateBound(..) => {
+ (format!("lifetime {:?}", region), None)
+ }
+ };
+ let message = format!("{}{}{}", prefix, description, suffix);
+ if let Some(span) = span {
+ self.sess.span_note(span, &message);
+ } else {
+ self.sess.note(&message);
}
- };
- let message = format!("{}{}{}", prefix, description, suffix);
- if let Some(span) = span {
- tcx.sess.span_note(span, &message);
- } else {
- tcx.sess.note(&message);
}
}
fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
-> Vec<RegionResolutionError<'tcx>>;
- fn report_type_error(&self, trace: TypeTrace<'tcx>, terr: &ty::type_err<'tcx>);
+ fn report_type_error(&self, trace: TypeTrace<'tcx>, terr: &ty::TypeError<'tcx>);
fn report_and_explain_type_error(&self,
trace: TypeTrace<'tcx>,
- terr: &ty::type_err<'tcx>);
+ terr: &ty::TypeError<'tcx>);
fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String>;
- fn expected_found_str<T: fmt::Display + Resolvable<'tcx>>(
+ fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + HasTypeFlags>(
&self,
- exp_found: &ty::expected_found<T>)
+ exp_found: &ty::ExpectedFound<T>)
-> Option<String>;
fn report_concrete_failure(&self,
fn report_processed_errors(&self,
var_origin: &[RegionVariableOrigin],
- trace_origin: &[(TypeTrace<'tcx>, ty::type_err<'tcx>)],
+ trace_origin: &[(TypeTrace<'tcx>, ty::TypeError<'tcx>)],
same_regions: &[SameRegions]);
fn give_suggestion(&self, same_regions: &[SameRegions]);
match free_regions_from_same_fn(self.tcx, sub, sup) {
Some(ref same_frs) if trace.is_some() => {
let trace = trace.unwrap();
- let terr = ty::terr_regions_does_not_outlive(sup,
- sub);
+ let terr = TypeError::RegionsDoesNotOutlive(sup,
+ sub);
trace_origins.push((trace, terr));
append_to_same_regions(&mut same_regions, same_frs);
}
}
}
- fn report_type_error(&self, trace: TypeTrace<'tcx>, terr: &ty::type_err<'tcx>) {
+ fn report_type_error(&self, trace: TypeTrace<'tcx>, terr: &ty::TypeError<'tcx>) {
let expected_found_str = match self.values_str(&trace.values) {
Some(v) => v,
None => {
fn report_and_explain_type_error(&self,
trace: TypeTrace<'tcx>,
- terr: &ty::type_err<'tcx>) {
+ terr: &ty::TypeError<'tcx>) {
let span = trace.origin.span();
self.report_type_error(trace, terr);
- ty::note_and_explain_type_err(self.tcx, terr, span);
+ self.tcx.note_and_explain_type_err(terr, span);
}
/// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived
}
}
- fn expected_found_str<T: fmt::Display + Resolvable<'tcx>>(
+ fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + HasTypeFlags>(
&self,
- exp_found: &ty::expected_found<T>)
+ exp_found: &ty::ExpectedFound<T>)
-> Option<String>
{
let expected = exp_found.expected.resolve(self);
- if expected.contains_error() {
+ if expected.references_error() {
return None;
}
let found = exp_found.found.resolve(self);
- if found.contains_error() {
+ if found.references_error() {
return None;
}
&format!(
"consider adding an explicit lifetime bound for `{}`",
bound_kind));
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
&format!("{} must be valid for ", labeled_user_string),
sub,
"...");
sub: Region,
sup: Region) {
match origin {
- infer::Subtype(trace) |
- infer::DefaultExistentialBound(trace) => {
- let terr = ty::terr_regions_does_not_outlive(sup, sub);
+ infer::Subtype(trace) => {
+ let terr = TypeError::RegionsDoesNotOutlive(sup, sub);
self.report_and_explain_type_error(trace, &terr);
}
infer::Reborrow(span) => {
span_err!(self.tcx.sess, span, E0312,
"lifetime of reference outlines \
lifetime of borrowed content...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"...the reference is valid for ",
sub,
"...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"...but the borrowed content is only valid for ",
sup,
"");
span_err!(self.tcx.sess, span, E0313,
"lifetime of borrowed pointer outlives \
lifetime of captured variable `{}`...",
- ty::local_var_name_str(self.tcx,
- upvar_id.var_id)
- .to_string());
- note_and_explain_region(
- self.tcx,
+ self.tcx.local_var_name_str(upvar_id.var_id));
+ self.tcx.note_and_explain_region(
"...the borrowed pointer is valid for ",
sub,
"...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
&format!("...but `{}` is only valid for ",
- ty::local_var_name_str(self.tcx,
- upvar_id.var_id)
- .to_string()),
+ self.tcx.local_var_name_str(upvar_id.var_id)),
sup,
"");
}
infer::InfStackClosure(span) => {
span_err!(self.tcx.sess, span, E0314,
"closure outlives stack frame");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"...the closure must be valid for ",
sub,
"...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"...but the closure's stack frame is only valid for ",
sup,
"");
infer::InvokeClosure(span) => {
span_err!(self.tcx.sess, span, E0315,
"cannot invoke closure outside of its lifetime");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the closure is only valid for ",
sup,
"");
self.tcx.sess.span_err(
span,
"dereference of reference outside its lifetime");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the reference is only valid for ",
sup,
"");
self.tcx.sess.span_err(
span,
&format!("captured variable `{}` does not \
- outlive the enclosing closure",
- ty::local_var_name_str(self.tcx,
- id).to_string()));
- note_and_explain_region(
- self.tcx,
+ outlive the enclosing closure",
+ self.tcx.local_var_name_str(id)));
+ self.tcx.note_and_explain_region(
"captured variable is valid for ",
sup,
"");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"closure is valid for ",
sub,
"");
infer::IndexSlice(span) => {
self.tcx.sess.span_err(span,
"index of slice outside its lifetime");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the slice is only valid for ",
sup,
"");
span,
"lifetime of the source pointer does not outlive \
lifetime bound of the object type");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"object type is valid for ",
sub,
"");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"source pointer is only valid for ",
sup,
"");
&format!("the type `{}` does not fulfill the \
required lifetime",
self.ty_to_string(ty)));
- note_and_explain_region(self.tcx,
+ self.tcx.note_and_explain_region(
"type must outlive ",
sub,
"");
self.tcx.sess.span_err(
span,
"lifetime bound not satisfied");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"lifetime parameter instantiated with ",
sup,
"");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"but lifetime parameter must outlive ",
sub,
"");
&format!("the type `{}` (provided as the value of \
a type parameter) is not valid at this point",
self.ty_to_string(ty)));
- note_and_explain_region(self.tcx,
+ self.tcx.note_and_explain_region(
"type must outlive ",
sub,
"");
span,
"lifetime of method receiver does not outlive \
the method call");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the receiver is only valid for ",
sup,
"");
span,
"lifetime of function argument does not outlive \
the function call");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the function argument is only valid for ",
sup,
"");
span,
"lifetime of return value does not outlive \
the function call");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the return value is only valid for ",
sup,
"");
span,
"lifetime of operand does not outlive \
the operation");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the operand is only valid for ",
sup,
"");
span,
"reference is not valid \
at the time of borrow");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the borrow is only valid for ",
sup,
"");
span,
"automatically reference is not valid \
at the time of borrow");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the automatic borrow is only valid for ",
sup,
"");
&format!("type of expression contains references \
that are not valid during the expression: `{}`",
self.ty_to_string(t)));
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"type is only valid for ",
sup,
"");
"unsafe use of destructor: destructor might be called \
while references are dead");
// FIXME (22171): terms "super/subregion" are suboptimal
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"superregion: ",
sup,
"");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"subregion: ",
sub,
"");
self.tcx.sess.span_err(
span,
"lifetime of variable does not enclose its declaration");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the variable is only valid for ",
sup,
"");
&format!("in type `{}`, reference has a longer lifetime \
than the data it references",
self.ty_to_string(ty)));
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"the pointer is valid for ",
sub,
"");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"but the referenced data is only valid for ",
sup,
"");
sup_region: Region) {
self.report_inference_failure(var_origin);
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"first, the lifetime cannot outlive ",
sup_region,
"...");
self.note_region_origin(&sup_origin);
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"but, the lifetime must be valid for ",
sub_region,
"...");
region2: Region) {
self.report_inference_failure(var_origin);
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"first, the lifetime must be contained by ",
region1,
"...");
self.note_region_origin(&origin1);
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"but, the lifetime must also be contained by ",
region2,
"...");
fn report_processed_errors(&self,
var_origins: &[RegionVariableOrigin],
- trace_origins: &[(TypeTrace<'tcx>, ty::type_err<'tcx>)],
+ trace_origins: &[(TypeTrace<'tcx>, ty::TypeError<'tcx>)],
same_regions: &[SameRegions]) {
for vo in var_origins {
self.report_inference_failure(vo.clone());
}
self.give_suggestion(same_regions);
- for &(ref trace, terr) in trace_origins {
- self.report_and_explain_type_error(trace.clone(), &terr);
+ for &(ref trace, ref terr) in trace_origins {
+ self.report_and_explain_type_error(trace.clone(), terr);
}
}
// choice of lifetime name deterministic and thus easier to test.
let mut names = Vec::new();
for rn in region_names {
- let lt_name = token::get_name(*rn).to_string();
+ let lt_name = rn.to_string();
names.push(lt_name);
}
names.sort();
};
match a_def {
def::DefTy(did, _) | def::DefStruct(did) => {
- let generics = ty::lookup_item_type(self.tcx, did).generics;
+ let generics = self.tcx.lookup_item_type(did).generics;
let expected =
generics.regions.len(subst::TypeSpace) as u32;
parameters: new_parameters
};
let mut new_segs = Vec::new();
- new_segs.push_all(path.segments.init());
+ new_segs.push_all(path.segments.split_last().unwrap().1);
new_segs.push(new_seg);
ast::Path {
span: path.span,
}
infer::LateBoundRegion(_, br, infer::AssocTypeProjection(type_name)) => {
format!(" for lifetime parameter {}in trait containing associated type `{}`",
- br_string(br), token::get_name(type_name))
+ br_string(br), type_name)
}
infer::EarlyBoundRegion(_, name) => {
format!(" for lifetime parameter `{}`",
- &token::get_name(name))
+ name)
}
infer::BoundRegionInCoherence(name) => {
format!(" for lifetime parameter `{}` in coherence check",
- &token::get_name(name))
+ name)
}
infer::UpvarRegion(ref upvar_id, _) => {
format!(" for capture of `{}` by closure",
- ty::local_var_name_str(self.tcx, upvar_id.var_id).to_string())
+ self.tcx.local_var_name_str(upvar_id.var_id).to_string())
}
};
fn note_region_origin(&self, origin: &SubregionOrigin<'tcx>) {
match *origin {
- infer::Subtype(ref trace) |
- infer::DefaultExistentialBound(ref trace) => {
+ infer::Subtype(ref trace) => {
let desc = match trace.origin {
infer::Misc(_) => {
"types are compatible"
span,
&format!(
"...so that closure can access `{}`",
- ty::local_var_name_str(self.tcx, upvar_id.var_id)
+ self.tcx.local_var_name_str(upvar_id.var_id)
.to_string()))
}
infer::InfStackClosure(span) => {
span,
&format!("...so that captured variable `{}` \
does not outlive the enclosing closure",
- ty::local_var_name_str(
- self.tcx,
- id).to_string()));
+ self.tcx.local_var_name_str(id)));
}
infer::IndexSlice(span) => {
self.tcx.sess.span_note(
pub trait Resolvable<'tcx> {
fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Self;
- fn contains_error(&self) -> bool;
}
impl<'tcx> Resolvable<'tcx> for Ty<'tcx> {
fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Ty<'tcx> {
infcx.resolve_type_vars_if_possible(self)
}
- fn contains_error(&self) -> bool {
- ty::type_is_error(*self)
- }
}
impl<'tcx> Resolvable<'tcx> for ty::TraitRef<'tcx> {
-> ty::TraitRef<'tcx> {
infcx.resolve_type_vars_if_possible(self)
}
- fn contains_error(&self) -> bool {
- ty::trait_ref_contains_error(self)
- }
}
impl<'tcx> Resolvable<'tcx> for ty::PolyTraitRef<'tcx> {
{
infcx.resolve_type_vars_if_possible(self)
}
-
- fn contains_error(&self) -> bool {
- ty::trait_ref_contains_error(&self.0)
- }
}
fn lifetimes_in_scope(tcx: &ty::ctxt,
fn with_taken(taken: &[ast::LifetimeDef]) -> LifeGiver {
let mut taken_ = HashSet::new();
for lt in taken {
- let lt_name = token::get_name(lt.lifetime.name).to_string();
+ let lt_name = lt.lifetime.name.to_string();
taken_.insert(lt_name);
}
LifeGiver {
}
fn give_lifetime(&self) -> ast::Lifetime {
- let mut lifetime;
+ let lifetime;
loop {
let mut s = String::from("'");
s.push_str(&num_to_string(self.counter.get()));
//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
//! inferencer knows "so far".
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use middle::ty_fold;
use middle::ty_fold::TypeFoldable;
use middle::ty_fold::TypeFolder;
Entry::Vacant(entry) => {
let index = self.freshen_count;
self.freshen_count += 1;
- let t = ty::mk_infer(self.infcx.tcx, freshener(index));
+ let t = self.infcx.tcx.mk_infer(freshener(index));
entry.insert(t);
t
}
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_needs_infer(t) && !ty::type_has_erasable_regions(t) {
+ if !t.needs_infer() && !t.has_erasable_regions() {
return t;
}
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn will_change(&mut self, a: bool, b: bool) -> bool {
- // Hmm, so the result of GLB will still be a LB if one or both
- // sides change to 'static, but it may no longer be the GLB.
- // I'm going to go with `a || b` here to be conservative,
- // since the result of this operation may be affected, though
- // I think it would mostly be more accepting than before (since the result
- // would be a bigger region).
- a || b
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
between functions with bound region parameters. Consider, for
example, whether the following relation holds:
- for<'a> fn(&'a int) <: for<'b> fn(&'b int)? (Yes, a => b)
+ for<'a> fn(&'a isize) <: for<'b> fn(&'b isize)? (Yes, a => b)
The answer is that of course it does. These two types are basically
the same, except that in one we used the name `a` and one we used
a lifetime is bound in a function type (that is, is a lifetime
parameter) or appears free (is defined in some outer scope).
Therefore, from now on I will always write the bindings explicitly,
-using the Rust syntax `for<'a> fn(&'a int)` to indicate that `a` is a
+using the Rust syntax `for<'a> fn(&'a isize)` to indicate that `a` is a
lifetime parameter.
Now let's consider two more function types. Here, we assume that the
`'b` lifetime is defined somewhere outside and hence is not a lifetime
parameter bound by the function type (it "appears free"):
- for<'a> fn(&'a int) <: fn(&'b int)? (Yes, a => b)
+ for<'a> fn(&'a isize) <: fn(&'b isize)? (Yes, a => b)
This subtyping relation does in fact hold. To see why, you have to
consider what subtyping means. One way to look at `T1 <: T2` is to
So, what if we reverse the order of the two function types, like this:
- fn(&'b int) <: for<'a> fn(&'a int)? (No)
+ fn(&'b isize) <: for<'a> fn(&'a isize)? (No)
Does the subtyping relationship still hold? The answer of course is
no. In this case, the function accepts *only the lifetime `'b`*,
What about these two examples:
- for<'a,'b> fn(&'a int, &'b int) <: for<'a> fn(&'a int, &'a int)? (Yes)
- for<'a> fn(&'a int, &'a int) <: for<'a,'b> fn(&'a int, &'b int)? (No)
+ for<'a,'b> fn(&'a isize, &'b isize) <: for<'a> fn(&'a isize, &'a isize)? (Yes)
+ for<'a> fn(&'a isize, &'a isize) <: for<'a,'b> fn(&'a isize, &'b isize)? (No)
Here, it is true that functions which take two pointers with any two
lifetimes can be treated as if they only accepted two pointers with
use super::combine::CombineFields;
use middle::subst;
-use middle::ty::{self, Binder};
+use middle::ty::{self, TypeError, Binder};
use middle::ty_fold::{self, TypeFoldable};
use middle::ty_relate::{Relate, RelateResult, TypeRelation};
use syntax::codemap::Span;
Err((skol_br, tainted_region)) => {
if self.a_is_expected {
debug!("Not as polymorphic!");
- return Err(ty::terr_regions_insufficiently_polymorphic(skol_br,
+ return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
tainted_region));
} else {
debug!("Overly polymorphic!");
- return Err(ty::terr_regions_overly_polymorphic(skol_br,
+ return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
tainted_region));
}
}
where T: TypeFoldable<'tcx>,
F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
{
- unbound_value.fold_with(&mut ty_fold::RegionFolder::new(tcx, &mut |region, current_depth| {
+ ty_fold::fold_regions(tcx, unbound_value, &mut false, |region, current_depth| {
// we should only be encountering "escaping" late-bound regions here,
// because the ones at the current level should have been replaced
// with fresh variables
});
fldr(region, ty::DebruijnIndex::new(current_depth))
- }))
+ })
}
impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
let escaping_types =
self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot);
- let escaping_region_vars: FnvHashSet<_> =
- escaping_types
- .iter()
- .flat_map(|&t| ty_fold::collect_regions(self.tcx, &t))
- .collect();
+ let mut escaping_region_vars = FnvHashSet();
+ for ty in &escaping_types {
+ ty_fold::collect_regions(self.tcx, ty, &mut escaping_region_vars);
+ }
region_vars.retain(|&region_vid| {
let r = ty::ReInfer(ty::ReVar(region_vid));
types: &mut subst::VecPerParamSpace<ty::Ty<'tcx>>,
defs: &[ty::TypeParameterDef<'tcx>]) {
for def in defs {
- let ty = ty::mk_param_from_def(tcx, def);
+ let ty = tcx.mk_param_from_def(def);
types.push(def.space, ty);
}
}
// binder is that we encountered in `value`. The caller is
// responsible for ensuring that (a) `value` contains at least one
// binder and (b) that binder is the one we want to use.
- let result = ty_fold::fold_regions(infcx.tcx, &value, |r, current_depth| {
+ let result = ty_fold::fold_regions(infcx.tcx, &value, &mut false, |r, current_depth| {
match inv_skol_map.get(&r) {
None => r,
Some(br) => {
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn will_change(&mut self, a: bool, b: bool) -> bool {
- // result will be 'static if a || b
- a || b
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
//! See the Book for more information.
-#![allow(non_camel_case_types)]
-
pub use self::LateBoundRegionConversionTime::*;
pub use self::RegionVariableOrigin::*;
pub use self::SubregionOrigin::*;
pub use self::TypeOrigin::*;
pub use self::ValuePairs::*;
-pub use self::fixup_err::*;
pub use middle::ty::IntVarValue;
pub use self::freshen::TypeFreshener;
pub use self::region_inference::GenericKind;
use middle::free_region::FreeRegionMap;
+use middle::mem_categorization as mc;
+use middle::mem_categorization::McResult;
+use middle::region::CodeExtent;
use middle::subst;
use middle::subst::Substs;
+use middle::subst::Subst;
+use middle::traits::{self, FulfillmentContext, Normalized,
+ SelectionContext, ObligationCause};
use middle::ty::{TyVid, IntVid, FloatVid, RegionVid, UnconstrainedNumeric};
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, TypeError, HasTypeFlags};
use middle::ty_fold::{self, TypeFolder, TypeFoldable};
use middle::ty_relate::{Relate, RelateResult, TypeRelation};
use rustc_data_structures::unify::{self, UnificationTable};
-use std::cell::{RefCell};
+use std::cell::{RefCell, Ref};
use std::fmt;
use syntax::ast;
use syntax::codemap;
-use syntax::codemap::Span;
-use util::nodemap::FnvHashMap;
+use syntax::codemap::{Span, DUMMY_SP};
+use util::nodemap::{FnvHashMap, NodeMap};
use self::combine::CombineFields;
use self::region_inference::{RegionVarBindings, RegionSnapshot};
pub type Bound<T> = Option<T>;
pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
-pub type fres<T> = Result<T, fixup_err>; // "fixup result"
+pub type FixupResult<T> = Result<T, FixupError>; // "fixup result"
pub struct InferCtxt<'a, 'tcx: 'a> {
pub tcx: &'a ty::ctxt<'tcx>,
+ pub tables: &'a RefCell<ty::Tables<'tcx>>,
+
// We instantiate UnificationTable with bounds<Ty> because the
// types that might instantiate a general type variable have an
// order, represented by its upper and lower bounds.
// For region variables.
region_vars: RegionVarBindings<'a, 'tcx>,
+
+ pub parameter_environment: ty::ParameterEnvironment<'a, 'tcx>,
+
+ pub fulfillment_cx: RefCell<traits::FulfillmentContext<'tcx>>,
+
+ // This is a temporary field used for toggling on normalization in the inference context,
+ // as we move towards the approach described here:
+ // https://internals.rust-lang.org/t/flattening-the-contexts-for-fun-and-profit/2293
+ // At a point sometime in the future normalization will be done by the typing context
+ // directly.
+ normalize: bool,
+
+ err_count_on_creation: usize,
}
/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized
/// See `error_reporting.rs` for more details
#[derive(Clone, Debug)]
pub enum ValuePairs<'tcx> {
- Types(ty::expected_found<Ty<'tcx>>),
- TraitRefs(ty::expected_found<ty::TraitRef<'tcx>>),
- PolyTraitRefs(ty::expected_found<ty::PolyTraitRef<'tcx>>),
+ Types(ty::ExpectedFound<Ty<'tcx>>),
+ TraitRefs(ty::ExpectedFound<ty::TraitRef<'tcx>>),
+ PolyTraitRefs(ty::ExpectedFound<ty::PolyTraitRef<'tcx>>),
}
/// The trace designates the path through inference that we took to
// Arose from a subtyping relation
Subtype(TypeTrace<'tcx>),
- // Arose from a subtyping relation
- DefaultExistentialBound(TypeTrace<'tcx>),
-
// Stack-allocated closures cannot outlive innermost loop
// or function so as to ensure we only require finite stack
InfStackClosure(Span),
}
#[derive(Copy, Clone, Debug)]
-pub enum fixup_err {
- unresolved_int_ty(IntVid),
- unresolved_float_ty(FloatVid),
- unresolved_ty(TyVid)
+pub enum FixupError {
+ UnresolvedIntTy(IntVid),
+ UnresolvedFloatTy(FloatVid),
+ UnresolvedTy(TyVid)
}
-pub fn fixup_err_to_string(f: fixup_err) -> String {
+pub fn fixup_err_to_string(f: FixupError) -> String {
+ use self::FixupError::*;
+
match f {
- unresolved_int_ty(_) => {
+ UnresolvedIntTy(_) => {
"cannot determine the type of this integer; add a suffix to \
specify the type explicitly".to_string()
}
- unresolved_float_ty(_) => {
+ UnresolvedFloatTy(_) => {
"cannot determine the type of this number; add a suffix to specify \
the type explicitly".to_string()
}
- unresolved_ty(_) => "unconstrained type".to_string(),
+ UnresolvedTy(_) => "unconstrained type".to_string(),
}
}
-pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>)
+/// errors_will_be_reported is required to proxy to the fulfillment context
+/// FIXME -- a better option would be to hold back on modifying
+/// the global cache until we know that all dependent obligations
+/// are also satisfied. In that case, we could actually remove
+/// this boolean flag, and we'd also avoid the problem of squelching
+/// duplicate errors that occur across fns.
+pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
+ tables: &'a RefCell<ty::Tables<'tcx>>,
+ param_env: Option<ty::ParameterEnvironment<'a, 'tcx>>,
+ errors_will_be_reported: bool)
-> InferCtxt<'a, 'tcx> {
InferCtxt {
tcx: tcx,
+ tables: tables,
type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
int_unification_table: RefCell::new(UnificationTable::new()),
float_unification_table: RefCell::new(UnificationTable::new()),
region_vars: RegionVarBindings::new(tcx),
+ parameter_environment: param_env.unwrap_or(tcx.empty_parameter_environment()),
+ fulfillment_cx: RefCell::new(traits::FulfillmentContext::new(errors_will_be_reported)),
+ normalize: false,
+ err_count_on_creation: tcx.sess.err_count()
}
}
+pub fn normalizing_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
+ tables: &'a RefCell<ty::Tables<'tcx>>)
+ -> InferCtxt<'a, 'tcx> {
+ let mut infcx = new_infer_ctxt(tcx, tables, None, false);
+ infcx.normalize = true;
+ infcx
+}
+
/// Computes the least upper-bound of `a` and `b`. If this is not possible, reports an error and
/// returns ty::err.
pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
fn expected_found<T>(a_is_expected: bool,
a: T,
b: T)
- -> ty::expected_found<T>
+ -> ty::ExpectedFound<T>
{
if a_is_expected {
- ty::expected_found {expected: a, found: b}
+ ty::ExpectedFound {expected: a, found: b}
} else {
- ty::expected_found {expected: b, found: a}
+ ty::ExpectedFound {expected: b, found: a}
}
}
region_vars_snapshot: RegionSnapshot,
}
+pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
+ where T : TypeFoldable<'tcx> + HasTypeFlags
+{
+ debug!("normalize_associated_type(t={:?})", value);
+
+ let value = erase_regions(tcx, value);
+
+ if !value.has_projection_types() {
+ return value;
+ }
+
+ let infcx = new_infer_ctxt(tcx, &tcx.tables, None, true);
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let cause = traits::ObligationCause::dummy();
+ let traits::Normalized { value: result, obligations } =
+ traits::normalize(&mut selcx, cause, &value);
+
+ debug!("normalize_associated_type: result={:?} obligations={:?}",
+ result,
+ obligations);
+
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
+
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
+
+ let result = drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result);
+
+ result
+}
+
+pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span,
+ infcx: &InferCtxt<'a,'tcx>,
+ fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+ result: &T)
+ -> T
+ where T : TypeFoldable<'tcx>
+{
+ match drain_fulfillment_cx(infcx, fulfill_cx, result) {
+ Ok(v) => v,
+ Err(errors) => {
+ infcx.tcx.sess.span_bug(
+ span,
+ &format!("Encountered errors `{:?}` fulfilling during trans",
+ errors));
+ }
+ }
+}
+
+/// Finishes processing any obligations that remain in the fulfillment
+/// context, and then "freshens" and returns `result`. This is
+/// primarily used during normalization and other cases where
+/// processing the obligations in `fulfill_cx` may cause type
+/// inference variables that appear in `result` to be unified, and
+/// hence we need to process those obligations to get the complete
+/// picture of the type.
+pub fn drain_fulfillment_cx<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
+ fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+ result: &T)
+ -> Result<T,Vec<traits::FulfillmentError<'tcx>>>
+ where T : TypeFoldable<'tcx>
+{
+ debug!("drain_fulfillment_cx(result={:?})",
+ result);
+
+ // In principle, we only need to do this so long as `result`
+ // contains unbound type parameters. It could be a slight
+ // optimization to stop iterating early.
+ match fulfill_cx.select_all_or_error(infcx) {
+ Ok(()) => { }
+ Err(errors) => {
+ return Err(errors);
+ }
+ }
+
+ // Use freshen to simultaneously replace all type variables with
+ // their bindings and replace all regions with 'static. This is
+ // sort of overkill because we do not expect there to be any
+ // unbound type variables, hence no `TyFresh` types should ever be
+ // inserted.
+ Ok(result.fold_with(&mut infcx.freshener()))
+}
+
+/// Returns an equivalent value with all free regions removed (note
+/// that late-bound regions remain, because they are important for
+/// subtyping, but they are anonymized and normalized as well). This
+/// is a stronger, caching version of `ty_fold::erase_regions`.
+pub fn erase_regions<'tcx,T>(cx: &ty::ctxt<'tcx>, value: &T) -> T
+ where T : TypeFoldable<'tcx>
+{
+ let value1 = value.fold_with(&mut RegionEraser(cx));
+ debug!("erase_regions({:?}) = {:?}",
+ value, value1);
+ return value1;
+
+ struct RegionEraser<'a, 'tcx: 'a>(&'a ty::ctxt<'tcx>);
+
+ impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> {
+ fn tcx(&self) -> &ty::ctxt<'tcx> { self.0 }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match self.tcx().normalized_cache.borrow().get(&ty).cloned() {
+ None => {}
+ Some(u) => return u
+ }
+
+ let t_norm = ty_fold::super_fold_ty(self, ty);
+ self.tcx().normalized_cache.borrow_mut().insert(ty, t_norm);
+ return t_norm;
+ }
+
+ fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
+ where T : TypeFoldable<'tcx>
+ {
+ let u = self.tcx().anonymize_late_bound_regions(t);
+ ty_fold::super_fold_binder(self, &u)
+ }
+
+ fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ // because late-bound regions affect subtyping, we can't
+ // erase the bound/free distinction, but we can replace
+ // all free regions with 'static.
+ //
+ // Note that we *CAN* replace early-bound regions -- the
+ // type system never "sees" those, they get substituted
+ // away. In trans, they will always be erased to 'static
+ // whenever a substitution occurs.
+ match r {
+ ty::ReLateBound(..) => r,
+ _ => ty::ReStatic
+ }
+ }
+
+ fn fold_substs(&mut self,
+ substs: &subst::Substs<'tcx>)
+ -> subst::Substs<'tcx> {
+ subst::Substs { regions: subst::ErasedRegions,
+ types: substs.types.fold_with(self) }
+ }
+ }
+}
+
impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn freshen<T:TypeFoldable<'tcx>>(&self, t: T) -> T {
t.fold_with(&mut self.freshener())
}
}
+ /// Returns a type variable's default fallback if any exists. A default
+ /// must be attached to the variable when created, if it is created
+ /// without a default, this will return None.
+ ///
+ /// This code does not apply to integral or floating point variables,
+    /// only to user-declared defaults.
+ ///
+ /// See `new_ty_var_with_default` to create a type variable with a default.
+ /// See `type_variable::Default` for details about what a default entails.
+ pub fn default(&self, ty: Ty<'tcx>) -> Option<type_variable::Default<'tcx>> {
+ match ty.sty {
+ ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid),
+ _ => None
+ }
+ }
+
+ pub fn unsolved_variables(&self) -> Vec<ty::Ty<'tcx>> {
+ let mut variables = Vec::new();
+
+ let unbound_ty_vars = self.type_variables
+ .borrow()
+ .unsolved_variables()
+ .into_iter()
+ .map(|t| self.tcx.mk_var(t));
+
+ let unbound_int_vars = self.int_unification_table
+ .borrow_mut()
+ .unsolved_variables()
+ .into_iter()
+ .map(|v| self.tcx.mk_int_var(v));
+
+ let unbound_float_vars = self.float_unification_table
+ .borrow_mut()
+ .unsolved_variables()
+ .into_iter()
+ .map(|v| self.tcx.mk_float_var(v));
+
+ variables.extend(unbound_ty_vars);
+ variables.extend(unbound_int_vars);
+ variables.extend(unbound_float_vars);
+
+ return variables;
+ }
+
fn combine_fields(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
-> CombineFields<'a, 'tcx> {
CombineFields {infcx: self,
}
}
- fn rollback_to(&self, snapshot: CombinedSnapshot) {
- debug!("rollback!");
+ fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) {
+ debug!("rollback_to(cause={})", cause);
let CombinedSnapshot { type_snapshot,
int_snapshot,
float_snapshot,
debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok());
match r {
Ok(_) => { self.commit_from(snapshot); }
- Err(_) => { self.rollback_to(snapshot); }
+ Err(_) => { self.rollback_to("commit_if_ok -- error", snapshot); }
}
r
}
let r = self.commit_if_ok(|_| f());
+ debug!("commit_regions_if_ok: rolling back everything but regions");
+
// Roll back any non-region bindings - they should be resolved
// inside `f`, with, e.g. `resolve_type_vars_if_possible`.
self.type_variables
debug!("probe()");
let snapshot = self.start_snapshot();
let r = f(&snapshot);
- self.rollback_to(snapshot);
+ self.rollback_to("probe", snapshot);
r
}
match higher_ranked::leak_check(self, skol_map, snapshot) {
Ok(()) => Ok(()),
- Err((br, r)) => Err(ty::terr_regions_insufficiently_polymorphic(br, r))
+ Err((br, r)) => Err(TypeError::RegionsInsufficientlyPolymorphic(br, r))
}
}
pub fn next_ty_var_id(&self, diverging: bool) -> TyVid {
self.type_variables
.borrow_mut()
- .new_var(diverging)
+ .new_var(diverging, None)
}
pub fn next_ty_var(&self) -> Ty<'tcx> {
- ty::mk_var(self.tcx, self.next_ty_var_id(false))
+ self.tcx.mk_var(self.next_ty_var_id(false))
+ }
+
+ pub fn next_ty_var_with_default(&self,
+ default: Option<type_variable::Default<'tcx>>) -> Ty<'tcx> {
+ let ty_var_id = self.type_variables
+ .borrow_mut()
+ .new_var(false, default);
+
+ self.tcx.mk_var(ty_var_id)
}
pub fn next_diverging_ty_var(&self) -> Ty<'tcx> {
- ty::mk_var(self.tcx, self.next_ty_var_id(true))
+ self.tcx.mk_var(self.next_ty_var_id(true))
}
pub fn next_ty_vars(&self, n: usize) -> Vec<Ty<'tcx>> {
.collect()
}
+ // We have to take `&mut Substs` in order to provide the correct substitutions for defaults
+ // along the way, for this reason we don't return them.
+ pub fn type_vars_for_defs(&self,
+ span: Span,
+ space: subst::ParamSpace,
+ substs: &mut Substs<'tcx>,
+ defs: &[ty::TypeParameterDef<'tcx>]) {
+
+ let mut vars = Vec::with_capacity(defs.len());
+
+ for def in defs.iter() {
+ let default = def.default.map(|default| {
+ type_variable::Default {
+ ty: default.subst_spanned(self.tcx, substs, Some(span)),
+ origin_span: span,
+ def_id: def.default_def_id
+ }
+ });
+
+ let ty_var = self.next_ty_var_with_default(default);
+ substs.types.push(space, ty_var);
+ vars.push(ty_var)
+ }
+ }
+
/// Given a set of generics defined on a type or impl, returns a substitution mapping each
/// type/region parameter to a fresh inference variable.
pub fn fresh_substs_for_generics(&self,
generics: &ty::Generics<'tcx>)
-> subst::Substs<'tcx>
{
- let type_params =
- generics.types.map(
- |_| self.next_ty_var());
+ let type_params = subst::VecPerParamSpace::empty();
+
let region_params =
generics.regions.map(
|d| self.next_region_var(EarlyBoundRegion(span, d.name)));
- subst::Substs::new(type_params, region_params)
+
+ let mut substs = subst::Substs::new(type_params, region_params);
+
+ for space in subst::ParamSpace::all().iter() {
+ self.type_vars_for_defs(
+ span,
+ *space,
+ &mut substs,
+ generics.types.get_slice(*space));
+ }
+
+ return substs;
}
/// Given a set of generics defined on a trait, returns a substitution mapping each output
assert!(generics.regions.len(subst::SelfSpace) == 0);
assert!(generics.regions.len(subst::FnSpace) == 0);
- let type_parameter_count = generics.types.len(subst::TypeSpace);
- let type_parameters = self.next_ty_vars(type_parameter_count);
+ let type_params = Vec::new();
let region_param_defs = generics.regions.get_slice(subst::TypeSpace);
let regions = self.region_vars_for_defs(span, region_param_defs);
- subst::Substs::new_trait(type_parameters, regions, self_ty)
+ let mut substs = subst::Substs::new_trait(type_params, regions, self_ty);
+
+ let type_parameter_defs = generics.types.get_slice(subst::TypeSpace);
+ self.type_vars_for_defs(span, subst::TypeSpace, &mut substs, type_parameter_defs);
+
+ return substs;
}
pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region {
self.region_vars.new_bound(debruijn)
}
+ /// Apply `adjustment` to the type of `expr`
+ pub fn adjust_expr_ty(&self,
+ expr: &ast::Expr,
+ adjustment: Option<&ty::AutoAdjustment<'tcx>>)
+ -> Ty<'tcx>
+ {
+ let raw_ty = self.expr_ty(expr);
+ let raw_ty = self.shallow_resolve(raw_ty);
+ let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty);
+ raw_ty.adjust(self.tcx,
+ expr.span,
+ expr.id,
+ adjustment,
+ |method_call| self.tables
+ .borrow()
+ .method_map
+ .get(&method_call)
+ .map(|method| resolve_ty(method.ty)))
+ }
+
+ pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
+ match self.tables.borrow().node_types.get(&id) {
+ Some(&t) => t,
+ // FIXME
+ None if self.tcx.sess.err_count() - self.err_count_on_creation != 0 =>
+ self.tcx.types.err,
+ None => {
+ self.tcx.sess.bug(
+ &format!("no type for node {}: {} in fcx",
+ id, self.tcx.map.node_to_string(id)));
+ }
+ }
+ }
+
+ pub fn expr_ty(&self, ex: &ast::Expr) -> Ty<'tcx> {
+ match self.tables.borrow().node_types.get(&ex.id) {
+ Some(&t) => t,
+ None => {
+ self.tcx.sess.bug(&format!("no type for expr in fcx"));
+ }
+ }
+ }
+
pub fn resolve_regions_and_report_errors(&self,
free_regions: &FreeRegionMap,
subject_node_id: ast::NodeId) {
pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String {
let tstrs: Vec<String> = ts.iter().map(|t| self.ty_to_string(*t)).collect();
- format!("({})", tstrs.connect(", "))
+ format!("({})", tstrs.join(", "))
}
pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String {
value.fold_with(&mut r)
}
- pub fn fully_resolve<T:TypeFoldable<'tcx>>(&self, value: &T) -> fres<T> {
+ /// Resolves all type variables in `t` and then, if any were left
+ /// unresolved, substitutes an error type. This is used after the
+ /// main checking when doing a second pass before writeback. The
+ /// justification is that writeback will produce an error for
+ /// these unconstrained type variables.
+ fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult<Ty<'tcx>> {
+ let ty = self.resolve_type_vars_if_possible(t);
+ if ty.references_error() || ty.is_ty_var() {
+ debug!("resolve_type_vars_or_error: error from {:?}", ty);
+ Err(())
+ } else {
+ Ok(ty)
+ }
+ }
+
+ pub fn fully_resolve<T:TypeFoldable<'tcx>>(&self, value: &T) -> FixupResult<T> {
/*!
* Attempts to resolve all type/region variables in
* `value`. Region inference must have been run already (e.g.,
sp: Span,
mk_msg: M,
actual_ty: String,
- err: Option<&ty::type_err<'tcx>>) where
+ err: Option<&ty::TypeError<'tcx>>) where
M: FnOnce(Option<String>, String) -> String,
{
self.type_error_message_str_with_expected(sp, mk_msg, None, actual_ty, err)
mk_msg: M,
expected_ty: Option<Ty<'tcx>>,
actual_ty: String,
- err: Option<&ty::type_err<'tcx>>) where
+ err: Option<&ty::TypeError<'tcx>>) where
M: FnOnce(Option<String>, String) -> String,
{
debug!("hi! expected_ty = {:?}, actual_ty = {}", expected_ty, actual_ty);
let resolved_expected = expected_ty.map(|e_ty| self.resolve_type_vars_if_possible(&e_ty));
- match resolved_expected {
- Some(t) if ty::type_is_error(t) => (),
- _ => {
- let error_str = err.map_or("".to_string(), |t_err| {
- format!(" ({})", t_err)
- });
+ if !resolved_expected.references_error() {
+ let error_str = err.map_or("".to_string(), |t_err| {
+ format!(" ({})", t_err)
+ });
- self.tcx.sess.span_err(sp, &format!("{}{}",
- mk_msg(resolved_expected.map(|t| self.ty_to_string(t)), actual_ty),
- error_str));
+ self.tcx.sess.span_err(sp, &format!("{}{}",
+ mk_msg(resolved_expected.map(|t| self.ty_to_string(t)), actual_ty),
+ error_str));
- if let Some(err) = err {
- ty::note_and_explain_type_err(self.tcx, err, sp)
- }
+ if let Some(err) = err {
+ self.tcx.note_and_explain_type_err(err, sp)
}
}
}
sp: Span,
mk_msg: M,
actual_ty: Ty<'tcx>,
- err: Option<&ty::type_err<'tcx>>) where
+ err: Option<&ty::TypeError<'tcx>>) where
M: FnOnce(String) -> String,
{
let actual_ty = self.resolve_type_vars_if_possible(&actual_ty);
// Don't report an error if actual type is TyError.
- if ty::type_is_error(actual_ty) {
+ if actual_ty.references_error() {
return;
}
span: Span,
expected: Ty<'tcx>,
actual: Ty<'tcx>,
- err: &ty::type_err<'tcx>) {
+ err: &ty::TypeError<'tcx>) {
let trace = TypeTrace {
origin: Misc(span),
- values: Types(ty::expected_found {
+ values: Types(ty::ExpectedFound {
expected: expected,
found: actual
})
self.report_and_explain_type_error(trace, err);
}
+ pub fn report_conflicting_default_types(&self,
+ span: Span,
+ expected: type_variable::Default<'tcx>,
+ actual: type_variable::Default<'tcx>) {
+ let trace = TypeTrace {
+ origin: Misc(span),
+ values: Types(ty::ExpectedFound {
+ expected: expected.ty,
+ found: actual.ty
+ })
+ };
+
+ self.report_and_explain_type_error(trace,
+ &TypeError::TyParamDefaultMismatch(ty::ExpectedFound {
+ expected: expected,
+ found: actual
+ }));
+ }
+
pub fn replace_late_bound_regions_with_fresh_var<T>(
&self,
span: Span,
self.equate(true, trace).relate(a, b)
}).map(|_| ())
}
+
+ pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
+ let ty = self.node_type(id);
+ self.resolve_type_vars_or_error(&ty)
+ }
+
+ pub fn expr_ty_adjusted(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
+ let ty = self.adjust_expr_ty(expr, self.tables.borrow().adjustments.get(&expr.id));
+ self.resolve_type_vars_or_error(&ty)
+ }
+
+ pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ let ty = self.resolve_type_vars_if_possible(&ty);
+ !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span)
+ // FIXME(@jroesch): should be able to use:
+ // ty.moves_by_default(&self.parameter_environment, span)
+ }
+
+ pub fn node_method_ty(&self, method_call: ty::MethodCall)
+ -> Option<Ty<'tcx>> {
+ self.tables
+ .borrow()
+ .method_map
+ .get(&method_call)
+ .map(|method| method.ty)
+ .map(|ty| self.resolve_type_vars_if_possible(&ty))
+ }
+
+ pub fn node_method_id(&self, method_call: ty::MethodCall)
+ -> Option<ast::DefId> {
+ self.tables
+ .borrow()
+ .method_map
+ .get(&method_call)
+ .map(|method| method.def_id)
+ }
+
+ pub fn adjustments(&self) -> Ref<NodeMap<ty::AutoAdjustment<'tcx>>> {
+ fn project_adjustments<'a, 'tcx>(tables: &'a ty::Tables<'tcx>)
+ -> &'a NodeMap<ty::AutoAdjustment<'tcx>> {
+ &tables.adjustments
+ }
+
+ Ref::map(self.tables.borrow(), project_adjustments)
+ }
+
+ pub fn is_method_call(&self, id: ast::NodeId) -> bool {
+ self.tables.borrow().method_map.contains_key(&ty::MethodCall::expr(id))
+ }
+
+ pub fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<CodeExtent> {
+ self.tcx.region_maps.temporary_scope(rvalue_id)
+ }
+
+ pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+ self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
+ }
+
+ pub fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
+ &self.parameter_environment
+ }
+
+ pub fn closure_kind(&self,
+ def_id: ast::DefId)
+ -> Option<ty::ClosureKind>
+ {
+ self.tables.borrow().closure_kinds.get(&def_id).cloned()
+ }
+
+ pub fn closure_type(&self,
+ def_id: ast::DefId,
+ substs: &ty::ClosureSubsts<'tcx>)
+ -> ty::ClosureTy<'tcx>
+ {
+ let closure_ty = self.tables
+ .borrow()
+ .closure_tys
+ .get(&def_id)
+ .unwrap()
+ .subst(self.tcx, &substs.func_substs);
+
+ if self.normalize {
+ normalize_associated_type(&self.tcx, &closure_ty)
+ } else {
+ closure_ty
+ }
+ }
}
impl<'tcx> TypeTrace<'tcx> {
pub fn dummy(tcx: &ty::ctxt<'tcx>) -> TypeTrace<'tcx> {
TypeTrace {
origin: Misc(codemap::DUMMY_SP),
- values: Types(ty::expected_found {
+ values: Types(ty::ExpectedFound {
expected: tcx.types.err,
found: tcx.types.err,
})
pub fn span(&self) -> Span {
match *self {
Subtype(ref a) => a.span(),
- DefaultExistentialBound(ref a) => a.span(),
InfStackClosure(a) => a,
InvokeClosure(a) => a,
DerefPointer(a) => a,
"execute" by testing the value they are applied to and creating any
relevant bindings). So, for example:
- fn foo(x: int, y: int) { // -+
+ fn foo(x: isize, y: isize) { // -+
// +------------+ // |
// | +-----+ // |
// | +-+ +-+ +-+ // |
Here is a more involved example (which is safe) so we can see what's
going on:
- struct Foo { f: uint, g: uint }
+ struct Foo { f: usize, g: usize }
...
- fn add(p: &mut uint, v: uint) {
+ fn add(p: &mut usize, v: usize) {
*p += v;
}
...
- fn inc(p: &mut uint) -> uint {
+ fn inc(p: &mut usize) -> usize {
*p += 1; *p
}
fn weird() {
'a: {
'a_arg1: let a_temp1: ... = add;
- 'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
- 'a_arg3: let a_temp3: uint = {
+ 'a_arg2: let a_temp2: &'a mut usize = &'a mut (*x).f;
+ 'a_arg3: let a_temp3: usize = {
let b_temp1: ... = inc;
let b_temp2: &'b = &'b mut (*x).f;
'b_call: b_temp1(b_temp2)
argument, it can still be *invalidated* by that evaluation. Consider
this similar but unsound example:
- struct Foo { f: uint, g: uint }
+ struct Foo { f: usize, g: usize }
...
- fn add(p: &mut uint, v: uint) {
+ fn add(p: &mut usize, v: usize) {
*p += v;
}
...
- fn consume(x: Box<Foo>) -> uint {
+ fn consume(x: Box<Foo>) -> usize {
x.f + x.g
}
fn weird() {
use rustc_data_structures::graph::{self, Direction, NodeIndex};
use middle::free_region::FreeRegionMap;
use middle::region;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, TypeError};
use middle::ty::{BoundRegion, FreeRegion, Region, RegionVid};
use middle::ty::{ReEmpty, ReStatic, ReInfer, ReFree, ReEarlyBound};
use middle::ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh};
use std::cell::{Cell, RefCell};
use std::cmp::Ordering::{self, Less, Greater, Equal};
use std::fmt;
-use std::iter::repeat;
use std::u32;
use syntax::ast;
/// should put a lifetime. In those cases we process and put those errors
/// into `ProcessedErrors` before we do any reporting.
ProcessedErrors(Vec<RegionVariableOrigin>,
- Vec<(TypeTrace<'tcx>, ty::type_err<'tcx>)>,
+ Vec<(TypeTrace<'tcx>, ty::TypeError<'tcx>)>,
Vec<SameRegions>),
}
// is the scope `s_id`. Otherwise, as we do not know
// big the free region is precisely, the GLB is undefined.
let fr_scope = fr.scope.to_code_extent();
- if self.tcx.region_maps.nearest_common_ancestor(fr_scope, s_id) == fr_scope {
+ if self.tcx.region_maps.nearest_common_ancestor(fr_scope, s_id) == fr_scope ||
+ free_regions.is_static(fr) {
Ok(s)
} else {
- Err(ty::terr_regions_no_overlap(b, a))
+ Err(TypeError::RegionsNoOverlap(b, a))
}
}
if a == b {
Ok(a)
} else {
- Err(ty::terr_regions_no_overlap(b, a))
+ Err(TypeError::RegionsNoOverlap(b, a))
}
}
}
} else if r_id == scope_b {
Ok(ReScope(scope_a))
} else {
- Err(ty::terr_regions_no_overlap(region_a, region_b))
+ Err(TypeError::RegionsNoOverlap(region_a, region_b))
}
}
}
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
- let mut dup_vec: Vec<_> = repeat(u32::MAX).take(self.num_vars() as usize).collect();
+ let mut dup_vec = vec![u32::MAX; self.num_vars() as usize];
for idx in 0..self.num_vars() as usize {
match var_data[idx].value {
}
}
- // Check for future hostile edges tied to a bad default
- self.report_future_hostility(&graph);
-
(0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
}
- fn report_future_hostility(&self, graph: &RegionGraph) {
- let constraints = self.constraints.borrow();
- for edge in graph.all_edges() {
- match constraints[&edge.data] {
- SubregionOrigin::DefaultExistentialBound(_) => {
- // this will become 'static in the future
- }
- _ => { continue; }
- }
-
- // this constraint will become a 'static constraint in the
- // future, so walk outward and see if we have any hard
- // bounds that could not be inferred to 'static
- for nid in graph.depth_traverse(edge.target()) {
- for (_, succ) in graph.outgoing_edges(nid) {
- match succ.data {
- ConstrainVarSubReg(_, r) => {
- match r {
- ty::ReStatic | ty::ReInfer(_) => {
- /* OK */
- }
- ty::ReFree(_) | ty::ReScope(_) | ty::ReEmpty => {
- span_warn!(
- self.tcx.sess,
- constraints[&edge.data].span(),
- E0398,
- "this code may fail to compile in Rust 1.3 due to \
- the proposed change in object lifetime bound defaults");
- return; // only issue the warning once per fn
- }
- ty::ReEarlyBound(..) | ty::ReLateBound(..) => {
- self.tcx.sess.span_bug(
- constraints[&succ.data].span(),
- "relation to bound region");
- }
- }
- }
- _ => { }
- }
- }
- }
- }
- }
-
fn construct_graph(&self) -> RegionGraph {
let num_vars = self.num_vars();
GenericKind::Param(ref p) =>
p.to_ty(tcx),
GenericKind::Projection(ref p) =>
- ty::mk_projection(tcx, p.trait_ref.clone(), p.item_name),
+ tcx.mk_projection(p.trait_ref.clone(), p.item_name),
}
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::{InferCtxt, fixup_err, fres, unresolved_ty, unresolved_int_ty, unresolved_float_ty};
-use middle::ty::{self, Ty};
+use super::{InferCtxt, FixupError, FixupResult};
+use middle::ty::{self, Ty, HasTypeFlags};
use middle::ty_fold::{self, TypeFoldable};
///////////////////////////////////////////////////////////////////////////
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_has_ty_infer(t) {
+ if !t.has_infer_types() {
t // micro-optimize -- if there is nothing in this type that this fold affects...
} else {
let t0 = self.infcx.shallow_resolve(t);
/// Full type resolution replaces all type and region variables with
/// their concrete results. If any variable cannot be replaced (never unified, etc)
/// then an `Err` result is returned.
-pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> fres<T>
+pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> FixupResult<T>
where T : TypeFoldable<'tcx>
{
let mut full_resolver = FullTypeResolver { infcx: infcx, err: None };
// `err` field is not enforcable otherwise.
struct FullTypeResolver<'a, 'tcx:'a> {
infcx: &'a InferCtxt<'a, 'tcx>,
- err: Option<fixup_err>,
+ err: Option<FixupError>,
}
impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_needs_infer(t) {
+ if !t.needs_infer() {
t // micro-optimize -- if there is nothing in this type that this fold affects...
} else {
let t = self.infcx.shallow_resolve(t);
match t.sty {
ty::TyInfer(ty::TyVar(vid)) => {
- self.err = Some(unresolved_ty(vid));
+ self.err = Some(FixupError::UnresolvedTy(vid));
self.tcx().types.err
}
ty::TyInfer(ty::IntVar(vid)) => {
- self.err = Some(unresolved_int_ty(vid));
+ self.err = Some(FixupError::UnresolvedIntTy(vid));
self.tcx().types.err
}
ty::TyInfer(ty::FloatVar(vid)) => {
- self.err = Some(unresolved_float_ty(vid));
+ self.err = Some(FixupError::UnresolvedFloatTy(vid));
self.tcx().types.err
}
ty::TyInfer(_) => {
r
}
- fn will_change(&mut self, a: bool, b: bool) -> bool {
- // if we have (Foo+'a) <: (Foo+'b), this requires that 'a:'b.
- // So if 'a becomes 'static, no additional errors can occur.
- // OTOH, if 'a stays the same, but 'b becomes 'static, we
- // could have a problem.
- !a && b
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({:?}, {:?}) self.cause={:?}",
self.tag(), a, b, self.fields.cause);
- let origin = match self.fields.cause {
- Some(Cause::ExistentialRegionBound(true)) =>
- SubregionOrigin::DefaultExistentialBound(self.fields.trace.clone()),
- _ =>
- SubregionOrigin::Subtype(self.fields.trace.clone()),
- };
+ // FIXME -- we have more fine-grained information available
+ // from the "cause" field, we could perhaps give more tailored
+ // error messages.
+ let origin = SubregionOrigin::Subtype(self.fields.trace.clone());
self.fields.infcx.region_vars.make_subregion(origin, a, b);
Ok(a)
}
pub use self::RelationDir::*;
use self::TypeVariableValue::*;
use self::UndoEntry::*;
-
use middle::ty::{self, Ty};
+use syntax::ast::DefId;
+use syntax::codemap::Span;
+
use std::cmp::min;
use std::marker::PhantomData;
use std::mem;
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
- Bounded(Vec<Relation>),
+ Bounded {
+ relations: Vec<Relation>,
+ default: Option<Default<'tcx>>
+ }
+}
+
+// Stores the information needed to reconstruct how a type variable's default arose
+// when reporting an error about it.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct Default<'tcx> {
+ pub ty: Ty<'tcx>,
+ /// The span where the default was introduced
+ pub origin_span: Span,
+ /// The definition that the default originates from
+ pub def_id: DefId
}
pub struct Snapshot {
snapshot: sv::Snapshot
}
-enum UndoEntry {
+enum UndoEntry<'tcx> {
// The type of the var was specified.
- SpecifyVar(ty::TyVid, Vec<Relation>),
+ SpecifyVar(ty::TyVid, Vec<Relation>, Option<Default<'tcx>>),
Relate(ty::TyVid, ty::TyVid),
}
relations(self.values.get_mut(a.index as usize))
}
+ pub fn default(&self, vid: ty::TyVid) -> Option<Default<'tcx>> {
+ match &self.values.get(vid.index as usize).value {
+ &Known(_) => None,
+ &Bounded { ref default, .. } => default.clone()
+ }
+ }
+
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index as usize).diverging
}
mem::replace(value_ptr, Known(ty))
};
- let relations = match old_value {
- Bounded(b) => b,
+ let (relations, default) = match old_value {
+ Bounded { relations, default } => (relations, default),
Known(_) => panic!("Asked to instantiate variable that is \
already instantiated")
};
stack.push((ty, dir, vid));
}
- self.values.record(SpecifyVar(vid, relations));
+ self.values.record(SpecifyVar(vid, relations, default));
}
- pub fn new_var(&mut self, diverging: bool) -> ty::TyVid {
+ pub fn new_var(&mut self,
+ diverging: bool,
+ default: Option<Default<'tcx>>) -> ty::TyVid {
let index = self.values.push(TypeVariableData {
- value: Bounded(vec![]),
+ value: Bounded { relations: vec![], default: default },
diverging: diverging
});
ty::TyVid { index: index as u32 }
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
match self.values.get(vid.index as usize).value {
- Bounded(..) => None,
+ Bounded { .. } => None,
Known(t) => Some(t)
}
}
debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
}
- sv::UndoLog::Other(SpecifyVar(vid, _)) => {
+ sv::UndoLog::Other(SpecifyVar(vid, _, _)) => {
if vid.index < new_elem_threshold {
// quick check to see if this variable was
// created since the snapshot started or not.
escaping_types
}
+
+ pub fn unsolved_variables(&self) -> Vec<ty::TyVid> {
+ self.values
+ .iter()
+ .enumerate()
+ .filter_map(|(i, value)| match &value.value {
+ &TypeVariableValue::Known(_) => None,
+ &TypeVariableValue::Bounded { .. } => Some(ty::TyVid { index: i as u32 })
+ })
+ .collect()
+ }
}
impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
type Value = TypeVariableData<'tcx>;
- type Undo = UndoEntry;
+ type Undo = UndoEntry<'tcx>;
- fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry) {
+ fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry<'tcx>) {
match action {
- SpecifyVar(vid, relations) => {
- values[vid.index as usize].value = Bounded(relations);
+ SpecifyVar(vid, relations, default) => {
+ values[vid.index as usize].value = Bounded {
+ relations: relations,
+ default: default
+ };
}
Relate(a, b) => {
fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
match v.value {
Known(_) => panic!("var_sub_var: variable is known"),
- Bounded(ref mut relations) => relations
+ Bounded { ref mut relations, .. } => relations
}
}
impl<'tcx> ToType<'tcx> for IntVarValue {
fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
match *self {
- ty::IntType(i) => ty::mk_mach_int(tcx, i),
- ty::UintType(i) => ty::mk_mach_uint(tcx, i),
+ ty::IntType(i) => tcx.mk_mach_int(i),
+ ty::UintType(i) => tcx.mk_mach_uint(i),
}
}
}
impl<'tcx> ToType<'tcx> for ast::FloatTy {
fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
- ty::mk_mach_float(tcx, *self)
+ tcx.mk_mach_float(*self)
}
}
use middle::def::DefFn;
use middle::subst::{Subst, Substs, EnumeratedItems};
use middle::ty::{TransmuteRestriction, ctxt, TyBareFn};
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use std::fmt;
use syntax::ast::DefId;
use syntax::ast;
use syntax::codemap::Span;
-use syntax::parse::token;
use syntax::visit::Visitor;
use syntax::visit;
tcx: tcx,
param_envs: Vec::new(),
dummy_sized_ty: tcx.types.isize,
- dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None),
+ dummy_unsized_ty: tcx.mk_slice(tcx.types.isize),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
}
impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
- let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty {
+ let intrinsic = match self.tcx.lookup_item_type(def_id).ty.sty {
ty::TyBareFn(_, ref bfty) => bfty.abi == RustIntrinsic,
_ => return false
};
if def_id.krate == ast::LOCAL_CRATE {
match self.tcx.map.get(def_id.node) {
NodeForeignItem(ref item) if intrinsic => {
- token::get_ident(item.ident) ==
- token::intern_and_get_ident("transmute")
+ item.ident.name == "transmute"
}
_ => false,
}
} else {
match csearch::get_item_path(self.tcx, def_id).last() {
Some(ref last) if intrinsic => {
- token::get_name(last.name()) ==
- token::intern_and_get_ident("transmute")
+ last.name() == "transmute"
}
_ => false,
}
// Simple case: no type parameters involved.
if
- !ty::type_has_params(from) && !ty::type_has_self(from) &&
- !ty::type_has_params(to) && !ty::type_has_self(to)
+ !from.has_param_types() && !from.has_self_ty() &&
+ !to.has_param_types() && !to.has_self_ty()
{
let restriction = TransmuteRestriction {
span: span,
// In all cases, we keep the original unsubstituted types
// around for error reporting.
- let from_tc = ty::type_contents(self.tcx, from);
- let to_tc = ty::type_contents(self.tcx, to);
+ let from_tc = from.type_contents(self.tcx);
+ let to_tc = to.type_contents(self.tcx);
if from_tc.interior_param() || to_tc.interior_param() {
span_err!(self.tcx.sess, span, E0139,
"cannot transmute to or from a type that contains \
- type parameters in its interior");
+ unsubstituted type parameters");
return;
}
debug!("with_each_combination: space={:?}, index={}, param_ty={:?}",
space, index, param_ty);
- if !ty::type_is_sized(Some(param_env), self.tcx, span, param_ty) {
+ if !param_ty.is_sized(param_env, span) {
debug!("with_each_combination: param_ty is not known to be sized");
substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
fn visit_expr(&mut self, expr: &ast::Expr) {
if let ast::ExprPath(..) = expr.node {
- match ty::resolve_expr(self.tcx, expr) {
+ match self.tcx.resolve_expr(expr) {
DefFn(did, _) if self.def_id_is_transmute(did) => {
- let typ = ty::node_id_to_type(self.tcx, expr.id);
+ let typ = self.tcx.node_id_to_type(expr.id);
match typ.sty {
TyBareFn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => {
if let ty::FnConverging(to) = bare_fn_ty.sig.0.output {
}
}
+ pub fn require_owned_box(&self) -> Result<ast::DefId, String> {
+ self.require(OwnedBoxLangItem)
+ }
+
pub fn from_builtin_kind(&self, bound: ty::BuiltinBound)
-> Result<ast::DefId, String>
{
StartFnLangItem, "start", start_fn;
EhPersonalityLangItem, "eh_personality", eh_personality;
+ EhPersonalityCatchLangItem, "eh_personality_catch", eh_personality_catch;
+ EhUnwindResumeLangItem, "eh_unwind_resume", eh_unwind_resume;
+ MSVCTryFilterLangItem, "msvc_try_filter", msvc_try_filter;
ExchangeHeapLangItem, "exchange_heap", exchange_heap;
OwnedBoxLangItem, "owned_box", owned_box;
use self::VarKind::*;
use middle::def::*;
-use middle::mem_categorization::Typer;
use middle::pat_util;
use middle::region;
use middle::ty;
-use middle::ty::ClosureTyper;
use lint;
use util::nodemap::NodeMap;
use std::{fmt, usize};
use std::io::prelude::*;
use std::io;
-use std::iter::repeat;
use std::rc::Rc;
use syntax::ast::{self, NodeId, Expr};
use syntax::codemap::{BytePos, original_sp, Span};
-use syntax::parse::token::{self, special_idents};
+use syntax::parse::token::special_idents;
use syntax::print::pprust::{expr_to_string, block_to_string};
use syntax::ptr::P;
use syntax::ast_util;
fn variable_name(&self, var: Variable) -> String {
match self.var_kinds[var.get()] {
Local(LocalInfo { name, .. }) | Arg(_, name) => {
- token::get_name(name).to_string()
+ name.to_string()
},
ImplicitRet => "<implicit-ret>".to_string(),
CleanExit => "<clean-exit>".to_string()
// in better error messages than just pointing at the closure
// construction site.
let mut call_caps = Vec::new();
- ty::with_freevars(ir.tcx, expr.id, |freevars| {
+ ir.tcx.with_freevars(expr.id, |freevars| {
for fv in freevars {
if let DefLocal(rv) = fv.def {
let fv_ln = ir.add_live_node(FreeVarNode(fv.span));
Liveness {
ir: ir,
s: specials,
- successors: repeat(invalid_node()).take(num_live_nodes).collect(),
- users: repeat(invalid_users()).take(num_live_nodes * num_vars).collect(),
+ successors: vec![invalid_node(); num_live_nodes],
+ users: vec![invalid_users(); num_live_nodes * num_vars],
loop_scope: Vec::new(),
break_ln: NodeMap(),
cont_ln: NodeMap(),
}
ast::ExprCall(ref f, ref args) => {
- let diverges = !self.ir.tcx.is_method_call(expr.id) && {
- ty::ty_fn_ret(ty::expr_ty_adjusted(self.ir.tcx, &**f)).diverges()
- };
+ let diverges = !self.ir.tcx.is_method_call(expr.id) &&
+ self.ir.tcx.expr_ty_adjusted(&**f).fn_ret().diverges();
let succ = if diverges {
self.s.exit_ln
} else {
ast::ExprMethodCall(_, _, ref args) => {
let method_call = ty::MethodCall::expr(expr.id);
- let method_ty = self.ir.tcx.method_map.borrow().get(&method_call).unwrap().ty;
- let diverges = ty::ty_fn_ret(method_ty).diverges();
- let succ = if diverges {
+ let method_ty = self.ir.tcx.tables.borrow().method_map[&method_call].ty;
+ let succ = if method_ty.fn_ret().diverges() {
self.s.exit_ln
} else {
succ
impl<'a, 'tcx> Liveness<'a, 'tcx> {
fn fn_ret(&self, id: NodeId) -> ty::PolyFnOutput<'tcx> {
- let fn_ty = ty::node_id_to_type(self.ir.tcx, id);
+ let fn_ty = self.ir.tcx.node_id_to_type(id);
match fn_ty.sty {
- ty::TyClosure(closure_def_id, substs) =>
+ ty::TyClosure(closure_def_id, ref substs) =>
self.ir.tcx.closure_type(closure_def_id, substs).sig.output(),
- _ =>
- ty::ty_fn_ret(fn_ty),
+ _ => fn_ty.fn_ret()
}
}
{
// within the fn body, late-bound regions are liberated:
let fn_ret =
- ty::liberate_late_bound_regions(
- self.ir.tcx,
+ self.ir.tcx.liberate_late_bound_regions(
region::DestructionScopeData::new(body.id),
&self.fn_ret(id));
ty::FnConverging(t_ret)
if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() => {
- if ty::type_is_nil(t_ret) {
+ if t_ret.is_nil() {
// for nil return types, it is ok to not return a value expl.
} else {
let ends_with_stmt = match body.expr {
None if !body.stmts.is_empty() =>
match body.stmts.first().unwrap().node {
ast::StmtSemi(ref e, _) => {
- ty::expr_ty(self.ir.tcx, &**e) == t_ret
+ self.ir.tcx.expr_ty(&**e) == t_ret
},
_ => false
},
use self::Aliasability::*;
use ast_map;
+use middle::infer;
use middle::check_const;
use middle::def;
use middle::region;
use middle::ty::{self, Ty};
-use util::nodemap::NodeMap;
use syntax::ast::{MutImmutable, MutMutable};
use syntax::ast;
use syntax::codemap::Span;
-use std::cell::RefCell;
use std::fmt;
use std::rc::Rc;
fn span(&self) -> Span { self.span }
}
-pub struct MemCategorizationContext<'t,TYPER:'t> {
- typer: &'t TYPER
-}
-
-impl<'t,TYPER:'t> Copy for MemCategorizationContext<'t,TYPER> {}
-impl<'t,TYPER:'t> Clone for MemCategorizationContext<'t,TYPER> {
- fn clone(&self) -> MemCategorizationContext<'t,TYPER> { *self }
+#[derive(Copy, Clone)]
+pub struct MemCategorizationContext<'t, 'a: 't, 'tcx : 'a> {
+ pub typer: &'t infer::InferCtxt<'a, 'tcx>,
}
pub type McResult<T> = Result<T, ()>;
-/// The `Typer` trait provides the interface for the mem-categorization
-/// module to the results of the type check. It can be used to query
-/// the type assigned to an expression node, to inquire after adjustments,
-/// and so on.
-///
-/// This interface is needed because mem-categorization is used from
-/// two places: `regionck` and `borrowck`. `regionck` executes before
-/// type inference is complete, and hence derives types and so on from
-/// intermediate tables. This also implies that type errors can occur,
-/// and hence `node_ty()` and friends return a `Result` type -- any
-/// error will propagate back up through the mem-categorization
-/// routines.
-///
-/// In the borrow checker, in contrast, type checking is complete and we
-/// know that no errors have occurred, so we simply consult the tcx and we
-/// can be sure that only `Ok` results will occur.
-pub trait Typer<'tcx> : ty::ClosureTyper<'tcx> {
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>>;
- fn expr_ty_adjusted(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>>;
- fn type_moves_by_default(&self, span: Span, ty: Ty<'tcx>) -> bool;
- fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>>;
- fn node_method_origin(&self, method_call: ty::MethodCall)
- -> Option<ty::MethodOrigin<'tcx>>;
- fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>>;
- fn is_method_call(&self, id: ast::NodeId) -> bool;
- fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<region::CodeExtent>;
- fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture>;
-}
-
impl MutabilityCategory {
pub fn from_mutbl(m: ast::Mutability) -> MutabilityCategory {
let ret = match m {
}
}
-impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
- pub fn new(typer: &'t TYPER) -> MemCategorizationContext<'t,TYPER> {
+impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
+ pub fn new(typer: &'t infer::InferCtxt<'a, 'tcx>) -> MemCategorizationContext<'t, 'a, 'tcx> {
MemCategorizationContext { typer: typer }
}
- fn tcx(&self) -> &'t ty::ctxt<'tcx> {
- self.typer.tcx()
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+ self.typer.tcx
}
fn expr_ty(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
- self.typer.node_ty(expr.id)
+ match self.typer.node_ty(expr.id) {
+ Ok(t) => Ok(t),
+ Err(()) => {
+ debug!("expr_ty({:?}) yielded Err", expr);
+ Err(())
+ }
+ }
}
fn expr_ty_adjusted(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
let unadjusted_ty = try!(self.expr_ty(expr));
- Ok(ty::adjust_ty(self.tcx(), expr.span, expr.id, unadjusted_ty,
- self.typer.adjustments().borrow().get(&expr.id),
- |method_call| self.typer.node_method_ty(method_call)))
+ Ok(unadjusted_ty.adjust(
+ self.tcx(), expr.span, expr.id,
+ self.typer.adjustments().get(&expr.id),
+ |method_call| self.typer.node_method_ty(method_call)))
}
fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
- match ty::deref(base_ty, false) {
+ match base_ty.builtin_deref(false) {
Some(t) => t.ty,
None => { return Err(()); }
}
}
pub fn cat_expr(&self, expr: &ast::Expr) -> McResult<cmt<'tcx>> {
- match self.typer.adjustments().borrow().get(&expr.id) {
+ match self.typer.adjustments().get(&expr.id) {
None => {
// No adjustments.
self.cat_expr_unadjusted(expr)
let base_cmt = match method_ty {
Some(method_ty) => {
let ref_ty =
- ty::no_late_bound_regions(
- self.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
+ self.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
self.cat_rvalue_node(node.id(), node.span(), ref_ty)
}
None => base_cmt
};
let base_cmt_ty = base_cmt.ty;
- match ty::deref(base_cmt_ty, true) {
+ match base_cmt_ty.builtin_deref(true) {
Some(mt) => {
let ret = self.cat_deref_common(node, base_cmt, deref_cnt,
mt.ty,
base_cmt = self.cat_rvalue_node(elt.id(), elt.span(), ref_ty);
// FIXME(#20649) -- why are we using the `self_ty` as the element type...?
- let self_ty = ty::ty_fn_sig(method_ty).input(0);
- ty::no_late_bound_regions(self.tcx(), &self_ty).unwrap()
+ let self_ty = method_ty.fn_sig().input(0);
+ self.tcx().no_late_bound_regions(&self_ty).unwrap()
}
None => {
- match ty::array_element_ty(self.tcx(), base_cmt.ty) {
+ match base_cmt.ty.builtin_index() {
Some(ty) => ty,
None => {
return Err(());
span:elt.span(),
cat:cat_deref(base_cmt.clone(), 0, ptr),
mutbl:m,
- ty: match ty::deref(base_cmt.ty, false) {
+ ty: match base_cmt.ty.builtin_deref(false) {
Some(mt) => mt.ty,
None => self.tcx().sess.bug("Found non-derefable type")
},
}
pub fn cat_pattern<F>(&self, cmt: cmt<'tcx>, pat: &ast::Pat, mut op: F) -> McResult<()>
- where F: FnMut(&MemCategorizationContext<'t, TYPER>, cmt<'tcx>, &ast::Pat),
+ where F: FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &ast::Pat),
{
self.cat_pattern_(cmt, pat, &mut op)
}
// FIXME(#19596) This is a workaround, but there should be a better way to do this
fn cat_pattern_<F>(&self, cmt: cmt<'tcx>, pat: &ast::Pat, op: &mut F)
-> McResult<()>
- where F : FnMut(&MemCategorizationContext<'t, TYPER>, cmt<'tcx>, &ast::Pat),
+ where F : FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &ast::Pat),
{
// Here, `cmt` is the categorization for the value being
// matched and pat is the pattern it is being matched against.
let cmt = match opt_def {
Some(def::DefVariant(enum_did, variant_did, _))
// univariant enums do not need downcasts
- if !ty::enum_is_univariant(self.tcx(), enum_did) => {
+ if !self.tcx().enum_is_univariant(enum_did) => {
self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did)
}
_ => cmt
// types are generated by method resolution and always have
// all late-bound regions fully instantiated, so we just want
// to skip past the binder.
- ty::no_late_bound_regions(self.tcx(), &ty::ty_fn_ret(method_ty))
+ self.tcx().no_late_bound_regions(&method_ty.fn_ret())
.unwrap()
.unwrap() // overloaded ops do not diverge, either
}
cat_static_item => write!(f, "static"),
cat_rvalue(r) => write!(f, "rvalue({:?})", r),
cat_local(id) => {
- let name = ty::tls::with(|tcx| ty::local_var_name_str(tcx, id));
+ let name = ty::tls::with(|tcx| tcx.local_var_name_str(id));
write!(f, "local({})", name)
}
cat_upvar(upvar) => {
fn element_kind(t: Ty) -> ElementKind {
match t.sty {
- ty::TyRef(_, ty::mt{ty, ..}) |
+ ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => match ty.sty {
ty::TySlice(_) => VecElement,
_ => OtherElement
}
pub fn def_to_path(tcx: &ty::ctxt, id: ast::DefId) -> ast::Path {
- ty::with_path(tcx, id, |path| ast::Path {
+ tcx.with_path(id, |path| ast::Path {
global: false,
segments: path.last().map(|elem| ast::PathSegment {
identifier: ast::Ident::new(elem.name()),
}
ast::ExprMethodCall(..) => {
let method_call = ty::MethodCall::expr(expr.id);
- match (*self.tcx.method_map.borrow()).get(&method_call).unwrap().origin {
- ty::MethodStatic(def_id) => {
+ let def_id = self.tcx.tables.borrow().method_map[&method_call].def_id;
+ match self.tcx.impl_or_trait_item(def_id).container() {
+ ty::ImplContainer(_) => {
if is_local(def_id) {
if self.def_id_represents_local_inlined_item(def_id) {
self.worklist.push(def_id.node)
self.reachable_symbols.insert(def_id.node);
}
}
- _ => {}
+ ty::TraitContainer(_) => {}
}
}
_ => {}
use syntax::ast;
use syntax::codemap::Span;
use syntax::parse::token::special_idents;
-use syntax::parse::token;
use syntax::print::pprust::lifetime_to_string;
use syntax::visit;
use syntax::visit::Visitor;
impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
fn visit_item(&mut self, item: &ast::Item) {
- // Items save/restore the set of labels. This way innner items
+ // Items save/restore the set of labels. This way inner items
// can freely reuse names, be they loop labels or lifetimes.
let saved = replace(&mut self.labels_in_fn, vec![]);
replace(&mut self.labels_in_fn, saved);
}
+ fn visit_foreign_item(&mut self, item: &ast::ForeignItem) {
+ // Items save/restore the set of labels. This way inner items
+ // can freely reuse names, be they loop labels or lifetimes.
+ let saved = replace(&mut self.labels_in_fn, vec![]);
+
+ // Items always introduce a new root scope
+ self.with(RootScope, |_, this| {
+ match item.node {
+ ast::ForeignItemFn(_, ref generics) => {
+ this.visit_early_late(subst::FnSpace, generics, |this| {
+ visit::walk_foreign_item(this, item);
+ })
+ }
+ ast::ForeignItemStatic(..) => {
+ visit::walk_foreign_item(this, item);
+ }
+ }
+ });
+
+ // Done traversing the item; restore saved set of labels.
+ replace(&mut self.labels_in_fn, saved);
+ }
+
fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
b: &'v ast::Block, s: Span, _: ast::NodeId) {
match fk {
impl<'v, 'a> Visitor<'v> for GatherLabels<'a> {
fn visit_expr(&mut self, ex: &'v ast::Expr) {
+ // do not recurse into closures defined in the block
+ // since they are treated as separate fns from the POV of
+ // labels_in_fn
+ if let ast::ExprClosure(..) = ex.node {
+ return
+ }
if let Some(label) = expression_label(ex) {
for &(prior, prior_span) in &self.labels_in_fn[..] {
// FIXME (#24278): non-hygienic comparison
fn unresolved_lifetime_ref(&self, lifetime_ref: &ast::Lifetime) {
span_err!(self.sess, lifetime_ref.span, E0261,
"use of undeclared lifetime name `{}`",
- token::get_name(lifetime_ref.name));
+ lifetime_ref.name);
}
fn check_lifetime_defs(&mut self, old_scope: Scope, lifetimes: &Vec<ast::LifetimeDef>) {
for lifetime in lifetimes {
if special_idents.iter().any(|&i| i.name == lifetime.lifetime.name) {
span_err!(self.sess, lifetime.lifetime.span, E0262,
- "illegal lifetime parameter name: `{}`",
- token::get_name(lifetime.lifetime.name));
+ "invalid lifetime parameter name: `{}`", lifetime.lifetime.name);
}
}
span_err!(self.sess, lifetime_j.lifetime.span, E0263,
"lifetime name `{}` declared twice in \
the same scope",
- token::get_name(lifetime_j.lifetime.name));
+ lifetime_j.lifetime.name);
}
}
use util::nodemap::{DefIdMap, FnvHashSet, FnvHashMap};
use std::mem::replace;
+use std::cmp::Ordering;
/// A stability index, giving the stability level for items and methods.
pub struct Index<'tcx> {
if self.index.staged_api[&ast::LOCAL_CRATE] {
debug!("annotate(id = {:?}, attrs = {:?})", id, attrs);
match attr::find_stability(self.tcx.sess.diagnostic(), attrs, item_sp) {
- Some(stab) => {
+ Some(mut stab) => {
debug!("annotate: found {:?}", stab);
+ // if parent is deprecated and we're not, inherit this by merging
+ // deprecated_since and its reason.
+ if let Some(parent_stab) = self.parent {
+ if parent_stab.deprecated_since.is_some()
+ && stab.deprecated_since.is_none() {
+ stab.deprecated_since = parent_stab.deprecated_since.clone();
+ stab.reason = parent_stab.reason.clone();
+ }
+ }
+
let stab = self.tcx.intern_stability(stab);
+
+ // Check if deprecated_since < stable_since. If it is,
+ // this is *almost surely* an accident.
+ let deprecated_predates_stable = match (stab.deprecated_since.as_ref(),
+ stab.since.as_ref()) {
+ (Some(dep_since), Some(stab_since)) => {
+ // explicit version of iter::order::lt to handle parse errors properly
+ let mut is_less = false;
+ for (dep_v, stab_v) in dep_since.split(".").zip(stab_since.split(".")) {
+ match (dep_v.parse::<u64>(), stab_v.parse::<u64>()) {
+ (Ok(dep_v), Ok(stab_v)) => match dep_v.cmp(&stab_v) {
+ Ordering::Less => {
+ is_less = true;
+ break;
+ }
+ Ordering::Equal => { continue; }
+ Ordering::Greater => { break; }
+ },
+ _ => {
+ self.tcx.sess.span_err(item_sp,
+ "Invalid stability or deprecation version found");
+ // act like it isn't less because the question is now
+ // nonsensical, and this makes us not do anything else
+ // interesting.
+ break;
+ }
+ }
+ }
+ is_less
+ },
+ _ => false,
+ };
+
+ if deprecated_predates_stable {
+ self.tcx.sess.span_err(item_sp,
+ "An API can't be stabilized after it is deprecated");
+ }
+
self.index.map.insert(local_def(id), Some(stab));
// Don't inherit #[stable(feature = "rust1", since = "1.0.0")]
if !cross_crate { return }
match *stab {
- Some(&Stability { level: attr::Unstable, ref feature, ref reason, .. }) => {
+ Some(&Stability { level: attr::Unstable, ref feature, ref reason, issue, .. }) => {
self.used_features.insert(feature.clone(), attr::Unstable);
if !self.active_features.contains(feature) {
- let msg = match *reason {
+ let mut msg = match *reason {
Some(ref r) => format!("use of unstable library feature '{}': {}",
&feature, &r),
None => format!("use of unstable library feature '{}'", &feature)
};
+ if let Some(n) = issue {
+ use std::fmt::Write;
+ write!(&mut msg, " (see issue #{})", n).unwrap();
+ }
emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic,
&feature, span, &msg);
// When compiling with --test we don't enforce stability on the
// compiler-generated test module, demarcated with `DUMMY_SP` plus the
// name `__test`
- if item.span == DUMMY_SP && item.ident.as_str() == "__test" { return }
+ if item.span == DUMMY_SP && item.ident.name == "__test" { return }
check_item(self.tcx, item, true,
&mut |id, sp, stab| self.check(id, sp, stab));
// items.
ast::ItemImpl(_, _, _, Some(ref t), _, ref impl_items) => {
let trait_did = tcx.def_map.borrow().get(&t.ref_id).unwrap().def_id();
- let trait_items = ty::trait_items(tcx, trait_did);
+ let trait_items = tcx.trait_items(trait_did);
for impl_item in impl_items {
let item = trait_items.iter().find(|item| {
ast::ExprMethodCall(i, _, _) => {
span = i.span;
let method_call = ty::MethodCall::expr(e.id);
- match tcx.method_map.borrow().get(&method_call) {
- Some(method) => {
- match method.origin {
- ty::MethodStatic(def_id) => {
- def_id
- }
- ty::MethodStaticClosure(def_id) => {
- def_id
- }
- ty::MethodTypeParam(ty::MethodParam {
- ref trait_ref,
- method_num: index,
- ..
- }) |
- ty::MethodTraitObject(ty::MethodObject {
- ref trait_ref,
- method_num: index,
- ..
- }) => {
- ty::trait_item(tcx, trait_ref.def_id, index).def_id()
- }
- }
- }
- None => return
- }
+ tcx.tables.borrow().method_map[&method_call].def_id
}
ast::ExprField(ref base_e, ref field) => {
span = field.span;
- match ty::expr_ty_adjusted(tcx, base_e).sty {
+ match tcx.expr_ty_adjusted(base_e).sty {
ty::TyStruct(did, _) => {
- ty::lookup_struct_fields(tcx, did)
+ tcx.lookup_struct_fields(did)
.iter()
.find(|f| f.name == field.node.name)
.unwrap_or_else(|| {
}
ast::ExprTupField(ref base_e, ref field) => {
span = field.span;
- match ty::expr_ty_adjusted(tcx, base_e).sty {
+ match tcx.expr_ty_adjusted(base_e).sty {
ty::TyStruct(did, _) => {
- ty::lookup_struct_fields(tcx, did)
+ tcx.lookup_struct_fields(did)
.get(field.node)
.unwrap_or_else(|| {
tcx.sess.span_bug(field.span,
}
}
ast::ExprStruct(_, ref expr_fields, _) => {
- let type_ = ty::expr_ty(tcx, e);
+ let type_ = tcx.expr_ty(e);
match type_.sty {
ty::TyStruct(did, _) => {
- let struct_fields = ty::lookup_struct_fields(tcx, did);
+ let struct_fields = tcx.lookup_struct_fields(did);
// check the stability of each field that appears
// in the construction expression.
for field in expr_fields {
debug!("check_pat(pat = {:?})", pat);
if is_internal(tcx, pat.span) { return; }
- let did = match ty::pat_ty_opt(tcx, pat) {
+ let did = match tcx.pat_ty_opt(pat) {
Some(&ty::TyS { sty: ty::TyStruct(did, _), .. }) => did,
Some(_) | None => return,
};
- let struct_fields = ty::lookup_struct_fields(tcx, did);
+ let struct_fields = tcx.lookup_struct_fields(did);
match pat.node {
// Foo(a, b, c)
ast::PatEnum(_, Some(ref pat_fields)) => {
fn maybe_do_stability_check(tcx: &ty::ctxt, id: ast::DefId, span: Span,
cb: &mut FnMut(ast::DefId, Span, &Option<&Stability>)) {
- if !is_staged_api(tcx, id) { return }
- if is_internal(tcx, span) { return }
+ if !is_staged_api(tcx, id) {
+ debug!("maybe_do_stability_check: \
+ skipping id={:?} since it is not staged_api", id);
+ return;
+ }
+ if is_internal(tcx, span) {
+ debug!("maybe_do_stability_check: \
+ skipping span={:?} since it is internal", span);
+ return;
+ }
let ref stability = lookup(tcx, id);
+ debug!("maybe_do_stability_check: \
+ inspecting id={:?} span={:?} of stability={:?}", id, span, stability);
cb(id, span, stability);
}
}
fn is_staged_api(tcx: &ty::ctxt, id: DefId) -> bool {
- match ty::trait_item_of_item(tcx, id) {
+ match tcx.trait_item_of_item(id) {
Some(ty::MethodTraitItemId(trait_method_id))
if trait_method_id != id => {
is_staged_api(tcx, trait_method_id)
debug!("lookup(id={:?})", id);
// is this definition the implementation of a trait method?
- match ty::trait_item_of_item(tcx, id) {
+ match tcx.trait_item_of_item(id) {
Some(ty::MethodTraitItemId(trait_method_id)) if trait_method_id != id => {
debug!("lookup: trait_method_id={:?}", trait_method_id);
return lookup(tcx, trait_method_id)
};
item_stab.or_else(|| {
- if ty::is_impl(tcx, id) {
- if let Some(trait_id) = ty::trait_id_of_impl(tcx, id) {
+ if tcx.is_impl(id) {
+ if let Some(trait_id) = tcx.trait_id_of_impl(id) {
// FIXME (#18969): for the time being, simply use the
// stability of the trait to determine the stability of any
// unmarked impls for it. See FIXME above for more details.
pub use self::ParamSpace::*;
pub use self::RegionSubsts::*;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags, RegionEscape};
use middle::ty_fold::{self, TypeFoldable, TypeFolder};
use std::fmt;
*self.types.get(ty_param_def.space, ty_param_def.index as usize)
}
- pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
- self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
- match self.regions {
- ErasedRegions =>
- false,
- NonerasedRegions(ref regions) =>
- regions.iter().any(|r| r.escapes_depth(depth)),
- }
- }
- }
-
pub fn self_ty(&self) -> Option<Ty<'tcx>> {
self.types.get_self().cloned()
}
{
let Substs { types, regions } = self;
let types = types.with_vec(FnSpace, m_types);
- let regions = regions.map(m_regions,
- |r, m_regions| r.with_vec(FnSpace, m_regions));
+ let regions = regions.map(|r| r.with_vec(FnSpace, m_regions));
+ Substs { types: types, regions: regions }
+ }
+
+ pub fn method_to_trait(self) -> Substs<'tcx> {
+ let Substs { mut types, regions } = self;
+ types.truncate(FnSpace, 0);
+ let regions = regions.map(|mut r| { r.truncate(FnSpace, 0); r });
Substs { types: types, regions: regions }
}
}
impl RegionSubsts {
- fn map<A, F>(self, a: A, op: F) -> RegionSubsts where
- F: FnOnce(VecPerParamSpace<ty::Region>, A) -> VecPerParamSpace<ty::Region>,
+ pub fn map<F>(self, op: F) -> RegionSubsts where
+ F: FnOnce(VecPerParamSpace<ty::Region>) -> VecPerParamSpace<ty::Region>,
{
match self {
ErasedRegions => ErasedRegions,
- NonerasedRegions(r) => NonerasedRegions(op(r, a))
+ NonerasedRegions(r) => NonerasedRegions(op(r))
}
}
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_needs_subst(t) {
+ if !t.needs_subst() {
return t;
}
/// first case we do not increase the Debruijn index and in the second case we do. The reason
/// is that only in the second case have we passed through a fn binder.
fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
- debug!("shift_regions(ty={:?}, region_binders_passed={:?}, type_has_escaping_regions={:?})",
- ty, self.region_binders_passed, ty::type_has_escaping_regions(ty));
+ debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})",
+ ty, self.region_binders_passed, ty.has_escaping_regions());
- if self.region_binders_passed == 0 || !ty::type_has_escaping_regions(ty) {
+ if self.region_binders_passed == 0 || !ty.has_escaping_regions() {
return ty;
}
and then a call to that function:
- let v: Vec<int> = clone_slice([1, 2, 3])
+ let v: Vec<isize> = clone_slice([1, 2, 3])
it is the job of trait resolution to figure out (in which case)
-whether there exists an impl of `int : Clone`
+whether there exists an impl of `isize : Clone`
Note that in some cases, like generic functions, we may not be able to
find a specific impl, but we can figure out that the caller must
This trait just has one method. It's about as simple as it gets. It
converts from the (implicit) `Self` type to the `Target` type. If we
-wanted to permit conversion between `int` and `uint`, we might
+wanted to permit conversion between `isize` and `usize`, we might
implement `Convert` like so:
```rust
-impl Convert<uint> for int { ... } // int -> uint
-impl Convert<int> for uint { ... } // uint -> int
+impl Convert<usize> for isize { ... } // isize -> usize
+impl Convert<isize> for usize { ... } // usize -> isize
```
Now imagine there is some code like the following:
```rust
-let x: int = ...;
+let x: isize = ...;
let y = x.convert();
```
The call to convert will generate a trait reference `Convert<$Y> for
-int`, where `$Y` is the type variable representing the type of
+isize`, where `$Y` is the type variable representing the type of
`y`. When we match this against the two impls we can see, we will find
-that only one remains: `Convert<uint> for int`. Therefore, we can
+that only one remains: `Convert<usize> for isize`. Therefore, we can
select this impl, which will cause the type of `$Y` to be unified to
-`uint`. (Note that while assembling candidates, we do the initial
+`usize`. (Note that while assembling candidates, we do the initial
unifications in a transaction, so that they don't affect one another.)
There are tests to this effect in src/test/run-pass:
values found in the obligation, possibly yielding a type error. If we
return to our example of the `Convert` trait from the previous
section, confirmation is where an error would be reported, because the
-impl specified that `T` would be `uint`, but the obligation reported
+impl specified that `T` would be `usize`, but the obligation reported
`char`. Hence the result of selection would be an error.
### Selection during translation
trait Foo { ... }
impl<U,T:Bar<U>> Foo for Vec<T> { ... }
- impl Bar<uint> for int { ... }
+ impl Bar<usize> for isize { ... }
-After one shallow round of selection for an obligation like `Vec<int>
+After one shallow round of selection for an obligation like `Vec<isize>
: Foo`, we would know which impl we want, and we would know that
-`T=int`, but we do not know the type of `U`. We must select the
-nested obligation `int : Bar<U>` to find out that `U=uint`.
+`T=isize`, but we do not know the type of `U`. We must select the
+nested obligation `isize : Bar<U>` to find out that `U=usize`.
It would be good to only do *just as much* nested resolution as
necessary. Currently, though, we just do a full resolution.
# Higher-ranked trait bounds
One of the more subtle concepts at work is *higher-ranked trait
-bounds*. An example of such a bound is `for<'a> MyTrait<&'a int>`.
+bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`.
Let's walk through how selection on higher-ranked trait references
works.
```
Let's say we have a function `want_hrtb` that wants a type which
-implements `Foo<&'a int>` for any `'a`:
+implements `Foo<&'a isize>` for any `'a`:
```rust
-fn want_hrtb<T>() where T : for<'a> Foo<&'a int> { ... }
+fn want_hrtb<T>() where T : for<'a> Foo<&'a isize> { ... }
```
-Now we have a struct `AnyInt` that implements `Foo<&'a int>` for any
+Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any
`'a`:
```rust
struct AnyInt;
-impl<'a> Foo<&'a int> for AnyInt { }
+impl<'a> Foo<&'a isize> for AnyInt { }
```
-And the question is, does `AnyInt : for<'a> Foo<&'a int>`? We want the
+And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the
answer to be yes. The algorithm for figuring it out is closely related
to the subtyping for higher-ranked types (which is described in
`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that
[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
So let's work through our example. The first thing we would do is to
-skolemize the obligation, yielding `AnyInt : Foo<&'0 int>` (here `'0`
+skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0`
represents skolemized region #0). Note that we now have no quantifiers;
in terms of the compiler type, this changes from a `ty::PolyTraitRef`
to a `TraitRef`. We would then create the `TraitRef` from the impl,
using fresh variables for its bound regions (and thus getting
-`Foo<&'$a int>`, where `'$a` is the inference variable for `'a`). Next
+`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next
we relate the two trait refs, yielding a graph with the constraint
that `'0 == '$a`. Finally, we check for skolemization "leaks" -- a
leak is basically any attempt to relate a skolemized region to another
```rust
struct StaticInt;
-impl Foo<&'static int> for StaticInt;
+impl Foo<&'static isize> for StaticInt;
```
-We want the obligation `StaticInt : for<'a> Foo<&'a int>` to be
+We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be
considered unsatisfied. The check begins just as before. `'a` is
skolemized to `'0` and the impl trait reference is instantiated to
-`Foo<&'static int>`. When we relate those two, we get a constraint
+`Foo<&'static isize>`. When we relate those two, we get a constraint
like `'static == '0`. This means that the taint set for `'0` is `{'0,
'static}`, which fails the leak check.
}
```
-Now let's say we have a obligation `for<'a> Foo<&'a int>` and we match
+Now let's say we have an obligation `for<'a> Foo<&'a isize>` and we match
this impl. What obligation is generated as a result? We want to get
-`for<'a> Bar<&'a int>`, but how does that happen?
+`for<'a> Bar<&'a isize>`, but how does that happen?
After the matching, we are in a position where we have a skolemized
-substitution like `X => &'0 int`. If we apply this substitution to the
-impl obligations, we get `F : Bar<&'0 int>`. Obviously this is not
+substitution like `X => &'0 isize`. If we apply this substitution to the
+impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not
directly usable because the skolemized region `'0` cannot leak out of
our computation.
region itself plus various intermediate region variables. We then walk
the trait-reference and convert every region in that taint set back to
a late-bound region, so in this case we'd wind up with `for<'a> F :
-Bar<&'a int>`.
+Bar<&'a isize>`.
# Caching and subtle considerations therewith
The high-level idea of how the cache works is that we first replace
all unbound inference variables with skolemized versions. Therefore,
-if we had a trait reference `uint : Foo<$1>`, where `$n` is an unbound
-inference variable, we might replace it with `uint : Foo<%0>`, where
+if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound
+inference variable, we might replace it with `usize : Foo<%0>`, where
`%n` is a skolemized type. We would then look this up in the cache.
If we found a hit, the hit would tell us the immediate next step to
take in the selection process: i.e., apply impl #22, or apply where
we come to the conclusion that the only possible impl is this one,
with def-id 22:
- impl Foo<int> for uint { ... } // Impl #22
+ impl Foo<isize> for usize { ... } // Impl #22
-We would then record in the cache `uint : Foo<%0> ==>
+We would then record in the cache `usize : Foo<%0> ==>
ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
-would (as a side-effect) unify `$1` with `int`.
+would (as a side-effect) unify `$1` with `isize`.
-Now, at some later time, we might come along and see a `uint :
-Foo<$3>`. When skolemized, this would yield `uint : Foo<%0>`, just as
+Now, at some later time, we might come along and see a `usize :
+Foo<$3>`. When skolemized, this would yield `usize : Foo<%0>`, just as
before, and hence the cache lookup would succeed, yielding
`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
-(as a side-effect) unify `$3` with `int`.
+(as a side-effect) unify `$3` with `isize`.
## Where clauses and the local vs global cache
impl1_def_id,
impl2_def_id);
- let param_env = &ty::empty_parameter_environment(infcx.tcx);
- let selcx = &mut SelectionContext::intercrate(infcx, param_env);
+ let selcx = &mut SelectionContext::intercrate(infcx);
infcx.probe(|_| {
overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id)
})
// already
if
trait_ref.def_id.krate != ast::LOCAL_CRATE &&
- !ty::has_attr(tcx, trait_ref.def_id, "fundamental")
+ !tcx.has_attr(trait_ref.def_id, "fundamental")
{
debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
return false;
let impl_substs =
&substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id);
let impl_trait_ref =
- ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap();
+ selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
let impl_trait_ref =
impl_trait_ref.subst(selcx.tcx(), impl_substs);
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
- let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id);
+ let predicates = selcx.tcx().lookup_predicates(impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
project::normalize(selcx, ObligationCause::dummy(), &predicates);
// We only except this routine to be invoked on implementations
// of a trait, not inherent implementations.
- let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap();
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
debug!("orphan_check: trait_ref={:?}", trait_ref);
// If the *trait* is local to the crate, ok.
ty::TyBox(..) | ty::TyRef(..) =>
true,
ty::TyEnum(def_id, _) | ty::TyStruct(def_id, _) =>
- ty::has_attr(tcx, def_id, "fundamental"),
+ tcx.has_attr(def_id, "fundamental"),
ty::TyTrait(ref data) =>
- ty::has_attr(tcx, data.principal_def_id(), "fundamental"),
+ tcx.has_attr(data.principal_def_id(), "fundamental"),
_ =>
false
}
use fmt_macros::{Parser, Piece, Position};
use middle::infer::InferCtxt;
-use middle::ty::{self, AsPredicate, ReferencesError, ToPolyTraitRef, TraitRef};
+use middle::ty::{self, ToPredicate, HasTypeFlags, ToPolyTraitRef, TraitRef};
use middle::ty_fold::TypeFoldable;
use std::collections::HashMap;
use std::fmt;
span: Span) -> Option<String> {
let def_id = trait_ref.def_id;
let mut report = None;
- for item in ty::get_attrs(infcx.tcx, def_id).iter() {
+ for item in infcx.tcx.get_attrs(def_id).iter() {
if item.check_name("rustc_on_unimplemented") {
let err_sp = if item.meta().span == DUMMY_SP {
span
} else {
item.meta().span
};
- let def = ty::lookup_trait_def(infcx.tcx, def_id);
+ let def = infcx.tcx.lookup_trait_def(def_id);
let trait_str = def.trait_ref.to_string();
if let Some(ref istring) = item.value_str() {
let mut generic_map = def.generics.types.iter_enumerated()
OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref);
let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref);
- if !ty::type_is_error(actual_trait_ref.self_ty()) {
+ if !actual_trait_ref.self_ty().references_error() {
span_err!(infcx.tcx.sess, obligation.cause.span, E0281,
"type mismatch: the type `{}` implements the trait `{}`, \
but the trait `{}` is required ({})",
TraitNotObjectSafe(did) => {
span_err!(infcx.tcx.sess, obligation.cause.span, E0038,
"cannot convert to a trait object because trait `{}` is not object-safe",
- ty::item_path_str(infcx.tcx, did));
+ infcx.tcx.item_path_str(did));
for violation in object_safety_violations(infcx.tcx, did) {
match violation {
let trait_ref = data.to_poly_trait_ref();
let self_ty = trait_ref.self_ty();
let all_types = &trait_ref.substs().types;
- if all_types.iter().any(|&t| ty::type_is_error(t)) {
- } else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
+ if all_types.references_error() {
+ } else if all_types.needs_infer() {
// This is kind of a hack: it frequently happens that some earlier
// error prevents types from being fully inferred, and then we get
// a bunch of uninteresting errors saying something like "<generic
match *cause_code {
ObligationCauseCode::MiscObligation => { }
ObligationCauseCode::ItemObligation(item_def_id) => {
- let item_name = ty::item_path_str(tcx, item_def_id);
+ let item_name = tcx.item_path_str(item_def_id);
tcx.sess.span_note(
cause_span,
&format!("required by `{}`", item_name));
}
ObligationCauseCode::ClosureCapture(var_id, closure_span, builtin_bound) => {
let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
- let trait_name = ty::item_path_str(tcx, def_id);
- let name = ty::local_var_name_str(tcx, var_id);
+ let trait_name = tcx.item_path_str(def_id);
+ let name = tcx.local_var_name_str(var_id);
span_note!(tcx.sess, closure_span,
"the closure that captures `{}` requires that all captured variables \
implement the trait `{}`",
span_note!(tcx.sess, cause_span,
"required because it appears within the type `{}`",
parent_trait_ref.0.self_ty());
- let parent_predicate = parent_trait_ref.as_predicate();
+ let parent_predicate = parent_trait_ref.to_predicate();
note_obligation_cause_code(infcx, &parent_predicate, cause_span, &*data.parent_code);
}
ObligationCauseCode::ImplDerivedObligation(ref data) => {
"required because of the requirements on the impl of `{}` for `{}`",
parent_trait_ref,
parent_trait_ref.0.self_ty());
- let parent_predicate = parent_trait_ref.as_predicate();
+ let parent_predicate = parent_trait_ref.to_predicate();
note_obligation_cause_code(infcx, &parent_predicate, cause_span, &*data.parent_code);
}
ObligationCauseCode::CompareImplMethodObligation => {
// except according to those terms.
use middle::infer::InferCtxt;
-use middle::ty::{self, RegionEscape, Ty};
+use middle::ty::{self, RegionEscape, Ty, HasTypeFlags};
use std::collections::HashSet;
use std::fmt;
// particular node-id).
region_obligations: NodeMap<Vec<RegionObligation<'tcx>>>,
- errors_will_be_reported: bool,
+ pub errors_will_be_reported: bool,
}
#[derive(Clone)]
/// `projection_ty` again.
pub fn normalize_projection_type<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
- typer: &ty::ClosureTyper<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>)
-> Ty<'tcx>
// FIXME(#20304) -- cache
- let mut selcx = SelectionContext::new(infcx, typer);
+ let mut selcx = SelectionContext::new(infcx);
let normalized = project::normalize_projection_type(&mut selcx, projection_ty, cause, 0);
for obligation in normalized.obligations {
}
pub fn select_all_or_error<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- typer: &ty::ClosureTyper<'tcx>)
+ infcx: &InferCtxt<'a,'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
- try!(self.select_where_possible(infcx, typer));
+ try!(self.select_where_possible(infcx));
// Anything left is ambiguous.
let errors: Vec<FulfillmentError> =
/// gaining type information. It'd be equally valid to use `select_where_possible` but it
/// results in `O(n^2)` performance (#18208).
pub fn select_new_obligations<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- typer: &ty::ClosureTyper<'tcx>)
+ infcx: &InferCtxt<'a,'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
- let mut selcx = SelectionContext::new(infcx, typer);
+ let mut selcx = SelectionContext::new(infcx);
self.select(&mut selcx, true)
}
pub fn select_where_possible<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- typer: &ty::ClosureTyper<'tcx>)
+ infcx: &InferCtxt<'a,'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
- let mut selcx = SelectionContext::new(infcx, typer);
+ let mut selcx = SelectionContext::new(infcx);
self.select(&mut selcx, false)
}
}
ty::Predicate::TypeOutlives(ref binder) => {
- // For now, we just check that there are no higher-ranked
- // regions. If there are, we will call this obligation an
- // error. Eventually we should be able to support some
- // cases here, I imagine (e.g., `for<'a> int : 'a`).
- if ty::count_late_bound_regions(selcx.tcx(), binder) != 0 {
- errors.push(
- FulfillmentError::new(
- obligation.clone(),
- CodeSelectionError(Unimplemented)));
- } else {
- let ty::OutlivesPredicate(t_a, r_b) = binder.0;
- register_region_obligation(t_a, r_b,
- obligation.cause.clone(),
- region_obligations);
+ // Check if there are higher-ranked regions.
+ match selcx.tcx().no_late_bound_regions(binder) {
+ // If there are, inspect the underlying type further.
+ None => {
+ // Convert from `Binder<OutlivesPredicate<Ty, Region>>` to `Binder<Ty>`.
+ let binder = binder.map_bound_ref(|pred| pred.0);
+
+ // Check if the type has any bound regions.
+ match selcx.tcx().no_late_bound_regions(&binder) {
+ // If so, this obligation is an error (for now). Eventually we should be
+ // able to support additional cases here, like `for<'a> &'a str: 'a`.
+ None => {
+ errors.push(
+ FulfillmentError::new(
+ obligation.clone(),
+ CodeSelectionError(Unimplemented)))
+ }
+ // Otherwise, we have something of the form
+ // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
+ Some(t_a) => {
+ register_region_obligation(t_a, ty::ReStatic,
+ obligation.cause.clone(),
+ region_obligations);
+ }
+ }
+ }
+ // If there aren't, register the obligation.
+ Some(ty::OutlivesPredicate(t_a, r_b)) => {
+ register_region_obligation(t_a, r_b,
+ obligation.cause.clone(),
+ region_obligations);
+ }
}
true
}
!self.set.insert(p.clone())
}
}
-
-
use middle::free_region::FreeRegionMap;
use middle::subst;
-use middle::ty::{self, HasProjectionTypes, Ty};
+use middle::ty::{self, HasTypeFlags, Ty};
use middle::ty_fold::TypeFoldable;
use middle::infer::{self, fixup_err_to_string, InferCtxt};
use std::rc::Rc;
Unimplemented,
OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
ty::PolyTraitRef<'tcx>,
- ty::type_err<'tcx>),
+ ty::TypeError<'tcx>),
TraitNotObjectSafe(ast::DefId),
}
#[derive(Clone, PartialEq, Eq)]
pub struct VtableClosureData<'tcx, N> {
pub closure_def_id: ast::DefId,
- pub substs: subst::Substs<'tcx>,
+ pub substs: ty::ClosureSubsts<'tcx>,
/// Nested obligations. This can be non-empty if the closure
/// signature contains associated types.
pub nested: Vec<N>
/// for the object type `Foo`.
#[derive(PartialEq,Eq,Clone)]
pub struct VtableObjectData<'tcx> {
- /// the object type `Foo`.
- pub object_ty: Ty<'tcx>,
-
/// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; this is the start of
+ /// `upcast_trait_ref`'s methods in that vtable.
+ pub vtable_base: usize
}
/// Creates predicate obligations from the generic bounds.
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- typer: &ty::ClosureTyper<'tcx>,
ty: Ty<'tcx>,
bound: ty::BuiltinBound,
span: Span)
// Note: we only assume something is `Copy` if we can
// *definitively* show that it implements `Copy`. Otherwise,
// assume it is move; linear is always ok.
- match fulfill_cx.select_all_or_error(infcx, typer) {
+ match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => {
debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
ty,
}
}
+// FIXME: this will need to be removed ...
/// Normalizes the parameter environment, reporting errors if they occur.
pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
cause: ObligationCause<'tcx>)
let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
- let infcx = infer::new_infer_ctxt(tcx);
- let predicates = match fully_normalize(&infcx, &elaborated_env, cause,
- &elaborated_env.caller_bounds) {
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env), false);
+ let predicates = match fully_normalize(&infcx, cause,
+ &infcx.parameter_environment.caller_bounds) {
Ok(predicates) => predicates,
Err(errors) => {
report_fulfillment_errors(&infcx, &errors);
- return unnormalized_env; // an unnormalized env is better than nothing
+ return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
// all things considered.
let err_msg = fixup_err_to_string(fixup_err);
tcx.sess.span_err(span, &err_msg);
- return elaborated_env; // an unnormalized env is better than nothing
+ return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
- elaborated_env.with_caller_bounds(predicates)
+ infcx.parameter_environment.with_caller_bounds(predicates)
}
pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- closure_typer: &ty::ClosureTyper<'tcx>,
cause: ObligationCause<'tcx>,
value: &T)
-> Result<T, Vec<FulfillmentError<'tcx>>>
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_param_env(value={:?})", value);
- let mut selcx = &mut SelectionContext::new(infcx, closure_typer);
+ let mut selcx = &mut SelectionContext::new(infcx);
+ // FIXME (@jroesch) ISSUE 26721
+ // I'm not sure if this is a bug or not, needs further investigation.
+ // It appears that by reusing the fulfillment_cx here we incur more
+ // obligations and later trip an assertion on regionck.rs line 337.
+ //
+ // The two possibilities I see are:
+ // - normalization is not actually fully happening and we
+ // have a bug elsewhere
+ // - we are adding a duplicate bound into the list causing
+ // its size to change.
+ //
+ // I think we should probably land this refactor and then come
+ // back to this in a follow-up patch.
let mut fulfill_cx = FulfillmentContext::new(false);
+
let Normalized { value: normalized_value, obligations } =
project::normalize(selcx, cause, value);
debug!("normalize_param_env: normalized_value={:?} obligations={:?}",
for obligation in obligations {
fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
}
- try!(fulfill_cx.select_all_or_error(infcx, closure_typer));
+
+ try!(fulfill_cx.select_all_or_error(infcx));
let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
debug!("normalize_param_env: resolved_value={:?}", resolved_value);
Ok(resolved_value)
VtableClosure(c) => VtableClosure(VtableClosureData {
closure_def_id: c.closure_def_id,
substs: c.substs,
- nested: c.nested.into_iter().map(f).collect()
+ nested: c.nested.into_iter().map(f).collect(),
})
}
}
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
- let def = ty::lookup_trait_def(tcx, trait_def_id);
+ let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
{
// Check methods for violations.
let mut violations: Vec<_> =
- ty::trait_items(tcx, trait_def_id).iter()
+ tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
trait_def_id: ast::DefId)
-> bool
{
- let trait_def = ty::lookup_trait_def(tcx, trait_def_id);
+ let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
- let predicates = ty::lookup_super_predicates(tcx, trait_def_id);
+ let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
trait_def_id: ast::DefId)
-> bool
{
- let trait_def = ty::lookup_trait_def(tcx, trait_def_id);
- let trait_predicates = ty::lookup_predicates(tcx, trait_def_id);
+ let trait_def = tcx.lookup_trait_def(trait_def_id);
+ let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
- let free_substs = ty::construct_free_substs(tcx, generics, ast::DUMMY_NODE_ID);
+ let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
- ty::maybe_walk_ty(ty, |ty| {
+ ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
- let trait_def = ty::lookup_trait_def(tcx, trait_def_id);
+ let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
use middle::infer;
use middle::subst::Subst;
-use middle::ty::{self, AsPredicate, ReferencesError, RegionEscape,
- HasProjectionTypes, ToPolyTraitRef, Ty};
+use middle::ty::{self, ToPredicate, RegionEscape, HasTypeFlags, ToPolyTraitRef, Ty};
use middle::ty_fold::{self, TypeFoldable, TypeFolder};
use syntax::parse::token;
use util::common::FN_OUTPUT_NAME;
#[derive(Clone)]
pub struct MismatchedProjectionTypes<'tcx> {
- pub err: ty::type_err<'tcx>
+ pub err: ty::TypeError<'tcx>
}
#[derive(PartialEq, Eq, Debug)]
debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}",
self_ty.sty);
match self_ty.sty {
- ty::TyClosure(closure_def_id, substs) => {
+ ty::TyClosure(closure_def_id, ref substs) => {
let closure_typer = selcx.closure_typer();
let closure_type = closure_typer.closure_type(closure_def_id, substs);
let ty::Binder((_, ret_type)) =
cause: ObligationCause<'tcx>,
value: &T)
-> Normalized<'tcx, T>
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
normalize_with_depth(selcx, cause, 0, value)
}
depth: usize,
value: &T)
-> Normalized<'tcx, T>
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
let result = normalizer.fold(value);
}
}
- fn fold<T:TypeFoldable<'tcx> + HasProjectionTypes>(&mut self, value: &T) -> T {
+ fn fold<T:TypeFoldable<'tcx> + HasTypeFlags>(&mut self, value: &T) -> T {
let value = self.selcx.infcx().resolve_type_vars_if_possible(value);
if !value.has_projection_types() {
projection_ty: projection_ty,
ty: ty_var
});
- let obligation = Obligation::with_depth(cause, depth+1, projection.as_predicate());
+ let obligation = Obligation::with_depth(cause, depth+1, projection.to_predicate());
Normalized {
value: ty_var,
obligations: vec!(obligation)
depth,
obligations);
- if ty::type_has_projection(projected_ty) {
+ if projected_ty.has_projection_types() {
let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
let normalized_ty = normalizer.fold(&projected_ty);
let trait_ref = projection_ty.trait_ref.to_poly_trait_ref();
let trait_obligation = Obligation { cause: cause,
recursion_depth: depth,
- predicate: trait_ref.as_predicate() };
+ predicate: trait_ref.to_predicate() };
Normalized {
value: selcx.tcx().types.err,
obligations: vec!(trait_obligation)
Ok(ProjectedTy::Progress(ty, obligations))
}
None => {
- Ok(ProjectedTy::NoProgress(ty::mk_projection(selcx.tcx(),
- obligation.predicate.trait_ref.clone(),
- obligation.predicate.item_name)))
+ Ok(ProjectedTy::NoProgress(selcx.tcx().mk_projection(
+ obligation.predicate.trait_ref.clone(),
+ obligation.predicate.item_name)))
}
}
}
};
// If so, extract what we know from the trait and try to come up with a good answer.
- let trait_predicates = ty::lookup_predicates(selcx.tcx(), trait_ref.def_id);
+ let trait_predicates = selcx.tcx().lookup_predicates(trait_ref.def_id);
let bounds = trait_predicates.instantiate(selcx.tcx(), trait_ref.substs);
let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates.into_vec());
assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref,
selcx: &mut SelectionContext<'cx,'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
- candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
- object_ty: Ty<'tcx>)
+ candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
{
+ let self_ty = obligation_trait_ref.self_ty();
+ let object_ty = selcx.infcx().shallow_resolve(self_ty);
debug!("assemble_candidates_from_object_type(object_ty={:?})",
object_ty);
let data = match object_ty.sty {
};
let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty);
let env_predicates = projection_bounds.iter()
- .map(|p| p.as_predicate())
+ .map(|p| p.to_predicate())
.collect();
let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates);
assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref,
candidate_set.vec.push(
ProjectionTyCandidate::Impl(data));
}
- super::VtableObject(data) => {
+ super::VtableObject(_) => {
assemble_candidates_from_object_type(
- selcx, obligation, obligation_trait_ref, candidate_set,
- data.object_ty);
+ selcx, obligation, obligation_trait_ref, candidate_set);
}
super::VtableClosure(data) => {
candidate_set.vec.push(
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
{
let fn_type = selcx.infcx().shallow_resolve(fn_type);
- let sig = ty::ty_fn_sig(fn_type);
+ let sig = fn_type.fn_sig();
confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes)
}
// It is not in the impl - get the default from the trait.
let trait_ref = obligation.predicate.trait_ref;
- for trait_item in ty::trait_items(selcx.tcx(), trait_ref.def_id).iter() {
+ for trait_item in selcx.tcx().trait_items(trait_ref.def_id).iter() {
if let &ty::TypeTraitItem(ref assoc_ty) = trait_item {
if assoc_ty.name == obligation.predicate.item_name {
if let Some(ty) = assoc_ty.ty {
// ought to be reported by the type checker method
// `check_impl_items_against_trait`, so here we
// just return TyError.
+ debug!("confirm_impl_candidate: no associated type {:?} for {:?}",
+ assoc_ty.name,
+ trait_ref);
return (selcx.tcx().types.err, vec!());
}
}
use middle::fast_reject;
use middle::subst::{Subst, Substs, TypeSpace};
-use middle::ty::{self, AsPredicate, RegionEscape, ToPolyTraitRef, Ty};
+use middle::ty::{self, ToPredicate, RegionEscape, ToPolyTraitRef, Ty, HasTypeFlags};
use middle::infer;
use middle::infer::{InferCtxt, TypeFreshener};
use middle::ty_fold::TypeFoldable;
pub struct SelectionContext<'cx, 'tcx:'cx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
- closure_typer: &'cx (ty::ClosureTyper<'tcx>+'cx),
/// Freshener used specifically for skolemizing entries on the
/// obligation stack. This ensures that all entries on the stack
/// other words, we consider `$0 : Bar` to be unimplemented if
/// there is no type that the user could *actually name* that
/// would satisfy it. This avoids crippling inference, basically.
+
intercrate: bool,
}
/// Implementation of a `Fn`-family trait by one of the
/// anonymous types generated for a `||` expression.
- ClosureCandidate(/* closure */ ast::DefId, Substs<'tcx>),
+ ClosureCandidate(/* closure */ ast::DefId, &'tcx ty::ClosureSubsts<'tcx>),
/// Implementation of a `Fn`-family trait by one of the anonymous
/// types generated for a fn pointer type (e.g., `fn(int)->int`)
}
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
- pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>,
- closure_typer: &'cx ty::ClosureTyper<'tcx>)
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>)
-> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx: infcx,
- closure_typer: closure_typer,
freshener: infcx.freshener(),
intercrate: false,
}
}
- pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>,
- closure_typer: &'cx ty::ClosureTyper<'tcx>)
+ pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>)
-> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx: infcx,
- closure_typer: closure_typer,
freshener: infcx.freshener(),
intercrate: true,
}
}
pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> {
- self.closure_typer.param_env()
+ self.infcx.param_env()
}
- pub fn closure_typer(&self) -> &'cx (ty::ClosureTyper<'tcx>+'cx) {
- self.closure_typer
+ pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+ self.infcx
}
///////////////////////////////////////////////////////////////////////////
// lifetimes can appear inside the self-type.
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.sty {
- ty::TyClosure(id, ref substs) => (id, substs.clone()),
+ ty::TyClosure(id, ref substs) => (id, substs),
_ => { return; }
};
assert!(!substs.has_escaping_regions());
// terms of `Fn` etc, but we could probably make this more
// precise still.
let input_types = stack.fresh_trait_ref.0.input_types();
- let unbound_input_types = input_types.iter().any(|&t| ty::type_is_fresh(t));
+ let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh());
if
unbound_input_types &&
(self.intercrate ||
stack: &TraitObligationStack<'o, 'tcx>)
-> SelectionResult<'tcx, SelectionCandidate<'tcx>>
{
- if ty::type_is_error(stack.obligation.predicate.0.self_ty()) {
+ if stack.obligation.predicate.0.self_ty().references_error() {
return Ok(Some(ErrorCandidate));
}
match candidate {
ImplCandidate(def_id) => {
- match ty::trait_impl_polarity(self.tcx(), def_id) {
+ match self.tcx().trait_impl_polarity(def_id) {
Some(ast::ImplPolarity::Negative) => return Err(Unimplemented),
_ => {}
}
match *candidate {
Ok(Some(_)) | Err(_) => true,
Ok(None) => {
- cache_fresh_trait_pred.0.input_types().iter().any(|&t| ty::type_has_ty_infer(t))
+ cache_fresh_trait_pred.0.input_types().has_infer_types()
}
}
}
projection_trait_ref={:?}",
projection_trait_ref);
- let trait_predicates = ty::lookup_predicates(self.tcx(), projection_trait_ref.def_id);
+ let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id);
let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs);
debug!("match_projection_obligation_against_bounds_from_trait: \
bounds={:?}",
// type/region parameters
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.sty {
- ty::TyClosure(id, substs) => (id, substs),
+ ty::TyClosure(id, ref substs) => (id, substs),
ty::TyInfer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
kind,
obligation);
- match self.closure_typer.closure_kind(closure_def_id) {
+ match self.infcx.closure_kind(closure_def_id) {
Some(closure_kind) => {
debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
if closure_kind.extends(kind) {
- candidates.vec.push(ClosureCandidate(closure_def_id,
- substs.clone()));
+ candidates.vec.push(ClosureCandidate(closure_def_id, substs));
}
}
None => {
{
debug!("assemble_candidates_from_impls(obligation={:?})", obligation);
- let def = ty::lookup_trait_def(self.tcx(), obligation.predicate.def_id());
+ let def = self.tcx().lookup_trait_def(obligation.predicate.def_id());
def.for_each_relevant_impl(
self.tcx(),
let def_id = obligation.predicate.def_id();
- if ty::trait_has_default_impl(self.tcx(), def_id) {
+ if self.tcx().trait_has_default_impl(def_id) {
match self_ty.sty {
ty::TyTrait(..) => {
// For object types, we don't know what the closed
// object types, because it just lets you reflect
// onto the object type, not into the object's
// interior.
- if ty::has_attr(self.tcx(), def_id, "rustc_reflect_like") {
+ if self.tcx().has_attr(def_id, "rustc_reflect_like") {
candidates.vec.push(DefaultImplObjectCandidate(def_id));
}
}
candidates.ambiguous = true;
}
_ => {
- if self.constituent_types_for_ty(self_ty).is_some() {
- candidates.vec.push(DefaultImplCandidate(def_id.clone()))
- } else {
- // We don't yet know what the constituent
- // types are. So call it ambiguous for now,
- // though this is a bit stronger than
- // necessary: that is, we know that the
- // defaulted impl applies, but we can't
- // process the confirmation step without
- // knowing the constituent types. (Anyway, in
- // the particular case of defaulted impls, it
- // doesn't really matter much either way,
- // since we won't be aiding inference by
- // processing the confirmation step.)
- candidates.ambiguous = true;
- }
+ candidates.vec.push(DefaultImplCandidate(def_id.clone()))
}
}
}
debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}",
poly_trait_ref);
- // see whether the object trait can be upcast to the trait we are looking for
- let upcast_trait_refs = self.upcast(poly_trait_ref, obligation);
- if upcast_trait_refs.len() > 1 {
+ // Count only those upcast versions that match the trait-ref
+ // we are looking for. Specifically, check not only for the
+ // correct trait but also for the correct type parameters.
+ // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
+ // but `Foo` is declared as `trait Foo : Bar<u32>`.
+ let upcast_trait_refs =
+ util::supertraits(self.tcx(), poly_trait_ref)
+ .filter(|upcast_trait_ref| {
+ self.infcx.probe(|_| {
+ let upcast_trait_ref = upcast_trait_ref.clone();
+ self.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok()
+ })
+ })
+ .count();
+
+ if upcast_trait_refs > 1 {
// can be upcast in many ways; need more type information
candidates.ambiguous = true;
- } else if upcast_trait_refs.len() == 1 {
+ } else if upcast_trait_refs == 1 {
candidates.vec.push(ObjectCandidate);
}
// T: Trait
// so it seems ok if we (conservatively) fail to accept that `Unsize`
// obligation above. Should be possible to extend this in the future.
- let self_ty = match ty::no_late_bound_regions(self.tcx(), &obligation.self_ty()) {
+ let self_ty = match self.tcx().no_late_bound_regions(&obligation.self_ty()) {
Some(t) => t,
None => {
// Don't add any candidates if there are bound regions.
let principal =
data.principal_trait_ref_with_self_ty(self.tcx(),
self.tcx().types.err);
- let desired_def_id = obligation.predicate.def_id();
+ let copy_def_id = obligation.predicate.def_id();
for tr in util::supertraits(self.tcx(), principal) {
- if tr.def_id() == desired_def_id {
+ if tr.def_id() == copy_def_id {
return ok_if(Vec::new())
}
}
}
}
- ty::TyRef(_, ty::mt { ty: _, mutbl }) => {
+ ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl }) => {
// &mut T or &T
match bound {
ty::BoundCopy => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
ty::TyTuple(ref tys) => ok_if(tys.clone()),
- ty::TyClosure(def_id, substs) => {
+ ty::TyClosure(def_id, ref substs) => {
// FIXME -- This case is tricky. In the case of by-ref
// closures particularly, we need the results of
// inference to decide how to reflect the type of each
return ok_if(Vec::new());
}
- match self.closure_typer.closure_upvars(def_id, substs) {
- Some(upvars) => ok_if(upvars.iter().map(|c| c.ty).collect()),
- None => {
- debug!("assemble_builtin_bound_candidates: no upvar types available yet");
- Ok(AmbiguousBuiltin)
- }
- }
+ ok_if(substs.upvar_tys.clone())
}
ty::TyStruct(def_id, substs) => {
let types: Vec<Ty> =
- ty::struct_fields(self.tcx(), def_id, substs).iter()
+ self.tcx().struct_fields(def_id, substs).iter()
.map(|f| f.mt.ty)
.collect();
nominal(bound, types)
ty::TyEnum(def_id, substs) => {
let types: Vec<Ty> =
- ty::substd_enum_variants(self.tcx(), def_id, substs)
+ self.tcx().substd_enum_variants(def_id, substs)
.iter()
.flat_map(|variant| &variant.args)
.cloned()
/// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
/// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
/// ```
- fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Option<Vec<Ty<'tcx>>> {
+ fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
match t.sty {
ty::TyUint(_) |
ty::TyInt(_) |
ty::TyInfer(ty::IntVar(_)) |
ty::TyInfer(ty::FloatVar(_)) |
ty::TyChar => {
- Some(Vec::new())
+ Vec::new()
}
ty::TyTrait(..) |
}
ty::TyBox(referent_ty) => { // Box<T>
- Some(vec![referent_ty])
+ vec![referent_ty]
}
- ty::TyRawPtr(ty::mt { ty: element_ty, ..}) |
- ty::TyRef(_, ty::mt { ty: element_ty, ..}) => {
- Some(vec![element_ty])
+ ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
+ ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => {
+ vec![element_ty]
},
ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
- Some(vec![element_ty])
+ vec![element_ty]
}
ty::TyTuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
- Some(tys.clone())
- }
-
- ty::TyClosure(def_id, substs) => {
+ tys.clone()
+ }
+
+ ty::TyClosure(def_id, ref substs) => {
+ // FIXME(#27086). We are invariant w/r/t our
+ // substs.func_substs, but we don't see them as
+ // constituent types; this seems RIGHT but also like
+ // something that a normal type couldn't simulate. Is
+ // this just a gap with the way that PhantomData and
+ // OIBIT interact? That is, there is no way to say
+ // "make me invariant with respect to this TYPE, but
+ // do not act as though I can reach it"
assert_eq!(def_id.krate, ast::LOCAL_CRATE);
-
- match self.closure_typer.closure_upvars(def_id, substs) {
- Some(upvars) => {
- Some(upvars.iter().map(|c| c.ty).collect())
- }
- None => {
- None
- }
- }
+ substs.upvar_tys.clone()
}
// for `PhantomData<T>`, we pass `T`
ty::TyStruct(def_id, substs)
if Some(def_id) == self.tcx().lang_items.phantom_data() =>
{
- Some(substs.types.get_slice(TypeSpace).to_vec())
+ substs.types.get_slice(TypeSpace).to_vec()
}
ty::TyStruct(def_id, substs) => {
- Some(ty::struct_fields(self.tcx(), def_id, substs).iter()
- .map(|f| f.mt.ty)
- .collect())
+ self.tcx().struct_fields(def_id, substs)
+ .iter()
+ .map(|f| f.mt.ty)
+ .collect()
}
ty::TyEnum(def_id, substs) => {
- Some(ty::substd_enum_variants(self.tcx(), def_id, substs)
- .iter()
- .flat_map(|variant| &variant.args)
- .map(|&ty| ty)
- .collect())
+ self.tcx().substd_enum_variants(def_id, substs)
+ .iter()
+ .flat_map(|variant| &variant.args)
+ .map(|&ty| ty)
+ .collect()
}
}
}
ClosureCandidate(closure_def_id, substs) => {
let vtable_closure =
- try!(self.confirm_closure_candidate(obligation, closure_def_id, &substs));
+ try!(self.confirm_closure_candidate(obligation, closure_def_id, substs));
Ok(VtableClosure(vtable_closure))
}
// binder is moved below
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
- match self.constituent_types_for_ty(self_ty) {
- Some(types) => self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types)),
- None => {
- self.tcx().sess.bug(
- &format!(
- "asked to confirm default implementation for ambiguous type: {:?}",
- self_ty));
- }
- }
+ let types = self.constituent_types_for_ty(self_ty);
+ self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types))
}
fn confirm_default_impl_object_candidate(&mut self,
obligation,
trait_def_id);
- assert!(ty::has_attr(self.tcx(), trait_def_id, "rustc_reflect_like"));
+ assert!(self.tcx().has_attr(trait_def_id, "rustc_reflect_like"));
// OK to skip binder, it is reintroduced below
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
// reintroduce the two binding levels we skipped, then flatten into one
let all_types = ty::Binder(ty::Binder(all_types));
- let all_types = ty::flatten_late_bound_regions(self.tcx(), &all_types);
+ let all_types = self.tcx().flatten_late_bound_regions(&all_types);
self.vtable_default_impl(obligation, trait_def_id, all_types)
}
}
};
- // Upcast the object type to the obligation type. There must
- // be exactly one applicable trait-reference; if this were not
- // the case, we would have reported an ambiguity error rather
- // than successfully selecting one of the candidates.
- let upcast_trait_refs = self.upcast(poly_trait_ref.clone(), obligation);
- assert_eq!(upcast_trait_refs.len(), 1);
- let upcast_trait_ref = upcast_trait_refs.into_iter().next().unwrap();
+ let mut upcast_trait_ref = None;
+ let vtable_base;
+
+ {
+ // We want to find the first supertrait in the list of
+ // supertraits that we can unify with, and do that
+ // unification. We know that there is exactly one in the list
+ // where we can unify because otherwise select would have
+ // reported an ambiguity. (When we do find a match, also
+ // record it for later.)
+ let nonmatching =
+ util::supertraits(self.tcx(), poly_trait_ref)
+ .take_while(|&t| {
+ match
+ self.infcx.commit_if_ok(
+ |_| self.match_poly_trait_ref(obligation, t))
+ {
+ Ok(_) => { upcast_trait_ref = Some(t); false }
+ Err(_) => { true }
+ }
+ });
+
+ // Additionally, for each of the nonmatching predicates that
+ // we pass over, we sum up the number of vtable
+ // entries, so that we can compute the offset for the selected
+ // trait.
+ vtable_base =
+ nonmatching.map(|t| util::count_own_vtable_entries(self.tcx(), t))
+ .sum();
- match self.match_poly_trait_ref(obligation, upcast_trait_ref.clone()) {
- Ok(()) => { }
- Err(()) => {
- self.tcx().sess.span_bug(obligation.cause.span,
- "failed to match trait refs");
- }
}
- VtableObjectData { object_ty: self_ty,
- upcast_trait_ref: upcast_trait_ref }
+ VtableObjectData {
+ upcast_trait_ref: upcast_trait_ref.unwrap(),
+ vtable_base: vtable_base,
+ }
}
fn confirm_fn_pointer_candidate(&mut self,
// ok to skip binder; it is reintroduced below
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
- let sig = ty::ty_fn_sig(self_ty);
+ let sig = self_ty.fn_sig();
let trait_ref =
util::closure_trait_ref_and_return_type(self.tcx(),
obligation.predicate.def_id(),
fn confirm_closure_candidate(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: ast::DefId,
- substs: &Substs<'tcx>)
+ substs: &ty::ClosureSubsts<'tcx>)
-> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
SelectionError<'tcx>>
{
// assemble_candidates_for_unsizing should ensure there are no late bound
// regions here. See the comment there for more details.
let source = self.infcx.shallow_resolve(
- ty::no_late_bound_regions(tcx, &obligation.self_ty()).unwrap());
+ tcx.no_late_bound_regions(&obligation.self_ty()).unwrap());
let target = self.infcx.shallow_resolve(obligation.predicate.0.input_types()[0]);
debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
region_bound: data_b.bounds.region_bound,
builtin_bounds: data_b.bounds.builtin_bounds,
projection_bounds: data_a.bounds.projection_bounds.clone(),
- region_bound_will_change: data_b.bounds.region_bound_will_change,
};
- let new_trait = ty::mk_trait(tcx, data_a.principal.clone(), bounds);
+ let new_trait = tcx.mk_trait(data_a.principal.clone(), bounds);
let origin = infer::Misc(obligation.cause.span);
if self.infcx.sub_types(false, origin, new_trait, target).is_err() {
return Err(Unimplemented);
data_b.bounds.region_bound);
nested.push(Obligation::with_depth(cause,
obligation.recursion_depth + 1,
- ty::Binder(outlives).as_predicate()));
+ ty::Binder(outlives).to_predicate()));
}
// T -> Trait.
};
// Create the obligation for casting from T to Trait.
- push(data.principal_trait_ref_with_self_ty(tcx, source).as_predicate());
+ push(data.principal_trait_ref_with_self_ty(tcx, source).to_predicate());
// We can only make objects from sized types.
let mut builtin_bounds = data.bounds.builtin_bounds;
// for the Send check.)
for bound in &builtin_bounds {
if let Ok(tr) = util::trait_ref_for_builtin_bound(tcx, bound, source) {
- push(tr.as_predicate());
+ push(tr.to_predicate());
} else {
return Err(Unimplemented);
}
// Create obligations for the projection predicates.
for bound in data.projection_bounds_with_self_ty(tcx, source) {
- push(bound.as_predicate());
+ push(bound.to_predicate());
}
// If the type is `Foo+'a`, ensures that the type
// being cast to `Foo+'a` outlives `'a`:
let outlives = ty::OutlivesPredicate(source,
data.bounds.region_bound);
- push(ty::Binder(outlives).as_predicate());
+ push(ty::Binder(outlives).to_predicate());
}
// [T; n] -> [T].
// Struct<T> -> Struct<U>.
(&ty::TyStruct(def_id, substs_a), &ty::TyStruct(_, substs_b)) => {
- let fields = ty::lookup_struct_fields(tcx, def_id).iter().map(|f| {
- ty::lookup_field_type_unsubstituted(tcx, def_id, f.id)
+ let fields = tcx.lookup_struct_fields(def_id).iter().map(|f| {
+ tcx.lookup_field_type_unsubstituted(def_id, f.id)
}).collect::<Vec<_>>();
// The last field of the structure has to exist and contain type parameters.
return Err(Unimplemented);
};
let mut ty_params = vec![];
- ty::walk_ty(field, |ty| {
+ for ty in field.walk() {
if let ty::TyParam(p) = ty.sty {
assert!(p.space == TypeSpace);
let idx = p.idx as usize;
ty_params.push(idx);
}
}
- });
+ }
if ty_params.is_empty() {
return Err(Unimplemented);
}
for &i in &ty_params {
new_substs.types.get_mut_slice(TypeSpace)[i] = tcx.types.err;
}
- for &ty in fields.init() {
- if ty::type_is_error(ty.subst(tcx, &new_substs)) {
+ for &ty in fields.split_last().unwrap().1 {
+ if ty.subst(tcx, &new_substs).references_error() {
return Err(Unimplemented);
}
}
let param_b = *substs_b.types.get(TypeSpace, i);
new_substs.types.get_mut_slice(TypeSpace)[i] = param_b;
}
- let new_struct = ty::mk_struct(tcx, def_id, tcx.mk_substs(new_substs));
+ let new_struct = tcx.mk_struct(def_id, tcx.mk_substs(new_substs));
let origin = infer::Misc(obligation.cause.span);
if self.infcx.sub_types(false, origin, new_struct, target).is_err() {
return Err(Unimplemented);
-> Result<(Normalized<'tcx, Substs<'tcx>>,
infer::SkolemizationMap), ()>
{
- let impl_trait_ref = ty::impl_trait_ref(self.tcx(), impl_def_id).unwrap();
+ let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// Before we create the substitutions and everything, first
// consider a "quick reject". This avoids creating more types
/// Returns `Ok` if `poly_trait_ref` being true implies that the
/// obligation is satisfied.
- fn match_poly_trait_ref(&mut self,
+ fn match_poly_trait_ref(&self,
obligation: &TraitObligation<'tcx>,
poly_trait_ref: ty::PolyTraitRef<'tcx>)
-> Result<(),()>
impl_def_id);
// Find the self type for the impl.
- let impl_self_ty = ty::lookup_item_type(self.tcx(), impl_def_id).ty;
+ let impl_self_ty = self.tcx().lookup_item_type(impl_def_id).ty;
let impl_self_ty = impl_self_ty.subst(self.tcx(), &impl_substs);
debug!("match_impl_self_types(obligation_self_ty={:?}, impl_self_ty={:?})",
fn closure_trait_ref_unnormalized(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: ast::DefId,
- substs: &Substs<'tcx>)
+ substs: &ty::ClosureSubsts<'tcx>)
-> ty::PolyTraitRef<'tcx>
{
- let closure_type = self.closure_typer.closure_type(closure_def_id, substs);
+ let closure_type = self.infcx.closure_type(closure_def_id, substs);
let ty::Binder((trait_ref, _)) =
util::closure_trait_ref_and_return_type(self.tcx(),
obligation.predicate.def_id(),
fn closure_trait_ref(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: ast::DefId,
- substs: &Substs<'tcx>)
+ substs: &ty::ClosureSubsts<'tcx>)
-> Normalized<'tcx, ty::PolyTraitRef<'tcx>>
{
let trait_ref = self.closure_trait_ref_unnormalized(
{
debug!("impl_or_trait_obligations(def_id={:?})", def_id);
- let predicates = ty::lookup_predicates(self.tcx(), def_id);
+ let predicates = self.tcx().lookup_predicates(def_id);
let predicates = predicates.instantiate(self.tcx(), substs);
let predicates = normalize_with_depth(self, cause.clone(), recursion_depth, &predicates);
let mut predicates = self.infcx().plug_leaks(skol_map, snapshot, &predicates);
obligation.cause.clone()
}
}
-
- /// Upcasts an object trait-reference into those that match the obligation.
- fn upcast(&mut self, obj_trait_ref: ty::PolyTraitRef<'tcx>, obligation: &TraitObligation<'tcx>)
- -> Vec<ty::PolyTraitRef<'tcx>>
- {
- debug!("upcast(obj_trait_ref={:?}, obligation={:?})",
- obj_trait_ref,
- obligation);
-
- let obligation_def_id = obligation.predicate.def_id();
- let mut upcast_trait_refs = util::upcast(self.tcx(), obj_trait_ref, obligation_def_id);
-
- // Retain only those upcast versions that match the trait-ref
- // we are looking for. In particular, we know that all of
- // `upcast_trait_refs` apply to the correct trait, but
- // possibly with incorrect type parameters. For example, we
- // may be trying to upcast `Foo` to `Bar<i32>`, but `Foo` is
- // declared as `trait Foo : Bar<u32>`.
- upcast_trait_refs.retain(|upcast_trait_ref| {
- let upcast_trait_ref = upcast_trait_ref.clone();
- self.infcx.probe(|_| self.match_poly_trait_ref(obligation, upcast_trait_ref)).is_ok()
- });
-
- debug!("upcast: upcast_trait_refs={:?}", upcast_trait_refs);
- upcast_trait_refs
- }
}
impl<'tcx> SelectionCache<'tcx> {
use middle::subst::Substs;
use middle::infer::InferCtxt;
-use middle::ty::{self, Ty, AsPredicate, ToPolyTraitRef};
+use middle::ty::{self, Ty, ToPredicate, ToPolyTraitRef};
use std::fmt;
use syntax::ast;
use syntax::codemap::Span;
// regions before we throw things into the underlying set.
let normalized_pred = match *pred {
ty::Predicate::Trait(ref data) =>
- ty::Predicate::Trait(ty::anonymize_late_bound_regions(self.tcx, data)),
+ ty::Predicate::Trait(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::Equate(ref data) =>
- ty::Predicate::Equate(ty::anonymize_late_bound_regions(self.tcx, data)),
+ ty::Predicate::Equate(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::RegionOutlives(ref data) =>
- ty::Predicate::RegionOutlives(ty::anonymize_late_bound_regions(self.tcx, data)),
+ ty::Predicate::RegionOutlives(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::TypeOutlives(ref data) =>
- ty::Predicate::TypeOutlives(ty::anonymize_late_bound_regions(self.tcx, data)),
+ ty::Predicate::TypeOutlives(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::Projection(ref data) =>
- ty::Predicate::Projection(ty::anonymize_late_bound_regions(self.tcx, data)),
+ ty::Predicate::Projection(self.tcx.anonymize_late_bound_regions(data)),
};
self.set.insert(normalized_pred)
}
trait_ref: ty::PolyTraitRef<'tcx>)
-> Elaborator<'cx, 'tcx>
{
- elaborate_predicates(tcx, vec![trait_ref.as_predicate()])
+ elaborate_predicates(tcx, vec![trait_ref.to_predicate()])
}
pub fn elaborate_trait_refs<'cx, 'tcx>(
-> Elaborator<'cx, 'tcx>
{
let predicates = trait_refs.iter()
- .map(|trait_ref| trait_ref.as_predicate())
+ .map(|trait_ref| trait_ref.to_predicate())
.collect();
elaborate_predicates(tcx, predicates)
}
match *predicate {
ty::Predicate::Trait(ref data) => {
// Predicates declared on the trait.
- let predicates = ty::lookup_super_predicates(self.tcx, data.def_id());
+ let predicates = self.tcx.lookup_super_predicates(data.def_id());
let mut predicates: Vec<_> =
predicates.predicates
None => { return None; }
};
- let predicates = ty::lookup_super_predicates(self.tcx, def_id);
+ let predicates = self.tcx.lookup_super_predicates(def_id);
let visited = &mut self.visited;
self.stack.extend(
predicates.predicates
-> Substs<'tcx>
{
let tcx = infcx.tcx;
- let impl_generics = ty::lookup_item_type(tcx, impl_def_id).generics;
+ let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
infcx.fresh_substs_for_generics(span, &impl_generics)
}
Obligation {
cause: cause,
recursion_depth: recursion_depth,
- predicate: trait_ref.as_predicate(),
+ predicate: trait_ref.to_predicate(),
}
}
.collect()
}
-/// Given an object of type `object_trait_ref`, returns the index of
-/// the method `n_method` found in the trait `trait_def_id` (which
-/// should be a supertrait of `object_trait_ref`) within the vtable
-/// for `object_trait_ref`.
-pub fn get_vtable_index_of_object_method<'tcx>(tcx: &ty::ctxt<'tcx>,
- object_trait_ref: ty::PolyTraitRef<'tcx>,
- trait_def_id: ast::DefId,
- method_offset_in_trait: usize) -> usize {
- // We need to figure the "real index" of the method in a
- // listing of all the methods of an object. We do this by
- // iterating down the supertraits of the object's trait until
- // we find the trait the method came from, counting up the
- // methods from them.
- let mut method_count = 0;
-
- for bound_ref in transitive_bounds(tcx, &[object_trait_ref]) {
- if bound_ref.def_id() == trait_def_id {
- break;
- }
-
- let trait_items = ty::trait_items(tcx, bound_ref.def_id());
- for trait_item in trait_items.iter() {
- match *trait_item {
- ty::MethodTraitItem(_) => method_count += 1,
- _ => {}
- }
+/// Given a trait `trait_ref`, returns the number of vtable entries
+/// that come from `trait_ref`, excluding its supertraits. Used in
+/// computing the vtable base for an upcast trait of a trait object.
+pub fn count_own_vtable_entries<'tcx>(tcx: &ty::ctxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>)
+ -> usize {
+ let mut entries = 0;
+    // Count the number of methods in this trait's own vtable slice,
+    // skipping over associated types and constants.
+ for trait_item in &tcx.trait_items(trait_ref.def_id())[..] {
+ if let ty::MethodTraitItem(_) = *trait_item {
+ entries += 1;
}
}
+ entries
+}
- // count number of methods preceding the one we are selecting and
- // add them to the total offset; skip over associated types.
- let trait_items = ty::trait_items(tcx, trait_def_id);
- for trait_item in trait_items.iter().take(method_offset_in_trait) {
- match *trait_item {
- ty::MethodTraitItem(_) => method_count += 1,
- _ => {}
+/// Given an upcast trait object described by `object`, returns the
+/// index of the method `method_def_id` (which should be part of
+/// `object.upcast_trait_ref`) within the vtable for `object`.
+pub fn get_vtable_index_of_object_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+ object: &super::VtableObjectData<'tcx>,
+ method_def_id: ast::DefId) -> usize {
+ // Count number of methods preceding the one we are selecting and
+ // add them to the total offset.
+ // Skip over associated types and constants.
+ let mut entries = object.vtable_base;
+ for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] {
+ if trait_item.def_id() == method_def_id {
+ // The item with the ID we were given really ought to be a method.
+ assert!(match *trait_item {
+ ty::MethodTraitItem(_) => true,
+ _ => false
+ });
+
+ return entries;
+ }
+ if let ty::MethodTraitItem(_) = *trait_item {
+ entries += 1;
}
}
- // the item at the offset we were given really ought to be a method
- assert!(match trait_items[method_offset_in_trait] {
- ty::MethodTraitItem(_) => true,
- _ => false
- });
-
- method_count
+ tcx.sess.bug(&format!("get_vtable_index_of_object_method: {:?} was not found",
+ method_def_id));
}
pub enum TupleArgumentsFlag { Yes, No }
{
let arguments_tuple = match tuple_arguments {
TupleArgumentsFlag::No => sig.0.inputs[0],
- TupleArgumentsFlag::Yes => ty::mk_tup(tcx, sig.0.inputs.to_vec()),
+ TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()),
};
let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,
substs: tcx.mk_substs(trait_substs),
};
- ty::Binder((trait_ref, sig.0.output.unwrap_or(ty::mk_nil(tcx))))
+ ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil())))
}
impl<'tcx,O:fmt::Debug> fmt::Debug for super::Obligation<'tcx, O> {
write!(f, "VtableFnPointer({:?})", d),
super::VtableObject(ref d) =>
- write!(f, "VtableObject({:?})", d),
+ write!(f, "{:?}", d),
super::VtableParam(ref n) =>
write!(f, "VtableParam({:?})", n),
impl<'tcx> fmt::Debug for super::VtableObjectData<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "VtableObject(object_ty={:?})", self.object_ty)
+ write!(f, "VtableObject(upcast={:?}, vtable_base={})",
+ self.upcast_trait_ref,
+ self.vtable_base)
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// FIXME: (@jroesch) @eddyb should remove this when he renames ctxt
#![allow(non_camel_case_types)]
-pub use self::terr_vstore_kind::*;
-pub use self::type_err::*;
pub use self::InferTy::*;
pub use self::InferRegion::*;
pub use self::ImplOrTraitItemId::*;
pub use self::BoundRegion::*;
pub use self::TypeVariants::*;
pub use self::IntVarValue::*;
-pub use self::MethodOrigin::*;
pub use self::CopyImplementationError::*;
pub use self::BuiltinBound::Send as BoundSend;
use middle;
use middle::cast;
use middle::check_const;
-use middle::const_eval::{self, ConstVal};
+use middle::const_eval::{self, ConstVal, ErrKind};
+use middle::const_eval::EvalHint::UncheckedExprHint;
use middle::def::{self, DefMap, ExportMap};
use middle::dependency_format;
use middle::fast_reject;
use middle::free_region::FreeRegionMap;
-use middle::infer::error_reporting::note_and_explain_region;
use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem};
-use middle::mem_categorization as mc;
use middle::region;
use middle::resolve_lifetime;
use middle::infer;
+use middle::infer::type_variable;
use middle::pat_util;
use middle::region::RegionMaps;
use middle::stability;
use syntax::abi;
use syntax::ast::{CrateNum, DefId, ItemImpl, ItemTrait, LOCAL_CRATE};
use syntax::ast::{MutImmutable, MutMutable, Name, NamedField, NodeId};
-use syntax::ast::{StmtExpr, StmtSemi, StructField, UnnamedField, Visibility};
+use syntax::ast::{StructField, UnnamedField, Visibility};
use syntax::ast_util::{self, is_local, local_def};
use syntax::attr::{self, AttrMetaMethods, SignedInt, UnsignedInt};
use syntax::codemap::Span;
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct field<'tcx> {
+pub struct Field<'tcx> {
pub name: ast::Name,
- pub mt: mt<'tcx>
+ pub mt: TypeAndMut<'tcx>
}
-#[derive(Clone, Copy, Debug)]
+// Enum information
+#[derive(Clone)]
+pub struct VariantInfo<'tcx> {
+ pub args: Vec<Ty<'tcx>>,
+ pub arg_names: Option<Vec<ast::Name>>,
+ pub ctor_ty: Option<Ty<'tcx>>,
+ pub name: ast::Name,
+ pub id: ast::DefId,
+ pub disr_val: Disr,
+ pub vis: Visibility
+}
+
+impl<'tcx> VariantInfo<'tcx> {
+
+ /// Creates a new VariantInfo from the corresponding ast representation.
+ ///
+ /// Does not do any caching of the value in the type context.
+ pub fn from_ast_variant(cx: &ctxt<'tcx>,
+ ast_variant: &ast::Variant,
+ discriminant: Disr) -> VariantInfo<'tcx> {
+ let ctor_ty = cx.node_id_to_type(ast_variant.node.id);
+
+ match ast_variant.node.kind {
+ ast::TupleVariantKind(ref args) => {
+ let arg_tys = if !args.is_empty() {
+ // the regions in the argument types come from the
+ // enum def'n, and hence will all be early bound
+ cx.no_late_bound_regions(&ctor_ty.fn_args()).unwrap()
+ } else {
+ Vec::new()
+ };
+
+ return VariantInfo {
+ args: arg_tys,
+ arg_names: None,
+ ctor_ty: Some(ctor_ty),
+ name: ast_variant.node.name.name,
+ id: ast_util::local_def(ast_variant.node.id),
+ disr_val: discriminant,
+ vis: ast_variant.node.vis
+ };
+ },
+ ast::StructVariantKind(ref struct_def) => {
+ let fields: &[StructField] = &struct_def.fields;
+
+ assert!(!fields.is_empty());
+
+ let arg_tys = struct_def.fields.iter()
+ .map(|field| cx.node_id_to_type(field.node.id)).collect();
+ let arg_names = fields.iter().map(|field| {
+ match field.node.kind {
+ NamedField(ident, _) => ident.name,
+ UnnamedField(..) => cx.sess.bug(
+ "enum_variants: all fields in struct must have a name")
+ }
+ }).collect();
+
+ return VariantInfo {
+ args: arg_tys,
+ arg_names: Some(arg_names),
+ ctor_ty: None,
+ name: ast_variant.node.name.name,
+ id: ast_util::local_def(ast_variant.node.id),
+ disr_val: discriminant,
+ vis: ast_variant.node.vis
+ };
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum DtorKind {
+ NoDtor,
+ TraitDtor(DefId, bool)
+}
+
+impl DtorKind {
+ pub fn is_present(&self) -> bool {
+ match *self {
+ TraitDtor(..) => true,
+ _ => false
+ }
+ }
+
+ pub fn has_drop_flag(&self) -> bool {
+ match self {
+ &NoDtor => false,
+ &TraitDtor(_, flag) => flag
+ }
+ }
+}
+
+trait IntTypeExt {
+ fn to_ty<'tcx>(&self, cx: &ctxt<'tcx>) -> Ty<'tcx>;
+ fn i64_to_disr(&self, val: i64) -> Option<Disr>;
+ fn u64_to_disr(&self, val: u64) -> Option<Disr>;
+ fn disr_incr(&self, val: Disr) -> Option<Disr>;
+ fn disr_string(&self, val: Disr) -> String;
+ fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr;
+}
+
+impl IntTypeExt for attr::IntType {
+ fn to_ty<'tcx>(&self, cx: &ctxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ SignedInt(ast::TyI8) => cx.types.i8,
+ SignedInt(ast::TyI16) => cx.types.i16,
+ SignedInt(ast::TyI32) => cx.types.i32,
+ SignedInt(ast::TyI64) => cx.types.i64,
+ SignedInt(ast::TyIs) => cx.types.isize,
+ UnsignedInt(ast::TyU8) => cx.types.u8,
+ UnsignedInt(ast::TyU16) => cx.types.u16,
+ UnsignedInt(ast::TyU32) => cx.types.u32,
+ UnsignedInt(ast::TyU64) => cx.types.u64,
+ UnsignedInt(ast::TyUs) => cx.types.usize,
+ }
+ }
+
+ fn i64_to_disr(&self, val: i64) -> Option<Disr> {
+ match *self {
+ SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr),
+ SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr),
+ SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr),
+ SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU16) => val.to_u16() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr),
+
+ UnsignedInt(ast::TyUs) |
+ SignedInt(ast::TyIs) => unreachable!(),
+ }
+ }
+
+ fn u64_to_disr(&self, val: u64) -> Option<Disr> {
+ match *self {
+ SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr),
+ SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr),
+ SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr),
+ SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU16) => val.to_u16() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr),
+ UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr),
+
+ UnsignedInt(ast::TyUs) |
+ SignedInt(ast::TyIs) => unreachable!(),
+ }
+ }
+
+ fn disr_incr(&self, val: Disr) -> Option<Disr> {
+ macro_rules! add1 {
+ ($e:expr) => { $e.and_then(|v|v.checked_add(1)).map(|v| v as Disr) }
+ }
+ match *self {
+ // SignedInt repr means we *want* to reinterpret the bits
+ // treating the highest bit of Disr as a sign-bit, so
+ // cast to i64 before range-checking.
+ SignedInt(ast::TyI8) => add1!((val as i64).to_i8()),
+ SignedInt(ast::TyI16) => add1!((val as i64).to_i16()),
+ SignedInt(ast::TyI32) => add1!((val as i64).to_i32()),
+ SignedInt(ast::TyI64) => add1!(Some(val as i64)),
+
+ UnsignedInt(ast::TyU8) => add1!(val.to_u8()),
+ UnsignedInt(ast::TyU16) => add1!(val.to_u16()),
+ UnsignedInt(ast::TyU32) => add1!(val.to_u32()),
+ UnsignedInt(ast::TyU64) => add1!(Some(val)),
+
+ UnsignedInt(ast::TyUs) |
+ SignedInt(ast::TyIs) => unreachable!(),
+ }
+ }
+
+ // This returns a String because (1.) it is only used for
+ // rendering an error message and (2.) a string can represent the
+ // full range from `i64::MIN` through `u64::MAX`.
+ fn disr_string(&self, val: Disr) -> String {
+ match *self {
+ SignedInt(ast::TyI8) => format!("{}", val as i8 ),
+ SignedInt(ast::TyI16) => format!("{}", val as i16),
+ SignedInt(ast::TyI32) => format!("{}", val as i32),
+ SignedInt(ast::TyI64) => format!("{}", val as i64),
+ UnsignedInt(ast::TyU8) => format!("{}", val as u8 ),
+ UnsignedInt(ast::TyU16) => format!("{}", val as u16),
+ UnsignedInt(ast::TyU32) => format!("{}", val as u32),
+ UnsignedInt(ast::TyU64) => format!("{}", val as u64),
+
+ UnsignedInt(ast::TyUs) |
+ SignedInt(ast::TyIs) => unreachable!(),
+ }
+ }
+
+ fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr {
+ macro_rules! add1 {
+ ($e:expr) => { ($e).wrapping_add(1) as Disr }
+ }
+ let val = val.unwrap_or(ty::INITIAL_DISCRIMINANT_VALUE);
+ match *self {
+ SignedInt(ast::TyI8) => add1!(val as i8 ),
+ SignedInt(ast::TyI16) => add1!(val as i16),
+ SignedInt(ast::TyI32) => add1!(val as i32),
+ SignedInt(ast::TyI64) => add1!(val as i64),
+ UnsignedInt(ast::TyU8) => add1!(val as u8 ),
+ UnsignedInt(ast::TyU16) => add1!(val as u16),
+ UnsignedInt(ast::TyU32) => add1!(val as u32),
+ UnsignedInt(ast::TyU64) => add1!(val as u64),
+
+ UnsignedInt(ast::TyUs) |
+ SignedInt(ast::TyIs) => unreachable!(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ImplOrTraitItemContainer {
TraitContainer(ast::DefId),
ImplContainer(ast::DefId),
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct mt<'tcx> {
+pub struct TypeAndMut<'tcx> {
pub ty: Ty<'tcx>,
pub mutbl: ast::Mutability,
}
#[derive(Clone, Copy, Debug)]
-pub struct field_ty {
+pub struct FieldTy {
pub name: Name,
pub id: DefId,
pub vis: ast::Visibility,
Bivariant, // T<A> <: T<B> -- e.g., unused type parameter
}
+impl fmt::Debug for Variance {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(match *self {
+ Covariant => "+",
+ Contravariant => "-",
+ Invariant => "o",
+ Bivariant => "*",
+ })
+ }
+}
+
#[derive(Copy, Clone)]
pub enum AutoAdjustment<'tcx> {
AdjustReifyFnPointer, // go from a fn-item type to a fn-pointer type
Struct(usize)
}
-#[derive(Clone)]
-pub enum MethodOrigin<'tcx> {
- // fully statically resolved method
- MethodStatic(ast::DefId),
-
- // fully statically resolved closure invocation
- MethodStaticClosure(ast::DefId),
-
- // method invoked on a type parameter with a bounded trait
- MethodTypeParam(MethodParam<'tcx>),
-
- // method invoked on a trait instance
- MethodTraitObject(MethodObject<'tcx>),
-
-}
-
-// details for a method invoked with a receiver whose type is a type parameter
-// with a bounded trait.
-#[derive(Clone)]
-pub struct MethodParam<'tcx> {
- // the precise trait reference that occurs as a bound -- this may
- // be a supertrait of what the user actually typed. Note that it
- // never contains bound regions; those regions should have been
- // instantiated with fresh variables at this point.
- pub trait_ref: ty::TraitRef<'tcx>,
-
- // index of usize in the list of trait items. Note that this is NOT
- // the index into the vtable, because the list of trait items
- // includes associated types.
- pub method_num: usize,
-
- /// The impl for the trait from which the method comes. This
- /// should only be used for certain linting/heuristic purposes
- /// since there is no guarantee that this is Some in every
- /// situation that it could/should be.
- pub impl_def_id: Option<ast::DefId>,
-}
-
-// details for a method invoked with a receiver whose type is an object
-#[derive(Clone)]
-pub struct MethodObject<'tcx> {
- // the (super)trait containing the method to be invoked
- pub trait_ref: TraitRef<'tcx>,
-
- // the actual base trait id of the object
- pub object_trait_id: ast::DefId,
-
- // index of the method to be invoked amongst the trait's items
- pub method_num: usize,
-
- // index into the actual runtime vtable.
- // the vtable is formed by concatenating together the method lists of
- // the base object trait and all supertraits; this is the index into
- // that vtable
- pub vtable_index: usize,
-}
-
-#[derive(Clone, Debug)]
+#[derive(Clone, Copy, Debug)]
pub struct MethodCallee<'tcx> {
- pub origin: MethodOrigin<'tcx>,
+ /// Impl method ID, for inherent methods, or trait method ID, otherwise.
+ pub def_id: ast::DefId,
pub ty: Ty<'tcx>,
- pub substs: subst::Substs<'tcx>
+ pub substs: &'tcx subst::Substs<'tcx>
}
/// With method calls, we store some extra information in
// maps from an expression id that corresponds to a method call to the details
// of the method to be invoked
-pub type MethodMap<'tcx> = RefCell<FnvHashMap<MethodCall, MethodCallee<'tcx>>>;
+pub type MethodMap<'tcx> = FnvHashMap<MethodCall, MethodCallee<'tcx>>;
// Contains information needed to resolve types and (in the future) look up
// the types of AST nodes.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct creader_cache_key {
+pub struct CReaderCacheKey {
pub cnum: CrateNum,
pub pos: usize,
pub len: usize
pub err: Ty<'tcx>,
}
-/// The data structure to keep track of all the information that typechecker
-/// generates so that so that it can be reused and doesn't have to be redone
-/// later on.
-pub struct ctxt<'tcx> {
- /// The arenas that types etc are allocated from.
- arenas: &'tcx CtxtArenas<'tcx>,
+pub struct Tables<'tcx> {
+ /// Stores the types for various nodes in the AST. Note that this table
+ /// is not guaranteed to be populated until after typeck. See
+ /// typeck::check::fn_ctxt for details.
+ pub node_types: NodeMap<Ty<'tcx>>,
- /// Specifically use a speedy hash algorithm for this hash map, it's used
- /// quite often.
+ /// Stores the type parameters which were substituted to obtain the type
+ /// of this node. This only applies to nodes that refer to entities
+ /// parameterized by type parameters, such as generic fns, types, or
+ /// other items.
+ pub item_substs: NodeMap<ItemSubsts<'tcx>>,
+
+ pub adjustments: NodeMap<ty::AutoAdjustment<'tcx>>,
+
+ pub method_map: MethodMap<'tcx>,
+
+ /// Borrows
+ pub upvar_capture_map: UpvarCaptureMap,
+
+ /// Records the type of each closure. The def ID is the ID of the
+ /// expression defining the closure.
+ pub closure_tys: DefIdMap<ClosureTy<'tcx>>,
+
+    /// Records the kind of each closure. The def ID is the ID of the
+    /// expression defining the closure.
+ pub closure_kinds: DefIdMap<ClosureKind>,
+}
+
+impl<'tcx> Tables<'tcx> {
+ pub fn empty() -> Tables<'tcx> {
+ Tables {
+ node_types: FnvHashMap(),
+ item_substs: NodeMap(),
+ adjustments: NodeMap(),
+ method_map: FnvHashMap(),
+ upvar_capture_map: FnvHashMap(),
+ closure_tys: DefIdMap(),
+ closure_kinds: DefIdMap(),
+ }
+ }
+}
+
+/// The data structure to keep track of all the information that the
+/// typechecker generates so that it can be reused and doesn't have to be
+/// redone later on.
+pub struct ctxt<'tcx> {
+ /// The arenas that types etc are allocated from.
+ arenas: &'tcx CtxtArenas<'tcx>,
+
+ /// Specifically use a speedy hash algorithm for this hash map, it's used
+ /// quite often.
// FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
// queried from a HashSet.
interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
// borrowck. (They are not used during trans, and hence are not
// serialized or needed for cross-crate fns.)
free_region_maps: RefCell<NodeMap<FreeRegionMap>>,
+ // FIXME: jroesch make this a refcell
- /// Stores the types for various nodes in the AST. Note that this table
- /// is not guaranteed to be populated until after typeck. See
- /// typeck::check::fn_ctxt for details.
- node_types: RefCell<NodeMap<Ty<'tcx>>>,
-
- /// Stores the type parameters which were substituted to obtain the type
- /// of this node. This only applies to nodes that refer to entities
- /// parameterized by type parameters, such as generic fns, types, or
- /// other items.
- pub item_substs: RefCell<NodeMap<ItemSubsts<'tcx>>>,
+ pub tables: RefCell<Tables<'tcx>>,
/// Maps from a trait item to the trait item "descriptor"
pub impl_or_trait_items: RefCell<DefIdMap<ImplOrTraitItem<'tcx>>>,
pub map: ast_map::Map<'tcx>,
pub freevars: RefCell<FreevarMap>,
pub tcache: RefCell<DefIdMap<TypeScheme<'tcx>>>,
- pub rcache: RefCell<FnvHashMap<creader_cache_key, Ty<'tcx>>>,
+ pub rcache: RefCell<FnvHashMap<CReaderCacheKey, Ty<'tcx>>>,
pub tc_cache: RefCell<FnvHashMap<Ty<'tcx>, TypeContents>>,
pub ast_ty_to_ty_cache: RefCell<NodeMap<Ty<'tcx>>>,
pub enum_var_cache: RefCell<DefIdMap<Rc<Vec<Rc<VariantInfo<'tcx>>>>>>,
pub ty_param_defs: RefCell<NodeMap<TypeParameterDef<'tcx>>>,
- pub adjustments: RefCell<NodeMap<AutoAdjustment<'tcx>>>,
pub normalized_cache: RefCell<FnvHashMap<Ty<'tcx>, Ty<'tcx>>>,
pub lang_items: middle::lang_items::LanguageItems,
/// A mapping of fake provided method def_ids to the default implementation
pub provided_method_sources: RefCell<DefIdMap<ast::DefId>>,
- pub struct_fields: RefCell<DefIdMap<Rc<Vec<field_ty>>>>,
+ pub struct_fields: RefCell<DefIdMap<Rc<Vec<FieldTy>>>>,
/// Maps from def-id of a type or region parameter to its
/// (inferred) variance.
/// FIXME(arielb1): why is this separate from populated_external_types?
pub populated_external_primitive_impls: RefCell<DefIdSet>,
- /// Borrows
- pub upvar_capture_map: RefCell<UpvarCaptureMap>,
-
/// These caches are used by const_eval when decoding external constants.
pub extern_const_statics: RefCell<DefIdMap<ast::NodeId>>,
pub extern_const_variants: RefCell<DefIdMap<ast::NodeId>>,
pub extern_const_fns: RefCell<DefIdMap<ast::NodeId>>,
- pub method_map: MethodMap<'tcx>,
-
pub dependency_formats: RefCell<dependency_format::Dependencies>,
- /// Records the type of each closure. The def ID is the ID of the
- /// expression defining the closure.
- pub closure_kinds: RefCell<DefIdMap<ClosureKind>>,
-
- /// Records the type of each closure. The def ID is the ID of the
- /// expression defining the closure.
- pub closure_tys: RefCell<DefIdMap<ClosureTy<'tcx>>>,
-
pub node_lint_levels: RefCell<FnvHashMap<(ast::NodeId, lint::LintId),
lint::LevelSource>>,
/// Maps a cast expression to its kind. This is keyed on the
/// *from* expression of the cast, not the cast itself.
pub cast_kinds: RefCell<NodeMap<cast::CastKind>>,
+
+ /// Maps Fn items to a collection of fragment infos.
+ ///
+ /// The main goal is to identify data (each of which may be moved
+ /// or assigned) whose subparts are not moved nor assigned
+ /// (i.e. their state is *unfragmented*) and corresponding ast
+ /// nodes where the path to that data is moved or assigned.
+ ///
+ /// In the long term, unfragmented values will have their
+ /// destructor entirely driven by a single stack-local drop-flag,
+ /// and their parents, the collections of the unfragmented values
+ /// (or more simply, "fragmented values"), are mapped to the
+ /// corresponding collections of stack-local drop-flags.
+ ///
+ /// (However, in the short term that is not the case; e.g. some
+ /// unfragmented paths still need to be zeroed, namely when they
+ /// reference parent data from an outer scope that was not
+ /// entirely moved, and therefore that needs to be zeroed so that
+ /// we do not get double-drop when we hit the end of the parent
+ /// scope.)
+ ///
+ /// Also: currently the table solely holds keys for node-ids of
+ /// unfragmented values (see `FragmentInfo` enum definition), but
+ /// longer-term we will need to also store mappings from
+ /// fragmented data to the set of unfragmented pieces that
+ /// constitute it.
+ pub fragment_infos: RefCell<DefIdMap<Vec<FragmentInfo>>>,
+}
+
+/// Describes the fragment-state associated with a NodeId.
+///
+/// Currently only unfragmented paths have entries in the table,
+/// but longer-term this enum is expected to expand to also
+/// include data for fragmented paths.
+#[derive(Copy, Clone, Debug)]
+pub enum FragmentInfo {
+ Moved { var: NodeId, move_expr: NodeId },
+ Assigned { var: NodeId, assign_expr: NodeId, assignee_id: NodeId },
}
impl<'tcx> ctxt<'tcx> {
- pub fn node_types(&self) -> Ref<NodeMap<Ty<'tcx>>> { self.node_types.borrow() }
+ pub fn node_types(&self) -> Ref<NodeMap<Ty<'tcx>>> {
+ fn projection<'a, 'tcx>(tables: &'a Tables<'tcx>) -> &'a NodeMap<Ty<'tcx>> {
+ &tables.node_types
+ }
+
+ Ref::map(self.tables.borrow(), projection)
+ }
+
pub fn node_type_insert(&self, id: NodeId, ty: Ty<'tcx>) {
- self.node_types.borrow_mut().insert(id, ty);
+ self.tables.borrow_mut().node_types.insert(id, ty);
}
pub fn intern_trait_def(&self, def: TraitDef<'tcx>) -> &'tcx TraitDef<'tcx> {
Some(ast_map::NodeTraitItem(..)) |
Some(ast_map::NodeVariant(..)) |
Some(ast_map::NodeStructCtor(..)) => {
- return write!(f, "{}", ty::item_path_str(tcx, def_id));
+ return write!(f, "{}", tcx.item_path_str(def_id));
}
_ => {}
}
}
}
-pub fn type_has_params(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_PARAMS)
-}
-pub fn type_has_self(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_SELF)
-}
-pub fn type_has_ty_infer(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_TY_INFER)
-}
-pub fn type_needs_infer(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER)
-}
-pub fn type_is_global(ty: Ty) -> bool {
- !ty.flags.get().intersects(TypeFlags::HAS_LOCAL_NAMES)
-}
-pub fn type_has_projection(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_PROJECTION)
-}
-pub fn type_has_ty_closure(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_TY_CLOSURE)
-}
-
-pub fn type_has_erasable_regions(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_RE_EARLY_BOUND |
- TypeFlags::HAS_RE_INFER |
- TypeFlags::HAS_FREE_REGIONS)
-}
-
-/// An "escaping region" is a bound region whose binder is not part of `t`.
-///
-/// So, for example, consider a type like the following, which has two binders:
-///
-/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
-/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
-/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
-///
-/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
-/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
-/// fn type*, that type has an escaping region: `'a`.
-///
-/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
-/// we already use the term "free region". It refers to the regions that we use to represent bound
-/// regions on a fn definition while we are typechecking its body.
-///
-/// To clarify, conceptually there is no particular difference between an "escaping" region and a
-/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
-/// binding level, one is generally required to do some sort of processing to a bound region, such
-/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
-/// represent the scope to which it is attached, etc. An escaping region represents a bound region
-/// for which this processing has not yet been done.
-pub fn type_has_escaping_regions(ty: Ty) -> bool {
- type_escapes_depth(ty, 0)
-}
-
-pub fn type_escapes_depth(ty: Ty, depth: u32) -> bool {
- ty.region_depth > depth
-}
-
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct BareFnTy<'tcx> {
pub unsafety: ast::Unsafety,
pub type UpvarCaptureMap = FnvHashMap<UpvarId, UpvarCapture>;
-impl Region {
- pub fn is_global(&self) -> bool {
- // does this represent a region that can be named in a global
- // way? used in fulfillment caching.
- match *self {
- ty::ReStatic | ty::ReEmpty => true,
- _ => false,
- }
- }
+#[derive(Copy, Clone)]
+pub struct ClosureUpvar<'tcx> {
+ pub def: def::Def,
+ pub span: Span,
+ pub ty: Ty<'tcx>,
+}
+impl Region {
pub fn is_bound(&self) -> bool {
match *self {
ty::ReEarlyBound(..) => true,
_ => false,
}
}
+
+ /// Returns the depth of `self` from the (1-based) binding level `depth`
+ pub fn from_depth(&self, depth: u32) -> Region {
+ match *self {
+ ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex {
+ depth: debruijn.depth - (depth - 1)
+ }, r),
+ r => r
+ }
+ }
}
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
TySlice(Ty<'tcx>),
/// A raw pointer. Written as `*mut T` or `*const T`
- TyRawPtr(mt<'tcx>),
+ TyRawPtr(TypeAndMut<'tcx>),
/// A reference; a pointer with an associated lifetime. Written as
    /// `&'a mut T` or `&'a T`.
- TyRef(&'tcx Region, mt<'tcx>),
+ TyRef(&'tcx Region, TypeAndMut<'tcx>),
/// If the def-id is Some(_), then this is the type of a specific
/// fn item. Otherwise, if None(_), it a fn pointer type.
/// The anonymous type of a closure. Used to represent the type of
/// `|a| a`.
- TyClosure(DefId, &'tcx Substs<'tcx>),
+ TyClosure(DefId, Box<ClosureSubsts<'tcx>>),
/// A tuple type. For example, `(i32, bool)`.
TyTuple(Vec<Ty<'tcx>>),
TyError,
}
+/// A closure can be modeled as a struct that looks like:
+///
+/// struct Closure<'l0...'li, T0...Tj, U0...Uk> {
+/// upvar0: U0,
+/// ...
+/// upvark: Uk
+/// }
+///
+/// where 'l0...'li and T0...Tj are the lifetime and type parameters
+/// in scope on the function that defined the closure, and U0...Uk are
+/// type parameters representing the types of its upvars (borrowed, if
+/// appropriate).
+///
+/// So, for example, given this function:
+///
+/// fn foo<'a, T>(data: &'a mut T) {
+/// do(|| data.count += 1)
+/// }
+///
+/// the type of the closure would be something like:
+///
+/// struct Closure<'a, T, U0> {
+/// data: U0
+/// }
+///
+/// Note that the type of the upvar is not specified in the struct.
+/// You may wonder how the impl would then be able to use the upvar,
+/// if it doesn't know its type? The answer is that the impl is
+/// (conceptually) not fully generic over Closure but rather tied to
+/// instances with the expected upvar types:
+///
+/// impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> {
+/// ...
+/// }
+///
+/// You can see that the *impl* fully specified the type of the upvar
+/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
+/// (Here, I am assuming that `data` is mut-borrowed.)
+///
+/// Now, the last question you may ask is: Why include the upvar types
+/// as extra type parameters? The reason for this design is that the
+/// upvar types can reference lifetimes that are internal to the
+/// creating function. In my example above, for example, the lifetime
+/// `'b` represents the extent of the closure itself; this is some
+/// subset of `foo`, probably just the extent of the call to
+/// `do()`. If we just had the lifetime/type parameters from the
+/// enclosing function, we couldn't name this lifetime `'b`. Note that
+/// there can also be lifetimes in the types of the upvars themselves,
+/// if one of them happens to be a reference to something that the
+/// creating fn owns.
+///
+/// OK, you say, so why not create a more minimal set of parameters
+/// that just includes the extra lifetime parameters? The answer is
+/// primarily that it would be hard --- we don't know at the time when
+/// we create the closure type what the full types of the upvars are,
+/// nor do we know which are borrowed and which are not. In this
+/// design, we can just supply a fresh type parameter and figure that
+/// out later.
+///
+/// All right, you say, but why include the type parameters from the
+/// original function then? The answer is that trans may need them
+/// when monomorphizing, and they may not appear in the upvars. A
+/// closure could capture no variables but still make use of some
+/// in-scope type parameter with a bound (e.g., if our example above
+/// had an extra `U: Default`, and the closure called `U::default()`).
+///
+/// There is another reason. This design (implicitly) prohibits
+/// closures from capturing themselves (except via a trait
+/// object). This simplifies closure inference considerably, since it
+/// means that when we infer the kind of a closure or its upvars, we
+/// don't have to handle cycles where the decisions we make for
+/// closure C wind up influencing the decisions we ought to make for
+/// closure C (which would then require fixed point iteration to
+/// handle). Plus it fixes an ICE. :P
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct ClosureSubsts<'tcx> {
+ /// Lifetime and type parameters from the enclosing function.
+ /// These are separated out because trans wants to pass them around
+ /// when monomorphizing.
+ pub func_substs: &'tcx Substs<'tcx>,
+
+ /// The types of the upvars. The list parallels the freevars and
+ /// `upvar_borrows` lists. These are kept distinct so that we can
+ /// easily index into them.
+ pub upvar_tys: Vec<Ty<'tcx>>
+}
+
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct TraitTy<'tcx> {
pub principal: ty::PolyTraitRef<'tcx>,
/// erase, or otherwise "discharge" these bound regions, we change the
/// type from `Binder<T>` to just `T` (see
/// e.g. `liberate_late_bound_regions`).
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Binder<T>(pub T);
impl<T> Binder<T> {
}
#[derive(Clone, Copy, Debug)]
-pub enum terr_vstore_kind {
- terr_vec,
- terr_str,
- terr_fn,
- terr_trait
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct expected_found<T> {
+pub struct ExpectedFound<T> {
pub expected: T,
pub found: T
}
// Data structures used in type unification
-#[derive(Clone, Copy, Debug)]
-pub enum type_err<'tcx> {
- terr_mismatch,
- terr_unsafety_mismatch(expected_found<ast::Unsafety>),
- terr_abi_mismatch(expected_found<abi::Abi>),
- terr_mutability,
- terr_box_mutability,
- terr_ptr_mutability,
- terr_ref_mutability,
- terr_vec_mutability,
- terr_tuple_size(expected_found<usize>),
- terr_fixed_array_size(expected_found<usize>),
- terr_ty_param_size(expected_found<usize>),
- terr_arg_count,
- terr_regions_does_not_outlive(Region, Region),
- terr_regions_not_same(Region, Region),
- terr_regions_no_overlap(Region, Region),
- terr_regions_insufficiently_polymorphic(BoundRegion, Region),
- terr_regions_overly_polymorphic(BoundRegion, Region),
- terr_sorts(expected_found<Ty<'tcx>>),
- terr_integer_as_char,
- terr_int_mismatch(expected_found<IntVarValue>),
- terr_float_mismatch(expected_found<ast::FloatTy>),
- terr_traits(expected_found<ast::DefId>),
- terr_builtin_bounds(expected_found<BuiltinBounds>),
- terr_variadic_mismatch(expected_found<bool>),
- terr_cyclic_ty,
- terr_convergence_mismatch(expected_found<bool>),
- terr_projection_name_mismatched(expected_found<ast::Name>),
- terr_projection_bounds_length(expected_found<usize>),
+#[derive(Clone, Debug)]
+pub enum TypeError<'tcx> {
+ Mismatch,
+ UnsafetyMismatch(ExpectedFound<ast::Unsafety>),
+ AbiMismatch(ExpectedFound<abi::Abi>),
+ Mutability,
+ BoxMutability,
+ PtrMutability,
+ RefMutability,
+ VecMutability,
+ TupleSize(ExpectedFound<usize>),
+ FixedArraySize(ExpectedFound<usize>),
+ TyParamSize(ExpectedFound<usize>),
+ ArgCount,
+ RegionsDoesNotOutlive(Region, Region),
+ RegionsNotSame(Region, Region),
+ RegionsNoOverlap(Region, Region),
+ RegionsInsufficientlyPolymorphic(BoundRegion, Region),
+ RegionsOverlyPolymorphic(BoundRegion, Region),
+ Sorts(ExpectedFound<Ty<'tcx>>),
+ IntegerAsChar,
+ IntMismatch(ExpectedFound<IntVarValue>),
+ FloatMismatch(ExpectedFound<ast::FloatTy>),
+ Traits(ExpectedFound<ast::DefId>),
+ BuiltinBoundsMismatch(ExpectedFound<BuiltinBounds>),
+ VariadicMismatch(ExpectedFound<bool>),
+ CyclicTy,
+ ConvergenceMismatch(ExpectedFound<bool>),
+ ProjectionNameMismatched(ExpectedFound<ast::Name>),
+ ProjectionBoundsLength(ExpectedFound<usize>),
+ TyParamDefaultMismatch(ExpectedFound<type_variable::Default<'tcx>>)
}
/// Bounds suitable for an existentially quantified type parameter
pub region_bound: ty::Region,
pub builtin_bounds: BuiltinBounds,
pub projection_bounds: Vec<PolyProjectionPredicate<'tcx>>,
-
- // If true, this TyTrait used a "default bound" in the surface
- // syntax. This makes no difference to the type system but is
- // handy for error reporting.
- pub region_bound_will_change: bool,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
self_ty: Ty<'tcx>) -> Vec<Predicate<'tcx>> {
self.iter().filter_map(|builtin_bound|
match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, self_ty) {
- Ok(trait_ref) => Some(trait_ref.as_predicate()),
+ Ok(trait_ref) => Some(trait_ref.to_predicate()),
Err(ErrorReported) => { None }
}
).collect()
Sync,
}
-/// An existential bound that does not implement any traits.
-pub fn region_existential_bound<'tcx>(r: ty::Region) -> ExistentialBounds<'tcx> {
- ty::ExistentialBounds { region_bound: r,
- builtin_bounds: BuiltinBounds::empty(),
- projection_bounds: Vec::new(),
- region_bound_will_change: false, }
-}
-
impl CLike for BuiltinBound {
fn to_usize(&self) -> usize {
*self as usize
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
pub index: u32,
+    pub default_def_id: DefId, // for use in error reporting about defaults
pub default: Option<Ty<'tcx>>,
pub object_lifetime_default: ObjectLifetimeDefault,
}
}
}
- pub fn instantiate(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>)
+ pub fn instantiate(&self, tcx: &ctxt<'tcx>, substs: &Substs<'tcx>)
-> InstantiatedPredicates<'tcx> {
InstantiatedPredicates {
predicates: self.predicates.subst(tcx, substs),
}
pub fn instantiate_supertrait(&self,
- tcx: &ty::ctxt<'tcx>,
+ tcx: &ctxt<'tcx>,
poly_trait_ref: &ty::PolyTraitRef<'tcx>)
-> InstantiatedPredicates<'tcx>
{
/// substitution in terms of what happens with bound regions. See
/// lengthy comment below for details.
pub fn subst_supertrait(&self,
- tcx: &ty::ctxt<'tcx>,
+ tcx: &ctxt<'tcx>,
trait_ref: &ty::PolyTraitRef<'tcx>)
-> ty::Predicate<'tcx>
{
Predicate::Projection(ty::Binder(data.subst(tcx, substs))),
}
}
-
- // Indicates whether this predicate references only 'global'
- // types/lifetimes that are the same regardless of what fn we are
- // in. This is used for caching. Errs on the side of returning
- // false.
- pub fn is_global(&self) -> bool {
- match *self {
- ty::Predicate::Trait(ref data) => {
- let substs = data.skip_binder().trait_ref.substs;
-
- substs.types.iter().all(|t| ty::type_is_global(t)) && {
- match substs.regions {
- subst::ErasedRegions => true,
- subst::NonerasedRegions(ref r) => r.iter().all(|r| r.is_global()),
- }
- }
- }
-
- _ => {
- false
- }
- }
- }
}
#[derive(Clone, PartialEq, Eq, Hash)]
}
}
-pub trait AsPredicate<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx>;
+pub trait ToPredicate<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx>;
}
-impl<'tcx> AsPredicate<'tcx> for TraitRef<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx> {
// we're about to add a binder, so let's check that we don't
// accidentally capture anything, or else that might be some
// weird debruijn accounting.
}
}
-impl<'tcx> AsPredicate<'tcx> for PolyTraitRef<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx> {
ty::Predicate::Trait(self.to_poly_trait_predicate())
}
}
-impl<'tcx> AsPredicate<'tcx> for PolyEquatePredicate<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx> {
Predicate::Equate(self.clone())
}
}
-impl<'tcx> AsPredicate<'tcx> for PolyRegionOutlivesPredicate {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate {
+ fn to_predicate(&self) -> Predicate<'tcx> {
Predicate::RegionOutlives(self.clone())
}
}
-impl<'tcx> AsPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx> {
Predicate::TypeOutlives(self.clone())
}
}
-impl<'tcx> AsPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
- fn as_predicate(&self) -> Predicate<'tcx> {
+impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
+ fn to_predicate(&self) -> Predicate<'tcx> {
Predicate::Projection(self.clone())
}
}
match impl_item.node {
ast::ConstImplItem(_, _) => {
let def_id = ast_util::local_def(id);
- let scheme = lookup_item_type(cx, def_id);
- let predicates = lookup_predicates(cx, def_id);
- construct_parameter_environment(cx,
- impl_item.span,
- &scheme.generics,
- &predicates,
- id)
+ let scheme = cx.lookup_item_type(def_id);
+ let predicates = cx.lookup_predicates(def_id);
+ cx.construct_parameter_environment(impl_item.span,
+ &scheme.generics,
+ &predicates,
+ id)
}
ast::MethodImplItem(_, ref body) => {
let method_def_id = ast_util::local_def(id);
- match ty::impl_or_trait_item(cx, method_def_id) {
+ match cx.impl_or_trait_item(method_def_id) {
MethodTraitItem(ref method_ty) => {
let method_generics = &method_ty.generics;
let method_bounds = &method_ty.predicates;
- construct_parameter_environment(
- cx,
+ cx.construct_parameter_environment(
impl_item.span,
method_generics,
method_bounds,
match *default {
Some(_) => {
let def_id = ast_util::local_def(id);
- let scheme = lookup_item_type(cx, def_id);
- let predicates = lookup_predicates(cx, def_id);
- construct_parameter_environment(cx,
- trait_item.span,
- &scheme.generics,
- &predicates,
- id)
+ let scheme = cx.lookup_item_type(def_id);
+ let predicates = cx.lookup_predicates(def_id);
+ cx.construct_parameter_environment(trait_item.span,
+ &scheme.generics,
+ &predicates,
+ id)
}
None => {
cx.sess.bug("ParameterEnvironment::from_item(): \
}
ast::MethodTraitItem(_, Some(ref body)) => {
let method_def_id = ast_util::local_def(id);
- match ty::impl_or_trait_item(cx, method_def_id) {
+ match cx.impl_or_trait_item(method_def_id) {
MethodTraitItem(ref method_ty) => {
let method_generics = &method_ty.generics;
let method_bounds = &method_ty.predicates;
- construct_parameter_environment(
- cx,
+ cx.construct_parameter_environment(
trait_item.span,
method_generics,
method_bounds,
ast::ItemFn(_, _, _, _, _, ref body) => {
// We assume this is a function.
let fn_def_id = ast_util::local_def(id);
- let fn_scheme = lookup_item_type(cx, fn_def_id);
- let fn_predicates = lookup_predicates(cx, fn_def_id);
-
- construct_parameter_environment(cx,
- item.span,
- &fn_scheme.generics,
- &fn_predicates,
- body.id)
+ let fn_scheme = cx.lookup_item_type(fn_def_id);
+ let fn_predicates = cx.lookup_predicates(fn_def_id);
+
+ cx.construct_parameter_environment(item.span,
+ &fn_scheme.generics,
+ &fn_predicates,
+ body.id)
}
ast::ItemEnum(..) |
ast::ItemStruct(..) |
ast::ItemConst(..) |
ast::ItemStatic(..) => {
let def_id = ast_util::local_def(id);
- let scheme = lookup_item_type(cx, def_id);
- let predicates = lookup_predicates(cx, def_id);
- construct_parameter_environment(cx,
- item.span,
- &scheme.generics,
- &predicates,
- id)
+ let scheme = cx.lookup_item_type(def_id);
+ let predicates = cx.lookup_predicates(def_id);
+ cx.construct_parameter_environment(item.span,
+ &scheme.generics,
+ &predicates,
+ id)
}
_ => {
cx.sess.span_bug(item.span,
}
}
}
+
+ pub fn can_type_implement_copy(&self, self_type: Ty<'tcx>, span: Span)
+ -> Result<(),CopyImplementationError> {
+ let tcx = self.tcx;
+
+ // FIXME: (@jroesch) float this code up
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(self.clone()), false);
+
+ let did = match self_type.sty {
+ ty::TyStruct(struct_did, substs) => {
+ let fields = tcx.struct_fields(struct_did, substs);
+ for field in &fields {
+ if infcx.type_moves_by_default(field.mt.ty, span) {
+ return Err(FieldDoesNotImplementCopy(field.name))
+ }
+ }
+ struct_did
+ }
+ ty::TyEnum(enum_did, substs) => {
+ let enum_variants = tcx.enum_variants(enum_did);
+ for variant in enum_variants.iter() {
+ for variant_arg_type in &variant.args {
+ let substd_arg_type =
+ variant_arg_type.subst(tcx, substs);
+ if infcx.type_moves_by_default(substd_arg_type, span) {
+ return Err(VariantDoesNotImplementCopy(variant.name))
+ }
+ }
+ }
+ enum_did
+ }
+ _ => return Err(TypeIsStructural),
+ };
+
+ if tcx.has_dtor(did) {
+ return Err(TypeHasDestructor)
+ }
+
+ Ok(())
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum CopyImplementationError {
+ FieldDoesNotImplementCopy(ast::Name),
+ VariantDoesNotImplementCopy(ast::Name),
+ TypeIsStructural,
+ TypeHasDestructor,
}
/// A "type scheme", in ML terminology, is a type combined with some
pub fn for_each_impl<F: FnMut(DefId)>(&self, tcx: &ctxt<'tcx>, mut f: F) {
- ty::populate_implementations_for_trait_if_necessary(tcx, self.trait_ref.def_id);
+ tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
for &impl_def_id in self.blanket_impls.borrow().iter() {
f(impl_def_id);
self_ty: Ty<'tcx>,
mut f: F)
{
- ty::populate_implementations_for_trait_if_necessary(tcx, self.trait_ref.def_id);
+ tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
for &impl_def_id in self.blanket_impls.borrow().iter() {
f(impl_def_id);
}
}
-pub trait ClosureTyper<'tcx> {
- fn tcx(&self) -> &ty::ctxt<'tcx> {
- self.param_env().tcx
- }
-
- fn param_env<'a>(&'a self) -> &'a ty::ParameterEnvironment<'a, 'tcx>;
-
- /// Is this a `Fn`, `FnMut` or `FnOnce` closure? During typeck,
- /// returns `None` if the kind of this closure has not yet been
- /// inferred.
- fn closure_kind(&self,
- def_id: ast::DefId)
- -> Option<ty::ClosureKind>;
-
- /// Returns the argument/return types of this closure.
- fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>;
-
- /// Returns the set of all upvars and their transformed
- /// types. During typeck, maybe return `None` if the upvar types
- /// have not yet been inferred.
- fn closure_upvars(&self,
- def_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ClosureUpvar<'tcx>>>;
-}
-
impl<'tcx> CommonTypes<'tcx> {
fn new(arena: &'tcx TypedArena<TyS<'tcx>>,
- interner: &mut FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>)
+ interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>)
-> CommonTypes<'tcx>
{
+ let mk = |sty| ctxt::intern_ty(arena, interner, sty);
CommonTypes {
- bool: intern_ty(arena, interner, TyBool),
- char: intern_ty(arena, interner, TyChar),
- err: intern_ty(arena, interner, TyError),
- isize: intern_ty(arena, interner, TyInt(ast::TyIs)),
- i8: intern_ty(arena, interner, TyInt(ast::TyI8)),
- i16: intern_ty(arena, interner, TyInt(ast::TyI16)),
- i32: intern_ty(arena, interner, TyInt(ast::TyI32)),
- i64: intern_ty(arena, interner, TyInt(ast::TyI64)),
- usize: intern_ty(arena, interner, TyUint(ast::TyUs)),
- u8: intern_ty(arena, interner, TyUint(ast::TyU8)),
- u16: intern_ty(arena, interner, TyUint(ast::TyU16)),
- u32: intern_ty(arena, interner, TyUint(ast::TyU32)),
- u64: intern_ty(arena, interner, TyUint(ast::TyU64)),
- f32: intern_ty(arena, interner, TyFloat(ast::TyF32)),
- f64: intern_ty(arena, interner, TyFloat(ast::TyF64)),
- }
- }
-}
-
-/// Create a type context and call the closure with a `&ty::ctxt` reference
-/// to the context. The closure enforces that the type context and any interned
-/// value (types, substs, etc.) can only be used while `ty::tls` has a valid
-/// reference to the context, to allow formatting values that need it.
-pub fn with_ctxt<'tcx, F, R>(s: Session,
- arenas: &'tcx CtxtArenas<'tcx>,
- def_map: DefMap,
- named_region_map: resolve_lifetime::NamedRegionMap,
- map: ast_map::Map<'tcx>,
- freevars: RefCell<FreevarMap>,
- region_maps: RegionMaps,
- lang_items: middle::lang_items::LanguageItems,
- stability: stability::Index<'tcx>,
- f: F) -> (Session, R)
- where F: FnOnce(&ctxt<'tcx>) -> R
-{
- let mut interner = FnvHashMap();
- let common_types = CommonTypes::new(&arenas.type_, &mut interner);
-
- tls::enter(ctxt {
- arenas: arenas,
- interner: RefCell::new(interner),
- substs_interner: RefCell::new(FnvHashMap()),
- bare_fn_interner: RefCell::new(FnvHashMap()),
- region_interner: RefCell::new(FnvHashMap()),
- stability_interner: RefCell::new(FnvHashMap()),
- types: common_types,
- named_region_map: named_region_map,
- region_maps: region_maps,
- free_region_maps: RefCell::new(FnvHashMap()),
- item_variance_map: RefCell::new(DefIdMap()),
- variance_computed: Cell::new(false),
- sess: s,
- def_map: def_map,
- node_types: RefCell::new(FnvHashMap()),
- item_substs: RefCell::new(NodeMap()),
- impl_trait_refs: RefCell::new(DefIdMap()),
- trait_defs: RefCell::new(DefIdMap()),
- predicates: RefCell::new(DefIdMap()),
- super_predicates: RefCell::new(DefIdMap()),
- fulfilled_predicates: RefCell::new(traits::FulfilledPredicates::new()),
- map: map,
- freevars: freevars,
- tcache: RefCell::new(DefIdMap()),
- rcache: RefCell::new(FnvHashMap()),
- tc_cache: RefCell::new(FnvHashMap()),
- ast_ty_to_ty_cache: RefCell::new(NodeMap()),
- enum_var_cache: RefCell::new(DefIdMap()),
- impl_or_trait_items: RefCell::new(DefIdMap()),
- trait_item_def_ids: RefCell::new(DefIdMap()),
- trait_items_cache: RefCell::new(DefIdMap()),
- ty_param_defs: RefCell::new(NodeMap()),
- adjustments: RefCell::new(NodeMap()),
- normalized_cache: RefCell::new(FnvHashMap()),
- lang_items: lang_items,
- provided_method_sources: RefCell::new(DefIdMap()),
- struct_fields: RefCell::new(DefIdMap()),
- destructor_for_type: RefCell::new(DefIdMap()),
- destructors: RefCell::new(DefIdSet()),
- inherent_impls: RefCell::new(DefIdMap()),
- impl_items: RefCell::new(DefIdMap()),
- used_unsafe: RefCell::new(NodeSet()),
- used_mut_nodes: RefCell::new(NodeSet()),
- populated_external_types: RefCell::new(DefIdSet()),
- populated_external_primitive_impls: RefCell::new(DefIdSet()),
- upvar_capture_map: RefCell::new(FnvHashMap()),
- extern_const_statics: RefCell::new(DefIdMap()),
- extern_const_variants: RefCell::new(DefIdMap()),
- extern_const_fns: RefCell::new(DefIdMap()),
- method_map: RefCell::new(FnvHashMap()),
- dependency_formats: RefCell::new(FnvHashMap()),
- closure_kinds: RefCell::new(DefIdMap()),
- closure_tys: RefCell::new(DefIdMap()),
- node_lint_levels: RefCell::new(FnvHashMap()),
- transmute_restrictions: RefCell::new(Vec::new()),
- stability: RefCell::new(stability),
- selection_cache: traits::SelectionCache::new(),
- repr_hint_cache: RefCell::new(DefIdMap()),
- const_qualif_map: RefCell::new(NodeMap()),
- custom_coerce_unsized_kinds: RefCell::new(DefIdMap()),
- cast_kinds: RefCell::new(NodeMap()),
- }, f)
-}
-
-// Type constructors
-
-impl<'tcx> ctxt<'tcx> {
- pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
- if let Some(substs) = self.substs_interner.borrow().get(&substs) {
- return *substs;
+ bool: mk(TyBool),
+ char: mk(TyChar),
+ err: mk(TyError),
+ isize: mk(TyInt(ast::TyIs)),
+ i8: mk(TyInt(ast::TyI8)),
+ i16: mk(TyInt(ast::TyI16)),
+ i32: mk(TyInt(ast::TyI32)),
+ i64: mk(TyInt(ast::TyI64)),
+ usize: mk(TyUint(ast::TyUs)),
+ u8: mk(TyUint(ast::TyU8)),
+ u16: mk(TyUint(ast::TyU16)),
+ u32: mk(TyUint(ast::TyU32)),
+ u64: mk(TyUint(ast::TyU64)),
+ f32: mk(TyFloat(ast::TyF32)),
+ f64: mk(TyFloat(ast::TyF64)),
}
-
- let substs = self.arenas.substs.alloc(substs);
- self.substs_interner.borrow_mut().insert(substs, substs);
- substs
}
+}
- /// Create an unsafe fn ty based on a safe fn ty.
- pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
- assert_eq!(bare_fn.unsafety, ast::Unsafety::Normal);
- let unsafe_fn_ty_a = self.mk_bare_fn(ty::BareFnTy {
- unsafety: ast::Unsafety::Unsafe,
- abi: bare_fn.abi,
- sig: bare_fn.sig.clone()
- });
- ty::mk_bare_fn(self, None, unsafe_fn_ty_a)
- }
-
- pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
- if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
- return *bare_fn;
- }
-
- let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
- self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
- bare_fn
- }
+struct FlagComputation {
+ flags: TypeFlags,
- pub fn mk_region(&self, region: Region) -> &'tcx Region {
- if let Some(region) = self.region_interner.borrow().get(®ion) {
- return *region;
- }
+ // maximum depth of any bound region that we have seen thus far
+ depth: u32,
+}
- let region = self.arenas.region.alloc(region);
- self.region_interner.borrow_mut().insert(region, region);
- region
+impl FlagComputation {
+ fn new() -> FlagComputation {
+ FlagComputation { flags: TypeFlags::empty(), depth: 0 }
}
- pub fn closure_kind(&self, def_id: ast::DefId) -> ty::ClosureKind {
- *self.closure_kinds.borrow().get(&def_id).unwrap()
+ fn for_sty(st: &TypeVariants) -> FlagComputation {
+ let mut result = FlagComputation::new();
+ result.add_sty(st);
+ result
}
- pub fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- self.closure_tys.borrow().get(&def_id).unwrap().subst(self, substs)
+ fn add_flags(&mut self, flags: TypeFlags) {
+ self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS);
}
- pub fn type_parameter_def(&self,
- node_id: ast::NodeId)
- -> TypeParameterDef<'tcx>
- {
- self.ty_param_defs.borrow().get(&node_id).unwrap().clone()
+ fn add_depth(&mut self, depth: u32) {
+ if depth > self.depth {
+ self.depth = depth;
+ }
}
- pub fn pat_contains_ref_binding(&self, pat: &ast::Pat) -> Option<ast::Mutability> {
- pat_util::pat_contains_ref_binding(&self.def_map, pat)
- }
-
- pub fn arm_contains_ref_binding(&self, arm: &ast::Arm) -> Option<ast::Mutability> {
- pat_util::arm_contains_ref_binding(&self.def_map, arm)
- }
-}
-
-// Interns a type/name combination, stores the resulting box in cx.interner,
-// and returns the box as cast to an unsafe ptr (see comments for Ty above).
-pub fn mk_t<'tcx>(cx: &ctxt<'tcx>, st: TypeVariants<'tcx>) -> Ty<'tcx> {
- let mut interner = cx.interner.borrow_mut();
- intern_ty(&cx.arenas.type_, &mut *interner, st)
-}
-
-fn intern_ty<'tcx>(type_arena: &'tcx TypedArena<TyS<'tcx>>,
- interner: &mut FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>,
- st: TypeVariants<'tcx>)
- -> Ty<'tcx>
-{
- match interner.get(&st) {
- Some(ty) => return *ty,
- _ => ()
- }
-
- let flags = FlagComputation::for_sty(&st);
-
- let ty = match () {
- () => type_arena.alloc(TyS { sty: st,
- flags: Cell::new(flags.flags),
- region_depth: flags.depth, }),
- };
-
- debug!("Interned type: {:?} Pointer: {:?}",
- ty, ty as *const TyS);
-
- interner.insert(InternedTy { ty: ty }, ty);
-
- ty
-}
-
-struct FlagComputation {
- flags: TypeFlags,
-
- // maximum depth of any bound region that we have seen thus far
- depth: u32,
-}
-
-impl FlagComputation {
- fn new() -> FlagComputation {
- FlagComputation { flags: TypeFlags::empty(), depth: 0 }
- }
-
- fn for_sty(st: &TypeVariants) -> FlagComputation {
- let mut result = FlagComputation::new();
- result.add_sty(st);
- result
- }
-
- fn add_flags(&mut self, flags: TypeFlags) {
- self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS);
- }
-
- fn add_depth(&mut self, depth: u32) {
- if depth > self.depth {
- self.depth = depth;
- }
- }
-
- /// Adds the flags/depth from a set of types that appear within the current type, but within a
- /// region binder.
- fn add_bound_computation(&mut self, computation: &FlagComputation) {
- self.add_flags(computation.flags);
+ /// Adds the flags/depth from a set of types that appear within the current type, but within a
+ /// region binder.
+ fn add_bound_computation(&mut self, computation: &FlagComputation) {
+ self.add_flags(computation.flags);
// The types that contributed to `computation` occurred within
// a region binder, so subtract one from the region depth
}
}
- &TyClosure(_, substs) => {
+ &TyClosure(_, ref substs) => {
self.add_flags(TypeFlags::HAS_TY_CLOSURE);
self.add_flags(TypeFlags::HAS_LOCAL_NAMES);
- self.add_substs(substs);
+ self.add_substs(&substs.func_substs);
+ self.add_tys(&substs.upvar_tys);
}
&TyInfer(_) => {
}
}
-pub fn mk_mach_int<'tcx>(tcx: &ctxt<'tcx>, tm: ast::IntTy) -> Ty<'tcx> {
- match tm {
- ast::TyIs => tcx.types.isize,
- ast::TyI8 => tcx.types.i8,
- ast::TyI16 => tcx.types.i16,
- ast::TyI32 => tcx.types.i32,
- ast::TyI64 => tcx.types.i64,
+impl<'tcx> ctxt<'tcx> {
+ /// Create a type context and call the closure with a `&ty::ctxt` reference
+ /// to the context. The closure enforces that the type context and any interned
+ /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
+ /// reference to the context, to allow formatting values that need it.
+ pub fn create_and_enter<F, R>(s: Session,
+ arenas: &'tcx CtxtArenas<'tcx>,
+ def_map: DefMap,
+ named_region_map: resolve_lifetime::NamedRegionMap,
+ map: ast_map::Map<'tcx>,
+ freevars: RefCell<FreevarMap>,
+ region_maps: RegionMaps,
+ lang_items: middle::lang_items::LanguageItems,
+ stability: stability::Index<'tcx>,
+ f: F) -> (Session, R)
+ where F: FnOnce(&ctxt<'tcx>) -> R
+ {
+ let interner = RefCell::new(FnvHashMap());
+ let common_types = CommonTypes::new(&arenas.type_, &interner);
+
+ tls::enter(ctxt {
+ arenas: arenas,
+ interner: interner,
+ substs_interner: RefCell::new(FnvHashMap()),
+ bare_fn_interner: RefCell::new(FnvHashMap()),
+ region_interner: RefCell::new(FnvHashMap()),
+ stability_interner: RefCell::new(FnvHashMap()),
+ types: common_types,
+ named_region_map: named_region_map,
+ region_maps: region_maps,
+ free_region_maps: RefCell::new(FnvHashMap()),
+ item_variance_map: RefCell::new(DefIdMap()),
+ variance_computed: Cell::new(false),
+ sess: s,
+ def_map: def_map,
+ tables: RefCell::new(Tables::empty()),
+ impl_trait_refs: RefCell::new(DefIdMap()),
+ trait_defs: RefCell::new(DefIdMap()),
+ predicates: RefCell::new(DefIdMap()),
+ super_predicates: RefCell::new(DefIdMap()),
+ fulfilled_predicates: RefCell::new(traits::FulfilledPredicates::new()),
+ map: map,
+ freevars: freevars,
+ tcache: RefCell::new(DefIdMap()),
+ rcache: RefCell::new(FnvHashMap()),
+ tc_cache: RefCell::new(FnvHashMap()),
+ ast_ty_to_ty_cache: RefCell::new(NodeMap()),
+ enum_var_cache: RefCell::new(DefIdMap()),
+ impl_or_trait_items: RefCell::new(DefIdMap()),
+ trait_item_def_ids: RefCell::new(DefIdMap()),
+ trait_items_cache: RefCell::new(DefIdMap()),
+ ty_param_defs: RefCell::new(NodeMap()),
+ normalized_cache: RefCell::new(FnvHashMap()),
+ lang_items: lang_items,
+ provided_method_sources: RefCell::new(DefIdMap()),
+ struct_fields: RefCell::new(DefIdMap()),
+ destructor_for_type: RefCell::new(DefIdMap()),
+ destructors: RefCell::new(DefIdSet()),
+ inherent_impls: RefCell::new(DefIdMap()),
+ impl_items: RefCell::new(DefIdMap()),
+ used_unsafe: RefCell::new(NodeSet()),
+ used_mut_nodes: RefCell::new(NodeSet()),
+ populated_external_types: RefCell::new(DefIdSet()),
+ populated_external_primitive_impls: RefCell::new(DefIdSet()),
+ extern_const_statics: RefCell::new(DefIdMap()),
+ extern_const_variants: RefCell::new(DefIdMap()),
+ extern_const_fns: RefCell::new(DefIdMap()),
+ dependency_formats: RefCell::new(FnvHashMap()),
+ node_lint_levels: RefCell::new(FnvHashMap()),
+ transmute_restrictions: RefCell::new(Vec::new()),
+ stability: RefCell::new(stability),
+ selection_cache: traits::SelectionCache::new(),
+ repr_hint_cache: RefCell::new(DefIdMap()),
+ const_qualif_map: RefCell::new(NodeMap()),
+ custom_coerce_unsized_kinds: RefCell::new(DefIdMap()),
+ cast_kinds: RefCell::new(NodeMap()),
+ fragment_infos: RefCell::new(DefIdMap()),
+ }, f)
}
-}
-pub fn mk_mach_uint<'tcx>(tcx: &ctxt<'tcx>, tm: ast::UintTy) -> Ty<'tcx> {
- match tm {
- ast::TyUs => tcx.types.usize,
- ast::TyU8 => tcx.types.u8,
- ast::TyU16 => tcx.types.u16,
- ast::TyU32 => tcx.types.u32,
- ast::TyU64 => tcx.types.u64,
+ // Type constructors
+
+ pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
+ if let Some(substs) = self.substs_interner.borrow().get(&substs) {
+ return *substs;
+ }
+
+ let substs = self.arenas.substs.alloc(substs);
+ self.substs_interner.borrow_mut().insert(substs, substs);
+ substs
+ }
+
+ /// Create an unsafe fn ty based on a safe fn ty.
+ pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
+ assert_eq!(bare_fn.unsafety, ast::Unsafety::Normal);
+ let unsafe_fn_ty_a = self.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Unsafe,
+ abi: bare_fn.abi,
+ sig: bare_fn.sig.clone()
+ });
+ self.mk_fn(None, unsafe_fn_ty_a)
}
-}
-pub fn mk_mach_float<'tcx>(tcx: &ctxt<'tcx>, tm: ast::FloatTy) -> Ty<'tcx> {
- match tm {
- ast::TyF32 => tcx.types.f32,
- ast::TyF64 => tcx.types.f64,
+ pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
+ if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
+ return *bare_fn;
+ }
+
+ let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
+ self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
+ bare_fn
}
-}
-pub fn mk_str<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
- mk_t(cx, TyStr)
-}
+ pub fn mk_region(&self, region: Region) -> &'tcx Region {
+ if let Some(region) = self.region_interner.borrow().get(&region) {
+ return *region;
+ }
-pub fn mk_str_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, m: ast::Mutability) -> Ty<'tcx> {
- mk_rptr(cx, r,
- mt {
- ty: mk_t(cx, TyStr),
- mutbl: m
- })
-}
+ let region = self.arenas.region.alloc(region);
+ self.region_interner.borrow_mut().insert(region, region);
+ region
+ }
-pub fn mk_enum<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- // take a copy of substs so that we own the vectors inside
- mk_t(cx, TyEnum(did, substs))
-}
+ pub fn closure_kind(&self, def_id: ast::DefId) -> ty::ClosureKind {
+ *self.tables.borrow().closure_kinds.get(&def_id).unwrap()
+ }
+
+ pub fn closure_type(&self,
+ def_id: ast::DefId,
+ substs: &ClosureSubsts<'tcx>)
+ -> ty::ClosureTy<'tcx>
+ {
+ self.tables.borrow().closure_tys.get(&def_id).unwrap().subst(self, &substs.func_substs)
+ }
-pub fn mk_uniq<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { mk_t(cx, TyBox(ty)) }
+ pub fn type_parameter_def(&self,
+ node_id: ast::NodeId)
+ -> TypeParameterDef<'tcx>
+ {
+ self.ty_param_defs.borrow().get(&node_id).unwrap().clone()
+ }
-pub fn mk_ptr<'tcx>(cx: &ctxt<'tcx>, tm: mt<'tcx>) -> Ty<'tcx> { mk_t(cx, TyRawPtr(tm)) }
+ pub fn pat_contains_ref_binding(&self, pat: &ast::Pat) -> Option<ast::Mutability> {
+ pat_util::pat_contains_ref_binding(&self.def_map, pat)
+ }
-pub fn mk_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
- mk_t(cx, TyRef(r, tm))
-}
+ pub fn arm_contains_ref_binding(&self, arm: &ast::Arm) -> Option<ast::Mutability> {
+ pat_util::arm_contains_ref_binding(&self.def_map, arm)
+ }
-pub fn mk_mut_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
- mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutMutable})
-}
-pub fn mk_imm_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
- mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutImmutable})
-}
+ fn intern_ty(type_arena: &'tcx TypedArena<TyS<'tcx>>,
+ interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+ st: TypeVariants<'tcx>)
+ -> Ty<'tcx> {
+ let ty: Ty /* don't be &mut TyS */ = {
+ let mut interner = interner.borrow_mut();
+ match interner.get(&st) {
+ Some(ty) => return *ty,
+ _ => ()
+ }
-pub fn mk_mut_ptr<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- mk_ptr(cx, mt {ty: ty, mutbl: ast::MutMutable})
-}
+ let flags = FlagComputation::for_sty(&st);
-pub fn mk_imm_ptr<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- mk_ptr(cx, mt {ty: ty, mutbl: ast::MutImmutable})
-}
+ let ty = match () {
+ () => type_arena.alloc(TyS { sty: st,
+ flags: Cell::new(flags.flags),
+ region_depth: flags.depth, }),
+ };
-pub fn mk_nil_ptr<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
- mk_ptr(cx, mt {ty: mk_nil(cx), mutbl: ast::MutImmutable})
-}
+ interner.insert(InternedTy { ty: ty }, ty);
+ ty
+ };
-pub fn mk_vec<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, sz: Option<usize>) -> Ty<'tcx> {
- match sz {
- Some(n) => mk_t(cx, TyArray(ty, n)),
- None => mk_t(cx, TySlice(ty))
+ debug!("Interned type: {:?} Pointer: {:?}",
+ ty, ty as *const TyS);
+ ty
}
-}
-pub fn mk_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
- mk_rptr(cx, r,
- mt {
- ty: mk_vec(cx, tm.ty, None),
- mutbl: tm.mutbl
- })
-}
+ // Interns a type/name combination, stores the resulting box in cx.interner,
+ // and returns the box as cast to an unsafe ptr (see comments for Ty above).
+ pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
+ ctxt::intern_ty(&self.arenas.type_, &self.interner, st)
+ }
-pub fn mk_tup<'tcx>(cx: &ctxt<'tcx>, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
- mk_t(cx, TyTuple(ts))
-}
+ pub fn mk_mach_int(&self, tm: ast::IntTy) -> Ty<'tcx> {
+ match tm {
+ ast::TyIs => self.types.isize,
+ ast::TyI8 => self.types.i8,
+ ast::TyI16 => self.types.i16,
+ ast::TyI32 => self.types.i32,
+ ast::TyI64 => self.types.i64,
+ }
+ }
-pub fn mk_nil<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
- mk_tup(cx, Vec::new())
-}
+ pub fn mk_mach_uint(&self, tm: ast::UintTy) -> Ty<'tcx> {
+ match tm {
+ ast::TyUs => self.types.usize,
+ ast::TyU8 => self.types.u8,
+ ast::TyU16 => self.types.u16,
+ ast::TyU32 => self.types.u32,
+ ast::TyU64 => self.types.u64,
+ }
+ }
-pub fn mk_bool<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
- mk_t(cx, TyBool)
-}
+ pub fn mk_mach_float(&self, tm: ast::FloatTy) -> Ty<'tcx> {
+ match tm {
+ ast::TyF32 => self.types.f32,
+ ast::TyF64 => self.types.f64,
+ }
+ }
-pub fn mk_bare_fn<'tcx>(cx: &ctxt<'tcx>,
- opt_def_id: Option<ast::DefId>,
- fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
- mk_t(cx, TyBareFn(opt_def_id, fty))
-}
+ pub fn mk_str(&self) -> Ty<'tcx> {
+ self.mk_ty(TyStr)
+ }
-pub fn mk_ctor_fn<'tcx>(cx: &ctxt<'tcx>,
- def_id: ast::DefId,
- input_tys: &[Ty<'tcx>],
- output: Ty<'tcx>) -> Ty<'tcx> {
- let input_args = input_tys.iter().cloned().collect();
- mk_bare_fn(cx,
- Some(def_id),
- cx.mk_bare_fn(BareFnTy {
- unsafety: ast::Unsafety::Normal,
- abi: abi::Rust,
- sig: ty::Binder(FnSig {
- inputs: input_args,
- output: ty::FnConverging(output),
- variadic: false
- })
- }))
-}
-
-pub fn mk_trait<'tcx>(cx: &ctxt<'tcx>,
- principal: ty::PolyTraitRef<'tcx>,
- bounds: ExistentialBounds<'tcx>)
- -> Ty<'tcx>
-{
- assert!(bound_list_is_sorted(&bounds.projection_bounds));
+ pub fn mk_static_str(&self) -> Ty<'tcx> {
+ self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
+ }
- let inner = box TraitTy {
- principal: principal,
- bounds: bounds
- };
- mk_t(cx, TyTrait(inner))
-}
+ pub fn mk_enum(&self, did: ast::DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ // take a copy of substs so that we own the vectors inside
+ self.mk_ty(TyEnum(did, substs))
+ }
-fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
- bounds.is_empty() ||
- bounds[1..].iter().enumerate().all(
- |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
-}
+ pub fn mk_box(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyBox(ty))
+ }
-pub fn sort_bounds_list(bounds: &mut [ty::PolyProjectionPredicate]) {
- bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key()))
-}
+ pub fn mk_ptr(&self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyRawPtr(tm))
+ }
-pub fn mk_projection<'tcx>(cx: &ctxt<'tcx>,
- trait_ref: TraitRef<'tcx>,
- item_name: ast::Name)
- -> Ty<'tcx> {
- // take a copy of substs so that we own the vectors inside
- let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name };
- mk_t(cx, TyProjection(inner))
-}
+ pub fn mk_ref(&self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyRef(r, tm))
+ }
-pub fn mk_struct<'tcx>(cx: &ctxt<'tcx>, struct_id: ast::DefId,
- substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- // take a copy of substs so that we own the vectors inside
- mk_t(cx, TyStruct(struct_id, substs))
-}
+ pub fn mk_mut_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut {ty: ty, mutbl: ast::MutMutable})
+ }
-pub fn mk_closure<'tcx>(cx: &ctxt<'tcx>, closure_id: ast::DefId, substs: &'tcx Substs<'tcx>)
- -> Ty<'tcx> {
- mk_t(cx, TyClosure(closure_id, substs))
-}
+ pub fn mk_imm_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut {ty: ty, mutbl: ast::MutImmutable})
+ }
-pub fn mk_var<'tcx>(cx: &ctxt<'tcx>, v: TyVid) -> Ty<'tcx> {
- mk_infer(cx, TyVar(v))
-}
+ pub fn mk_mut_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut {ty: ty, mutbl: ast::MutMutable})
+ }
-pub fn mk_int_var<'tcx>(cx: &ctxt<'tcx>, v: IntVid) -> Ty<'tcx> {
- mk_infer(cx, IntVar(v))
-}
+ pub fn mk_imm_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut {ty: ty, mutbl: ast::MutImmutable})
+ }
-pub fn mk_float_var<'tcx>(cx: &ctxt<'tcx>, v: FloatVid) -> Ty<'tcx> {
- mk_infer(cx, FloatVar(v))
-}
+ pub fn mk_nil_ptr(&self) -> Ty<'tcx> {
+ self.mk_imm_ptr(self.mk_nil())
+ }
-pub fn mk_infer<'tcx>(cx: &ctxt<'tcx>, it: InferTy) -> Ty<'tcx> {
- mk_t(cx, TyInfer(it))
-}
+ pub fn mk_array(&self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> {
+ self.mk_ty(TyArray(ty, n))
+ }
+
+ pub fn mk_slice(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TySlice(ty))
+ }
+
+ pub fn mk_tup(&self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
+ self.mk_ty(TyTuple(ts))
+ }
+
+ pub fn mk_nil(&self) -> Ty<'tcx> {
+ self.mk_tup(Vec::new())
+ }
+
+ pub fn mk_bool(&self) -> Ty<'tcx> {
+ self.mk_ty(TyBool)
+ }
+
+ pub fn mk_fn(&self,
+ opt_def_id: Option<ast::DefId>,
+ fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyBareFn(opt_def_id, fty))
+ }
+
+ pub fn mk_ctor_fn(&self,
+ def_id: ast::DefId,
+ input_tys: &[Ty<'tcx>],
+ output: Ty<'tcx>) -> Ty<'tcx> {
+ let input_args = input_tys.iter().cloned().collect();
+ self.mk_fn(Some(def_id), self.mk_bare_fn(BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: abi::Rust,
+ sig: ty::Binder(FnSig {
+ inputs: input_args,
+ output: ty::FnConverging(output),
+ variadic: false
+ })
+ }))
+ }
+
+ pub fn mk_trait(&self,
+ principal: ty::PolyTraitRef<'tcx>,
+ bounds: ExistentialBounds<'tcx>)
+ -> Ty<'tcx>
+ {
+ assert!(bound_list_is_sorted(&bounds.projection_bounds));
+
+ let inner = box TraitTy {
+ principal: principal,
+ bounds: bounds
+ };
+ self.mk_ty(TyTrait(inner))
+ }
+
+ pub fn mk_projection(&self,
+ trait_ref: TraitRef<'tcx>,
+ item_name: ast::Name)
+ -> Ty<'tcx> {
+ // take a copy of substs so that we own the vectors inside
+ let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name };
+ self.mk_ty(TyProjection(inner))
+ }
+
+ pub fn mk_struct(&self, struct_id: ast::DefId,
+ substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ // take a copy of substs so that we own the vectors inside
+ self.mk_ty(TyStruct(struct_id, substs))
+ }
+
+ pub fn mk_closure(&self,
+ closure_id: ast::DefId,
+ substs: &'tcx Substs<'tcx>,
+ tys: Vec<Ty<'tcx>>)
+ -> Ty<'tcx> {
+ self.mk_closure_from_closure_substs(closure_id, Box::new(ClosureSubsts {
+ func_substs: substs,
+ upvar_tys: tys
+ }))
+ }
+
+ pub fn mk_closure_from_closure_substs(&self,
+ closure_id: ast::DefId,
+ closure_substs: Box<ClosureSubsts<'tcx>>)
+ -> Ty<'tcx> {
+ self.mk_ty(TyClosure(closure_id, closure_substs))
+ }
+
+ pub fn mk_var(&self, v: TyVid) -> Ty<'tcx> {
+ self.mk_infer(TyVar(v))
+ }
+
+ pub fn mk_int_var(&self, v: IntVid) -> Ty<'tcx> {
+ self.mk_infer(IntVar(v))
+ }
+
+ pub fn mk_float_var(&self, v: FloatVid) -> Ty<'tcx> {
+ self.mk_infer(FloatVar(v))
+ }
+
+ pub fn mk_infer(&self, it: InferTy) -> Ty<'tcx> {
+ self.mk_ty(TyInfer(it))
+ }
+
+ pub fn mk_param(&self,
+ space: subst::ParamSpace,
+ index: u32,
+ name: ast::Name) -> Ty<'tcx> {
+ self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name }))
+ }
+
+ pub fn mk_self_type(&self) -> Ty<'tcx> {
+ self.mk_param(subst::SelfSpace, 0, special_idents::type_self.name)
+ }
-pub fn mk_param<'tcx>(cx: &ctxt<'tcx>,
- space: subst::ParamSpace,
- index: u32,
- name: ast::Name) -> Ty<'tcx> {
- mk_t(cx, TyParam(ParamTy { space: space, idx: index, name: name }))
+ pub fn mk_param_from_def(&self, def: &TypeParameterDef) -> Ty<'tcx> {
+ self.mk_param(def.space, def.index, def.name)
+ }
}
-pub fn mk_self_type<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
- mk_param(cx, subst::SelfSpace, 0, special_idents::type_self.name)
+fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
+ bounds.is_empty() ||
+ bounds[1..].iter().enumerate().all(
+ |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
}
-pub fn mk_param_from_def<'tcx>(cx: &ctxt<'tcx>, def: &TypeParameterDef) -> Ty<'tcx> {
- mk_param(cx, def.space, def.index, def.name)
+pub fn sort_bounds_list(bounds: &mut [ty::PolyProjectionPredicate]) {
+ bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key()))
}
impl<'tcx> TyS<'tcx> {
_ => false,
}
}
-}
-
-pub fn walk_ty<'tcx, F>(ty_root: Ty<'tcx>, mut f: F)
- where F: FnMut(Ty<'tcx>),
-{
- for ty in ty_root.walk() {
- f(ty);
- }
-}
-/// Walks `ty` and any types appearing within `ty`, invoking the
-/// callback `f` on each type. If the callback returns false, then the
-/// children of the current type are ignored.
-///
-/// Note: prefer `ty.walk()` where possible.
-pub fn maybe_walk_ty<'tcx,F>(ty_root: Ty<'tcx>, mut f: F)
- where F : FnMut(Ty<'tcx>) -> bool
-{
- let mut walker = ty_root.walk();
- while let Some(ty) = walker.next() {
- if !f(ty) {
- walker.skip_current_subtree();
+ /// Walks `ty` and any types appearing within `ty`, invoking the
+ /// callback `f` on each type. If the callback returns false, then the
+ /// children of the current type are ignored.
+ ///
+ /// Note: prefer `ty.walk()` where possible.
+ pub fn maybe_walk<F>(&'tcx self, mut f: F)
+ where F : FnMut(Ty<'tcx>) -> bool
+ {
+ let mut walker = self.walk();
+ while let Some(ty) = walker.next() {
+ if !f(ty) {
+ walker.skip_current_subtree();
+ }
}
}
}
-// Folds types from the bottom up.
-pub fn fold_ty<'tcx, F>(cx: &ctxt<'tcx>, t0: Ty<'tcx>,
- fldop: F)
- -> Ty<'tcx> where
- F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
-{
- let mut f = ty_fold::BottomUpFolder {tcx: cx, fldop: fldop};
- f.fold_ty(t0)
-}
-
impl ParamTy {
pub fn new(space: subst::ParamSpace,
index: u32,
ParamTy::new(def.space, def.index, def.name)
}
- pub fn to_ty<'tcx>(self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> {
- ty::mk_param(tcx, self.space, self.idx, self.name)
+ pub fn to_ty<'tcx>(self, tcx: &ctxt<'tcx>) -> Ty<'tcx> {
+ tcx.mk_param(self.space, self.idx, self.name)
}
pub fn is_self(&self) -> bool {
}
// Type utilities
-
-pub fn type_is_nil(ty: Ty) -> bool {
- match ty.sty {
- TyTuple(ref tys) => tys.is_empty(),
- _ => false
+impl<'tcx> TyS<'tcx> {
+ pub fn is_nil(&self) -> bool {
+ match self.sty {
+ TyTuple(ref tys) => tys.is_empty(),
+ _ => false
+ }
}
-}
-
-pub fn type_is_error(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::HAS_TY_ERR)
-}
-
-pub fn type_needs_subst(ty: Ty) -> bool {
- ty.flags.get().intersects(TypeFlags::NEEDS_SUBST)
-}
-
-pub fn trait_ref_contains_error(tref: &ty::TraitRef) -> bool {
- tref.substs.types.any(|&ty| type_is_error(ty))
-}
-pub fn type_is_ty_var(ty: Ty) -> bool {
- match ty.sty {
- TyInfer(TyVar(_)) => true,
- _ => false
+ pub fn is_empty(&self, cx: &ctxt) -> bool {
+ match self.sty {
+ TyEnum(did, _) => cx.enum_variants(did).is_empty(),
+ _ => false
+ }
}
-}
-
-pub fn type_is_bool(ty: Ty) -> bool { ty.sty == TyBool }
-pub fn type_is_self(ty: Ty) -> bool {
- match ty.sty {
- TyParam(ref p) => p.space == subst::SelfSpace,
- _ => false
+ pub fn is_ty_var(&self) -> bool {
+ match self.sty {
+ TyInfer(TyVar(_)) => true,
+ _ => false
+ }
}
-}
-fn type_is_slice(ty: Ty) -> bool {
- match ty.sty {
- TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty {
- TySlice(_) | TyStr => true,
- _ => false,
- },
- _ => false
- }
-}
+ pub fn is_bool(&self) -> bool { self.sty == TyBool }
-pub fn type_is_structural(ty: Ty) -> bool {
- match ty.sty {
- TyStruct(..) | TyTuple(_) | TyEnum(..) |
- TyArray(..) | TyClosure(..) => true,
- _ => type_is_slice(ty) | type_is_trait(ty)
+ pub fn is_self(&self) -> bool {
+ match self.sty {
+ TyParam(ref p) => p.space == subst::SelfSpace,
+ _ => false
+ }
}
-}
-pub fn type_is_simd(cx: &ctxt, ty: Ty) -> bool {
- match ty.sty {
- TyStruct(did, _) => lookup_simd(cx, did),
- _ => false
+ fn is_slice(&self) -> bool {
+ match self.sty {
+ TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty {
+ TySlice(_) | TyStr => true,
+ _ => false,
+ },
+ _ => false
+ }
}
-}
-pub fn sequence_element_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- match ty.sty {
- TyArray(ty, _) | TySlice(ty) => ty,
- TyStr => mk_mach_uint(cx, ast::TyU8),
- _ => cx.sess.bug(&format!("sequence_element_type called on non-sequence value: {}",
- ty)),
+ pub fn is_structural(&self) -> bool {
+ match self.sty {
+ TyStruct(..) | TyTuple(_) | TyEnum(..) |
+ TyArray(..) | TyClosure(..) => true,
+ _ => self.is_slice() | self.is_trait()
+ }
}
-}
-pub fn simd_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- match ty.sty {
- TyStruct(did, substs) => {
- let fields = lookup_struct_fields(cx, did);
- lookup_field_type(cx, did, fields[0].id, substs)
+ pub fn is_simd(&self, cx: &ctxt) -> bool {
+ match self.sty {
+ TyStruct(did, _) => cx.lookup_simd(did),
+ _ => false
}
- _ => panic!("simd_type called on invalid type")
}
-}
-pub fn simd_size(cx: &ctxt, ty: Ty) -> usize {
- match ty.sty {
- TyStruct(did, _) => {
- let fields = lookup_struct_fields(cx, did);
- fields.len()
+ pub fn sequence_element_type(&self, cx: &ctxt<'tcx>) -> Ty<'tcx> {
+ match self.sty {
+ TyArray(ty, _) | TySlice(ty) => ty,
+ TyStr => cx.mk_mach_uint(ast::TyU8),
+ _ => cx.sess.bug(&format!("sequence_element_type called on non-sequence value: {}",
+ self)),
}
- _ => panic!("simd_size called on invalid type")
}
-}
-pub fn type_is_region_ptr(ty: Ty) -> bool {
- match ty.sty {
- TyRef(..) => true,
- _ => false
+ pub fn simd_type(&self, cx: &ctxt<'tcx>) -> Ty<'tcx> {
+ match self.sty {
+ TyStruct(did, substs) => {
+ let fields = cx.lookup_struct_fields(did);
+ cx.lookup_field_type(did, fields[0].id, substs)
+ }
+ _ => panic!("simd_type called on invalid type")
+ }
}
-}
-pub fn type_is_unsafe_ptr(ty: Ty) -> bool {
- match ty.sty {
- TyRawPtr(_) => return true,
- _ => return false
+ pub fn simd_size(&self, cx: &ctxt) -> usize {
+ match self.sty {
+ TyStruct(did, _) => {
+ cx.lookup_struct_fields(did).len()
+ }
+ _ => panic!("simd_size called on invalid type")
+ }
}
-}
-pub fn type_is_unique(ty: Ty) -> bool {
- match ty.sty {
- TyBox(_) => true,
- _ => false
+ pub fn is_region_ptr(&self) -> bool {
+ match self.sty {
+ TyRef(..) => true,
+ _ => false
+ }
}
-}
-/*
- A scalar type is one that denotes an atomic datum, with no sub-components.
- (A TyRawPtr is scalar because it represents a non-managed pointer, so its
- contents are abstract to rustc.)
-*/
-pub fn type_is_scalar(ty: Ty) -> bool {
- match ty.sty {
- TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
- TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
- TyBareFn(..) | TyRawPtr(_) => true,
- _ => false
+ pub fn is_unsafe_ptr(&self) -> bool {
+ match self.sty {
+ TyRawPtr(_) => return true,
+ _ => return false
+ }
+ }
+
+ pub fn is_unique(&self) -> bool {
+ match self.sty {
+ TyBox(_) => true,
+ _ => false
+ }
+ }
+
+ /*
+ A scalar type is one that denotes an atomic datum, with no sub-components.
+ (A TyRawPtr is scalar because it represents a non-managed pointer, so its
+ contents are abstract to rustc.)
+ */
+ pub fn is_scalar(&self) -> bool {
+ match self.sty {
+ TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
+ TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
+ TyBareFn(..) | TyRawPtr(_) => true,
+ _ => false
+ }
}
-}
-/// Returns true if this type is a floating point type and false otherwise.
-pub fn type_is_floating_point(ty: Ty) -> bool {
- match ty.sty {
- TyFloat(_) |
- TyInfer(FloatVar(_)) =>
- true,
+ /// Returns true if this type is a floating point type and false otherwise.
+ pub fn is_floating_point(&self) -> bool {
+ match self.sty {
+ TyFloat(_) |
+ TyInfer(FloatVar(_)) => true,
+ _ => false,
+ }
+ }
- _ =>
- false,
+ pub fn ty_to_def_id(&self) -> Option<ast::DefId> {
+ match self.sty {
+ TyTrait(ref tt) => Some(tt.principal_def_id()),
+ TyStruct(id, _) |
+ TyEnum(id, _) |
+ TyClosure(id, _) => Some(id),
+ _ => None
+ }
}
}
None = 0b0000_0000__0000_0000__0000,
// Things that are interior to the value (first nibble):
- InteriorUnsized = 0b0000_0000__0000_0000__0001,
InteriorUnsafe = 0b0000_0000__0000_0000__0010,
InteriorParam = 0b0000_0000__0000_0000__0100,
// InteriorAll = 0b00000000__00000000__1111,
OwnsDtor = 0b0000_0000__0000_0010__0000,
OwnsAll = 0b0000_0000__1111_1111__0000,
- // Things that are reachable by the value in any way (fourth nibble):
- ReachesBorrowed = 0b0000_0010__0000_0000__0000,
- ReachesMutable = 0b0000_1000__0000_0000__0000,
- ReachesFfiUnsafe = 0b0010_0000__0000_0000__0000,
- ReachesAll = 0b0011_1111__0000_0000__0000,
-
// Things that mean drop glue is necessary
NeedsDrop = 0b0000_0000__0000_0111__0000,
- // Things that prevent values from being considered sized
- Nonsized = 0b0000_0000__0000_0000__0001,
-
// All bits
All = 0b1111_1111__1111_1111__1111
}
self.intersects(TC::OwnsOwned)
}
- pub fn is_sized(&self, _: &ctxt) -> bool {
- !self.intersects(TC::Nonsized)
- }
-
pub fn interior_param(&self) -> bool {
self.intersects(TC::InteriorParam)
}
self.intersects(TC::InteriorUnsafe)
}
- pub fn interior_unsized(&self) -> bool {
- self.intersects(TC::InteriorUnsized)
- }
-
pub fn needs_drop(&self, _: &ctxt) -> bool {
self.intersects(TC::NeedsDrop)
}
/// Includes only those bits that still apply when indirected through a `Box` pointer
pub fn owned_pointer(&self) -> TypeContents {
- TC::OwnsOwned | (
- *self & (TC::OwnsAll | TC::ReachesAll))
- }
-
- /// Includes only those bits that still apply when indirected through a reference (`&`)
- pub fn reference(&self, bits: TypeContents) -> TypeContents {
- bits | (
- *self & TC::ReachesAll)
- }
-
- /// Includes only those bits that still apply when indirected through a raw pointer (`*`)
- pub fn unsafe_pointer(&self) -> TypeContents {
- *self & TC::ReachesAll
+ TC::OwnsOwned | (*self & TC::OwnsAll)
}
pub fn union<T, F>(v: &[T], mut f: F) -> TypeContents where
}
}
-pub fn type_contents<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> TypeContents {
- return memoized(&cx.tc_cache, ty, |ty| {
- tc_ty(cx, ty, &mut FnvHashMap())
- });
+impl<'tcx> TyS<'tcx> {
+ pub fn type_contents(&'tcx self, cx: &ctxt<'tcx>) -> TypeContents {
+ return memoized(&cx.tc_cache, self, |ty| {
+ tc_ty(cx, ty, &mut FnvHashMap())
+ });
- fn tc_ty<'tcx>(cx: &ctxt<'tcx>,
- ty: Ty<'tcx>,
- cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
- {
- // Subtle: Note that we are *not* using cx.tc_cache here but rather a
- // private cache for this walk. This is needed in the case of cyclic
- // types like:
- //
- // struct List { next: Box<Option<List>>, ... }
- //
- // When computing the type contents of such a type, we wind up deeply
- // recursing as we go. So when we encounter the recursive reference
- // to List, we temporarily use TC::None as its contents. Later we'll
- // patch up the cache with the correct value, once we've computed it
- // (this is basically a co-inductive process, if that helps). So in
- // the end we'll compute TC::OwnsOwned, in this case.
- //
- // The problem is, as we are doing the computation, we will also
- // compute an *intermediate* contents for, e.g., Option<List> of
- // TC::None. This is ok during the computation of List itself, but if
- // we stored this intermediate value into cx.tc_cache, then later
- // requests for the contents of Option<List> would also yield TC::None
- // which is incorrect. This value was computed based on the crutch
- // value for the type contents of list. The correct value is
- // TC::OwnsOwned. This manifested as issue #4821.
- match cache.get(&ty) {
- Some(tc) => { return *tc; }
- None => {}
- }
- match cx.tc_cache.borrow().get(&ty) { // Must check both caches!
- Some(tc) => { return *tc; }
- None => {}
- }
- cache.insert(ty, TC::None);
-
- let result = match ty.sty {
- // usize and isize are ffi-unsafe
- TyUint(ast::TyUs) | TyInt(ast::TyIs) => {
- TC::ReachesFfiUnsafe
- }
-
- // Scalar and unique types are sendable, and durable
- TyInfer(ty::FreshIntTy(_)) | TyInfer(ty::FreshFloatTy(_)) |
- TyBool | TyInt(_) | TyUint(_) | TyFloat(_) |
- TyBareFn(..) | ty::TyChar => {
- TC::None
- }
-
- TyBox(typ) => {
- TC::ReachesFfiUnsafe | match typ.sty {
- TyStr => TC::OwnsOwned,
- _ => tc_ty(cx, typ, cache).owned_pointer(),
+ fn tc_ty<'tcx>(cx: &ctxt<'tcx>,
+ ty: Ty<'tcx>,
+ cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
+ {
+ // Subtle: Note that we are *not* using cx.tc_cache here but rather a
+ // private cache for this walk. This is needed in the case of cyclic
+ // types like:
+ //
+ // struct List { next: Box<Option<List>>, ... }
+ //
+ // When computing the type contents of such a type, we wind up deeply
+ // recursing as we go. So when we encounter the recursive reference
+ // to List, we temporarily use TC::None as its contents. Later we'll
+ // patch up the cache with the correct value, once we've computed it
+ // (this is basically a co-inductive process, if that helps). So in
+ // the end we'll compute TC::OwnsOwned, in this case.
+ //
+ // The problem is, as we are doing the computation, we will also
+ // compute an *intermediate* contents for, e.g., Option<List> of
+ // TC::None. This is ok during the computation of List itself, but if
+ // we stored this intermediate value into cx.tc_cache, then later
+ // requests for the contents of Option<List> would also yield TC::None
+ // which is incorrect. This value was computed based on the crutch
+ // value for the type contents of list. The correct value is
+ // TC::OwnsOwned. This manifested as issue #4821.
+ match cache.get(&ty) {
+ Some(tc) => { return *tc; }
+ None => {}
+ }
+ match cx.tc_cache.borrow().get(&ty) { // Must check both caches!
+ Some(tc) => { return *tc; }
+ None => {}
+ }
+ cache.insert(ty, TC::None);
+
+ let result = match ty.sty {
+ // usize and isize are ffi-unsafe
+ TyUint(ast::TyUs) | TyInt(ast::TyIs) => {
+ TC::None
}
- }
-
- TyTrait(box TraitTy { ref bounds, .. }) => {
- object_contents(bounds) | TC::ReachesFfiUnsafe | TC::Nonsized
- }
- TyRawPtr(ref mt) => {
- tc_ty(cx, mt.ty, cache).unsafe_pointer()
- }
+ // Scalar and unique types are sendable, and durable
+ TyInfer(ty::FreshIntTy(_)) | TyInfer(ty::FreshFloatTy(_)) |
+ TyBool | TyInt(_) | TyUint(_) | TyFloat(_) |
+ TyBareFn(..) | ty::TyChar => {
+ TC::None
+ }
- TyRef(r, ref mt) => {
- TC::ReachesFfiUnsafe | match mt.ty.sty {
- TyStr => borrowed_contents(*r, ast::MutImmutable),
- TyArray(..) |
- TySlice(_) => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r,
- mt.mutbl)),
- _ => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r, mt.mutbl)),
+ TyBox(typ) => {
+ tc_ty(cx, typ, cache).owned_pointer()
}
- }
- TyArray(ty, _) => {
- tc_ty(cx, ty, cache)
- }
+ TyTrait(_) => {
+ TC::All - TC::InteriorParam
+ }
- TySlice(ty) => {
- tc_ty(cx, ty, cache) | TC::Nonsized
- }
- TyStr => TC::Nonsized,
+ TyRawPtr(_) => {
+ TC::None
+ }
- TyStruct(did, substs) => {
- let flds = struct_fields(cx, did, substs);
- let mut res =
- TypeContents::union(&flds[..],
- |f| tc_mt(cx, f.mt, cache));
+ TyRef(_, _) => {
+ TC::None
+ }
- if !lookup_repr_hints(cx, did).contains(&attr::ReprExtern) {
- res = res | TC::ReachesFfiUnsafe;
+ TyArray(ty, _) => {
+ tc_ty(cx, ty, cache)
}
- if ty::has_dtor(cx, did) {
- res = res | TC::OwnsDtor;
+ TySlice(ty) => {
+ tc_ty(cx, ty, cache)
}
- apply_lang_items(cx, did, res)
- }
+ TyStr => TC::None,
- TyClosure(did, substs) => {
- // FIXME(#14449): `borrowed_contents` below assumes `&mut` closure.
- let param_env = ty::empty_parameter_environment(cx);
- let upvars = closure_upvars(¶m_env, did, substs).unwrap();
- TypeContents::union(&upvars, |f| tc_ty(cx, &f.ty, cache))
- }
+ TyStruct(did, substs) => {
+ let flds = cx.struct_fields(did, substs);
+ let mut res =
+ TypeContents::union(&flds[..],
+ |f| tc_ty(cx, f.mt.ty, cache));
- TyTuple(ref tys) => {
- TypeContents::union(&tys[..],
- |ty| tc_ty(cx, *ty, cache))
- }
+ if cx.has_dtor(did) {
+ res = res | TC::OwnsDtor;
+ }
+ apply_lang_items(cx, did, res)
+ }
- TyEnum(did, substs) => {
- let variants = substd_enum_variants(cx, did, substs);
- let mut res =
- TypeContents::union(&variants[..], |variant| {
- TypeContents::union(&variant.args,
- |arg_ty| {
- tc_ty(cx, *arg_ty, cache)
- })
- });
+ TyClosure(_, ref substs) => {
+ TypeContents::union(&substs.upvar_tys, |ty| tc_ty(cx, &ty, cache))
+ }
- if ty::has_dtor(cx, did) {
- res = res | TC::OwnsDtor;
+ TyTuple(ref tys) => {
+ TypeContents::union(&tys[..],
+ |ty| tc_ty(cx, *ty, cache))
}
- if !variants.is_empty() {
- let repr_hints = lookup_repr_hints(cx, did);
- if repr_hints.len() > 1 {
- // this is an error later on, but this type isn't safe
- res = res | TC::ReachesFfiUnsafe;
+ TyEnum(did, substs) => {
+ let variants = cx.substd_enum_variants(did, substs);
+ let mut res =
+ TypeContents::union(&variants[..], |variant| {
+ TypeContents::union(&variant.args,
+ |arg_ty| {
+ tc_ty(cx, *arg_ty, cache)
+ })
+ });
+
+ if cx.has_dtor(did) {
+ res = res | TC::OwnsDtor;
}
- match repr_hints.get(0) {
- Some(h) => if !h.is_ffi_safe() {
- res = res | TC::ReachesFfiUnsafe;
- },
- // ReprAny
- None => {
- res = res | TC::ReachesFfiUnsafe;
-
- // We allow ReprAny enums if they are eligible for
- // the nullable pointer optimization and the
- // contained type is an `extern fn`
-
- if variants.len() == 2 {
- let mut data_idx = 0;
-
- if variants[0].args.is_empty() {
- data_idx = 1;
- }
-
- if variants[data_idx].args.len() == 1 {
- match variants[data_idx].args[0].sty {
- TyBareFn(..) => { res = res - TC::ReachesFfiUnsafe; }
- _ => { }
- }
- }
- }
- }
- }
+ apply_lang_items(cx, did, res)
}
+ TyProjection(..) |
+ TyParam(_) => {
+ TC::All
+ }
- apply_lang_items(cx, did, res)
- }
+ TyInfer(_) |
+ TyError => {
+ cx.sess.bug("asked to compute contents of error type");
+ }
+ };
- TyProjection(..) |
- TyParam(_) => {
- TC::All
- }
+ cache.insert(ty, result);
+ result
+ }
- TyInfer(_) |
- TyError => {
- cx.sess.bug("asked to compute contents of error type");
+ fn apply_lang_items(cx: &ctxt, did: ast::DefId, tc: TypeContents)
+ -> TypeContents {
+ if Some(did) == cx.lang_items.unsafe_cell_type() {
+ tc | TC::InteriorUnsafe
+ } else {
+ tc
}
- };
-
- cache.insert(ty, result);
- result
+ }
}
- fn tc_mt<'tcx>(cx: &ctxt<'tcx>,
- mt: mt<'tcx>,
- cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
+ fn impls_bound<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+ bound: ty::BuiltinBound,
+ span: Span)
+ -> bool
{
- let mc = TC::ReachesMutable.when(mt.mutbl == MutMutable);
- mc | tc_ty(cx, mt.ty, cache)
- }
+ let tcx = param_env.tcx;
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env.clone()), false);
- fn apply_lang_items(cx: &ctxt, did: ast::DefId, tc: TypeContents)
- -> TypeContents {
- if Some(did) == cx.lang_items.unsafe_cell_type() {
- tc | TC::InteriorUnsafe
- } else {
- tc
- }
- }
+ let is_impld = traits::type_known_to_meet_builtin_bound(&infcx,
+ self, bound, span);
- /// Type contents due to containing a reference with the region `region` and borrow kind `bk`
- fn borrowed_contents(region: ty::Region,
- mutbl: ast::Mutability)
- -> TypeContents {
- let b = match mutbl {
- ast::MutMutable => TC::ReachesMutable,
- ast::MutImmutable => TC::None,
- };
- b | (TC::ReachesBorrowed).when(region != ty::ReStatic)
- }
-
- fn object_contents(bounds: &ExistentialBounds) -> TypeContents {
- // These are the type contents of the (opaque) interior. We
- // make no assumptions (other than that it cannot have an
- // in-scope type parameter within, which makes no sense).
- let mut tc = TC::All - TC::InteriorParam;
- for bound in &bounds.builtin_bounds {
- tc = tc - match bound {
- BoundSync | BoundSend | BoundCopy => TC::None,
- BoundSized => TC::Nonsized,
- };
- }
- return tc;
+ debug!("Ty::impls_bound({:?}, {:?}) = {:?}",
+ self, bound, is_impld);
+
+ is_impld
}
-}
-fn type_impls_bound<'a,'tcx>(param_env: Option<&ParameterEnvironment<'a,'tcx>>,
- tcx: &ty::ctxt<'tcx>,
- ty: Ty<'tcx>,
- bound: ty::BuiltinBound,
- span: Span)
- -> bool
-{
- let pe;
- let param_env = match param_env {
- Some(e) => e,
- None => {
- pe = empty_parameter_environment(tcx);
- &pe
+ // FIXME (@jroesch): I made this public to use it, not sure if should be private
+ pub fn moves_by_default<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+ span: Span) -> bool {
+ if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) {
+ return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT);
}
- };
- let infcx = infer::new_infer_ctxt(tcx);
- let is_impld = traits::type_known_to_meet_builtin_bound(&infcx, param_env, ty, bound, span);
+ assert!(!self.needs_infer());
- debug!("type_impls_bound({:?}, {:?}) = {:?}",
- ty,
- bound,
- is_impld);
+ // Fast-path for primitive types
+ let result = match self.sty {
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+ TyRawPtr(..) | TyBareFn(..) | TyRef(_, TypeAndMut {
+ mutbl: ast::MutImmutable, ..
+ }) => Some(false),
- is_impld
-}
+ TyStr | TyBox(..) | TyRef(_, TypeAndMut {
+ mutbl: ast::MutMutable, ..
+ }) => Some(true),
-pub fn type_moves_by_default<'a,'tcx>(param_env: &ParameterEnvironment<'a,'tcx>,
- span: Span,
- ty: Ty<'tcx>)
- -> bool
-{
- if ty.flags.get().intersects(TypeFlags::MOVENESS_CACHED) {
- return ty.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT);
- }
-
- assert!(!ty::type_needs_infer(ty));
-
- // Fast-path for primitive types
- let result = match ty.sty {
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyRawPtr(..) | TyBareFn(..) | TyRef(_, mt {
- mutbl: ast::MutImmutable, ..
- }) => Some(false),
-
- TyStr | TyBox(..) | TyRef(_, mt {
- mutbl: ast::MutMutable, ..
- }) => Some(true),
-
- TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) |
- TyClosure(..) | TyEnum(..) | TyStruct(..) |
- TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None
- }.unwrap_or_else(|| !type_impls_bound(Some(param_env),
- param_env.tcx,
- ty,
- ty::BoundCopy,
- span));
-
- if !type_has_params(ty) && !type_has_self(ty) {
- ty.flags.set(ty.flags.get() | if result {
- TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT
- } else {
- TypeFlags::MOVENESS_CACHED
- });
- }
+ TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) |
+ TyClosure(..) | TyEnum(..) | TyStruct(..) |
+ TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None
+ }.unwrap_or_else(|| !self.impls_bound(param_env, ty::BoundCopy, span));
- result
-}
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT
+ } else {
+ TypeFlags::MOVENESS_CACHED
+ });
+ }
-#[inline]
-pub fn type_is_sized<'a,'tcx>(param_env: Option<&ParameterEnvironment<'a,'tcx>>,
- tcx: &ty::ctxt<'tcx>,
- span: Span,
- ty: Ty<'tcx>)
- -> bool
-{
- if ty.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) {
- let result = ty.flags.get().intersects(TypeFlags::IS_SIZED);
- return result;
+ result
}
- type_is_sized_uncached(param_env, tcx, span, ty)
-}
-
-fn type_is_sized_uncached<'a,'tcx>(param_env: Option<&ParameterEnvironment<'a,'tcx>>,
- tcx: &ty::ctxt<'tcx>,
- span: Span,
- ty: Ty<'tcx>) -> bool {
- assert!(!ty::type_needs_infer(ty));
-
- // Fast-path for primitive types
- let result = match ty.sty {
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyBox(..) | TyRawPtr(..) | TyRef(..) | TyBareFn(..) |
- TyArray(..) | TyTuple(..) | TyClosure(..) => Some(true),
-
- TyStr | TyTrait(..) | TySlice(_) => Some(false),
-
- TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) |
- TyInfer(..) | TyError => None
- }.unwrap_or_else(|| type_impls_bound(param_env, tcx, ty, ty::BoundSized, span));
+ #[inline]
+ pub fn is_sized<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+ span: Span) -> bool
+ {
+ if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) {
+ return self.flags.get().intersects(TypeFlags::IS_SIZED);
+ }
- if !type_has_params(ty) && !type_has_self(ty) {
- ty.flags.set(ty.flags.get() | if result {
- TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED
- } else {
- TypeFlags::SIZEDNESS_CACHED
- });
+ self.is_sized_uncached(param_env, span)
}
- result
-}
+ fn is_sized_uncached<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
+ span: Span) -> bool {
+ assert!(!self.needs_infer());
-pub fn is_ffi_safe<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
- !type_contents(cx, ty).intersects(TC::ReachesFfiUnsafe)
-}
+ // Fast-path for primitive types
+ let result = match self.sty {
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+ TyBox(..) | TyRawPtr(..) | TyRef(..) | TyBareFn(..) |
+ TyArray(..) | TyTuple(..) | TyClosure(..) => Some(true),
-// True if instantiating an instance of `r_ty` requires an instance of `r_ty`.
-pub fn is_instantiable<'tcx>(cx: &ctxt<'tcx>, r_ty: Ty<'tcx>) -> bool {
- fn type_requires<'tcx>(cx: &ctxt<'tcx>, seen: &mut Vec<DefId>,
- r_ty: Ty<'tcx>, ty: Ty<'tcx>) -> bool {
- debug!("type_requires({:?}, {:?})?",
- r_ty, ty);
+ TyStr | TyTrait(..) | TySlice(_) => Some(false),
- let r = r_ty == ty || subtypes_require(cx, seen, r_ty, ty);
+ TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) |
+ TyInfer(..) | TyError => None
+ }.unwrap_or_else(|| self.impls_bound(param_env, ty::BoundSized, span));
- debug!("type_requires({:?}, {:?})? {:?}",
- r_ty, ty, r);
- return r;
- }
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED
+ } else {
+ TypeFlags::SIZEDNESS_CACHED
+ });
+ }
- fn subtypes_require<'tcx>(cx: &ctxt<'tcx>, seen: &mut Vec<DefId>,
- r_ty: Ty<'tcx>, ty: Ty<'tcx>) -> bool {
- debug!("subtypes_require({:?}, {:?})?",
- r_ty, ty);
+ result
+ }
- let r = match ty.sty {
- // fixed length vectors need special treatment compared to
- // normal vectors, since they don't necessarily have the
- // possibility to have length zero.
- TyArray(_, 0) => false, // don't need no contents
- TyArray(ty, _) => type_requires(cx, seen, r_ty, ty),
+ // True if instantiating an instance of `r_ty` requires an instance of `r_ty`.
+ pub fn is_instantiable(&'tcx self, cx: &ctxt<'tcx>) -> bool {
+ fn type_requires<'tcx>(cx: &ctxt<'tcx>, seen: &mut Vec<DefId>,
+ r_ty: Ty<'tcx>, ty: Ty<'tcx>) -> bool {
+ debug!("type_requires({:?}, {:?})?",
+ r_ty, ty);
+
+ let r = r_ty == ty || subtypes_require(cx, seen, r_ty, ty);
+
+ debug!("type_requires({:?}, {:?})? {:?}",
+ r_ty, ty, r);
+ return r;
+ }
+
+ fn subtypes_require<'tcx>(cx: &ctxt<'tcx>, seen: &mut Vec<DefId>,
+ r_ty: Ty<'tcx>, ty: Ty<'tcx>) -> bool {
+ debug!("subtypes_require({:?}, {:?})?",
+ r_ty, ty);
+
+ let r = match ty.sty {
+ // fixed length vectors need special treatment compared to
+ // normal vectors, since they don't necessarily have the
+ // possibility to have length zero.
+ TyArray(_, 0) => false, // don't need no contents
+ TyArray(ty, _) => type_requires(cx, seen, r_ty, ty),
+
+ TyBool |
+ TyChar |
+ TyInt(_) |
+ TyUint(_) |
+ TyFloat(_) |
+ TyStr |
+ TyBareFn(..) |
+ TyParam(_) |
+ TyProjection(_) |
+ TySlice(_) => {
+ false
+ }
+ TyBox(typ) => {
+ type_requires(cx, seen, r_ty, typ)
+ }
+ TyRef(_, ref mt) => {
+ type_requires(cx, seen, r_ty, mt.ty)
+ }
- TyBool |
- TyChar |
- TyInt(_) |
- TyUint(_) |
- TyFloat(_) |
- TyStr |
- TyBareFn(..) |
- TyParam(_) |
- TyProjection(_) |
- TySlice(_) => {
- false
- }
- TyBox(typ) => {
- type_requires(cx, seen, r_ty, typ)
- }
- TyRef(_, ref mt) => {
- type_requires(cx, seen, r_ty, mt.ty)
- }
+ TyRawPtr(..) => {
+ false // unsafe ptrs can always be NULL
+ }
- TyRawPtr(..) => {
- false // unsafe ptrs can always be NULL
- }
+ TyTrait(..) => {
+ false
+ }
- TyTrait(..) => {
- false
- }
+ TyStruct(ref did, _) if seen.contains(did) => {
+ false
+ }
- TyStruct(ref did, _) if seen.contains(did) => {
- false
- }
+ TyStruct(did, substs) => {
+ seen.push(did);
+ let fields = cx.struct_fields(did, substs);
+ let r = fields.iter().any(|f| type_requires(cx, seen, r_ty, f.mt.ty));
+ seen.pop().unwrap();
+ r
+ }
- TyStruct(did, substs) => {
- seen.push(did);
- let fields = struct_fields(cx, did, substs);
- let r = fields.iter().any(|f| type_requires(cx, seen, r_ty, f.mt.ty));
- seen.pop().unwrap();
- r
- }
+ TyError |
+ TyInfer(_) |
+ TyClosure(..) => {
+ // this check is run on type definitions, so we don't expect to see
+ // inference by-products or closure types
+ cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty))
+ }
- TyError |
- TyInfer(_) |
- TyClosure(..) => {
- // this check is run on type definitions, so we don't expect to see
- // inference by-products or closure types
- cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty))
- }
+ TyTuple(ref ts) => {
+ ts.iter().any(|ty| type_requires(cx, seen, r_ty, *ty))
+ }
- TyTuple(ref ts) => {
- ts.iter().any(|ty| type_requires(cx, seen, r_ty, *ty))
- }
+ TyEnum(ref did, _) if seen.contains(did) => {
+ false
+ }
- TyEnum(ref did, _) if seen.contains(did) => {
- false
- }
+ TyEnum(did, substs) => {
+ seen.push(did);
+ let vs = cx.enum_variants(did);
+ let r = !vs.is_empty() && vs.iter().all(|variant| {
+ variant.args.iter().any(|aty| {
+ let sty = aty.subst(cx, substs);
+ type_requires(cx, seen, r_ty, sty)
+ })
+ });
+ seen.pop().unwrap();
+ r
+ }
+ };
- TyEnum(did, substs) => {
- seen.push(did);
- let vs = enum_variants(cx, did);
- let r = !vs.is_empty() && vs.iter().all(|variant| {
- variant.args.iter().any(|aty| {
- let sty = aty.subst(cx, substs);
- type_requires(cx, seen, r_ty, sty)
- })
- });
- seen.pop().unwrap();
- r
- }
- };
+ debug!("subtypes_require({:?}, {:?})? {:?}",
+ r_ty, ty, r);
- debug!("subtypes_require({:?}, {:?})? {:?}",
- r_ty, ty, r);
+ return r;
+ }
- return r;
+ let mut seen = Vec::new();
+ !subtypes_require(cx, &mut seen, self, self)
}
-
- let mut seen = Vec::new();
- !subtypes_require(cx, &mut seen, r_ty, r_ty)
}
/// Describes whether a type is representable. For types that are not
SelfRecursive,
}
-/// Check whether a type is representable. This means it cannot contain unboxed
-/// structural recursion. This check is needed for structs and enums.
-pub fn is_type_representable<'tcx>(cx: &ctxt<'tcx>, sp: Span, ty: Ty<'tcx>)
- -> Representability {
-
- // Iterate until something non-representable is found
- fn find_nonrepresentable<'tcx, It: Iterator<Item=Ty<'tcx>>>(cx: &ctxt<'tcx>, sp: Span,
- seen: &mut Vec<Ty<'tcx>>,
- iter: It)
- -> Representability {
- iter.fold(Representable,
- |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty)))
- }
-
- fn are_inner_types_recursive<'tcx>(cx: &ctxt<'tcx>, sp: Span,
- seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
- -> Representability {
- match ty.sty {
- TyTuple(ref ts) => {
- find_nonrepresentable(cx, sp, seen, ts.iter().cloned())
- }
- // Fixed-length vectors.
- // FIXME(#11924) Behavior undecided for zero-length vectors.
- TyArray(ty, _) => {
- is_type_structurally_recursive(cx, sp, seen, ty)
- }
- TyStruct(did, substs) => {
- let fields = struct_fields(cx, did, substs);
- find_nonrepresentable(cx, sp, seen, fields.iter().map(|f| f.mt.ty))
- }
- TyEnum(did, substs) => {
- let vs = enum_variants(cx, did);
- let iter = vs.iter()
- .flat_map(|variant| &variant.args)
- .map(|aty| { aty.subst_spanned(cx, substs, Some(sp)) });
+impl<'tcx> TyS<'tcx> {
+ /// Check whether a type is representable. This means it cannot contain unboxed
+ /// structural recursion. This check is needed for structs and enums.
+ pub fn is_representable(&'tcx self, cx: &ctxt<'tcx>, sp: Span) -> Representability {
+
+ // Iterate until something non-representable is found
+ fn find_nonrepresentable<'tcx, It: Iterator<Item=Ty<'tcx>>>(cx: &ctxt<'tcx>, sp: Span,
+ seen: &mut Vec<Ty<'tcx>>,
+ iter: It)
+ -> Representability {
+ iter.fold(Representable,
+ |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty)))
+ }
+
+ fn are_inner_types_recursive<'tcx>(cx: &ctxt<'tcx>, sp: Span,
+ seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
+ -> Representability {
+ match ty.sty {
+ TyTuple(ref ts) => {
+ find_nonrepresentable(cx, sp, seen, ts.iter().cloned())
+ }
+ // Fixed-length vectors.
+ // FIXME(#11924) Behavior undecided for zero-length vectors.
+ TyArray(ty, _) => {
+ is_type_structurally_recursive(cx, sp, seen, ty)
+ }
+ TyStruct(did, substs) => {
+ let fields = cx.struct_fields(did, substs);
+ find_nonrepresentable(cx, sp, seen, fields.iter().map(|f| f.mt.ty))
+ }
+ TyEnum(did, substs) => {
+ let vs = cx.enum_variants(did);
+ let iter = vs.iter()
+ .flat_map(|variant| &variant.args)
+ .map(|aty| { aty.subst_spanned(cx, substs, Some(sp)) });
- find_nonrepresentable(cx, sp, seen, iter)
- }
- TyClosure(..) => {
- // this check is run on type definitions, so we don't expect
- // to see closure types
- cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty))
+ find_nonrepresentable(cx, sp, seen, iter)
+ }
+ TyClosure(..) => {
+ // this check is run on type definitions, so we don't expect
+ // to see closure types
+ cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty))
+ }
+ _ => Representable,
}
- _ => Representable,
}
- }
- fn same_struct_or_enum_def_id(ty: Ty, did: DefId) -> bool {
- match ty.sty {
- TyStruct(ty_did, _) | TyEnum(ty_did, _) => {
- ty_did == did
+ fn same_struct_or_enum_def_id(ty: Ty, did: DefId) -> bool {
+ match ty.sty {
+ TyStruct(ty_did, _) | TyEnum(ty_did, _) => {
+ ty_did == did
+ }
+ _ => false
}
- _ => false
}
- }
- fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
- match (&a.sty, &b.sty) {
- (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) |
- (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => {
- if did_a != did_b {
- return false;
- }
+ fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
+ match (&a.sty, &b.sty) {
+ (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) |
+ (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => {
+ if did_a != did_b {
+ return false;
+ }
- let types_a = substs_a.types.get_slice(subst::TypeSpace);
- let types_b = substs_b.types.get_slice(subst::TypeSpace);
+ let types_a = substs_a.types.get_slice(subst::TypeSpace);
+ let types_b = substs_b.types.get_slice(subst::TypeSpace);
- let mut pairs = types_a.iter().zip(types_b);
+ let mut pairs = types_a.iter().zip(types_b);
- pairs.all(|(&a, &b)| same_type(a, b))
- }
- _ => {
- a == b
+ pairs.all(|(&a, &b)| same_type(a, b))
+ }
+ _ => {
+ a == b
+ }
}
}
- }
- // Does the type `ty` directly (without indirection through a pointer)
- // contain any types on stack `seen`?
- fn is_type_structurally_recursive<'tcx>(cx: &ctxt<'tcx>, sp: Span,
- seen: &mut Vec<Ty<'tcx>>,
- ty: Ty<'tcx>) -> Representability {
- debug!("is_type_structurally_recursive: {:?}", ty);
+ // Does the type `ty` directly (without indirection through a pointer)
+ // contain any types on stack `seen`?
+ fn is_type_structurally_recursive<'tcx>(cx: &ctxt<'tcx>, sp: Span,
+ seen: &mut Vec<Ty<'tcx>>,
+ ty: Ty<'tcx>) -> Representability {
+ debug!("is_type_structurally_recursive: {:?}", ty);
- match ty.sty {
- TyStruct(did, _) | TyEnum(did, _) => {
- {
- // Iterate through stack of previously seen types.
- let mut iter = seen.iter();
-
- // The first item in `seen` is the type we are actually curious about.
- // We want to return SelfRecursive if this type contains itself.
- // It is important that we DON'T take generic parameters into account
- // for this check, so that Bar<T> in this example counts as SelfRecursive:
- //
- // struct Foo;
- // struct Bar<T> { x: Bar<Foo> }
+ match ty.sty {
+ TyStruct(did, _) | TyEnum(did, _) => {
+ {
+ // Iterate through stack of previously seen types.
+ let mut iter = seen.iter();
+
+ // The first item in `seen` is the type we are actually curious about.
+ // We want to return SelfRecursive if this type contains itself.
+ // It is important that we DON'T take generic parameters into account
+ // for this check, so that Bar<T> in this example counts as SelfRecursive:
+ //
+ // struct Foo;
+ // struct Bar<T> { x: Bar<Foo> }
+
+ match iter.next() {
+ Some(&seen_type) => {
+ if same_struct_or_enum_def_id(seen_type, did) {
+ debug!("SelfRecursive: {:?} contains {:?}",
+ seen_type,
+ ty);
+ return SelfRecursive;
+ }
+ }
+ None => {}
+ }
- match iter.next() {
- Some(&seen_type) => {
- if same_struct_or_enum_def_id(seen_type, did) {
- debug!("SelfRecursive: {:?} contains {:?}",
+ // We also need to know whether the first item contains other types
+ // that are structurally recursive. If we don't catch this case, we
+ // will recurse infinitely for some inputs.
+ //
+ // It is important that we DO take generic parameters into account
+ // here, so that code like this is considered SelfRecursive, not
+ // ContainsRecursive:
+ //
+ // struct Foo { Option<Option<Foo>> }
+
+ for &seen_type in iter {
+ if same_type(ty, seen_type) {
+ debug!("ContainsRecursive: {:?} contains {:?}",
seen_type,
ty);
- return SelfRecursive;
+ return ContainsRecursive;
}
}
- None => {}
}
- // We also need to know whether the first item contains other types that
- // are structurally recursive. If we don't catch this case, we will recurse
- // infinitely for some inputs.
- //
- // It is important that we DO take generic parameters into account here,
- // so that code like this is considered SelfRecursive, not ContainsRecursive:
- //
- // struct Foo { Option<Option<Foo>> }
-
- for &seen_type in iter {
- if same_type(ty, seen_type) {
- debug!("ContainsRecursive: {:?} contains {:?}",
- seen_type,
- ty);
- return ContainsRecursive;
- }
- }
+ // For structs and enums, track all previously seen types by pushing them
+ // onto the 'seen' stack.
+ seen.push(ty);
+ let out = are_inner_types_recursive(cx, sp, seen, ty);
+ seen.pop();
+ out
+ }
+ _ => {
+ // No need to push in other cases.
+ are_inner_types_recursive(cx, sp, seen, ty)
}
-
- // For structs and enums, track all previously seen types by pushing them
- // onto the 'seen' stack.
- seen.push(ty);
- let out = are_inner_types_recursive(cx, sp, seen, ty);
- seen.pop();
- out
- }
- _ => {
- // No need to push in other cases.
- are_inner_types_recursive(cx, sp, seen, ty)
}
}
- }
-
- debug!("is_type_representable: {:?}", ty);
- // To avoid a stack overflow when checking an enum variant or struct that
- // contains a different, structurally recursive type, maintain a stack
- // of seen types and check recursion for each of them (issues #3008, #3779).
- let mut seen: Vec<Ty> = Vec::new();
- let r = is_type_structurally_recursive(cx, sp, &mut seen, ty);
- debug!("is_type_representable: {:?} is {:?}", ty, r);
- r
-}
+ debug!("is_type_representable: {:?}", self);
-pub fn type_is_trait(ty: Ty) -> bool {
- match ty.sty {
- TyTrait(..) => true,
- _ => false
+ // To avoid a stack overflow when checking an enum variant or struct that
+ // contains a different, structurally recursive type, maintain a stack
+ // of seen types and check recursion for each of them (issues #3008, #3779).
+ let mut seen: Vec<Ty> = Vec::new();
+ let r = is_type_structurally_recursive(cx, sp, &mut seen, self);
+ debug!("is_type_representable: {:?} is {:?}", self, r);
+ r
}
-}
-pub fn type_is_integral(ty: Ty) -> bool {
- match ty.sty {
- TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
- _ => false
+ pub fn is_trait(&self) -> bool {
+ match self.sty {
+ TyTrait(..) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_fresh(ty: Ty) -> bool {
- match ty.sty {
- TyInfer(FreshTy(_)) => true,
- TyInfer(FreshIntTy(_)) => true,
- TyInfer(FreshFloatTy(_)) => true,
- _ => false
+ pub fn is_integral(&self) -> bool {
+ match self.sty {
+ TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_uint(ty: Ty) -> bool {
- match ty.sty {
- TyInfer(IntVar(_)) | TyUint(ast::TyUs) => true,
- _ => false
+ pub fn is_fresh(&self) -> bool {
+ match self.sty {
+ TyInfer(FreshTy(_)) => true,
+ TyInfer(FreshIntTy(_)) => true,
+ TyInfer(FreshFloatTy(_)) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_char(ty: Ty) -> bool {
- match ty.sty {
- TyChar => true,
- _ => false
+ pub fn is_uint(&self) -> bool {
+ match self.sty {
+ TyInfer(IntVar(_)) | TyUint(ast::TyUs) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_bare_fn(ty: Ty) -> bool {
- match ty.sty {
- TyBareFn(..) => true,
- _ => false
+ pub fn is_char(&self) -> bool {
+ match self.sty {
+ TyChar => true,
+ _ => false
+ }
}
-}
-pub fn type_is_bare_fn_item(ty: Ty) -> bool {
- match ty.sty {
- TyBareFn(Some(_), _) => true,
- _ => false
+ pub fn is_bare_fn(&self) -> bool {
+ match self.sty {
+ TyBareFn(..) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_fp(ty: Ty) -> bool {
- match ty.sty {
- TyInfer(FloatVar(_)) | TyFloat(_) => true,
- _ => false
+ pub fn is_bare_fn_item(&self) -> bool {
+ match self.sty {
+ TyBareFn(Some(_), _) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_numeric(ty: Ty) -> bool {
- return type_is_integral(ty) || type_is_fp(ty);
-}
-
-pub fn type_is_signed(ty: Ty) -> bool {
- match ty.sty {
- TyInt(_) => true,
- _ => false
+ pub fn is_fp(&self) -> bool {
+ match self.sty {
+ TyInfer(FloatVar(_)) | TyFloat(_) => true,
+ _ => false
+ }
}
-}
-pub fn type_is_machine(ty: Ty) -> bool {
- match ty.sty {
- TyInt(ast::TyIs) | TyUint(ast::TyUs) => false,
- TyInt(..) | TyUint(..) | TyFloat(..) => true,
- _ => false
+ pub fn is_numeric(&self) -> bool {
+ self.is_integral() || self.is_fp()
}
-}
-// Whether a type is enum like, that is an enum type with only nullary
-// constructors
-pub fn type_is_c_like_enum(cx: &ctxt, ty: Ty) -> bool {
- match ty.sty {
- TyEnum(did, _) => {
- let variants = enum_variants(cx, did);
- if variants.is_empty() {
- false
- } else {
- variants.iter().all(|v| v.args.is_empty())
- }
+ pub fn is_signed(&self) -> bool {
+ match self.sty {
+ TyInt(_) => true,
+ _ => false
}
- _ => false
- }
-}
-
-// Returns the type and mutability of *ty.
-//
-// The parameter `explicit` indicates if this is an *explicit* dereference.
-// Some types---notably unsafe ptrs---can only be dereferenced explicitly.
-pub fn deref<'tcx>(ty: Ty<'tcx>, explicit: bool) -> Option<mt<'tcx>> {
- match ty.sty {
- TyBox(ty) => {
- Some(mt {
- ty: ty,
- mutbl: ast::MutImmutable,
- })
- },
- TyRef(_, mt) => Some(mt),
- TyRawPtr(mt) if explicit => Some(mt),
- _ => None
- }
-}
-
-pub fn type_content<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> {
- match ty.sty {
- TyBox(ty) => ty,
- TyRef(_, mt) | TyRawPtr(mt) => mt.ty,
- _ => ty
- }
-}
-
-// Returns the type of ty[i]
-pub fn index<'tcx>(ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
- match ty.sty {
- TyArray(ty, _) | TySlice(ty) => Some(ty),
- _ => None
- }
-}
-
-// Returns the type of elements contained within an 'array-like' type.
-// This is exactly the same as the above, except it supports strings,
-// which can't actually be indexed.
-pub fn array_element_ty<'tcx>(tcx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
- match ty.sty {
- TyArray(ty, _) | TySlice(ty) => Some(ty),
- TyStr => Some(tcx.types.u8),
- _ => None
}
-}
-
-/// Returns the type of element at index `i` in tuple or tuple-like type `t`.
-/// For an enum `t`, `variant` is None only if `t` is a univariant enum.
-pub fn positional_element_ty<'tcx>(cx: &ctxt<'tcx>,
- ty: Ty<'tcx>,
- i: usize,
- variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
-
- match (&ty.sty, variant) {
- (&TyTuple(ref v), None) => v.get(i).cloned(),
-
-
- (&TyStruct(def_id, substs), None) => lookup_struct_fields(cx, def_id)
- .get(i)
- .map(|&t|lookup_item_type(cx, t.id).ty.subst(cx, substs)),
-
- (&TyEnum(def_id, substs), Some(variant_def_id)) => {
- let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
- variant_info.args.get(i).map(|t|t.subst(cx, substs))
- }
- (&TyEnum(def_id, substs), None) => {
- assert!(enum_is_univariant(cx, def_id));
- let enum_variants = enum_variants(cx, def_id);
- let variant_info = &(*enum_variants)[0];
- variant_info.args.get(i).map(|t|t.subst(cx, substs))
+ pub fn is_machine(&self) -> bool {
+ match self.sty {
+ TyInt(ast::TyIs) | TyUint(ast::TyUs) => false,
+ TyInt(..) | TyUint(..) | TyFloat(..) => true,
+ _ => false
}
-
- _ => None
}
-}
-
-/// Returns the type of element at field `n` in struct or struct-like type `t`.
-/// For an enum `t`, `variant` must be some def id.
-pub fn named_element_ty<'tcx>(cx: &ctxt<'tcx>,
- ty: Ty<'tcx>,
- n: ast::Name,
- variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
- match (&ty.sty, variant) {
- (&TyStruct(def_id, substs), None) => {
- let r = lookup_struct_fields(cx, def_id);
- r.iter().find(|f| f.name == n)
- .map(|&f| lookup_field_type(cx, def_id, f.id, substs))
- }
- (&TyEnum(def_id, substs), Some(variant_def_id)) => {
- let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
- variant_info.arg_names.as_ref()
- .expect("must have struct enum variant if accessing a named fields")
- .iter().zip(&variant_info.args)
- .find(|&(&name, _)| name == n)
- .map(|(_name, arg_t)| arg_t.subst(cx, substs))
+ // Whether a type is enum like, that is an enum type with only nullary
+ // constructors
+ pub fn is_c_like_enum(&self, cx: &ctxt) -> bool {
+ match self.sty {
+ TyEnum(did, _) => {
+ let variants = cx.enum_variants(did);
+ if variants.is_empty() {
+ false
+ } else {
+ variants.iter().all(|v| v.args.is_empty())
+ }
+ }
+ _ => false
}
- _ => None
- }
-}
-
-pub fn node_id_to_type<'tcx>(cx: &ctxt<'tcx>, id: ast::NodeId) -> Ty<'tcx> {
- match node_id_to_type_opt(cx, id) {
- Some(ty) => ty,
- None => cx.sess.bug(
- &format!("node_id_to_type: no type for node `{}`",
- cx.map.node_to_string(id)))
- }
-}
-
-pub fn node_id_to_type_opt<'tcx>(cx: &ctxt<'tcx>, id: ast::NodeId) -> Option<Ty<'tcx>> {
- match cx.node_types.borrow().get(&id) {
- Some(&ty) => Some(ty),
- None => None
- }
-}
-
-pub fn node_id_item_substs<'tcx>(cx: &ctxt<'tcx>, id: ast::NodeId) -> ItemSubsts<'tcx> {
- match cx.item_substs.borrow().get(&id) {
- None => ItemSubsts::empty(),
- Some(ts) => ts.clone(),
}
-}
-pub fn fn_is_variadic(fty: Ty) -> bool {
- match fty.sty {
- TyBareFn(_, ref f) => f.sig.0.variadic,
- ref s => {
- panic!("fn_is_variadic() called on non-fn type: {:?}", s)
+ // Returns the type and mutability of *ty.
+ //
+ // The parameter `explicit` indicates if this is an *explicit* dereference.
+ // Some types---notably unsafe ptrs---can only be dereferenced explicitly.
+ pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
+ match self.sty {
+ TyBox(ty) => {
+ Some(TypeAndMut {
+ ty: ty,
+ mutbl: ast::MutImmutable,
+ })
+ },
+ TyRef(_, mt) => Some(mt),
+ TyRawPtr(mt) if explicit => Some(mt),
+ _ => None
}
}
-}
-pub fn ty_fn_sig<'tcx>(fty: Ty<'tcx>) -> &'tcx PolyFnSig<'tcx> {
- match fty.sty {
- TyBareFn(_, ref f) => &f.sig,
- ref s => {
- panic!("ty_fn_sig() called on non-fn type: {:?}", s)
+ // Returns the type of ty[i]
+ pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
+ match self.sty {
+ TyArray(ty, _) | TySlice(ty) => Some(ty),
+ _ => None
}
}
-}
-/// Returns the ABI of the given function.
-pub fn ty_fn_abi(fty: Ty) -> abi::Abi {
- match fty.sty {
- TyBareFn(_, ref f) => f.abi,
- _ => panic!("ty_fn_abi() called on non-fn type"),
+ pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> {
+ match self.sty {
+ TyBareFn(_, ref f) => &f.sig,
+ _ => panic!("Ty::fn_sig() called on non-fn type: {:?}", self)
+ }
}
-}
-// Type accessors for substructures of types
-pub fn ty_fn_args<'tcx>(fty: Ty<'tcx>) -> ty::Binder<Vec<Ty<'tcx>>> {
- ty_fn_sig(fty).inputs()
-}
-
-pub fn ty_fn_ret<'tcx>(fty: Ty<'tcx>) -> Binder<FnOutput<'tcx>> {
- match fty.sty {
- TyBareFn(_, ref f) => f.sig.output(),
- ref s => {
- panic!("ty_fn_ret() called on non-fn type: {:?}", s)
+ /// Returns the ABI of the given function.
+ pub fn fn_abi(&self) -> abi::Abi {
+ match self.sty {
+ TyBareFn(_, ref f) => f.abi,
+ _ => panic!("Ty::fn_abi() called on non-fn type"),
}
}
-}
-pub fn is_fn_ty(fty: Ty) -> bool {
- match fty.sty {
- TyBareFn(..) => true,
- _ => false
+ // Type accessors for substructures of types
+ pub fn fn_args(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
+ self.fn_sig().inputs()
}
-}
-pub fn ty_region(tcx: &ctxt,
- span: Span,
- ty: Ty) -> Region {
- match ty.sty {
- TyRef(r, _) => *r,
- ref s => {
- tcx.sess.span_bug(
- span,
- &format!("ty_region() invoked on an inappropriate ty: {:?}",
- s));
- }
+ pub fn fn_ret(&self) -> Binder<FnOutput<'tcx>> {
+ self.fn_sig().output()
}
-}
-
-pub fn free_region_from_def(outlives_extent: region::DestructionScopeData,
- def: &RegionParameterDef)
- -> ty::Region
-{
- let ret =
- ty::ReFree(ty::FreeRegion { scope: outlives_extent,
- bound_region: ty::BrNamed(def.def_id,
- def.name) });
- debug!("free_region_from_def returns {:?}", ret);
- ret
-}
-
-// Returns the type of a pattern as a monotype. Like @expr_ty, this function
-// doesn't provide type parameter substitutions.
-pub fn pat_ty<'tcx>(cx: &ctxt<'tcx>, pat: &ast::Pat) -> Ty<'tcx> {
- return node_id_to_type(cx, pat.id);
-}
-pub fn pat_ty_opt<'tcx>(cx: &ctxt<'tcx>, pat: &ast::Pat) -> Option<Ty<'tcx>> {
- return node_id_to_type_opt(cx, pat.id);
-}
-
-
-// Returns the type of an expression as a monotype.
-//
-// NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
-// some cases, we insert `AutoAdjustment` annotations such as auto-deref or
-// auto-ref. The type returned by this function does not consider such
-// adjustments. See `expr_ty_adjusted()` instead.
-//
-// NB (2): This type doesn't provide type parameter substitutions; e.g. if you
-// ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
-// instead of "fn(ty) -> T with T = isize".
-pub fn expr_ty<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Ty<'tcx> {
- return node_id_to_type(cx, expr.id);
-}
-
-pub fn expr_ty_opt<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Option<Ty<'tcx>> {
- return node_id_to_type_opt(cx, expr.id);
-}
-/// Returns the type of `expr`, considering any `AutoAdjustment`
-/// entry recorded for that expression.
-///
-/// It would almost certainly be better to store the adjusted ty in with
-/// the `AutoAdjustment`, but I opted not to do this because it would
-/// require serializing and deserializing the type and, although that's not
-/// hard to do, I just hate that code so much I didn't want to touch it
-/// unless it was to fix it properly, which seemed a distraction from the
-/// thread at hand! -nmatsakis
-pub fn expr_ty_adjusted<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Ty<'tcx> {
- adjust_ty(cx, expr.span, expr.id, expr_ty(cx, expr),
- cx.adjustments.borrow().get(&expr.id),
- |method_call| cx.method_map.borrow().get(&method_call).map(|method| method.ty))
-}
-
-pub fn expr_span(cx: &ctxt, id: NodeId) -> Span {
- match cx.map.find(id) {
- Some(ast_map::NodeExpr(e)) => {
- e.span
- }
- Some(f) => {
- cx.sess.bug(&format!("Node id {} is not an expr: {:?}",
- id,
- f));
- }
- None => {
- cx.sess.bug(&format!("Node id {} is not present \
- in the node map", id));
+ pub fn is_fn(&self) -> bool {
+ match self.sty {
+ TyBareFn(..) => true,
+ _ => false
}
}
-}
-pub fn local_var_name_str(cx: &ctxt, id: NodeId) -> InternedString {
- match cx.map.find(id) {
- Some(ast_map::NodeLocal(pat)) => {
- match pat.node {
- ast::PatIdent(_, ref path1, _) => {
- token::get_ident(path1.node)
- }
- _ => {
- cx.sess.bug(
- &format!("Variable id {} maps to {:?}, not local",
- id,
- pat));
- }
- }
- }
- r => {
- cx.sess.bug(&format!("Variable id {} maps to {:?}, not local",
- id,
- r));
+ /// See `expr_ty_adjusted`
+ pub fn adjust<F>(&'tcx self, cx: &ctxt<'tcx>,
+ span: Span,
+ expr_id: ast::NodeId,
+ adjustment: Option<&AutoAdjustment<'tcx>>,
+ mut method_type: F)
+ -> Ty<'tcx> where
+ F: FnMut(MethodCall) -> Option<Ty<'tcx>>,
+ {
+ if let TyError = self.sty {
+ return self;
}
- }
-}
-/// See `expr_ty_adjusted`
-pub fn adjust_ty<'tcx, F>(cx: &ctxt<'tcx>,
- span: Span,
- expr_id: ast::NodeId,
- unadjusted_ty: Ty<'tcx>,
- adjustment: Option<&AutoAdjustment<'tcx>>,
- mut method_type: F)
- -> Ty<'tcx> where
- F: FnMut(MethodCall) -> Option<Ty<'tcx>>,
-{
- if let TyError = unadjusted_ty.sty {
- return unadjusted_ty;
- }
-
- return match adjustment {
- Some(adjustment) => {
- match *adjustment {
- AdjustReifyFnPointer => {
- match unadjusted_ty.sty {
- ty::TyBareFn(Some(_), b) => {
- ty::mk_bare_fn(cx, None, b)
- }
- _ => {
- cx.sess.bug(
- &format!("AdjustReifyFnPointer adjustment on non-fn-item: \
- {:?}", unadjusted_ty));
+ return match adjustment {
+ Some(adjustment) => {
+ match *adjustment {
+ AdjustReifyFnPointer => {
+ match self.sty {
+ ty::TyBareFn(Some(_), b) => {
+ cx.mk_fn(None, b)
+ }
+ _ => {
+ cx.sess.bug(
+ &format!("AdjustReifyFnPointer adjustment on non-fn-item: \
+ {:?}", self));
+ }
}
}
- }
- AdjustUnsafeFnPointer => {
- match unadjusted_ty.sty {
- ty::TyBareFn(None, b) => cx.safe_to_unsafe_fn_ty(b),
- ref b => {
- cx.sess.bug(
- &format!("AdjustReifyFnPointer adjustment on non-fn-item: \
- {:?}",
- b));
+ AdjustUnsafeFnPointer => {
+ match self.sty {
+ ty::TyBareFn(None, b) => cx.safe_to_unsafe_fn_ty(b),
+ ref b => {
+ cx.sess.bug(
+                                &format!("AdjustUnsafeFnPointer adjustment on non-fn-item: \
+ {:?}",
+ b));
+ }
}
- }
- }
-
- AdjustDerefRef(ref adj) => {
- let mut adjusted_ty = unadjusted_ty;
-
- if !ty::type_is_error(adjusted_ty) {
- for i in 0..adj.autoderefs {
- let method_call = MethodCall::autoderef(expr_id, i as u32);
- match method_type(method_call) {
- Some(method_ty) => {
- // Overloaded deref operators have all late-bound
- // regions fully instantiated and coverge.
- let fn_ret =
- ty::no_late_bound_regions(cx,
- &ty_fn_ret(method_ty)).unwrap();
- adjusted_ty = fn_ret.unwrap();
+ }
+
+ AdjustDerefRef(ref adj) => {
+ let mut adjusted_ty = self;
+
+ if !adjusted_ty.references_error() {
+ for i in 0..adj.autoderefs {
+ let method_call = MethodCall::autoderef(expr_id, i as u32);
+ match method_type(method_call) {
+ Some(method_ty) => {
+ // Overloaded deref operators have all late-bound
+                                // regions fully instantiated and converge.
+ let fn_ret =
+ cx.no_late_bound_regions(&method_ty.fn_ret()).unwrap();
+ adjusted_ty = fn_ret.unwrap();
+ }
+ None => {}
}
- None => {}
- }
- match deref(adjusted_ty, true) {
- Some(mt) => { adjusted_ty = mt.ty; }
- None => {
- cx.sess.span_bug(
- span,
- &format!("the {}th autoderef failed: {}",
- i,
- adjusted_ty)
- );
+ match adjusted_ty.builtin_deref(true) {
+ Some(mt) => { adjusted_ty = mt.ty; }
+ None => {
+ cx.sess.span_bug(
+ span,
+ &format!("the {}th autoderef failed: {}",
+ i,
+ adjusted_ty)
+ );
+ }
}
}
}
- }
- if let Some(target) = adj.unsize {
- target
- } else {
- adjust_ty_for_autoref(cx, adjusted_ty, adj.autoref)
+ if let Some(target) = adj.unsize {
+ target
+ } else {
+ adjusted_ty.adjust_for_autoref(cx, adj.autoref)
+ }
}
}
}
- }
- None => unadjusted_ty
- };
-}
-
-pub fn adjust_ty_for_autoref<'tcx>(cx: &ctxt<'tcx>,
- ty: Ty<'tcx>,
- autoref: Option<AutoRef<'tcx>>)
- -> Ty<'tcx> {
- match autoref {
- None => ty,
- Some(AutoPtr(r, m)) => {
- mk_rptr(cx, r, mt { ty: ty, mutbl: m })
- }
- Some(AutoUnsafe(m)) => {
- mk_ptr(cx, mt { ty: ty, mutbl: m })
- }
+ None => self
+ };
}
-}
-pub fn resolve_expr(tcx: &ctxt, expr: &ast::Expr) -> def::Def {
- match tcx.def_map.borrow().get(&expr.id) {
- Some(def) => def.full_def(),
- None => {
- tcx.sess.span_bug(expr.span, &format!(
- "no def-map entry for expr {}", expr.id));
+ pub fn adjust_for_autoref(&'tcx self, cx: &ctxt<'tcx>,
+ autoref: Option<AutoRef<'tcx>>)
+ -> Ty<'tcx> {
+ match autoref {
+ None => self,
+ Some(AutoPtr(r, m)) => {
+ cx.mk_ref(r, TypeAndMut { ty: self, mutbl: m })
+ }
+ Some(AutoUnsafe(m)) => {
+ cx.mk_ptr(TypeAndMut { ty: self, mutbl: m })
+ }
}
}
-}
-pub fn expr_is_lval(tcx: &ctxt, expr: &ast::Expr) -> bool {
- match expr.node {
- ast::ExprPath(..) => {
- // We can't use resolve_expr here, as this needs to run on broken
- // programs. We don't need to through - associated items are all
- // rvalues.
- match tcx.def_map.borrow().get(&expr.id) {
- Some(&def::PathResolution {
- base_def: def::DefStatic(..), ..
- }) | Some(&def::PathResolution {
- base_def: def::DefUpvar(..), ..
- }) | Some(&def::PathResolution {
- base_def: def::DefLocal(..), ..
- }) => {
- true
- }
+ fn sort_string(&self, cx: &ctxt) -> String {
- Some(..) => false,
-
- None => tcx.sess.span_bug(expr.span, &format!(
- "no def for path {}", expr.id))
- }
- }
-
- ast::ExprUnary(ast::UnDeref, _) |
- ast::ExprField(..) |
- ast::ExprTupField(..) |
- ast::ExprIndex(..) => {
- true
- }
-
- ast::ExprCall(..) |
- ast::ExprMethodCall(..) |
- ast::ExprStruct(..) |
- ast::ExprRange(..) |
- ast::ExprTup(..) |
- ast::ExprIf(..) |
- ast::ExprMatch(..) |
- ast::ExprClosure(..) |
- ast::ExprBlock(..) |
- ast::ExprRepeat(..) |
- ast::ExprVec(..) |
- ast::ExprBreak(..) |
- ast::ExprAgain(..) |
- ast::ExprRet(..) |
- ast::ExprWhile(..) |
- ast::ExprLoop(..) |
- ast::ExprAssign(..) |
- ast::ExprInlineAsm(..) |
- ast::ExprAssignOp(..) |
- ast::ExprLit(_) |
- ast::ExprUnary(..) |
- ast::ExprBox(..) |
- ast::ExprAddrOf(..) |
- ast::ExprBinary(..) |
- ast::ExprCast(..) => {
- false
- }
-
- ast::ExprParen(ref e) => expr_is_lval(tcx, e),
-
- ast::ExprIfLet(..) |
- ast::ExprWhileLet(..) |
- ast::ExprForLoop(..) |
- ast::ExprMac(..) => {
- tcx.sess.span_bug(
- expr.span,
- "macro expression remains after expansion");
- }
- }
-}
-
-pub fn stmt_node_id(s: &ast::Stmt) -> ast::NodeId {
- match s.node {
- ast::StmtDecl(_, id) | StmtExpr(_, id) | StmtSemi(_, id) => {
- return id;
- }
- ast::StmtMac(..) => panic!("unexpanded macro in trans")
- }
-}
-
-pub fn field_idx_strict(tcx: &ctxt, name: ast::Name, fields: &[field])
- -> usize {
- let mut i = 0;
- for f in fields { if f.name == name { return i; } i += 1; }
- tcx.sess.bug(&format!(
- "no field named `{}` found in the list of fields `{:?}`",
- token::get_name(name),
- fields.iter()
- .map(|f| token::get_name(f.name).to_string())
- .collect::<Vec<String>>()));
-}
-
-pub fn impl_or_trait_item_idx(id: ast::Name, trait_items: &[ImplOrTraitItem])
- -> Option<usize> {
- trait_items.iter().position(|m| m.name() == id)
-}
-
-pub fn ty_sort_string(cx: &ctxt, ty: Ty) -> String {
- match ty.sty {
- TyBool | TyChar | TyInt(_) |
- TyUint(_) | TyFloat(_) | TyStr => ty.to_string(),
- TyTuple(ref tys) if tys.is_empty() => ty.to_string(),
-
- TyEnum(id, _) => format!("enum `{}`", item_path_str(cx, id)),
- TyBox(_) => "box".to_string(),
- TyArray(_, n) => format!("array of {} elements", n),
- TySlice(_) => "slice".to_string(),
- TyRawPtr(_) => "*-ptr".to_string(),
- TyRef(_, _) => "&-ptr".to_string(),
- TyBareFn(Some(_), _) => format!("fn item"),
- TyBareFn(None, _) => "fn pointer".to_string(),
- TyTrait(ref inner) => {
- format!("trait {}", item_path_str(cx, inner.principal_def_id()))
- }
- TyStruct(id, _) => {
- format!("struct `{}`", item_path_str(cx, id))
- }
- TyClosure(..) => "closure".to_string(),
- TyTuple(_) => "tuple".to_string(),
- TyInfer(TyVar(_)) => "inferred type".to_string(),
- TyInfer(IntVar(_)) => "integral variable".to_string(),
- TyInfer(FloatVar(_)) => "floating-point variable".to_string(),
- TyInfer(FreshTy(_)) => "skolemized type".to_string(),
- TyInfer(FreshIntTy(_)) => "skolemized integral type".to_string(),
- TyInfer(FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
- TyProjection(_) => "associated type".to_string(),
- TyParam(ref p) => {
- if p.space == subst::SelfSpace {
- "Self".to_string()
- } else {
- "type parameter".to_string()
+ match self.sty {
+ TyBool | TyChar | TyInt(_) |
+ TyUint(_) | TyFloat(_) | TyStr => self.to_string(),
+ TyTuple(ref tys) if tys.is_empty() => self.to_string(),
+
+ TyEnum(id, _) => format!("enum `{}`", cx.item_path_str(id)),
+ TyBox(_) => "box".to_string(),
+ TyArray(_, n) => format!("array of {} elements", n),
+ TySlice(_) => "slice".to_string(),
+ TyRawPtr(_) => "*-ptr".to_string(),
+ TyRef(_, _) => "&-ptr".to_string(),
+ TyBareFn(Some(_), _) => format!("fn item"),
+ TyBareFn(None, _) => "fn pointer".to_string(),
+ TyTrait(ref inner) => {
+ format!("trait {}", cx.item_path_str(inner.principal_def_id()))
+ }
+ TyStruct(id, _) => {
+ format!("struct `{}`", cx.item_path_str(id))
+ }
+ TyClosure(..) => "closure".to_string(),
+ TyTuple(_) => "tuple".to_string(),
+ TyInfer(TyVar(_)) => "inferred type".to_string(),
+ TyInfer(IntVar(_)) => "integral variable".to_string(),
+ TyInfer(FloatVar(_)) => "floating-point variable".to_string(),
+ TyInfer(FreshTy(_)) => "skolemized type".to_string(),
+ TyInfer(FreshIntTy(_)) => "skolemized integral type".to_string(),
+ TyInfer(FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
+ TyProjection(_) => "associated type".to_string(),
+ TyParam(ref p) => {
+ if p.space == subst::SelfSpace {
+ "Self".to_string()
+ } else {
+ "type parameter".to_string()
+ }
}
+ TyError => "type error".to_string(),
}
- TyError => "type error".to_string(),
}
}
-
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
-impl<'tcx> fmt::Display for type_err<'tcx> {
+impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ use self::TypeError::*;
+
match *self {
- terr_cyclic_ty => write!(f, "cyclic type of infinite size"),
- terr_mismatch => write!(f, "types differ"),
- terr_unsafety_mismatch(values) => {
+ CyclicTy => write!(f, "cyclic type of infinite size"),
+ Mismatch => write!(f, "types differ"),
+ UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
- terr_abi_mismatch(values) => {
+ AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
- terr_mutability => write!(f, "values differ in mutability"),
- terr_box_mutability => {
+ Mutability => write!(f, "values differ in mutability"),
+ BoxMutability => {
write!(f, "boxed values differ in mutability")
}
- terr_vec_mutability => write!(f, "vectors differ in mutability"),
- terr_ptr_mutability => write!(f, "pointers differ in mutability"),
- terr_ref_mutability => write!(f, "references differ in mutability"),
- terr_ty_param_size(values) => {
+ VecMutability => write!(f, "vectors differ in mutability"),
+ PtrMutability => write!(f, "pointers differ in mutability"),
+ RefMutability => write!(f, "references differ in mutability"),
+ TyParamSize(values) => {
write!(f, "expected a type with {} type params, \
found one with {} type params",
values.expected,
values.found)
}
- terr_fixed_array_size(values) => {
+ FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
- terr_tuple_size(values) => {
+ TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
- terr_arg_count => {
+ ArgCount => {
write!(f, "incorrect number of function parameters")
}
- terr_regions_does_not_outlive(..) => {
+ RegionsDoesNotOutlive(..) => {
write!(f, "lifetime mismatch")
}
- terr_regions_not_same(..) => {
+ RegionsNotSame(..) => {
write!(f, "lifetimes are not the same")
}
- terr_regions_no_overlap(..) => {
+ RegionsNoOverlap(..) => {
write!(f, "lifetimes do not intersect")
}
- terr_regions_insufficiently_polymorphic(br, _) => {
+ RegionsInsufficientlyPolymorphic(br, _) => {
write!(f, "expected bound lifetime parameter {}, \
found concrete lifetime", br)
}
- terr_regions_overly_polymorphic(br, _) => {
+ RegionsOverlyPolymorphic(br, _) => {
write!(f, "expected concrete lifetime, \
found bound lifetime parameter {}", br)
}
- terr_sorts(values) => tls::with(|tcx| {
+ Sorts(values) => tls::with(|tcx| {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
- let expected_str = ty_sort_string(tcx, values.expected);
- let found_str = ty_sort_string(tcx, values.found);
+ let expected_str = values.expected.sort_string(tcx);
+ let found_str = values.found.sort_string(tcx);
if expected_str == found_str {
write!(f, "expected {}, found a different {}", expected_str, found_str)
} else {
write!(f, "expected {}, found {}", expected_str, found_str)
}
}),
- terr_traits(values) => tls::with(|tcx| {
+ Traits(values) => tls::with(|tcx| {
write!(f, "expected trait `{}`, found trait `{}`",
- item_path_str(tcx, values.expected),
- item_path_str(tcx, values.found))
+ tcx.item_path_str(values.expected),
+ tcx.item_path_str(values.found))
}),
- terr_builtin_bounds(values) => {
+ BuiltinBoundsMismatch(values) => {
if values.expected.is_empty() {
write!(f, "expected no bounds, found `{}`",
values.found)
values.found)
}
}
- terr_integer_as_char => {
+ IntegerAsChar => {
write!(f, "expected an integral type, found `char`")
}
- terr_int_mismatch(ref values) => {
+ IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
- terr_float_mismatch(ref values) => {
+ FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
- terr_variadic_mismatch(ref values) => {
+ VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
- terr_convergence_mismatch(ref values) => {
+ ConvergenceMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "converging" } else { "diverging" },
if values.found { "converging" } else { "diverging" })
}
- terr_projection_name_mismatched(ref values) => {
+ ProjectionNameMismatched(ref values) => {
write!(f, "expected {}, found {}",
values.expected,
values.found)
}
- terr_projection_bounds_length(ref values) => {
+ ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
+ },
+ TyParamDefaultMismatch(ref values) => {
+ write!(f, "conflicting type parameter defaults `{}` and `{}`",
+ values.expected.ty,
+ values.found.ty)
}
}
}
}
-pub fn note_and_explain_type_err<'tcx>(cx: &ctxt<'tcx>, err: &type_err<'tcx>, sp: Span) {
- match *err {
- terr_regions_does_not_outlive(subregion, superregion) => {
- note_and_explain_region(cx, "", subregion, "...");
- note_and_explain_region(cx, "...does not necessarily outlive ",
- superregion, "");
- }
- terr_regions_not_same(region1, region2) => {
- note_and_explain_region(cx, "", region1, "...");
- note_and_explain_region(cx, "...is not the same lifetime as ",
- region2, "");
- }
- terr_regions_no_overlap(region1, region2) => {
- note_and_explain_region(cx, "", region1, "...");
- note_and_explain_region(cx, "...does not overlap ",
- region2, "");
- }
- terr_regions_insufficiently_polymorphic(_, conc_region) => {
- note_and_explain_region(cx,
- "concrete lifetime that was found is ",
- conc_region, "");
- }
- terr_regions_overly_polymorphic(_, ty::ReInfer(ty::ReVar(_))) => {
- // don't bother to print out the message below for
- // inference variables, it's not very illuminating.
- }
- terr_regions_overly_polymorphic(_, conc_region) => {
- note_and_explain_region(cx,
- "expected concrete lifetime is ",
- conc_region, "");
- }
- terr_sorts(values) => {
- let expected_str = ty_sort_string(cx, values.expected);
- let found_str = ty_sort_string(cx, values.found);
- if expected_str == found_str && expected_str == "closure" {
- cx.sess.span_note(sp, &format!("no two closures, even if identical, have the same \
- type"));
- cx.sess.span_help(sp, &format!("consider boxing your closure and/or \
- using it as a trait object"));
- }
- }
- _ => {}
- }
-}
-
-pub fn provided_source(cx: &ctxt, id: ast::DefId) -> Option<ast::DefId> {
- cx.provided_method_sources.borrow().get(&id).cloned()
-}
-
-pub fn provided_trait_methods<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> Vec<Rc<Method<'tcx>>> {
- if is_local(id) {
- if let ItemTrait(_, _, _, ref ms) = cx.map.expect_item(id.node).node {
- ms.iter().filter_map(|ti| {
- if let ast::MethodTraitItem(_, Some(_)) = ti.node {
- match impl_or_trait_item(cx, ast_util::local_def(ti.id)) {
- MethodTraitItem(m) => Some(m),
- _ => {
- cx.sess.bug("provided_trait_methods(): \
- non-method item found from \
- looking up provided method?!")
- }
- }
- } else {
- None
- }
- }).collect()
- } else {
- cx.sess.bug(&format!("provided_trait_methods: `{:?}` is not a trait", id))
- }
- } else {
- csearch::get_provided_trait_methods(cx, id)
- }
-}
-
-pub fn associated_consts<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> Vec<Rc<AssociatedConst<'tcx>>> {
- if is_local(id) {
- match cx.map.expect_item(id.node).node {
- ItemTrait(_, _, _, ref tis) => {
- tis.iter().filter_map(|ti| {
- if let ast::ConstTraitItem(_, _) = ti.node {
- match impl_or_trait_item(cx, ast_util::local_def(ti.id)) {
- ConstTraitItem(ac) => Some(ac),
- _ => {
- cx.sess.bug("associated_consts(): \
- non-const item found from \
- looking up a constant?!")
- }
- }
- } else {
- None
- }
- }).collect()
- }
- ItemImpl(_, _, _, _, _, ref iis) => {
- iis.iter().filter_map(|ii| {
- if let ast::ConstImplItem(_, _) = ii.node {
- match impl_or_trait_item(cx, ast_util::local_def(ii.id)) {
- ConstTraitItem(ac) => Some(ac),
- _ => {
- cx.sess.bug("associated_consts(): \
- non-const item found from \
- looking up a constant?!")
- }
- }
- } else {
- None
- }
- }).collect()
- }
- _ => {
- cx.sess.bug(&format!("associated_consts: `{:?}` is not a trait \
- or impl", id))
- }
- }
- } else {
- csearch::get_associated_consts(cx, id)
- }
-}
-
/// Helper for looking things up in the various maps that are populated during
/// typeck::collect (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc). All of
/// these share the pattern that if the id is local, it should have been loaded
v
}
-pub fn trait_item<'tcx>(cx: &ctxt<'tcx>, trait_did: ast::DefId, idx: usize)
- -> ImplOrTraitItem<'tcx> {
- let method_def_id = (*ty::trait_item_def_ids(cx, trait_did))[idx].def_id();
- impl_or_trait_item(cx, method_def_id)
-}
-
-pub fn trait_items<'tcx>(cx: &ctxt<'tcx>, trait_did: ast::DefId)
- -> Rc<Vec<ImplOrTraitItem<'tcx>>> {
- let mut trait_items = cx.trait_items_cache.borrow_mut();
- match trait_items.get(&trait_did).cloned() {
- Some(trait_items) => trait_items,
- None => {
- let def_ids = ty::trait_item_def_ids(cx, trait_did);
- let items: Rc<Vec<ImplOrTraitItem>> =
- Rc::new(def_ids.iter()
- .map(|d| impl_or_trait_item(cx, d.def_id()))
- .collect());
- trait_items.insert(trait_did, items.clone());
- items
- }
- }
-}
-
-pub fn trait_impl_polarity<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> Option<ast::ImplPolarity> {
- if id.krate == ast::LOCAL_CRATE {
- match cx.map.find(id.node) {
- Some(ast_map::NodeItem(item)) => {
- match item.node {
- ast::ItemImpl(_, polarity, _, _, _, _) => Some(polarity),
- _ => None
- }
- }
- _ => None
- }
- } else {
- csearch::get_impl_polarity(cx, id)
- }
-}
-
-pub fn custom_coerce_unsized_kind<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId)
- -> CustomCoerceUnsized {
- memoized(&cx.custom_coerce_unsized_kinds, did, |did: DefId| {
- let (kind, src) = if did.krate != ast::LOCAL_CRATE {
- (csearch::get_custom_coerce_unsized_kind(cx, did), "external")
- } else {
- (None, "local")
- };
+impl BorrowKind {
+ pub fn from_mutbl(m: ast::Mutability) -> BorrowKind {
+ match m {
+ ast::MutMutable => MutBorrow,
+ ast::MutImmutable => ImmBorrow,
+ }
+ }
- match kind {
- Some(kind) => kind,
- None => {
- cx.sess.bug(&format!("custom_coerce_unsized_kind: \
- {} impl `{}` is missing its kind",
- src, item_path_str(cx, did)));
- }
+ /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+ /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+ /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+ /// question.
+ pub fn to_mutbl_lossy(self) -> ast::Mutability {
+ match self {
+ MutBorrow => ast::MutMutable,
+ ImmBorrow => ast::MutImmutable,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of an `&uniq`
+ // and hence is a safe "over approximation".
+ UniqueImmBorrow => ast::MutMutable,
}
- })
-}
+ }
-pub fn impl_or_trait_item<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> ImplOrTraitItem<'tcx> {
- lookup_locally_or_in_crate_store(
- "impl_or_trait_items", id, &cx.impl_or_trait_items,
- || csearch::get_impl_or_trait_item(cx, id))
+ pub fn to_user_str(&self) -> &'static str {
+ match *self {
+ MutBorrow => "mutable",
+ ImmBorrow => "immutable",
+ UniqueImmBorrow => "uniquely immutable",
+ }
+ }
}
-/// Returns the parameter index that the given associated type corresponds to.
-pub fn associated_type_parameter_index(cx: &ctxt,
- trait_def: &TraitDef,
- associated_type_id: ast::DefId)
- -> usize {
- for type_parameter_def in &trait_def.generics.types {
- if type_parameter_def.def_id == associated_type_id {
- return type_parameter_def.index as usize
+impl<'tcx> ctxt<'tcx> {
+ /// Returns the type of element at index `i` in tuple or tuple-like type `t`.
+ /// For an enum `t`, `variant` is None only if `t` is a univariant enum.
+ pub fn positional_element_ty(&self,
+ ty: Ty<'tcx>,
+ i: usize,
+ variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
+
+ match (&ty.sty, variant) {
+ (&TyTuple(ref v), None) => v.get(i).cloned(),
+
+
+ (&TyStruct(def_id, substs), None) => self.lookup_struct_fields(def_id)
+ .get(i)
+ .map(|&t| self.lookup_item_type(t.id).ty.subst(self, substs)),
+
+ (&TyEnum(def_id, substs), Some(variant_def_id)) => {
+ let variant_info = self.enum_variant_with_id(def_id, variant_def_id);
+ variant_info.args.get(i).map(|t|t.subst(self, substs))
+ }
+
+ (&TyEnum(def_id, substs), None) => {
+ assert!(self.enum_is_univariant(def_id));
+ let enum_variants = self.enum_variants(def_id);
+ let variant_info = &enum_variants[0];
+ variant_info.args.get(i).map(|t|t.subst(self, substs))
+ }
+
+ _ => None
}
}
- cx.sess.bug("couldn't find associated type parameter index")
-}
-pub fn trait_item_def_ids(cx: &ctxt, id: ast::DefId)
- -> Rc<Vec<ImplOrTraitItemId>> {
- lookup_locally_or_in_crate_store(
- "trait_item_def_ids", id, &cx.trait_item_def_ids,
- || Rc::new(csearch::get_trait_item_def_ids(&cx.sess.cstore, id)))
-}
+ /// Returns the type of element at field `n` in struct or struct-like type `t`.
+ /// For an enum `t`, `variant` must be some def id.
+ pub fn named_element_ty(&self,
+ ty: Ty<'tcx>,
+ n: ast::Name,
+ variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
-/// Returns the trait-ref corresponding to a given impl, or None if it is
-/// an inherent impl.
-pub fn impl_trait_ref<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> Option<TraitRef<'tcx>>
-{
- lookup_locally_or_in_crate_store(
- "impl_trait_refs", id, &cx.impl_trait_refs,
- || csearch::get_impl_trait(cx, id))
-}
+ match (&ty.sty, variant) {
+ (&TyStruct(def_id, substs), None) => {
+ let r = self.lookup_struct_fields(def_id);
+ r.iter().find(|f| f.name == n)
+ .map(|&f| self.lookup_field_type(def_id, f.id, substs))
+ }
+ (&TyEnum(def_id, substs), Some(variant_def_id)) => {
+ let variant_info = self.enum_variant_with_id(def_id, variant_def_id);
+ variant_info.arg_names.as_ref()
+ .expect("must have struct enum variant if accessing a named fields")
+ .iter().zip(&variant_info.args)
+ .find(|&(&name, _)| name == n)
+ .map(|(_name, arg_t)| arg_t.subst(self, substs))
+ }
+ _ => None
+ }
+ }
-/// Returns whether this DefId refers to an impl
-pub fn is_impl<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId) -> bool {
- if id.krate == ast::LOCAL_CRATE {
- if let Some(ast_map::NodeItem(
- &ast::Item { node: ast::ItemImpl(..), .. })) = cx.map.find(id.node) {
- true
- } else {
- false
+ pub fn node_id_to_type(&self, id: ast::NodeId) -> Ty<'tcx> {
+ match self.node_id_to_type_opt(id) {
+ Some(ty) => ty,
+ None => self.sess.bug(
+ &format!("node_id_to_type: no type for node `{}`",
+ self.map.node_to_string(id)))
}
- } else {
- csearch::is_impl(&cx.sess.cstore, id)
}
-}
-pub fn trait_ref_to_def_id(tcx: &ctxt, tr: &ast::TraitRef) -> ast::DefId {
- tcx.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id()
-}
+ pub fn node_id_to_type_opt(&self, id: ast::NodeId) -> Option<Ty<'tcx>> {
+ self.tables.borrow().node_types.get(&id).cloned()
+ }
-pub fn try_add_builtin_trait(
- tcx: &ctxt,
- trait_def_id: ast::DefId,
- builtin_bounds: &mut EnumSet<BuiltinBound>)
- -> bool
-{
- //! Checks whether `trait_ref` refers to one of the builtin
- //! traits, like `Send`, and adds the corresponding
- //! bound to the set `builtin_bounds` if so. Returns true if `trait_ref`
- //! is a builtin trait.
-
- match tcx.lang_items.to_builtin_kind(trait_def_id) {
- Some(bound) => { builtin_bounds.insert(bound); true }
- None => false
- }
-}
-
-pub fn ty_to_def_id(ty: Ty) -> Option<ast::DefId> {
- match ty.sty {
- TyTrait(ref tt) =>
- Some(tt.principal_def_id()),
- TyStruct(id, _) |
- TyEnum(id, _) |
- TyClosure(id, _) =>
- Some(id),
- _ =>
- None
+ pub fn node_id_item_substs(&self, id: ast::NodeId) -> ItemSubsts<'tcx> {
+ match self.tables.borrow().item_substs.get(&id) {
+ None => ItemSubsts::empty(),
+ Some(ts) => ts.clone(),
+ }
}
-}
-// Enum information
-#[derive(Clone)]
-pub struct VariantInfo<'tcx> {
- pub args: Vec<Ty<'tcx>>,
- pub arg_names: Option<Vec<ast::Name>>,
- pub ctor_ty: Option<Ty<'tcx>>,
- pub name: ast::Name,
- pub id: ast::DefId,
- pub disr_val: Disr,
- pub vis: Visibility
-}
+ // Returns the type of a pattern as a monotype. Like @expr_ty, this function
+ // doesn't provide type parameter substitutions.
+ pub fn pat_ty(&self, pat: &ast::Pat) -> Ty<'tcx> {
+ self.node_id_to_type(pat.id)
+ }
+ pub fn pat_ty_opt(&self, pat: &ast::Pat) -> Option<Ty<'tcx>> {
+ self.node_id_to_type_opt(pat.id)
+ }
-impl<'tcx> VariantInfo<'tcx> {
+ // Returns the type of an expression as a monotype.
+ //
+ // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
+ // some cases, we insert `AutoAdjustment` annotations such as auto-deref or
+ // auto-ref. The type returned by this function does not consider such
+ // adjustments. See `expr_ty_adjusted()` instead.
+ //
+ // NB (2): This type doesn't provide type parameter substitutions; e.g. if you
+ // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
+ // instead of "fn(ty) -> T with T = isize".
+ pub fn expr_ty(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ self.node_id_to_type(expr.id)
+ }
- /// Creates a new VariantInfo from the corresponding ast representation.
+ pub fn expr_ty_opt(&self, expr: &ast::Expr) -> Option<Ty<'tcx>> {
+ self.node_id_to_type_opt(expr.id)
+ }
+
+ /// Returns the type of `expr`, considering any `AutoAdjustment`
+ /// entry recorded for that expression.
///
- /// Does not do any caching of the value in the type context.
- pub fn from_ast_variant(cx: &ctxt<'tcx>,
- ast_variant: &ast::Variant,
- discriminant: Disr) -> VariantInfo<'tcx> {
- let ctor_ty = node_id_to_type(cx, ast_variant.node.id);
+ /// It would almost certainly be better to store the adjusted ty in with
+ /// the `AutoAdjustment`, but I opted not to do this because it would
+ /// require serializing and deserializing the type and, although that's not
+ /// hard to do, I just hate that code so much I didn't want to touch it
+ /// unless it was to fix it properly, which seemed a distraction from the
+ /// thread at hand! -nmatsakis
+ pub fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ self.expr_ty(expr)
+ .adjust(self, expr.span, expr.id,
+ self.tables.borrow().adjustments.get(&expr.id),
+ |method_call| {
+ self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
+ })
+ }
- match ast_variant.node.kind {
- ast::TupleVariantKind(ref args) => {
- let arg_tys = if !args.is_empty() {
- // the regions in the argument types come from the
- // enum def'n, and hence will all be early bound
- ty::no_late_bound_regions(cx, &ty_fn_args(ctor_ty)).unwrap()
- } else {
- Vec::new()
- };
-
- return VariantInfo {
- args: arg_tys,
- arg_names: None,
- ctor_ty: Some(ctor_ty),
- name: ast_variant.node.name.name,
- id: ast_util::local_def(ast_variant.node.id),
- disr_val: discriminant,
- vis: ast_variant.node.vis
- };
- },
- ast::StructVariantKind(ref struct_def) => {
- let fields: &[StructField] = &struct_def.fields;
-
- assert!(!fields.is_empty());
-
- let arg_tys = struct_def.fields.iter()
- .map(|field| node_id_to_type(cx, field.node.id)).collect();
- let arg_names = fields.iter().map(|field| {
- match field.node.kind {
- NamedField(ident, _) => ident.name,
- UnnamedField(..) => cx.sess.bug(
- "enum_variants: all fields in struct must have a name")
- }
- }).collect();
-
- return VariantInfo {
- args: arg_tys,
- arg_names: Some(arg_names),
- ctor_ty: None,
- name: ast_variant.node.name.name,
- id: ast_util::local_def(ast_variant.node.id),
- disr_val: discriminant,
- vis: ast_variant.node.vis
- };
+ pub fn expr_span(&self, id: NodeId) -> Span {
+ match self.map.find(id) {
+ Some(ast_map::NodeExpr(e)) => {
+ e.span
+ }
+ Some(f) => {
+ self.sess.bug(&format!("Node id {} is not an expr: {:?}",
+ id, f));
+ }
+ None => {
+ self.sess.bug(&format!("Node id {} is not present \
+ in the node map", id));
}
}
}
-}
-
-pub fn substd_enum_variants<'tcx>(cx: &ctxt<'tcx>,
- id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Vec<Rc<VariantInfo<'tcx>>> {
- enum_variants(cx, id).iter().map(|variant_info| {
- let substd_args = variant_info.args.iter()
- .map(|aty| aty.subst(cx, substs)).collect::<Vec<_>>();
-
- let substd_ctor_ty = variant_info.ctor_ty.subst(cx, substs);
-
- Rc::new(VariantInfo {
- args: substd_args,
- ctor_ty: substd_ctor_ty,
- ..(**variant_info).clone()
- })
- }).collect()
-}
-
-pub fn item_path_str(cx: &ctxt, id: ast::DefId) -> String {
- with_path(cx, id, |path| ast_map::path_to_string(path)).to_string()
-}
-
-#[derive(Copy, Clone)]
-pub enum DtorKind {
- NoDtor,
- TraitDtor(DefId, bool)
-}
-
-impl DtorKind {
- pub fn is_present(&self) -> bool {
- match *self {
- TraitDtor(..) => true,
- _ => false
- }
- }
-
- pub fn has_drop_flag(&self) -> bool {
- match self {
- &NoDtor => false,
- &TraitDtor(_, flag) => flag
- }
- }
-}
-
-/* If struct_id names a struct with a dtor. */
-pub fn ty_dtor(cx: &ctxt, struct_id: DefId) -> DtorKind {
- match cx.destructor_for_type.borrow().get(&struct_id) {
- Some(&method_def_id) => {
- let flag = !has_attr(cx, struct_id, "unsafe_no_drop_flag");
- TraitDtor(method_def_id, flag)
+ pub fn local_var_name_str(&self, id: NodeId) -> InternedString {
+ match self.map.find(id) {
+ Some(ast_map::NodeLocal(pat)) => {
+ match pat.node {
+ ast::PatIdent(_, ref path1, _) => path1.node.name.as_str(),
+ _ => {
+ self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, pat));
+ },
+ }
+ },
+ r => self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, r)),
}
- None => NoDtor,
}
-}
-pub fn has_dtor(cx: &ctxt, struct_id: DefId) -> bool {
- cx.destructor_for_type.borrow().contains_key(&struct_id)
-}
+ pub fn resolve_expr(&self, expr: &ast::Expr) -> def::Def {
+ match self.def_map.borrow().get(&expr.id) {
+ Some(def) => def.full_def(),
+ None => {
+ self.sess.span_bug(expr.span, &format!(
+ "no def-map entry for expr {}", expr.id));
+ }
+ }
+ }
+
+ pub fn expr_is_lval(&self, expr: &ast::Expr) -> bool {
+ match expr.node {
+ ast::ExprPath(..) => {
+ // We can't use resolve_expr here, as this needs to run on broken
+ // programs. We don't need to through - associated items are all
+ // rvalues.
+ match self.def_map.borrow().get(&expr.id) {
+ Some(&def::PathResolution {
+ base_def: def::DefStatic(..), ..
+ }) | Some(&def::PathResolution {
+ base_def: def::DefUpvar(..), ..
+ }) | Some(&def::PathResolution {
+ base_def: def::DefLocal(..), ..
+ }) => {
+ true
+ }
-pub fn with_path<T, F>(cx: &ctxt, id: ast::DefId, f: F) -> T where
- F: FnOnce(ast_map::PathElems) -> T,
-{
- if id.krate == ast::LOCAL_CRATE {
- cx.map.with_path(id.node, f)
- } else {
- f(csearch::get_item_path(cx, id).iter().cloned().chain(LinkedPath::empty()))
- }
-}
+ Some(..) => false,
-pub fn enum_is_univariant(cx: &ctxt, id: ast::DefId) -> bool {
- enum_variants(cx, id).len() == 1
-}
+ None => self.sess.span_bug(expr.span, &format!(
+ "no def for path {}", expr.id))
+ }
+ }
-pub fn type_is_empty(cx: &ctxt, ty: Ty) -> bool {
- match ty.sty {
- TyEnum(did, _) => (*enum_variants(cx, did)).is_empty(),
- _ => false
- }
-}
+ ast::ExprUnary(ast::UnDeref, _) |
+ ast::ExprField(..) |
+ ast::ExprTupField(..) |
+ ast::ExprIndex(..) => {
+ true
+ }
+
+ ast::ExprCall(..) |
+ ast::ExprMethodCall(..) |
+ ast::ExprStruct(..) |
+ ast::ExprRange(..) |
+ ast::ExprTup(..) |
+ ast::ExprIf(..) |
+ ast::ExprMatch(..) |
+ ast::ExprClosure(..) |
+ ast::ExprBlock(..) |
+ ast::ExprRepeat(..) |
+ ast::ExprVec(..) |
+ ast::ExprBreak(..) |
+ ast::ExprAgain(..) |
+ ast::ExprRet(..) |
+ ast::ExprWhile(..) |
+ ast::ExprLoop(..) |
+ ast::ExprAssign(..) |
+ ast::ExprInlineAsm(..) |
+ ast::ExprAssignOp(..) |
+ ast::ExprLit(_) |
+ ast::ExprUnary(..) |
+ ast::ExprBox(..) |
+ ast::ExprAddrOf(..) |
+ ast::ExprBinary(..) |
+ ast::ExprCast(..) => {
+ false
+ }
-trait IntTypeExt {
- fn to_ty<'tcx>(&self, cx: &ctxt<'tcx>) -> Ty<'tcx>;
- fn i64_to_disr(&self, val: i64) -> Option<Disr>;
- fn u64_to_disr(&self, val: u64) -> Option<Disr>;
- fn disr_incr(&self, val: Disr) -> Option<Disr>;
- fn disr_string(&self, val: Disr) -> String;
- fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr;
-}
+ ast::ExprParen(ref e) => self.expr_is_lval(e),
-impl IntTypeExt for attr::IntType {
- fn to_ty<'tcx>(&self, cx: &ctxt<'tcx>) -> Ty<'tcx> {
- match *self {
- SignedInt(ast::TyI8) => cx.types.i8,
- SignedInt(ast::TyI16) => cx.types.i16,
- SignedInt(ast::TyI32) => cx.types.i32,
- SignedInt(ast::TyI64) => cx.types.i64,
- SignedInt(ast::TyIs) => cx.types.isize,
- UnsignedInt(ast::TyU8) => cx.types.u8,
- UnsignedInt(ast::TyU16) => cx.types.u16,
- UnsignedInt(ast::TyU32) => cx.types.u32,
- UnsignedInt(ast::TyU64) => cx.types.u64,
- UnsignedInt(ast::TyUs) => cx.types.usize,
+ ast::ExprIfLet(..) |
+ ast::ExprWhileLet(..) |
+ ast::ExprForLoop(..) |
+ ast::ExprMac(..) => {
+ self.sess.span_bug(
+ expr.span,
+ "macro expression remains after expansion");
+ }
}
}
- fn i64_to_disr(&self, val: i64) -> Option<Disr> {
- match *self {
- SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr),
- SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr),
- SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr),
- SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr),
- UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr),
- UnsignedInt(ast::TyU16) => val.to_u16() .map(|v| v as Disr),
- UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr),
- UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr),
-
- UnsignedInt(ast::TyUs) |
- SignedInt(ast::TyIs) => unreachable!(),
- }
+ pub fn field_idx_strict(&self, name: ast::Name, fields: &[Field<'tcx>])
+ -> usize {
+ let mut i = 0;
+ for f in fields { if f.name == name { return i; } i += 1; }
+ self.sess.bug(&format!(
+ "no field named `{}` found in the list of fields `{:?}`",
+ name,
+ fields.iter()
+ .map(|f| f.name.to_string())
+ .collect::<Vec<String>>()));
}
- fn u64_to_disr(&self, val: u64) -> Option<Disr> {
- match *self {
- SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr),
- SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr),
- SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr),
- SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr),
- UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr),
- UnsignedInt(ast::TyU16) => val.to_u16() .map(|v| v as Disr),
- UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr),
- UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr),
-
- UnsignedInt(ast::TyUs) |
- SignedInt(ast::TyIs) => unreachable!(),
- }
- }
+ pub fn note_and_explain_type_err(&self, err: &TypeError<'tcx>, sp: Span) {
+ use self::TypeError::*;
- fn disr_incr(&self, val: Disr) -> Option<Disr> {
- macro_rules! add1 {
- ($e:expr) => { $e.and_then(|v|v.checked_add(1)).map(|v| v as Disr) }
- }
- match *self {
- // SignedInt repr means we *want* to reinterpret the bits
- // treating the highest bit of Disr as a sign-bit, so
- // cast to i64 before range-checking.
- SignedInt(ast::TyI8) => add1!((val as i64).to_i8()),
- SignedInt(ast::TyI16) => add1!((val as i64).to_i16()),
- SignedInt(ast::TyI32) => add1!((val as i64).to_i32()),
- SignedInt(ast::TyI64) => add1!(Some(val as i64)),
-
- UnsignedInt(ast::TyU8) => add1!(val.to_u8()),
- UnsignedInt(ast::TyU16) => add1!(val.to_u16()),
- UnsignedInt(ast::TyU32) => add1!(val.to_u32()),
- UnsignedInt(ast::TyU64) => add1!(Some(val)),
+ match err.clone() {
+ RegionsDoesNotOutlive(subregion, superregion) => {
+ self.note_and_explain_region("", subregion, "...");
+ self.note_and_explain_region("...does not necessarily outlive ",
+ superregion, "");
+ }
+ RegionsNotSame(region1, region2) => {
+ self.note_and_explain_region("", region1, "...");
+ self.note_and_explain_region("...is not the same lifetime as ",
+ region2, "");
+ }
+ RegionsNoOverlap(region1, region2) => {
+ self.note_and_explain_region("", region1, "...");
+ self.note_and_explain_region("...does not overlap ",
+ region2, "");
+ }
+ RegionsInsufficientlyPolymorphic(_, conc_region) => {
+ self.note_and_explain_region("concrete lifetime that was found is ",
+ conc_region, "");
+ }
+ RegionsOverlyPolymorphic(_, ty::ReInfer(ty::ReVar(_))) => {
+ // don't bother to print out the message below for
+ // inference variables, it's not very illuminating.
+ }
+ RegionsOverlyPolymorphic(_, conc_region) => {
+ self.note_and_explain_region("expected concrete lifetime is ",
+ conc_region, "");
+ }
+ Sorts(values) => {
+ let expected_str = values.expected.sort_string(self);
+ let found_str = values.found.sort_string(self);
+ if expected_str == found_str && expected_str == "closure" {
+ self.sess.span_note(sp,
+ &format!("no two closures, even if identical, have the same type"));
+ self.sess.span_help(sp,
+ &format!("consider boxing your closure and/or \
+ using it as a trait object"));
+ }
+ },
+ TyParamDefaultMismatch(values) => {
+ let expected = values.expected;
+ let found = values.found;
+ self.sess.span_note(sp,
+ &format!("conflicting type parameter defaults `{}` and `{}`",
+ expected.ty,
+ found.ty));
+
+ match (expected.def_id.krate == ast::LOCAL_CRATE,
+ self.map.opt_span(expected.def_id.node)) {
+ (true, Some(span)) => {
+ self.sess.span_note(span,
+ &format!("a default was defined here..."));
+ }
+ (_, _) => {
+ let elems = csearch::get_item_path(self, expected.def_id)
+ .into_iter()
+ .map(|p| p.to_string())
+ .collect::<Vec<_>>();
+ self.sess.note(
+ &format!("a default is defined on `{}`",
+ elems.join("::")));
+ }
+ }
- UnsignedInt(ast::TyUs) |
- SignedInt(ast::TyIs) => unreachable!(),
- }
- }
+ self.sess.span_note(
+ expected.origin_span,
+ &format!("...that was applied to an unconstrained type variable here"));
- // This returns a String because (1.) it is only used for
- // rendering an error message and (2.) a string can represent the
- // full range from `i64::MIN` through `u64::MAX`.
- fn disr_string(&self, val: Disr) -> String {
- match *self {
- SignedInt(ast::TyI8) => format!("{}", val as i8 ),
- SignedInt(ast::TyI16) => format!("{}", val as i16),
- SignedInt(ast::TyI32) => format!("{}", val as i32),
- SignedInt(ast::TyI64) => format!("{}", val as i64),
- UnsignedInt(ast::TyU8) => format!("{}", val as u8 ),
- UnsignedInt(ast::TyU16) => format!("{}", val as u16),
- UnsignedInt(ast::TyU32) => format!("{}", val as u32),
- UnsignedInt(ast::TyU64) => format!("{}", val as u64),
+ match (found.def_id.krate == ast::LOCAL_CRATE,
+ self.map.opt_span(found.def_id.node)) {
+ (true, Some(span)) => {
+ self.sess.span_note(span,
+ &format!("a second default was defined here..."));
+ }
+ (_, _) => {
+ let elems = csearch::get_item_path(self, found.def_id)
+ .into_iter()
+ .map(|p| p.to_string())
+ .collect::<Vec<_>>();
+
+ self.sess.note(
+ &format!("a second default is defined on `{}`", elems.join(" ")));
+ }
+ }
- UnsignedInt(ast::TyUs) |
- SignedInt(ast::TyIs) => unreachable!(),
+ self.sess.span_note(
+ found.origin_span,
+ &format!("...that also applies to the same type variable here"));
+ }
+ _ => {}
}
}
- fn disr_wrap_incr(&self, val: Option<Disr>) -> Disr {
- macro_rules! add1 {
- ($e:expr) => { ($e).wrapping_add(1) as Disr }
- }
- let val = val.unwrap_or(ty::INITIAL_DISCRIMINANT_VALUE);
- match *self {
- SignedInt(ast::TyI8) => add1!(val as i8 ),
- SignedInt(ast::TyI16) => add1!(val as i16),
- SignedInt(ast::TyI32) => add1!(val as i32),
- SignedInt(ast::TyI64) => add1!(val as i64),
- UnsignedInt(ast::TyU8) => add1!(val as u8 ),
- UnsignedInt(ast::TyU16) => add1!(val as u16),
- UnsignedInt(ast::TyU32) => add1!(val as u32),
- UnsignedInt(ast::TyU64) => add1!(val as u64),
-
- UnsignedInt(ast::TyUs) |
- SignedInt(ast::TyIs) => unreachable!(),
- }
+ pub fn provided_source(&self, id: ast::DefId) -> Option<ast::DefId> {
+ self.provided_method_sources.borrow().get(&id).cloned()
}
-}
-
-/// Returns `(normalized_type, ty)`, where `normalized_type` is the
-/// IntType representation of one of {i64,i32,i16,i8,u64,u32,u16,u8},
-/// and `ty` is the original type (i.e. may include `isize` or
-/// `usize`).
-pub fn enum_repr_type<'tcx>(cx: &ctxt<'tcx>,
- opt_hint: Option<&attr::ReprAttr>)
- -> (attr::IntType, Ty<'tcx>)
-{
- let repr_type = match opt_hint {
- // Feed in the given type
- Some(&attr::ReprInt(_, int_t)) => int_t,
- // ... but provide sensible default if none provided
- //
- // NB. Historically `fn enum_variants` generate i64 here, while
- // rustc_typeck::check would generate isize.
- _ => SignedInt(ast::TyIs),
- };
-
- let repr_type_ty = repr_type.to_ty(cx);
- let repr_type = match repr_type {
- SignedInt(ast::TyIs) =>
- SignedInt(cx.sess.target.int_type),
- UnsignedInt(ast::TyUs) =>
- UnsignedInt(cx.sess.target.uint_type),
- other => other
- };
-
- (repr_type, repr_type_ty)
-}
-
-fn report_discrim_overflow(cx: &ctxt,
- variant_span: Span,
- variant_name: &str,
- repr_type: attr::IntType,
- prev_val: Disr) {
- let computed_value = repr_type.disr_wrap_incr(Some(prev_val));
- let computed_value = repr_type.disr_string(computed_value);
- let prev_val = repr_type.disr_string(prev_val);
- let repr_type = repr_type.to_ty(cx);
- span_err!(cx.sess, variant_span, E0370,
- "enum discriminant overflowed on value after {}: {}; \
- set explicitly via {} = {} if that is desired outcome",
- prev_val, repr_type, variant_name, computed_value);
-}
-
-// This computes the discriminant values for the sequence of Variants
-// attached to a particular enum, taking into account the #[repr] (if
-// any) provided via the `opt_hint`.
-fn compute_enum_variants<'tcx>(cx: &ctxt<'tcx>,
- vs: &'tcx [P<ast::Variant>],
- opt_hint: Option<&attr::ReprAttr>)
- -> Vec<Rc<ty::VariantInfo<'tcx>>> {
- let mut variants: Vec<Rc<ty::VariantInfo>> = Vec::new();
- let mut prev_disr_val: Option<ty::Disr> = None;
-
- let (repr_type, repr_type_ty) = ty::enum_repr_type(cx, opt_hint);
-
- for v in vs {
- // If the discriminant value is specified explicitly in the
- // enum, check whether the initialization expression is valid,
- // otherwise use the last value plus one.
- let current_disr_val;
-
- // This closure marks cases where, when an error occurs during
- // the computation, attempt to assign a (hopefully) fresh
- // value to avoid spurious error reports downstream.
- let attempt_fresh_value = move || -> Disr {
- repr_type.disr_wrap_incr(prev_disr_val)
- };
- match v.node.disr_expr {
- Some(ref e) => {
- debug!("disr expr, checking {}", pprust::expr_to_string(&**e));
-
- // check_expr (from check_const pass) doesn't guarantee
- // that the expression is in a form that eval_const_expr can
- // handle, so we may still get an internal compiler error
- //
- // pnkfelix: The above comment was transcribed from
- // the version of this code taken from rustc_typeck.
- // Presumably the implication is that we need to deal
- // with such ICE's as they arise.
- //
- // Since this can be called from `ty::enum_variants`
- // anyway, best thing is to make `eval_const_expr`
- // more robust (on case-by-case basis).
-
- match const_eval::eval_const_expr_partial(cx, &**e, Some(repr_type_ty)) {
- Ok(ConstVal::Int(val)) => current_disr_val = val as Disr,
- Ok(ConstVal::Uint(val)) => current_disr_val = val as Disr,
- Ok(_) => {
- let sign_desc = if repr_type.is_signed() { "signed" } else { "unsigned" };
- span_err!(cx.sess, e.span, E0079,
- "expected {} integer constant",
- sign_desc);
- current_disr_val = attempt_fresh_value();
- }
- Err(ref err) => {
- span_err!(cx.sess, err.span, E0080,
- "constant evaluation error: {}",
- err.description());
- current_disr_val = attempt_fresh_value();
+ pub fn provided_trait_methods(&self, id: ast::DefId) -> Vec<Rc<Method<'tcx>>> {
+ if is_local(id) {
+ if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id.node).node {
+ ms.iter().filter_map(|ti| {
+ if let ast::MethodTraitItem(_, Some(_)) = ti.node {
+ match self.impl_or_trait_item(ast_util::local_def(ti.id)) {
+ MethodTraitItem(m) => Some(m),
+ _ => {
+ self.sess.bug("provided_trait_methods(): \
+ non-method item found from \
+ looking up provided method?!")
+ }
+ }
+ } else {
+ None
}
+ }).collect()
+ } else {
+ self.sess.bug(&format!("provided_trait_methods: `{:?}` is not a trait", id))
+ }
+ } else {
+ csearch::get_provided_trait_methods(self, id)
+ }
+ }
+
+ pub fn associated_consts(&self, id: ast::DefId) -> Vec<Rc<AssociatedConst<'tcx>>> {
+ if is_local(id) {
+ match self.map.expect_item(id.node).node {
+ ItemTrait(_, _, _, ref tis) => {
+ tis.iter().filter_map(|ti| {
+ if let ast::ConstTraitItem(_, _) = ti.node {
+ match self.impl_or_trait_item(ast_util::local_def(ti.id)) {
+ ConstTraitItem(ac) => Some(ac),
+ _ => {
+ self.sess.bug("associated_consts(): \
+ non-const item found from \
+ looking up a constant?!")
+ }
+ }
+ } else {
+ None
+ }
+ }).collect()
}
- },
- None => {
- current_disr_val = match prev_disr_val {
- Some(prev_disr_val) => {
- if let Some(v) = repr_type.disr_incr(prev_disr_val) {
- v
+ ItemImpl(_, _, _, _, _, ref iis) => {
+ iis.iter().filter_map(|ii| {
+ if let ast::ConstImplItem(_, _) = ii.node {
+ match self.impl_or_trait_item(ast_util::local_def(ii.id)) {
+ ConstTraitItem(ac) => Some(ac),
+ _ => {
+ self.sess.bug("associated_consts(): \
+ non-const item found from \
+ looking up a constant?!")
+ }
+ }
} else {
- report_discrim_overflow(cx, v.span, v.node.name.as_str(),
- repr_type, prev_disr_val);
- attempt_fresh_value()
+ None
}
- }
- None => ty::INITIAL_DISCRIMINANT_VALUE
+ }).collect()
+ }
+ _ => {
+ self.sess.bug(&format!("associated_consts: `{:?}` is not a trait \
+ or impl", id))
}
}
+ } else {
+ csearch::get_associated_consts(self, id)
}
-
- let variant_info = Rc::new(VariantInfo::from_ast_variant(cx, &**v, current_disr_val));
- prev_disr_val = Some(current_disr_val);
-
- variants.push(variant_info);
}
- return variants;
-}
+ pub fn trait_items(&self, trait_did: ast::DefId) -> Rc<Vec<ImplOrTraitItem<'tcx>>> {
+ let mut trait_items = self.trait_items_cache.borrow_mut();
+ match trait_items.get(&trait_did).cloned() {
+ Some(trait_items) => trait_items,
+ None => {
+ let def_ids = self.trait_item_def_ids(trait_did);
+ let items: Rc<Vec<ImplOrTraitItem>> =
+ Rc::new(def_ids.iter()
+ .map(|d| self.impl_or_trait_item(d.def_id()))
+ .collect());
+ trait_items.insert(trait_did, items.clone());
+ items
+ }
+ }
+ }
-pub fn enum_variants<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
- -> Rc<Vec<Rc<VariantInfo<'tcx>>>> {
- memoized(&cx.enum_var_cache, id, |id: ast::DefId| {
- if ast::LOCAL_CRATE != id.krate {
- Rc::new(csearch::get_enum_variants(cx, id))
- } else {
- match cx.map.get(id.node) {
- ast_map::NodeItem(ref item) => {
+ pub fn trait_impl_polarity(&self, id: ast::DefId) -> Option<ast::ImplPolarity> {
+ if id.krate == ast::LOCAL_CRATE {
+ match self.map.find(id.node) {
+ Some(ast_map::NodeItem(item)) => {
match item.node {
- ast::ItemEnum(ref enum_definition, _) => {
- Rc::new(compute_enum_variants(
- cx,
- &enum_definition.variants,
- lookup_repr_hints(cx, id).get(0)))
- }
- _ => {
- cx.sess.bug("enum_variants: id not bound to an enum")
- }
+ ast::ItemImpl(_, polarity, _, _, _, _) => Some(polarity),
+ _ => None
}
}
- _ => cx.sess.bug("enum_variants: id not bound to an enum")
+ _ => None
}
+ } else {
+ csearch::get_impl_polarity(self, id)
}
- })
-}
-
-// Returns information about the enum variant with the given ID:
-pub fn enum_variant_with_id<'tcx>(cx: &ctxt<'tcx>,
- enum_id: ast::DefId,
- variant_id: ast::DefId)
- -> Rc<VariantInfo<'tcx>> {
- enum_variants(cx, enum_id).iter()
- .find(|variant| variant.id == variant_id)
- .expect("enum_variant_with_id(): no variant exists with that ID")
- .clone()
-}
-
-
-// If the given item is in an external crate, looks up its type and adds it to
-// the type cache. Returns the type parameters and type.
-pub fn lookup_item_type<'tcx>(cx: &ctxt<'tcx>,
- did: ast::DefId)
- -> TypeScheme<'tcx> {
- lookup_locally_or_in_crate_store(
- "tcache", did, &cx.tcache,
- || csearch::get_type(cx, did))
-}
+ }
-/// Given the did of a trait, returns its canonical trait ref.
-pub fn lookup_trait_def<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId)
- -> &'tcx TraitDef<'tcx> {
- lookup_locally_or_in_crate_store(
- "trait_defs", did, &cx.trait_defs,
- || cx.arenas.trait_defs.alloc(csearch::get_trait_def(cx, did))
- )
-}
+ pub fn custom_coerce_unsized_kind(&self, did: ast::DefId) -> CustomCoerceUnsized {
+ memoized(&self.custom_coerce_unsized_kinds, did, |did: DefId| {
+ let (kind, src) = if did.krate != ast::LOCAL_CRATE {
+ (csearch::get_custom_coerce_unsized_kind(self, did), "external")
+ } else {
+ (None, "local")
+ };
-/// Given the did of an item, returns its full set of predicates.
-pub fn lookup_predicates<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId)
- -> GenericPredicates<'tcx>
-{
- lookup_locally_or_in_crate_store(
- "predicates", did, &cx.predicates,
- || csearch::get_predicates(cx, did))
-}
+ match kind {
+ Some(kind) => kind,
+ None => {
+ self.sess.bug(&format!("custom_coerce_unsized_kind: \
+ {} impl `{}` is missing its kind",
+ src, self.item_path_str(did)));
+ }
+ }
+ })
+ }
-/// Given the did of a trait, returns its superpredicates.
-pub fn lookup_super_predicates<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId)
- -> GenericPredicates<'tcx>
-{
- lookup_locally_or_in_crate_store(
- "super_predicates", did, &cx.super_predicates,
- || csearch::get_super_predicates(cx, did))
-}
+ pub fn impl_or_trait_item(&self, id: ast::DefId) -> ImplOrTraitItem<'tcx> {
+ lookup_locally_or_in_crate_store(
+ "impl_or_trait_items", id, &self.impl_or_trait_items,
+ || csearch::get_impl_or_trait_item(self, id))
+ }
-/// Get the attributes of a definition.
-pub fn get_attrs<'tcx>(tcx: &'tcx ctxt, did: DefId)
- -> Cow<'tcx, [ast::Attribute]> {
- if is_local(did) {
- Cow::Borrowed(tcx.map.attrs(did.node))
- } else {
- Cow::Owned(csearch::get_item_attrs(&tcx.sess.cstore, did))
+ pub fn trait_item_def_ids(&self, id: ast::DefId) -> Rc<Vec<ImplOrTraitItemId>> {
+ lookup_locally_or_in_crate_store(
+ "trait_item_def_ids", id, &self.trait_item_def_ids,
+ || Rc::new(csearch::get_trait_item_def_ids(&self.sess.cstore, id)))
}
-}
-/// Determine whether an item is annotated with an attribute
-pub fn has_attr(tcx: &ctxt, did: DefId, attr: &str) -> bool {
- get_attrs(tcx, did).iter().any(|item| item.check_name(attr))
-}
+ /// Returns the trait-ref corresponding to a given impl, or None if it is
+ /// an inherent impl.
+ pub fn impl_trait_ref(&self, id: ast::DefId) -> Option<TraitRef<'tcx>> {
+ lookup_locally_or_in_crate_store(
+ "impl_trait_refs", id, &self.impl_trait_refs,
+ || csearch::get_impl_trait(self, id))
+ }
-/// Determine whether an item is annotated with `#[repr(packed)]`
-pub fn lookup_packed(tcx: &ctxt, did: DefId) -> bool {
- lookup_repr_hints(tcx, did).contains(&attr::ReprPacked)
-}
+ /// Returns whether this DefId refers to an impl
+ pub fn is_impl(&self, id: ast::DefId) -> bool {
+ if id.krate == ast::LOCAL_CRATE {
+ if let Some(ast_map::NodeItem(
+ &ast::Item { node: ast::ItemImpl(..), .. })) = self.map.find(id.node) {
+ true
+ } else {
+ false
+ }
+ } else {
+ csearch::is_impl(&self.sess.cstore, id)
+ }
+ }
-/// Determine whether an item is annotated with `#[simd]`
-pub fn lookup_simd(tcx: &ctxt, did: DefId) -> bool {
- has_attr(tcx, did, "simd")
-}
+ pub fn trait_ref_to_def_id(&self, tr: &ast::TraitRef) -> ast::DefId {
+ self.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id()
+ }
-/// Obtain the representation annotation for a struct definition.
-pub fn lookup_repr_hints(tcx: &ctxt, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
- memoized(&tcx.repr_hint_cache, did, |did: DefId| {
- Rc::new(if did.krate == LOCAL_CRATE {
- get_attrs(tcx, did).iter().flat_map(|meta| {
- attr::find_repr_attrs(tcx.sess.diagnostic(), meta).into_iter()
- }).collect()
- } else {
- csearch::get_repr_attrs(&tcx.sess.cstore, did)
- })
- })
-}
+ pub fn try_add_builtin_trait(&self,
+ trait_def_id: ast::DefId,
+ builtin_bounds: &mut EnumSet<BuiltinBound>)
+ -> bool
+ {
+ //! Checks whether `trait_def_id` refers to one of the builtin
+ //! traits, like `Send`, and adds the corresponding bound to the
+ //! set `builtin_bounds` if so. Returns true if `trait_def_id`
+ //! is a builtin trait.
-// Look up a field ID, whether or not it's local
-pub fn lookup_field_type_unsubstituted<'tcx>(tcx: &ctxt<'tcx>,
- struct_id: DefId,
- id: DefId)
- -> Ty<'tcx> {
- if id.krate == ast::LOCAL_CRATE {
- node_id_to_type(tcx, id.node)
- } else {
- let mut tcache = tcx.tcache.borrow_mut();
- tcache.entry(id).or_insert_with(|| csearch::get_field_type(tcx, struct_id, id)).ty
+ match self.lang_items.to_builtin_kind(trait_def_id) {
+ Some(bound) => { builtin_bounds.insert(bound); true }
+ None => false
+ }
}
-}
+ pub fn substd_enum_variants(&self,
+ id: ast::DefId,
+ substs: &Substs<'tcx>)
+ -> Vec<Rc<VariantInfo<'tcx>>> {
+ self.enum_variants(id).iter().map(|variant_info| {
+ let substd_args = variant_info.args.iter()
+ .map(|aty| aty.subst(self, substs)).collect::<Vec<_>>();
-// Look up a field ID, whether or not it's local
-// Takes a list of type substs in case the struct is generic
-pub fn lookup_field_type<'tcx>(tcx: &ctxt<'tcx>,
- struct_id: DefId,
- id: DefId,
- substs: &Substs<'tcx>)
- -> Ty<'tcx> {
- lookup_field_type_unsubstituted(tcx, struct_id, id).subst(tcx, substs)
-}
+ let substd_ctor_ty = variant_info.ctor_ty.subst(self, substs);
-// Look up the list of field names and IDs for a given struct.
-// Panics if the id is not bound to a struct.
-pub fn lookup_struct_fields(cx: &ctxt, did: ast::DefId) -> Vec<field_ty> {
- if did.krate == ast::LOCAL_CRATE {
- let struct_fields = cx.struct_fields.borrow();
- match struct_fields.get(&did) {
- Some(fields) => (**fields).clone(),
- _ => {
- cx.sess.bug(
- &format!("ID not mapped to struct fields: {}",
- cx.map.node_to_string(did.node)));
- }
- }
- } else {
- csearch::get_struct_fields(&cx.sess.cstore, did)
+ Rc::new(VariantInfo {
+ args: substd_args,
+ ctor_ty: substd_ctor_ty,
+ ..(**variant_info).clone()
+ })
+ }).collect()
}
-}
-pub fn is_tuple_struct(cx: &ctxt, did: ast::DefId) -> bool {
- let fields = lookup_struct_fields(cx, did);
- !fields.is_empty() && fields.iter().all(|f| f.name == token::special_names::unnamed_field)
-}
+ pub fn item_path_str(&self, id: ast::DefId) -> String {
+ self.with_path(id, |path| ast_map::path_to_string(path))
+ }
-// Returns a list of fields corresponding to the struct's items. trans uses
-// this. Takes a list of substs with which to instantiate field types.
-pub fn struct_fields<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: &Substs<'tcx>)
- -> Vec<field<'tcx>> {
- lookup_struct_fields(cx, did).iter().map(|f| {
- field {
- name: f.name,
- mt: mt {
- ty: lookup_field_type(cx, did, f.id, substs),
- mutbl: MutImmutable
- }
- }
- }).collect()
-}
+ /* If struct_id names a struct with a dtor. */
+ pub fn ty_dtor(&self, struct_id: DefId) -> DtorKind {
+ match self.destructor_for_type.borrow().get(&struct_id) {
+ Some(&method_def_id) => {
+ let flag = !self.has_attr(struct_id, "unsafe_no_drop_flag");
-// Returns a list of fields corresponding to the tuple's items. trans uses
-// this.
-pub fn tup_fields<'tcx>(v: &[Ty<'tcx>]) -> Vec<field<'tcx>> {
- v.iter().enumerate().map(|(i, &f)| {
- field {
- name: token::intern(&i.to_string()),
- mt: mt {
- ty: f,
- mutbl: MutImmutable
+ TraitDtor(method_def_id, flag)
}
+ None => NoDtor,
}
- }).collect()
-}
+ }
-/// Returns the deeply last field of nested structures, or the same type,
-/// if not a structure at all. Corresponds to the only possible unsized
-/// field, and its type can be used to determine unsizing strategy.
-pub fn struct_tail<'tcx>(cx: &ctxt<'tcx>, mut ty: Ty<'tcx>) -> Ty<'tcx> {
- while let TyStruct(def_id, substs) = ty.sty {
- match struct_fields(cx, def_id, substs).last() {
- Some(f) => ty = f.mt.ty,
- None => break
- }
+ pub fn has_dtor(&self, struct_id: DefId) -> bool {
+ self.destructor_for_type.borrow().contains_key(&struct_id)
}
- ty
-}
-/// Same as applying struct_tail on `source` and `target`, but only
-/// keeps going as long as the two types are instances of the same
-/// structure definitions.
-/// For `(Foo<Foo<T>>, Foo<Trait>)`, the result will be `(Foo<T>, Trait)`,
-/// whereas struct_tail produces `T`, and `Trait`, respectively.
-pub fn struct_lockstep_tails<'tcx>(cx: &ctxt<'tcx>,
- source: Ty<'tcx>,
- target: Ty<'tcx>)
- -> (Ty<'tcx>, Ty<'tcx>) {
- let (mut a, mut b) = (source, target);
- while let (&TyStruct(a_did, a_substs), &TyStruct(b_did, b_substs)) = (&a.sty, &b.sty) {
- if a_did != b_did {
- continue;
- }
- if let Some(a_f) = struct_fields(cx, a_did, a_substs).last() {
- if let Some(b_f) = struct_fields(cx, b_did, b_substs).last() {
- a = a_f.mt.ty;
- b = b_f.mt.ty;
- } else {
- break;
- }
+ pub fn with_path<T, F>(&self, id: ast::DefId, f: F) -> T where
+ F: FnOnce(ast_map::PathElems) -> T,
+ {
+ if id.krate == ast::LOCAL_CRATE {
+ self.map.with_path(id.node, f)
} else {
- break;
+ f(csearch::get_item_path(self, id).iter().cloned().chain(LinkedPath::empty()))
}
}
- (a, b)
-}
-#[derive(Copy, Clone)]
-pub struct ClosureUpvar<'tcx> {
- pub def: def::Def,
- pub span: Span,
- pub ty: Ty<'tcx>,
-}
+ pub fn enum_is_univariant(&self, id: ast::DefId) -> bool {
+ self.enum_variants(id).len() == 1
+ }
-// Returns a list of `ClosureUpvar`s for each upvar.
-pub fn closure_upvars<'tcx>(typer: &mc::Typer<'tcx>,
- closure_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ClosureUpvar<'tcx>>>
-{
- // Presently an unboxed closure type cannot "escape" out of a
- // function, so we will only encounter ones that originated in the
- // local crate or were inlined into it along with some function.
- // This may change if abstract return types of some sort are
- // implemented.
- assert!(closure_id.krate == ast::LOCAL_CRATE);
- let tcx = typer.tcx();
- match tcx.freevars.borrow().get(&closure_id.node) {
- None => Some(vec![]),
- Some(ref freevars) => {
- freevars.iter()
- .map(|freevar| {
- let freevar_def_id = freevar.def.def_id();
- let freevar_ty = match typer.node_ty(freevar_def_id.node) {
- Ok(t) => { t }
- Err(()) => { return None; }
- };
- let freevar_ty = freevar_ty.subst(tcx, substs);
-
- let upvar_id = ty::UpvarId {
- var_id: freevar_def_id.node,
- closure_expr_id: closure_id.node
- };
-
- typer.upvar_capture(upvar_id).map(|capture| {
- let freevar_ref_ty = match capture {
- UpvarCapture::ByValue => {
- freevar_ty
- }
- UpvarCapture::ByRef(borrow) => {
- mk_rptr(tcx,
- tcx.mk_region(borrow.region),
- ty::mt {
- ty: freevar_ty,
- mutbl: borrow.kind.to_mutbl_lossy(),
- })
- }
- };
+ /// Returns `(normalized_type, ty)`, where `normalized_type` is the
+ /// IntType representation of one of {i64,i32,i16,i8,u64,u32,u16,u8},
+ /// and `ty` is the original type (i.e. may include `isize` or
+ /// `usize`).
+ pub fn enum_repr_type(&self, opt_hint: Option<&attr::ReprAttr>)
+ -> (attr::IntType, Ty<'tcx>) {
+ let repr_type = match opt_hint {
+ // Feed in the given type
+ Some(&attr::ReprInt(_, int_t)) => int_t,
+ // ... but provide sensible default if none provided
+ //
+ // NB. Historically `fn enum_variants` generated i64 here, while
+ // rustc_typeck::check would generate isize.
+ _ => SignedInt(ast::TyIs),
+ };
- ClosureUpvar {
- def: freevar.def,
- span: freevar.span,
- ty: freevar_ref_ty,
- }
- })
- })
- .collect()
- }
- }
-}
-
-// Returns the repeat count for a repeating vector expression.
-pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> usize {
- match const_eval::eval_const_expr_partial(tcx, count_expr, Some(tcx.types.usize)) {
- Ok(val) => {
- let found = match val {
- ConstVal::Uint(count) => return count as usize,
- ConstVal::Int(count) if count >= 0 => return count as usize,
- ConstVal::Int(_) => "negative integer",
- ConstVal::Float(_) => "float",
- ConstVal::Str(_) => "string",
- ConstVal::Bool(_) => "boolean",
- ConstVal::Binary(_) => "binary array",
- ConstVal::Struct(..) => "struct",
- ConstVal::Tuple(_) => "tuple"
- };
- span_err!(tcx.sess, count_expr.span, E0306,
- "expected positive integer for repeat count, found {}",
- found);
- }
- Err(err) => {
- let err_description = err.description();
- let found = match count_expr.node {
- ast::ExprPath(None, ast::Path {
- global: false,
- ref segments,
- ..
- }) if segments.len() == 1 =>
- format!("{}", "found variable"),
- _ =>
- format!("but {}", err_description),
+ let repr_type_ty = repr_type.to_ty(self);
+ let repr_type = match repr_type {
+ SignedInt(ast::TyIs) =>
+ SignedInt(self.sess.target.int_type),
+ UnsignedInt(ast::TyUs) =>
+ UnsignedInt(self.sess.target.uint_type),
+ other => other
+ };
+
+ (repr_type, repr_type_ty)
+ }
+
+ fn report_discrim_overflow(&self,
+ variant_span: Span,
+ variant_name: &str,
+ repr_type: attr::IntType,
+ prev_val: Disr) {
+ let computed_value = repr_type.disr_wrap_incr(Some(prev_val));
+ let computed_value = repr_type.disr_string(computed_value);
+ let prev_val = repr_type.disr_string(prev_val);
+ let repr_type = repr_type.to_ty(self);
+ span_err!(self.sess, variant_span, E0370,
+ "enum discriminant overflowed on value after {}: {}; \
+ set explicitly via {} = {} if that is desired outcome",
+ prev_val, repr_type, variant_name, computed_value);
+ }
+
+ // This computes the discriminant values for the sequence of Variants
+ // attached to a particular enum, taking into account the #[repr] (if
+ // any) provided via the `opt_hint`.
+ fn compute_enum_variants(&self,
+ vs: &'tcx [P<ast::Variant>],
+ opt_hint: Option<&attr::ReprAttr>)
+ -> Vec<Rc<ty::VariantInfo<'tcx>>> {
+ let mut variants: Vec<Rc<ty::VariantInfo>> = Vec::new();
+ let mut prev_disr_val: Option<ty::Disr> = None;
+
+ let (repr_type, repr_type_ty) = self.enum_repr_type(opt_hint);
+
+ for v in vs {
+ // If the discriminant value is specified explicitly in the
+ // enum, check whether the initialization expression is valid,
+ // otherwise use the last value plus one.
+ let current_disr_val;
+
+ // When an error occurs during the computation, this closure
+ // attempts to assign a (hopefully) fresh value to avoid
+ // spurious error reports downstream.
+ let attempt_fresh_value = move || -> Disr {
+ repr_type.disr_wrap_incr(prev_disr_val)
};
- span_err!(tcx.sess, count_expr.span, E0307,
- "expected constant integer for repeat count, {}",
- found);
- }
- }
- 0
-}
-// Iterate over a type parameter's bounded traits and any supertraits
-// of those traits, ignoring kinds.
-// Here, the supertraits are the transitive closure of the supertrait
-// relation on the supertraits from each bounded trait's constraint
-// list.
-pub fn each_bound_trait_and_supertraits<'tcx, F>(tcx: &ctxt<'tcx>,
- bounds: &[PolyTraitRef<'tcx>],
- mut f: F)
- -> bool where
- F: FnMut(PolyTraitRef<'tcx>) -> bool,
-{
- for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
- if !f(bound_trait_ref) {
- return false;
+ match v.node.disr_expr {
+ Some(ref e) => {
+ debug!("disr expr, checking {}", pprust::expr_to_string(&**e));
+
+ let hint = UncheckedExprHint(repr_type_ty);
+ match const_eval::eval_const_expr_partial(self, &**e, hint) {
+ Ok(ConstVal::Int(val)) => current_disr_val = val as Disr,
+ Ok(ConstVal::Uint(val)) => current_disr_val = val as Disr,
+ Ok(_) => {
+ let sign_desc = if repr_type.is_signed() {
+ "signed"
+ } else {
+ "unsigned"
+ };
+ span_err!(self.sess, e.span, E0079,
+ "expected {} integer constant",
+ sign_desc);
+ current_disr_val = attempt_fresh_value();
+ },
+ Err(ref err) => {
+ span_err!(self.sess, err.span, E0080,
+ "constant evaluation error: {}",
+ err.description());
+ current_disr_val = attempt_fresh_value();
+ },
+ }
+ },
+ None => {
+ current_disr_val = match prev_disr_val {
+ Some(prev_disr_val) => {
+ if let Some(v) = repr_type.disr_incr(prev_disr_val) {
+ v
+ } else {
+ self.report_discrim_overflow(v.span, &v.node.name.name.as_str(),
+ repr_type, prev_disr_val);
+ attempt_fresh_value()
+ }
+ }
+ None => ty::INITIAL_DISCRIMINANT_VALUE,
+ }
+ },
+ }
+
+ let variant_info = Rc::new(VariantInfo::from_ast_variant(self, &**v, current_disr_val));
+ prev_disr_val = Some(current_disr_val);
+
+ variants.push(variant_info);
}
+
+ variants
}
- return true;
-}
-/// Given a set of predicates that apply to an object type, returns
-/// the region bounds that the (erased) `Self` type must
-/// outlive. Precisely *because* the `Self` type is erased, the
-/// parameter `erased_self_ty` must be supplied to indicate what type
-/// has been used to represent `Self` in the predicates
-/// themselves. This should really be a unique type; `FreshTy(0)` is a
-/// popular choice.
-///
-/// Requires that trait definitions have been processed so that we can
-/// elaborate predicates and walk supertraits.
-pub fn required_region_bounds<'tcx>(tcx: &ctxt<'tcx>,
- erased_self_ty: Ty<'tcx>,
- predicates: Vec<ty::Predicate<'tcx>>)
- -> Vec<ty::Region>
-{
- debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})",
- erased_self_ty,
- predicates);
-
- assert!(!erased_self_ty.has_escaping_regions());
-
- traits::elaborate_predicates(tcx, predicates)
- .filter_map(|predicate| {
- match predicate {
- ty::Predicate::Projection(..) |
- ty::Predicate::Trait(..) |
- ty::Predicate::Equate(..) |
- ty::Predicate::RegionOutlives(..) => {
- None
- }
- ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => {
- // Search for a bound of the form `erased_self_ty
- // : 'a`, but be wary of something like `for<'a>
- // erased_self_ty : 'a` (we interpret a
- // higher-ranked bound like that as 'static,
- // though at present the code in `fulfill.rs`
- // considers such bounds to be unsatisfiable, so
- // it's kind of a moot point since you could never
- // construct such an object, but this seems
- // correct even if that code changes).
- if t == erased_self_ty && !r.has_escaping_regions() {
- if r.has_escaping_regions() {
- Some(ty::ReStatic)
- } else {
- Some(r)
+ pub fn enum_variants(&self, id: ast::DefId) -> Rc<Vec<Rc<VariantInfo<'tcx>>>> {
+ memoized(&self.enum_var_cache, id, |id: ast::DefId| {
+ if ast::LOCAL_CRATE != id.krate {
+ Rc::new(csearch::get_enum_variants(self, id))
+ } else {
+ match self.map.get(id.node) {
+ ast_map::NodeItem(ref item) => {
+ match item.node {
+ ast::ItemEnum(ref enum_definition, _) => {
+ Rc::new(self.compute_enum_variants(
+ &enum_definition.variants,
+ self.lookup_repr_hints(id).get(0)))
+ }
+ _ => {
+ self.sess.bug("enum_variants: id not bound to an enum")
+ }
}
- } else {
- None
}
+ _ => self.sess.bug("enum_variants: id not bound to an enum")
}
}
})
- .collect()
-}
-
-pub fn item_variances(tcx: &ctxt, item_id: ast::DefId) -> Rc<ItemVariances> {
- lookup_locally_or_in_crate_store(
- "item_variance_map", item_id, &tcx.item_variance_map,
- || Rc::new(csearch::get_item_variances(&tcx.sess.cstore, item_id)))
-}
-
-pub fn trait_has_default_impl(tcx: &ctxt, trait_def_id: DefId) -> bool {
- populate_implementations_for_trait_if_necessary(tcx, trait_def_id);
-
- let def = lookup_trait_def(tcx, trait_def_id);
- def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL)
-}
-
-/// Records a trait-to-implementation mapping.
-pub fn record_trait_has_default_impl(tcx: &ctxt, trait_def_id: DefId) {
- let def = lookup_trait_def(tcx, trait_def_id);
- def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL)
-}
-
-/// Load primitive inherent implementations if necessary
-pub fn populate_implementations_for_primitive_if_necessary(tcx: &ctxt,
- primitive_def_id: ast::DefId) {
- if primitive_def_id.krate == LOCAL_CRATE {
- return
}
- if tcx.populated_external_primitive_impls.borrow().contains(&primitive_def_id) {
- return
+ // Returns information about the enum variant with the given ID:
+ pub fn enum_variant_with_id(&self,
+ enum_id: ast::DefId,
+ variant_id: ast::DefId)
+ -> Rc<VariantInfo<'tcx>> {
+ self.enum_variants(enum_id).iter()
+ .find(|variant| variant.id == variant_id)
+ .expect("enum_variant_with_id(): no variant exists with that ID")
+ .clone()
}
- debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}",
- primitive_def_id);
+ // Register a given item type
+ pub fn register_item_type(&self, did: ast::DefId, ty: TypeScheme<'tcx>) {
+ self.tcache.borrow_mut().insert(did, ty);
+ }
- let impl_items = csearch::get_impl_items(&tcx.sess.cstore, primitive_def_id);
+ // If the given item is in an external crate, looks up its type and adds it to
+ // the type cache. Returns the type parameters and type.
+ pub fn lookup_item_type(&self, did: ast::DefId) -> TypeScheme<'tcx> {
+ lookup_locally_or_in_crate_store(
+ "tcache", did, &self.tcache,
+ || csearch::get_type(self, did))
+ }
- // Store the implementation info.
- tcx.impl_items.borrow_mut().insert(primitive_def_id, impl_items);
- tcx.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id);
-}
+ /// Given the did of a trait, returns its canonical trait ref.
+ pub fn lookup_trait_def(&self, did: ast::DefId) -> &'tcx TraitDef<'tcx> {
+ lookup_locally_or_in_crate_store(
+ "trait_defs", did, &self.trait_defs,
+ || self.arenas.trait_defs.alloc(csearch::get_trait_def(self, did))
+ )
+ }
-/// Populates the type context with all the inherent implementations for
-/// the given type if necessary.
-pub fn populate_inherent_implementations_for_type_if_necessary(tcx: &ctxt,
- type_id: ast::DefId) {
- if type_id.krate == LOCAL_CRATE {
- return
+ /// Given the did of an item, returns its full set of predicates.
+ pub fn lookup_predicates(&self, did: ast::DefId) -> GenericPredicates<'tcx> {
+ lookup_locally_or_in_crate_store(
+ "predicates", did, &self.predicates,
+ || csearch::get_predicates(self, did))
}
- if tcx.populated_external_types.borrow().contains(&type_id) {
- return
+ /// Given the did of a trait, returns its superpredicates.
+ pub fn lookup_super_predicates(&self, did: ast::DefId) -> GenericPredicates<'tcx> {
+ lookup_locally_or_in_crate_store(
+ "super_predicates", did, &self.super_predicates,
+ || csearch::get_super_predicates(self, did))
}
- debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}", type_id);
+ /// Get the attributes of a definition.
+ pub fn get_attrs(&self, did: DefId) -> Cow<'tcx, [ast::Attribute]> {
+ if is_local(did) {
+ Cow::Borrowed(self.map.attrs(did.node))
+ } else {
+ Cow::Owned(csearch::get_item_attrs(&self.sess.cstore, did))
+ }
+ }
- let mut inherent_impls = Vec::new();
- csearch::each_inherent_implementation_for_type(&tcx.sess.cstore, type_id, |impl_def_id| {
- // Record the implementation.
- inherent_impls.push(impl_def_id);
+ /// Determine whether an item is annotated with an attribute
+ pub fn has_attr(&self, did: DefId, attr: &str) -> bool {
+ self.get_attrs(did).iter().any(|item| item.check_name(attr))
+ }
- // Store the implementation info.
- let impl_items = csearch::get_impl_items(&tcx.sess.cstore, impl_def_id);
- tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
- });
+ /// Determine whether an item is annotated with `#[repr(packed)]`
+ pub fn lookup_packed(&self, did: DefId) -> bool {
+ self.lookup_repr_hints(did).contains(&attr::ReprPacked)
+ }
- tcx.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls));
- tcx.populated_external_types.borrow_mut().insert(type_id);
-}
+ /// Determine whether an item is annotated with `#[simd]`
+ pub fn lookup_simd(&self, did: DefId) -> bool {
+ self.has_attr(did, "simd")
+ }
-/// Populates the type context with all the implementations for the given
-/// trait if necessary.
-pub fn populate_implementations_for_trait_if_necessary(tcx: &ctxt, trait_id: ast::DefId) {
- if trait_id.krate == LOCAL_CRATE {
- return
+ /// Obtain the representation annotation for a struct definition.
+ pub fn lookup_repr_hints(&self, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
+ memoized(&self.repr_hint_cache, did, |did: DefId| {
+ Rc::new(if did.krate == LOCAL_CRATE {
+ self.get_attrs(did).iter().flat_map(|meta| {
+ attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter()
+ }).collect()
+ } else {
+ csearch::get_repr_attrs(&self.sess.cstore, did)
+ })
+ })
}
- let def = lookup_trait_def(tcx, trait_id);
- if def.flags.get().intersects(TraitFlags::IMPLS_VALID) {
- return;
+ // Look up a field ID, whether or not it's local
+ pub fn lookup_field_type_unsubstituted(&self,
+ struct_id: DefId,
+ id: DefId)
+ -> Ty<'tcx> {
+ if id.krate == ast::LOCAL_CRATE {
+ self.node_id_to_type(id.node)
+ } else {
+ memoized(&self.tcache, id,
+ |id| csearch::get_field_type(self, struct_id, id)).ty
+ }
}
- debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def);
- if csearch::is_defaulted_trait(&tcx.sess.cstore, trait_id) {
- record_trait_has_default_impl(tcx, trait_id);
+ // Look up a field ID, whether or not it's local
+ // Takes a list of type substs in case the struct is generic
+ pub fn lookup_field_type(&self,
+ struct_id: DefId,
+ id: DefId,
+ substs: &Substs<'tcx>)
+ -> Ty<'tcx> {
+ self.lookup_field_type_unsubstituted(struct_id, id).subst(self, substs)
}
- csearch::each_implementation_for_trait(&tcx.sess.cstore, trait_id, |implementation_def_id| {
- let impl_items = csearch::get_impl_items(&tcx.sess.cstore, implementation_def_id);
- let trait_ref = impl_trait_ref(tcx, implementation_def_id).unwrap();
- // Record the trait->implementation mapping.
- def.record_impl(tcx, implementation_def_id, trait_ref);
-
- // For any methods that use a default implementation, add them to
- // the map. This is a bit unfortunate.
- for impl_item_def_id in &impl_items {
- let method_def_id = impl_item_def_id.def_id();
- match impl_or_trait_item(tcx, method_def_id) {
- MethodTraitItem(method) => {
- if let Some(source) = method.provided_source {
- tcx.provided_method_sources
- .borrow_mut()
- .insert(method_def_id, source);
- }
+ // Look up the list of field names and IDs for a given struct.
+ // Panics if the id is not bound to a struct.
+ pub fn lookup_struct_fields(&self, did: ast::DefId) -> Vec<FieldTy> {
+ if did.krate == ast::LOCAL_CRATE {
+ let struct_fields = self.struct_fields.borrow();
+ match struct_fields.get(&did) {
+ Some(fields) => (**fields).clone(),
+ _ => {
+ self.sess.bug(
+ &format!("ID not mapped to struct fields: {}",
+ self.map.node_to_string(did.node)));
}
- _ => {}
}
+ } else {
+ csearch::get_struct_fields(&self.sess.cstore, did)
}
+ }
- // Store the implementation info.
- tcx.impl_items.borrow_mut().insert(implementation_def_id, impl_items);
- });
-
- def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID);
-}
-
-/// Given the def_id of an impl, return the def_id of the trait it implements.
-/// If it implements no trait, return `None`.
-pub fn trait_id_of_impl(tcx: &ctxt,
- def_id: ast::DefId)
- -> Option<ast::DefId> {
- ty::impl_trait_ref(tcx, def_id).map(|tr| tr.def_id)
-}
+ pub fn is_tuple_struct(&self, did: ast::DefId) -> bool {
+ let fields = self.lookup_struct_fields(did);
+ !fields.is_empty() && fields.iter().all(|f| f.name == token::special_names::unnamed_field)
+ }
-/// If the given def ID describes a method belonging to an impl, return the
-/// ID of the impl that the method belongs to. Otherwise, return `None`.
-pub fn impl_of_method(tcx: &ctxt, def_id: ast::DefId)
- -> Option<ast::DefId> {
- if def_id.krate != LOCAL_CRATE {
- return match csearch::get_impl_or_trait_item(tcx,
- def_id).container() {
- TraitContainer(_) => None,
- ImplContainer(def_id) => Some(def_id),
- };
+ // Returns a list of fields corresponding to the struct's items. trans uses
+ // this. Takes a list of substs with which to instantiate field types.
+ pub fn struct_fields(&self, did: ast::DefId, substs: &Substs<'tcx>)
+ -> Vec<Field<'tcx>> {
+ self.lookup_struct_fields(did).iter().map(|f| {
+ Field {
+ name: f.name,
+ mt: TypeAndMut {
+ ty: self.lookup_field_type(did, f.id, substs),
+ mutbl: MutImmutable
+ }
+ }
+ }).collect()
}
- match tcx.impl_or_trait_items.borrow().get(&def_id).cloned() {
- Some(trait_item) => {
- match trait_item.container() {
- TraitContainer(_) => None,
- ImplContainer(def_id) => Some(def_id),
+
+ /// Returns the deeply last field of nested structures, or the same type,
+ /// if not a structure at all. Corresponds to the only possible unsized
+ /// field, and its type can be used to determine unsizing strategy.
+ pub fn struct_tail(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
+ while let TyStruct(def_id, substs) = ty.sty {
+ match self.struct_fields(def_id, substs).last() {
+ Some(f) => ty = f.mt.ty,
+ None => break
}
}
- None => None
+ ty
}
-}
-/// If the given def ID describes an item belonging to a trait (either a
-/// default method or an implementation of a trait method), return the ID of
-/// the trait that the method belongs to. Otherwise, return `None`.
-pub fn trait_of_item(tcx: &ctxt, def_id: ast::DefId) -> Option<ast::DefId> {
- if def_id.krate != LOCAL_CRATE {
- return csearch::get_trait_of_item(&tcx.sess.cstore, def_id, tcx);
+ /// Same as applying struct_tail on `source` and `target`, but only
+ /// keeps going as long as the two types are instances of the same
+ /// structure definitions.
+ /// For `(Foo<Foo<T>>, Foo<Trait>)`, the result will be `(Foo<T>, Trait)`,
+ /// whereas struct_tail produces `T`, and `Trait`, respectively.
+ pub fn struct_lockstep_tails(&self,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>)
+ -> (Ty<'tcx>, Ty<'tcx>) {
+ let (mut a, mut b) = (source, target);
+ while let (&TyStruct(a_did, a_substs), &TyStruct(b_did, b_substs)) = (&a.sty, &b.sty) {
+ if a_did != b_did {
+ break;
+ }
+ if let Some(a_f) = self.struct_fields(a_did, a_substs).last() {
+ if let Some(b_f) = self.struct_fields(b_did, b_substs).last() {
+ a = a_f.mt.ty;
+ b = b_f.mt.ty;
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ (a, b)
}
- match tcx.impl_or_trait_items.borrow().get(&def_id).cloned() {
- Some(impl_or_trait_item) => {
- match impl_or_trait_item.container() {
- TraitContainer(def_id) => Some(def_id),
- ImplContainer(def_id) => trait_id_of_impl(tcx, def_id),
+
+ // Returns the repeat count for a repeating vector expression.
+ pub fn eval_repeat_count(&self, count_expr: &ast::Expr) -> usize {
+ let hint = UncheckedExprHint(self.types.usize);
+ match const_eval::eval_const_expr_partial(self, count_expr, hint) {
+ Ok(val) => {
+ let found = match val {
+ ConstVal::Uint(count) => return count as usize,
+ ConstVal::Int(count) if count >= 0 => return count as usize,
+ const_val => const_val.description(),
+ };
+ span_err!(self.sess, count_expr.span, E0306,
+ "expected positive integer for repeat count, found {}",
+ found);
+ }
+ Err(err) => {
+ let err_msg = match count_expr.node {
+ ast::ExprPath(None, ast::Path {
+ global: false,
+ ref segments,
+ ..
+ }) if segments.len() == 1 =>
+ format!("found variable"),
+ _ => match err.kind {
+ ErrKind::MiscCatchAll => format!("but found {}", err.description()),
+ _ => format!("but {}", err.description())
+ }
+ };
+ span_err!(self.sess, count_expr.span, E0307,
+ "expected constant integer for repeat count, {}", err_msg);
}
}
- None => None
+ 0
}
-}
-/// If the given def ID describes an item belonging to a trait, (either a
-/// default method or an implementation of a trait method), return the ID of
-/// the method inside trait definition (this means that if the given def ID
-/// is already that of the original trait method, then the return value is
-/// the same).
-/// Otherwise, return `None`.
-pub fn trait_item_of_item(tcx: &ctxt, def_id: ast::DefId)
- -> Option<ImplOrTraitItemId> {
- let impl_item = match tcx.impl_or_trait_items.borrow().get(&def_id) {
- Some(m) => m.clone(),
- None => return None,
- };
- let name = impl_item.name();
- match trait_of_item(tcx, def_id) {
- Some(trait_did) => {
- let trait_items = ty::trait_items(tcx, trait_did);
- trait_items.iter()
- .position(|m| m.name() == name)
- .map(|idx| ty::trait_item(tcx, trait_did, idx).id())
+ // Iterate over a type parameter's bounded traits and any supertraits
+ // of those traits, ignoring kinds.
+ // Here, the supertraits are the transitive closure of the supertrait
+ // relation on the supertraits from each bounded trait's constraint
+ // list.
+ pub fn each_bound_trait_and_supertraits<F>(&self,
+ bounds: &[PolyTraitRef<'tcx>],
+ mut f: F)
+ -> bool where
+ F: FnMut(PolyTraitRef<'tcx>) -> bool,
+ {
+ for bound_trait_ref in traits::transitive_bounds(self, bounds) {
+ if !f(bound_trait_ref) {
+ return false;
+ }
}
- None => None
+ return true;
}
-}
-/// Creates a hash of the type `Ty` which will be the same no matter what crate
-/// context it's calculated within. This is used by the `type_id` intrinsic.
-pub fn hash_crate_independent<'tcx>(tcx: &ctxt<'tcx>, ty: Ty<'tcx>, svh: &Svh) -> u64 {
- let mut state = SipHasher::new();
- helper(tcx, ty, svh, &mut state);
- return state.finish();
+ /// Given a set of predicates that apply to an object type, returns
+ /// the region bounds that the (erased) `Self` type must
+ /// outlive. Precisely *because* the `Self` type is erased, the
+ /// parameter `erased_self_ty` must be supplied to indicate what type
+ /// has been used to represent `Self` in the predicates
+ /// themselves. This should really be a unique type; `FreshTy(0)` is a
+ /// popular choice.
+ ///
+ /// Requires that trait definitions have been processed so that we can
+ /// elaborate predicates and walk supertraits.
+ pub fn required_region_bounds(&self,
+ erased_self_ty: Ty<'tcx>,
+ predicates: Vec<ty::Predicate<'tcx>>)
+ -> Vec<ty::Region>
+ {
+ debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})",
+ erased_self_ty,
+ predicates);
+
+ assert!(!erased_self_ty.has_escaping_regions());
+
+ traits::elaborate_predicates(self, predicates)
+ .filter_map(|predicate| {
+ match predicate {
+ ty::Predicate::Projection(..) |
+ ty::Predicate::Trait(..) |
+ ty::Predicate::Equate(..) |
+ ty::Predicate::RegionOutlives(..) => {
+ None
+ }
+ ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => {
+ // Search for a bound of the form `erased_self_ty
+ // : 'a`, but be wary of something like `for<'a>
+ // erased_self_ty : 'a` (we interpret a
+ // higher-ranked bound like that as 'static,
+ // though at present the code in `fulfill.rs`
+ // considers such bounds to be unsatisfiable, so
+ // it's kind of a moot point since you could never
+ // construct such an object, but this seems
+ // correct even if that code changes).
+ if t == erased_self_ty && !r.has_escaping_regions() {
+ if r.has_escaping_regions() {
+ Some(ty::ReStatic)
+ } else {
+ Some(r)
+ }
+ } else {
+ None
+ }
+ }
+ }
+ })
+ .collect()
+ }
- fn helper<'tcx>(tcx: &ctxt<'tcx>, ty: Ty<'tcx>, svh: &Svh,
- state: &mut SipHasher) {
- macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } }
- macro_rules! hash { ($e:expr) => { $e.hash(state) } }
+ pub fn item_variances(&self, item_id: ast::DefId) -> Rc<ItemVariances> {
+ lookup_locally_or_in_crate_store(
+ "item_variance_map", item_id, &self.item_variance_map,
+ || Rc::new(csearch::get_item_variances(&self.sess.cstore, item_id)))
+ }
- let region = |state: &mut SipHasher, r: Region| {
- match r {
- ReStatic => {}
- ReLateBound(db, BrAnon(i)) => {
- db.hash(state);
- i.hash(state);
- }
- ReEmpty |
- ReEarlyBound(..) |
- ReLateBound(..) |
- ReFree(..) |
- ReScope(..) |
- ReInfer(..) => {
- tcx.sess.bug("unexpected region found when hashing a type")
- }
- }
- };
- let did = |state: &mut SipHasher, did: DefId| {
- let h = if ast_util::is_local(did) {
- svh.clone()
- } else {
- tcx.sess.cstore.get_crate_hash(did.krate)
- };
- h.as_str().hash(state);
- did.node.hash(state);
- };
- let mt = |state: &mut SipHasher, mt: mt| {
- mt.mutbl.hash(state);
- };
- let fn_sig = |state: &mut SipHasher, sig: &Binder<FnSig<'tcx>>| {
- let sig = anonymize_late_bound_regions(tcx, sig).0;
- for a in &sig.inputs { helper(tcx, *a, svh, state); }
- if let ty::FnConverging(output) = sig.output {
- helper(tcx, output, svh, state);
- }
- };
- maybe_walk_ty(ty, |ty| {
- match ty.sty {
- TyBool => byte!(2),
- TyChar => byte!(3),
- TyInt(i) => {
- byte!(4);
- hash!(i);
- }
- TyUint(u) => {
- byte!(5);
- hash!(u);
- }
- TyFloat(f) => {
- byte!(6);
- hash!(f);
- }
- TyStr => {
- byte!(7);
- }
- TyEnum(d, _) => {
- byte!(8);
- did(state, d);
- }
- TyBox(_) => {
- byte!(9);
- }
- TyArray(_, n) => {
- byte!(10);
- n.hash(state);
- }
- TySlice(_) => {
- byte!(11);
- }
- TyRawPtr(m) => {
- byte!(12);
- mt(state, m);
- }
- TyRef(r, m) => {
- byte!(13);
- region(state, *r);
- mt(state, m);
- }
- TyBareFn(opt_def_id, ref b) => {
- byte!(14);
- hash!(opt_def_id);
- hash!(b.unsafety);
- hash!(b.abi);
- fn_sig(state, &b.sig);
- return false;
- }
- TyTrait(ref data) => {
- byte!(17);
- did(state, data.principal_def_id());
- hash!(data.bounds);
-
- let principal = anonymize_late_bound_regions(tcx, &data.principal).0;
- for subty in &principal.substs.types {
- helper(tcx, subty, svh, state);
- }
+ pub fn trait_has_default_impl(&self, trait_def_id: DefId) -> bool {
+ self.populate_implementations_for_trait_if_necessary(trait_def_id);
- return false;
- }
- TyStruct(d, _) => {
- byte!(18);
- did(state, d);
- }
- TyTuple(ref inner) => {
- byte!(19);
- hash!(inner.len());
- }
- TyParam(p) => {
- byte!(20);
- hash!(p.space);
- hash!(p.idx);
- hash!(token::get_name(p.name));
- }
- TyInfer(_) => unreachable!(),
- TyError => byte!(21),
- TyClosure(d, _) => {
- byte!(22);
- did(state, d);
- }
- TyProjection(ref data) => {
- byte!(23);
- did(state, data.trait_ref.def_id);
- hash!(token::get_name(data.item_name));
- }
- }
- true
- });
+ let def = self.lookup_trait_def(trait_def_id);
+ def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL)
}
-}
-impl fmt::Debug for Variance {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str(match *self {
- Covariant => "+",
- Contravariant => "-",
- Invariant => "o",
- Bivariant => "*",
- })
+ /// Records a trait-to-implementation mapping.
+ pub fn record_trait_has_default_impl(&self, trait_def_id: DefId) {
+ let def = self.lookup_trait_def(trait_def_id);
+ def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL)
}
-}
-/// Construct a parameter environment suitable for static contexts or other contexts where there
-/// are no free type/lifetime parameters in scope.
-pub fn empty_parameter_environment<'a,'tcx>(cx: &'a ctxt<'tcx>) -> ParameterEnvironment<'a,'tcx> {
- ty::ParameterEnvironment { tcx: cx,
- free_substs: Substs::empty(),
- caller_bounds: Vec::new(),
- implicit_region_bound: ty::ReEmpty,
- selection_cache: traits::SelectionCache::new(), }
-}
+ /// Load primitive inherent implementations if necessary
+ pub fn populate_implementations_for_primitive_if_necessary(&self,
+ primitive_def_id: ast::DefId) {
+ if primitive_def_id.krate == LOCAL_CRATE {
+ return
+ }
-/// Constructs and returns a substitution that can be applied to move from
-/// the "outer" view of a type or method to the "inner" view.
-/// In general, this means converting from bound parameters to
-/// free parameters. Since we currently represent bound/free type
-/// parameters in the same way, this only has an effect on regions.
-pub fn construct_free_substs<'a,'tcx>(
- tcx: &'a ctxt<'tcx>,
- generics: &Generics<'tcx>,
- free_id: ast::NodeId)
- -> Substs<'tcx>
-{
- // map T => T
- let mut types = VecPerParamSpace::empty();
- push_types_from_defs(tcx, &mut types, generics.types.as_slice());
+ if self.populated_external_primitive_impls.borrow().contains(&primitive_def_id) {
+ return
+ }
- let free_id_outlive = region::DestructionScopeData::new(free_id);
+ debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}",
+ primitive_def_id);
- // map bound 'a => free 'a
- let mut regions = VecPerParamSpace::empty();
- push_region_params(&mut regions, free_id_outlive, generics.regions.as_slice());
+ let impl_items = csearch::get_impl_items(&self.sess.cstore, primitive_def_id);
- return Substs {
- types: types,
- regions: subst::NonerasedRegions(regions)
- };
+ // Store the implementation info.
+ self.impl_items.borrow_mut().insert(primitive_def_id, impl_items);
+ self.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id);
+ }
- fn push_region_params(regions: &mut VecPerParamSpace<ty::Region>,
- all_outlive_extent: region::DestructionScopeData,
- region_params: &[RegionParameterDef])
- {
- for r in region_params {
- regions.push(r.space, ty::free_region_from_def(all_outlive_extent, r));
+ /// Populates the type context with all the inherent implementations for
+ /// the given type if necessary.
+ pub fn populate_inherent_implementations_for_type_if_necessary(&self,
+ type_id: ast::DefId) {
+ if type_id.krate == LOCAL_CRATE {
+ return
}
- }
- fn push_types_from_defs<'tcx>(tcx: &ty::ctxt<'tcx>,
- types: &mut VecPerParamSpace<Ty<'tcx>>,
- defs: &[TypeParameterDef<'tcx>]) {
- for def in defs {
- debug!("construct_parameter_environment(): push_types_from_defs: def={:?}",
- def);
- let ty = ty::mk_param_from_def(tcx, def);
- types.push(def.space, ty);
- }
+ if self.populated_external_types.borrow().contains(&type_id) {
+ return
+ }
+
+ debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}",
+ type_id);
+
+ let mut inherent_impls = Vec::new();
+ csearch::each_inherent_implementation_for_type(&self.sess.cstore, type_id, |impl_def_id| {
+ // Record the implementation.
+ inherent_impls.push(impl_def_id);
+
+ // Store the implementation info.
+ let impl_items = csearch::get_impl_items(&self.sess.cstore, impl_def_id);
+ self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
+ });
+
+ self.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls));
+ self.populated_external_types.borrow_mut().insert(type_id);
}
-}
-/// See `ParameterEnvironment` struct def'n for details
-pub fn construct_parameter_environment<'a,'tcx>(
- tcx: &'a ctxt<'tcx>,
- span: Span,
- generics: &ty::Generics<'tcx>,
- generic_predicates: &ty::GenericPredicates<'tcx>,
- free_id: ast::NodeId)
- -> ParameterEnvironment<'a, 'tcx>
-{
- //
- // Construct the free substs.
- //
+ /// Populates the type context with all the implementations for the given
+ /// trait if necessary.
+ pub fn populate_implementations_for_trait_if_necessary(&self, trait_id: ast::DefId) {
+ if trait_id.krate == LOCAL_CRATE {
+ return
+ }
- let free_substs = construct_free_substs(tcx, generics, free_id);
- let free_id_outlive = region::DestructionScopeData::new(free_id);
+ let def = self.lookup_trait_def(trait_id);
+ if def.flags.get().intersects(TraitFlags::IMPLS_VALID) {
+ return;
+ }
- //
- // Compute the bounds on Self and the type parameters.
- //
+ debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def);
- let bounds = generic_predicates.instantiate(tcx, &free_substs);
- let bounds = liberate_late_bound_regions(tcx, free_id_outlive, &ty::Binder(bounds));
- let predicates = bounds.predicates.into_vec();
+ if csearch::is_defaulted_trait(&self.sess.cstore, trait_id) {
+ self.record_trait_has_default_impl(trait_id);
+ }
- debug!("construct_parameter_environment: free_id={:?} free_subst={:?} predicates={:?}",
- free_id,
- free_substs,
- predicates);
+ csearch::each_implementation_for_trait(&self.sess.cstore, trait_id, |impl_def_id| {
+ let impl_items = csearch::get_impl_items(&self.sess.cstore, impl_def_id);
+ let trait_ref = self.impl_trait_ref(impl_def_id).unwrap();
+ // Record the trait->implementation mapping.
+ def.record_impl(self, impl_def_id, trait_ref);
- //
- // Finally, we have to normalize the bounds in the environment, in
- // case they contain any associated type projections. This process
- // can yield errors if the put in illegal associated types, like
- // `<i32 as Foo>::Bar` where `i32` does not implement `Foo`. We
- // report these errors right here; this doesn't actually feel
- // right to me, because constructing the environment feels like a
- // kind of a "idempotent" action, but I'm not sure where would be
- // a better place. In practice, we construct environments for
- // every fn once during type checking, and we'll abort if there
- // are any errors at that point, so after type checking you can be
- // sure that this will succeed without errors anyway.
- //
+ // For any methods that use a default implementation, add them to
+ // the map. This is a bit unfortunate.
+ for impl_item_def_id in &impl_items {
+ let method_def_id = impl_item_def_id.def_id();
+ match self.impl_or_trait_item(method_def_id) {
+ MethodTraitItem(method) => {
+ if let Some(source) = method.provided_source {
+ self.provided_method_sources
+ .borrow_mut()
+ .insert(method_def_id, source);
+ }
+ }
+ _ => {}
+ }
+ }
- let unnormalized_env = ty::ParameterEnvironment {
- tcx: tcx,
- free_substs: free_substs,
- implicit_region_bound: ty::ReScope(free_id_outlive.to_code_extent()),
- caller_bounds: predicates,
- selection_cache: traits::SelectionCache::new(),
- };
+ // Store the implementation info.
+ self.impl_items.borrow_mut().insert(impl_def_id, impl_items);
+ });
- let cause = traits::ObligationCause::misc(span, free_id);
- traits::normalize_param_env_or_error(unnormalized_env, cause)
-}
+ def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID);
+ }
-impl BorrowKind {
- pub fn from_mutbl(m: ast::Mutability) -> BorrowKind {
- match m {
- ast::MutMutable => MutBorrow,
- ast::MutImmutable => ImmBorrow,
- }
+ /// Given the def_id of an impl, return the def_id of the trait it implements.
+ /// If it implements no trait, return `None`.
+ pub fn trait_id_of_impl(&self, def_id: ast::DefId) -> Option<ast::DefId> {
+ self.impl_trait_ref(def_id).map(|tr| tr.def_id)
}
- /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
- /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
- /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
- /// question.
- pub fn to_mutbl_lossy(self) -> ast::Mutability {
- match self {
- MutBorrow => ast::MutMutable,
- ImmBorrow => ast::MutImmutable,
+ /// If the given def ID describes a method belonging to an impl, return the
+ /// ID of the impl that the method belongs to. Otherwise, return `None`.
+ pub fn impl_of_method(&self, def_id: ast::DefId) -> Option<ast::DefId> {
+ if def_id.krate != LOCAL_CRATE {
+ return match csearch::get_impl_or_trait_item(self,
+ def_id).container() {
+ TraitContainer(_) => None,
+ ImplContainer(def_id) => Some(def_id),
+ };
+ }
+ match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
+ Some(trait_item) => {
+ match trait_item.container() {
+ TraitContainer(_) => None,
+ ImplContainer(def_id) => Some(def_id),
+ }
+ }
+ None => None
+ }
+ }
- // We have no type corresponding to a unique imm borrow, so
- // use `&mut`. It gives all the capabilities of an `&uniq`
- // and hence is a safe "over approximation".
- UniqueImmBorrow => ast::MutMutable,
+ /// If the given def ID describes an item belonging to a trait (either a
+ /// default method or an implementation of a trait method), return the ID of
+ /// the trait that the method belongs to. Otherwise, return `None`.
+ pub fn trait_of_item(&self, def_id: ast::DefId) -> Option<ast::DefId> {
+ if def_id.krate != LOCAL_CRATE {
+ return csearch::get_trait_of_item(&self.sess.cstore, def_id, self);
+ }
+ match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
+ Some(impl_or_trait_item) => {
+ match impl_or_trait_item.container() {
+ TraitContainer(def_id) => Some(def_id),
+ ImplContainer(def_id) => self.trait_id_of_impl(def_id),
+ }
+ }
+ None => None
}
}
- pub fn to_user_str(&self) -> &'static str {
- match *self {
- MutBorrow => "mutable",
- ImmBorrow => "immutable",
- UniqueImmBorrow => "uniquely immutable",
+ /// If the given def ID describes an item belonging to a trait (either a
+ /// default method or an implementation of a trait method), return the ID of
+ /// the method inside trait definition (this means that if the given def ID
+ /// is already that of the original trait method, then the return value is
+ /// the same).
+ /// Otherwise, return `None`.
+ pub fn trait_item_of_item(&self, def_id: ast::DefId) -> Option<ImplOrTraitItemId> {
+ let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) {
+ Some(m) => m.clone(),
+ None => return None,
+ };
+ let name = impl_item.name();
+ match self.trait_of_item(def_id) {
+ Some(trait_did) => {
+ self.trait_items(trait_did).iter()
+ .find(|item| item.name() == name)
+ .map(|item| item.id())
+ }
+ None => None
+ }
+ }
+
+ /// Creates a hash of the type `Ty` which will be the same no matter what crate
+ /// context it's calculated within. This is used by the `type_id` intrinsic.
+ pub fn hash_crate_independent(&self, ty: Ty<'tcx>, svh: &Svh) -> u64 {
+ let mut state = SipHasher::new();
+ helper(self, ty, svh, &mut state);
+ return state.finish();
+
+ fn helper<'tcx>(tcx: &ctxt<'tcx>, ty: Ty<'tcx>, svh: &Svh,
+ state: &mut SipHasher) {
+ macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } }
+ macro_rules! hash { ($e:expr) => { $e.hash(state) } }
+
+ let region = |state: &mut SipHasher, r: Region| {
+ match r {
+ ReStatic => {}
+ ReLateBound(db, BrAnon(i)) => {
+ db.hash(state);
+ i.hash(state);
+ }
+ ReEmpty |
+ ReEarlyBound(..) |
+ ReLateBound(..) |
+ ReFree(..) |
+ ReScope(..) |
+ ReInfer(..) => {
+ tcx.sess.bug("unexpected region found when hashing a type")
+ }
+ }
+ };
+ let did = |state: &mut SipHasher, did: DefId| {
+ let h = if ast_util::is_local(did) {
+ svh.clone()
+ } else {
+ tcx.sess.cstore.get_crate_hash(did.krate)
+ };
+ h.as_str().hash(state);
+ did.node.hash(state);
+ };
+ let mt = |state: &mut SipHasher, mt: TypeAndMut| {
+ mt.mutbl.hash(state);
+ };
+ let fn_sig = |state: &mut SipHasher, sig: &Binder<FnSig<'tcx>>| {
+ let sig = tcx.anonymize_late_bound_regions(sig).0;
+ for a in &sig.inputs { helper(tcx, *a, svh, state); }
+ if let ty::FnConverging(output) = sig.output {
+ helper(tcx, output, svh, state);
+ }
+ };
+ ty.maybe_walk(|ty| {
+ match ty.sty {
+ TyBool => byte!(2),
+ TyChar => byte!(3),
+ TyInt(i) => {
+ byte!(4);
+ hash!(i);
+ }
+ TyUint(u) => {
+ byte!(5);
+ hash!(u);
+ }
+ TyFloat(f) => {
+ byte!(6);
+ hash!(f);
+ }
+ TyStr => {
+ byte!(7);
+ }
+ TyEnum(d, _) => {
+ byte!(8);
+ did(state, d);
+ }
+ TyBox(_) => {
+ byte!(9);
+ }
+ TyArray(_, n) => {
+ byte!(10);
+ n.hash(state);
+ }
+ TySlice(_) => {
+ byte!(11);
+ }
+ TyRawPtr(m) => {
+ byte!(12);
+ mt(state, m);
+ }
+ TyRef(r, m) => {
+ byte!(13);
+ region(state, *r);
+ mt(state, m);
+ }
+ TyBareFn(opt_def_id, ref b) => {
+ byte!(14);
+ hash!(opt_def_id);
+ hash!(b.unsafety);
+ hash!(b.abi);
+ fn_sig(state, &b.sig);
+ return false;
+ }
+ TyTrait(ref data) => {
+ byte!(17);
+ did(state, data.principal_def_id());
+ hash!(data.bounds);
+
+ let principal = tcx.anonymize_late_bound_regions(&data.principal).0;
+ for subty in &principal.substs.types {
+ helper(tcx, subty, svh, state);
+ }
+
+ return false;
+ }
+ TyStruct(d, _) => {
+ byte!(18);
+ did(state, d);
+ }
+ TyTuple(ref inner) => {
+ byte!(19);
+ hash!(inner.len());
+ }
+ TyParam(p) => {
+ byte!(20);
+ hash!(p.space);
+ hash!(p.idx);
+ hash!(p.name.as_str());
+ }
+ TyInfer(_) => unreachable!(),
+ TyError => byte!(21),
+ TyClosure(d, _) => {
+ byte!(22);
+ did(state, d);
+ }
+ TyProjection(ref data) => {
+ byte!(23);
+ did(state, data.trait_ref.def_id);
+ hash!(data.item_name.as_str());
+ }
+ }
+ true
+ });
+ }
+ }
+
+ /// Construct a parameter environment suitable for static contexts or other contexts where there
+ /// are no free type/lifetime parameters in scope.
+ pub fn empty_parameter_environment<'a>(&'a self) -> ParameterEnvironment<'a,'tcx> {
+ ty::ParameterEnvironment { tcx: self,
+ free_substs: Substs::empty(),
+ caller_bounds: Vec::new(),
+ implicit_region_bound: ty::ReEmpty,
+ selection_cache: traits::SelectionCache::new(), }
+ }
+
+ /// Constructs and returns a substitution that can be applied to move from
+ /// the "outer" view of a type or method to the "inner" view.
+ /// In general, this means converting from bound parameters to
+ /// free parameters. Since we currently represent bound/free type
+ /// parameters in the same way, this only has an effect on regions.
+ pub fn construct_free_substs(&self, generics: &Generics<'tcx>,
+ free_id: ast::NodeId) -> Substs<'tcx> {
+ // map T => T
+ let mut types = VecPerParamSpace::empty();
+ for def in generics.types.as_slice() {
+ debug!("construct_parameter_environment(): push_types_from_defs: def={:?}",
+ def);
+ types.push(def.space, self.mk_param_from_def(def));
}
- }
-}
-
-impl<'tcx> ctxt<'tcx> {
- pub fn is_method_call(&self, expr_id: ast::NodeId) -> bool {
- self.method_map.borrow().contains_key(&MethodCall::expr(expr_id))
- }
-
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
- Some(self.upvar_capture_map.borrow().get(&upvar_id).unwrap().clone())
- }
-}
-impl<'a,'tcx> mc::Typer<'tcx> for ParameterEnvironment<'a,'tcx> {
- fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
- Ok(ty::node_id_to_type(self.tcx, id))
- }
+ let free_id_outlive = region::DestructionScopeData::new(free_id);
- fn expr_ty_adjusted(&self, expr: &ast::Expr) -> mc::McResult<Ty<'tcx>> {
- Ok(ty::expr_ty_adjusted(self.tcx, expr))
- }
+ // map bound 'a => free 'a
+ let mut regions = VecPerParamSpace::empty();
+ for def in generics.regions.as_slice() {
+ let region =
+ ReFree(FreeRegion { scope: free_id_outlive,
+ bound_region: BrNamed(def.def_id, def.name) });
+ debug!("push_region_params {:?}", region);
+ regions.push(def.space, region);
+ }
- fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>> {
- self.tcx.method_map.borrow().get(&method_call).map(|method| method.ty)
+ Substs {
+ types: types,
+ regions: subst::NonerasedRegions(regions)
+ }
}
- fn node_method_origin(&self, method_call: ty::MethodCall)
- -> Option<ty::MethodOrigin<'tcx>>
+ /// See `ParameterEnvironment` struct def'n for details
+ pub fn construct_parameter_environment<'a>(&'a self,
+ span: Span,
+ generics: &ty::Generics<'tcx>,
+ generic_predicates: &ty::GenericPredicates<'tcx>,
+ free_id: ast::NodeId)
+ -> ParameterEnvironment<'a, 'tcx>
{
- self.tcx.method_map.borrow().get(&method_call).map(|method| method.origin.clone())
- }
+ //
+ // Construct the free substs.
+ //
- fn adjustments(&self) -> &RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
- &self.tcx.adjustments
- }
+ let free_substs = self.construct_free_substs(generics, free_id);
+ let free_id_outlive = region::DestructionScopeData::new(free_id);
- fn is_method_call(&self, id: ast::NodeId) -> bool {
- self.tcx.is_method_call(id)
- }
+ //
+ // Compute the bounds on Self and the type parameters.
+ //
- fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<region::CodeExtent> {
- self.tcx.region_maps.temporary_scope(rvalue_id)
- }
+ let bounds = generic_predicates.instantiate(self, &free_substs);
+ let bounds = self.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
+ let predicates = bounds.predicates.into_vec();
- fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
- self.tcx.upvar_capture(upvar_id)
- }
+ debug!("construct_parameter_environment: free_id={:?} free_subst={:?} predicates={:?}",
+ free_id,
+ free_substs,
+ predicates);
- fn type_moves_by_default(&self, span: Span, ty: Ty<'tcx>) -> bool {
- type_moves_by_default(self, span, ty)
- }
-}
+ //
+ // Finally, we have to normalize the bounds in the environment, in
+ // case they contain any associated type projections. This process
+ // can yield errors if they put in illegal associated types, like
+ // `<i32 as Foo>::Bar` where `i32` does not implement `Foo`. We
+ // report these errors right here; this doesn't actually feel
+ // right to me, because constructing the environment feels like a
+ // kind of an "idempotent" action, but I'm not sure where would be
+ // a better place. In practice, we construct environments for
+ // every fn once during type checking, and we'll abort if there
+ // are any errors at that point, so after type checking you can be
+ // sure that this will succeed without errors anyway.
+ //
-impl<'a,'tcx> ClosureTyper<'tcx> for ty::ParameterEnvironment<'a,'tcx> {
- fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
- self
+ let unnormalized_env = ty::ParameterEnvironment {
+ tcx: self,
+ free_substs: free_substs,
+ implicit_region_bound: ty::ReScope(free_id_outlive.to_code_extent()),
+ caller_bounds: predicates,
+ selection_cache: traits::SelectionCache::new(),
+ };
+
+ let cause = traits::ObligationCause::misc(span, free_id);
+ traits::normalize_param_env_or_error(unnormalized_env, cause)
}
- fn closure_kind(&self,
- def_id: ast::DefId)
- -> Option<ty::ClosureKind>
- {
- Some(self.tcx.closure_kind(def_id))
+ pub fn is_method_call(&self, expr_id: ast::NodeId) -> bool {
+ self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id))
}
- fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- self.tcx.closure_type(def_id, substs)
+ pub fn is_overloaded_autoderef(&self, expr_id: ast::NodeId, autoderefs: u32) -> bool {
+ self.tables.borrow().method_map.contains_key(&MethodCall::autoderef(expr_id,
+ autoderefs))
}
- fn closure_upvars(&self,
- def_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ClosureUpvar<'tcx>>>
- {
- closure_upvars(self, def_id, substs)
+ pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+ Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone())
}
}
-
/// The category of explicit self.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub enum ExplicitSelfCategory {
ByBoxExplicitSelfCategory,
}
-/// Pushes all the lifetimes in the given type onto the given list. A
-/// "lifetime in a type" is a lifetime specified by a reference or a lifetime
-/// in a list of type substitutions. This does *not* traverse into nominal
-/// types, nor does it resolve fictitious types.
-pub fn accumulate_lifetimes_in_type(accumulator: &mut Vec<ty::Region>,
- ty: Ty) {
- walk_ty(ty, |ty| {
- match ty.sty {
- TyRef(region, _) => {
- accumulator.push(*region)
- }
- TyTrait(ref t) => {
- accumulator.push_all(t.principal.0.substs.regions().as_slice());
- }
- TyEnum(_, substs) |
- TyStruct(_, substs) => {
- accum_substs(accumulator, substs);
- }
- TyClosure(_, substs) => {
- accum_substs(accumulator, substs);
- }
- TyBool |
- TyChar |
- TyInt(_) |
- TyUint(_) |
- TyFloat(_) |
- TyBox(_) |
- TyStr |
- TyArray(_, _) |
- TySlice(_) |
- TyRawPtr(_) |
- TyBareFn(..) |
- TyTuple(_) |
- TyProjection(_) |
- TyParam(_) |
- TyInfer(_) |
- TyError => {
- }
- }
- });
-
- fn accum_substs(accumulator: &mut Vec<Region>, substs: &Substs) {
- match substs.regions {
- subst::ErasedRegions => {}
- subst::NonerasedRegions(ref regions) => {
- for region in regions {
- accumulator.push(*region)
- }
- }
- }
- }
-}
-
/// A free variable referred to in a function.
#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
pub struct Freevar {
// imported.
pub type GlobMap = HashMap<NodeId, HashSet<Name>>;
-pub fn with_freevars<T, F>(tcx: &ty::ctxt, fid: ast::NodeId, f: F) -> T where
- F: FnOnce(&[Freevar]) -> T,
-{
- match tcx.freevars.borrow().get(&fid) {
- None => f(&[]),
- Some(d) => f(&d[..])
- }
-}
-
impl<'tcx> AutoAdjustment<'tcx> {
pub fn is_identity(&self) -> bool {
match *self {
}
}
-/// Replace any late-bound regions bound in `value` with free variants attached to scope-id
-/// `scope_id`.
-pub fn liberate_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- all_outlive_scope: region::DestructionScopeData,
- value: &Binder<T>)
- -> T
- where T : TypeFoldable<'tcx>
-{
- ty_fold::replace_late_bound_regions(
- tcx, value,
- |br| ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br})).0
-}
-
-pub fn count_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- value: &Binder<T>)
- -> usize
- where T : TypeFoldable<'tcx>
-{
- let (_, skol_map) = ty_fold::replace_late_bound_regions(tcx, value, |_| ty::ReStatic);
- skol_map.len()
-}
+impl<'tcx> ctxt<'tcx> {
+ pub fn with_freevars<T, F>(&self, fid: ast::NodeId, f: F) -> T where
+ F: FnOnce(&[Freevar]) -> T,
+ {
+ match self.freevars.borrow().get(&fid) {
+ None => f(&[]),
+ Some(d) => f(&d[..])
+ }
+ }
-pub fn binds_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- value: &Binder<T>)
- -> bool
- where T : TypeFoldable<'tcx>
-{
- count_late_bound_regions(tcx, value) > 0
-}
+ /// Replace any late-bound regions bound in `value` with free variants attached to scope-id
+ /// `scope_id`.
+ pub fn liberate_late_bound_regions<T>(&self,
+ all_outlive_scope: region::DestructionScopeData,
+ value: &Binder<T>)
+ -> T
+ where T : TypeFoldable<'tcx>
+ {
+ ty_fold::replace_late_bound_regions(
+ self, value,
+ |br| ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br})).0
+ }
-/// Flattens two binding levels into one. So `for<'a> for<'b> Foo`
-/// becomes `for<'a,'b> Foo`.
-pub fn flatten_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- bound2_value: &Binder<Binder<T>>)
- -> Binder<T>
- where T: TypeFoldable<'tcx>
-{
- let bound0_value = bound2_value.skip_binder().skip_binder();
- let value = ty_fold::fold_regions(tcx, bound0_value, |region, current_depth| {
- match region {
- ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => {
- // should be true if no escaping regions from bound2_value
- assert!(debruijn.depth - current_depth <= 1);
- ty::ReLateBound(DebruijnIndex::new(current_depth), br)
- }
- _ => {
- region
+ /// Flattens two binding levels into one. So `for<'a> for<'b> Foo`
+ /// becomes `for<'a,'b> Foo`.
+ pub fn flatten_late_bound_regions<T>(&self, bound2_value: &Binder<Binder<T>>)
+ -> Binder<T>
+ where T: TypeFoldable<'tcx>
+ {
+ let bound0_value = bound2_value.skip_binder().skip_binder();
+ let value = ty_fold::fold_regions(self, bound0_value, &mut false,
+ |region, current_depth| {
+ match region {
+ ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => {
+ // should be true if no escaping regions from bound2_value
+ assert!(debruijn.depth - current_depth <= 1);
+ ty::ReLateBound(DebruijnIndex::new(current_depth), br)
+ }
+ _ => {
+ region
+ }
}
+ });
+ Binder(value)
+ }
+
+ pub fn no_late_bound_regions<T>(&self, value: &Binder<T>) -> Option<T>
+ where T : TypeFoldable<'tcx> + RegionEscape
+ {
+ if value.0.has_escaping_regions() {
+ None
+ } else {
+ Some(value.0.clone())
}
- });
- Binder(value)
-}
+ }
-pub fn no_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- value: &Binder<T>)
- -> Option<T>
- where T : TypeFoldable<'tcx>
-{
- if binds_late_bound_regions(tcx, value) {
- None
- } else {
- Some(value.0.clone())
+ /// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
+ /// method lookup and a few other places where precise region relationships are not required.
+ pub fn erase_late_bound_regions<T>(&self, value: &Binder<T>) -> T
+ where T : TypeFoldable<'tcx>
+ {
+ ty_fold::replace_late_bound_regions(self, value, |_| ty::ReStatic).0
}
-}
-/// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
-/// method lookup and a few other places where precise region relationships are not required.
-pub fn erase_late_bound_regions<'tcx, T>(
- tcx: &ty::ctxt<'tcx>,
- value: &Binder<T>)
- -> T
- where T : TypeFoldable<'tcx>
-{
- ty_fold::replace_late_bound_regions(tcx, value, |_| ty::ReStatic).0
-}
+ /// Rewrite any late-bound regions so that they are anonymous. Region numbers are
+ /// assigned starting at 1 and increasing monotonically in the order traversed
+ /// by the fold operation.
+ ///
+ /// The chief purpose of this function is to canonicalize regions so that two
+ /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
+ /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
+ /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
+ pub fn anonymize_late_bound_regions<T>(&self, sig: &Binder<T>) -> Binder<T>
+ where T : TypeFoldable<'tcx>,
+ {
+ let mut counter = 0;
+ ty::Binder(ty_fold::replace_late_bound_regions(self, sig, |_| {
+ counter += 1;
+ ReLateBound(ty::DebruijnIndex::new(1), BrAnon(counter))
+ }).0)
+ }
-/// Rewrite any late-bound regions so that they are anonymous. Region numbers are
-/// assigned starting at 1 and increasing monotonically in the order traversed
-/// by the fold operation.
-///
-/// The chief purpose of this function is to canonicalize regions so that two
-/// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
-/// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
-/// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
-pub fn anonymize_late_bound_regions<'tcx, T>(
- tcx: &ctxt<'tcx>,
- sig: &Binder<T>)
- -> Binder<T>
- where T : TypeFoldable<'tcx>,
-{
- let mut counter = 0;
- ty::Binder(ty_fold::replace_late_bound_regions(tcx, sig, |_| {
- counter += 1;
- ReLateBound(ty::DebruijnIndex::new(1), BrAnon(counter))
- }).0)
+ pub fn make_substs_for_receiver_types(&self,
+ trait_ref: &ty::TraitRef<'tcx>,
+ method: &ty::Method<'tcx>)
+ -> subst::Substs<'tcx>
+ {
+ /*!
+ * Substitutes the values for the receiver's type parameters
+ * that are found in method, leaving the method's type parameters
+ * intact.
+ */
+
+ let meth_tps: Vec<Ty> =
+ method.generics.types.get_slice(subst::FnSpace)
+ .iter()
+ .map(|def| self.mk_param_from_def(def))
+ .collect();
+ let meth_regions: Vec<ty::Region> =
+ method.generics.regions.get_slice(subst::FnSpace)
+ .iter()
+ .map(|def| def.to_early_bound_region())
+ .collect();
+ trait_ref.substs.clone().with_method(meth_tps, meth_regions)
+ }
}
impl DebruijnIndex {
}
}
-pub fn make_substs_for_receiver_types<'tcx>(tcx: &ty::ctxt<'tcx>,
- trait_ref: &ty::TraitRef<'tcx>,
- method: &ty::Method<'tcx>)
- -> subst::Substs<'tcx>
-{
- /*!
- * Substitutes the values for the receiver's type parameters
- * that are found in method, leaving the method's type parameters
- * intact.
- */
-
- let meth_tps: Vec<Ty> =
- method.generics.types.get_slice(subst::FnSpace)
- .iter()
- .map(|def| ty::mk_param_from_def(tcx, def))
- .collect();
- let meth_regions: Vec<ty::Region> =
- method.generics.regions.get_slice(subst::FnSpace)
- .iter()
- .map(|def| def.to_early_bound_region())
- .collect();
- trait_ref.substs.clone().with_method(meth_tps, meth_regions)
-}
-
-#[derive(Copy, Clone)]
-pub enum CopyImplementationError {
- FieldDoesNotImplementCopy(ast::Name),
- VariantDoesNotImplementCopy(ast::Name),
- TypeIsStructural,
- TypeHasDestructor,
-}
-
-pub fn can_type_implement_copy<'a,'tcx>(param_env: &ParameterEnvironment<'a, 'tcx>,
- span: Span,
- self_type: Ty<'tcx>)
- -> Result<(),CopyImplementationError>
-{
- let tcx = param_env.tcx;
-
- let did = match self_type.sty {
- ty::TyStruct(struct_did, substs) => {
- let fields = ty::struct_fields(tcx, struct_did, substs);
- for field in &fields {
- if type_moves_by_default(param_env, span, field.mt.ty) {
- return Err(FieldDoesNotImplementCopy(field.name))
- }
- }
- struct_did
- }
- ty::TyEnum(enum_did, substs) => {
- let enum_variants = ty::enum_variants(tcx, enum_did);
- for variant in enum_variants.iter() {
- for variant_arg_type in &variant.args {
- let substd_arg_type =
- variant_arg_type.subst(tcx, substs);
- if type_moves_by_default(param_env, span, substd_arg_type) {
- return Err(VariantDoesNotImplementCopy(variant.name))
- }
- }
- }
- enum_did
- }
- _ => return Err(TypeIsStructural),
- };
-
- if ty::has_dtor(tcx, did) {
- return Err(TypeHasDestructor)
- }
-
- Ok(())
-}
-
-// FIXME(#20298) -- all of these types basically walk various
+// FIXME(#20298) -- all of these traits basically walk various
// structures to test whether types/regions are reachable with various
// properties. It should be possible to express them in terms of one
// common "walker" trait or something.
+/// An "escaping region" is a bound region whose binder is not part of `t`.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
+/// we already use the term "free region". It refers to the regions that we use to represent bound
+/// regions on a fn definition while we are typechecking its body.
+///
+/// To clarify, conceptually there is no particular difference between an "escaping" region and a
+/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
+/// binding level, one is generally required to do some sort of processing to a bound region, such
+/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
+/// represent the scope to which it is attached, etc. An escaping region represents a bound region
+/// for which this processing has not yet been done.
pub trait RegionEscape {
fn has_escaping_regions(&self) -> bool {
self.has_regions_escaping_depth(0)
impl<'tcx> RegionEscape for Ty<'tcx> {
fn has_regions_escaping_depth(&self, depth: u32) -> bool {
- ty::type_escapes_depth(*self, depth)
+ self.region_depth > depth
}
}
}
}
+impl<'tcx> RegionEscape for ClosureSubsts<'tcx> {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
+ self.func_substs.has_regions_escaping_depth(depth) ||
+ self.upvar_tys.iter().any(|t| t.has_regions_escaping_depth(depth))
+ }
+}
+
+impl<T:RegionEscape> RegionEscape for Vec<T> {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
+ self.iter().any(|t| t.has_regions_escaping_depth(depth))
+ }
+}
+
+impl<'tcx> RegionEscape for FnSig<'tcx> {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
+ self.inputs.has_regions_escaping_depth(depth) ||
+ self.output.has_regions_escaping_depth(depth)
+ }
+}
+
impl<'tcx,T:RegionEscape> RegionEscape for VecPerParamSpace<T> {
fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.iter_enumerated().any(|(space, _, t)| {
}
}
+impl<'tcx> RegionEscape for FnOutput<'tcx> {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
+ match *self {
+ FnConverging(t) => t.has_regions_escaping_depth(depth),
+ FnDiverging => false
+ }
+ }
+}
+
impl<'tcx> RegionEscape for EquatePredicate<'tcx> {
fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth) || self.1.has_regions_escaping_depth(depth)
}
}
-pub trait HasProjectionTypes {
- fn has_projection_types(&self) -> bool;
-}
-
-impl<'tcx,T:HasProjectionTypes> HasProjectionTypes for Vec<T> {
+pub trait HasTypeFlags {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool;
fn has_projection_types(&self) -> bool {
- self.iter().any(|p| p.has_projection_types())
+ self.has_type_flags(TypeFlags::HAS_PROJECTION)
}
-}
-
-impl<'tcx,T:HasProjectionTypes> HasProjectionTypes for VecPerParamSpace<T> {
- fn has_projection_types(&self) -> bool {
- self.iter().any(|p| p.has_projection_types())
+ fn references_error(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_ERR)
}
-}
-
-impl<'tcx> HasProjectionTypes for ClosureTy<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.sig.has_projection_types()
+ fn has_param_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_PARAMS)
}
-}
-
-impl<'tcx> HasProjectionTypes for ClosureUpvar<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.ty.has_projection_types()
+ fn has_self_ty(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_SELF)
}
-}
-
-impl<'tcx> HasProjectionTypes for ty::InstantiatedPredicates<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.predicates.has_projection_types()
+ fn has_infer_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER)
}
-}
-
-impl<'tcx> HasProjectionTypes for Predicate<'tcx> {
- fn has_projection_types(&self) -> bool {
- match *self {
- Predicate::Trait(ref data) => data.has_projection_types(),
- Predicate::Equate(ref data) => data.has_projection_types(),
- Predicate::RegionOutlives(ref data) => data.has_projection_types(),
- Predicate::TypeOutlives(ref data) => data.has_projection_types(),
- Predicate::Projection(ref data) => data.has_projection_types(),
- }
+ fn needs_infer(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER)
}
-}
-
-impl<'tcx> HasProjectionTypes for TraitPredicate<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.trait_ref.has_projection_types()
+ fn needs_subst(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_SUBST)
}
-}
-
-impl<'tcx> HasProjectionTypes for EquatePredicate<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.0.has_projection_types() || self.1.has_projection_types()
+ fn has_closure_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_CLOSURE)
}
-}
-
-impl HasProjectionTypes for Region {
- fn has_projection_types(&self) -> bool {
- false
+ fn has_erasable_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND |
+ TypeFlags::HAS_RE_INFER |
+ TypeFlags::HAS_FREE_REGIONS)
}
-}
-
-impl<T:HasProjectionTypes,U:HasProjectionTypes> HasProjectionTypes for OutlivesPredicate<T,U> {
- fn has_projection_types(&self) -> bool {
- self.0.has_projection_types() || self.1.has_projection_types()
+ /// Indicates whether this value references only 'global'
+ /// types/lifetimes that are the same regardless of what fn we are
+ /// in. This is used for caching. Errs on the side of returning
+ /// false.
+ fn is_global(&self) -> bool {
+ !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES)
}
}
-impl<'tcx> HasProjectionTypes for ProjectionPredicate<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.projection_ty.has_projection_types() || self.ty.has_projection_types()
+impl<'tcx,T:HasTypeFlags> HasTypeFlags for Vec<T> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self[..].has_type_flags(flags)
}
}
-impl<'tcx> HasProjectionTypes for ProjectionTy<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.trait_ref.has_projection_types()
+impl<'tcx,T:HasTypeFlags> HasTypeFlags for [T] {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.iter().any(|p| p.has_type_flags(flags))
}
}
-impl<'tcx> HasProjectionTypes for Ty<'tcx> {
- fn has_projection_types(&self) -> bool {
- ty::type_has_projection(*self)
+impl<'tcx,T:HasTypeFlags> HasTypeFlags for VecPerParamSpace<T> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.iter().any(|p| p.has_type_flags(flags))
}
}
-impl<'tcx> HasProjectionTypes for TraitRef<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.substs.has_projection_types()
+impl<'tcx> HasTypeFlags for ClosureTy<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.sig.has_type_flags(flags)
}
}
-impl<'tcx> HasProjectionTypes for subst::Substs<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.types.iter().any(|t| t.has_projection_types())
+impl<'tcx> HasTypeFlags for ClosureUpvar<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.ty.has_type_flags(flags)
}
}
-impl<'tcx,T> HasProjectionTypes for Option<T>
- where T : HasProjectionTypes
-{
- fn has_projection_types(&self) -> bool {
- self.iter().any(|t| t.has_projection_types())
+impl<'tcx> HasTypeFlags for ty::InstantiatedPredicates<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.predicates.has_type_flags(flags)
}
}
-impl<'tcx,T> HasProjectionTypes for Rc<T>
- where T : HasProjectionTypes
-{
- fn has_projection_types(&self) -> bool {
- (**self).has_projection_types()
+impl<'tcx> HasTypeFlags for Predicate<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ match *self {
+ Predicate::Trait(ref data) => data.has_type_flags(flags),
+ Predicate::Equate(ref data) => data.has_type_flags(flags),
+ Predicate::RegionOutlives(ref data) => data.has_type_flags(flags),
+ Predicate::TypeOutlives(ref data) => data.has_type_flags(flags),
+ Predicate::Projection(ref data) => data.has_type_flags(flags),
+ }
}
}
-impl<'tcx,T> HasProjectionTypes for Box<T>
- where T : HasProjectionTypes
-{
- fn has_projection_types(&self) -> bool {
- (**self).has_projection_types()
+impl<'tcx> HasTypeFlags for TraitPredicate<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.trait_ref.has_type_flags(flags)
}
}
-impl<T> HasProjectionTypes for Binder<T>
- where T : HasProjectionTypes
-{
- fn has_projection_types(&self) -> bool {
- self.0.has_projection_types()
+impl<'tcx> HasTypeFlags for EquatePredicate<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.0.has_type_flags(flags) || self.1.has_type_flags(flags)
}
}
-impl<'tcx> HasProjectionTypes for FnOutput<'tcx> {
- fn has_projection_types(&self) -> bool {
- match *self {
- FnConverging(t) => t.has_projection_types(),
- FnDiverging => false,
+impl HasTypeFlags for Region {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ if flags.intersects(TypeFlags::HAS_LOCAL_NAMES) {
+ // does this represent a region that cannot be named in a global
+ // way? used in fulfillment caching.
+ match *self {
+ ty::ReStatic | ty::ReEmpty => {}
+ _ => return true
+ }
+ }
+ if flags.intersects(TypeFlags::HAS_RE_INFER) {
+ if let ty::ReInfer(_) = *self {
+ return true;
+ }
}
+ false
}
}
-impl<'tcx> HasProjectionTypes for FnSig<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.inputs.iter().any(|t| t.has_projection_types()) ||
- self.output.has_projection_types()
+impl<T:HasTypeFlags,U:HasTypeFlags> HasTypeFlags for OutlivesPredicate<T,U> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.0.has_type_flags(flags) || self.1.has_type_flags(flags)
}
}
-impl<'tcx> HasProjectionTypes for field<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.mt.ty.has_projection_types()
+impl<'tcx> HasTypeFlags for ProjectionPredicate<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.projection_ty.has_type_flags(flags) || self.ty.has_type_flags(flags)
}
}
-impl<'tcx> HasProjectionTypes for BareFnTy<'tcx> {
- fn has_projection_types(&self) -> bool {
- self.sig.has_projection_types()
+impl<'tcx> HasTypeFlags for ProjectionTy<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.trait_ref.has_type_flags(flags)
}
}
-pub trait ReferencesError {
- fn references_error(&self) -> bool;
+impl<'tcx> HasTypeFlags for Ty<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.flags.get().intersects(flags)
+ }
}
-impl<T:ReferencesError> ReferencesError for Binder<T> {
- fn references_error(&self) -> bool {
- self.0.references_error()
+impl<'tcx> HasTypeFlags for TraitRef<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.substs.has_type_flags(flags)
}
}
-impl<T:ReferencesError> ReferencesError for Rc<T> {
- fn references_error(&self) -> bool {
- (&**self).references_error()
+impl<'tcx> HasTypeFlags for subst::Substs<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.types.has_type_flags(flags) || match self.regions {
+ subst::ErasedRegions => false,
+ subst::NonerasedRegions(ref r) => r.has_type_flags(flags)
+ }
}
}
-impl<'tcx> ReferencesError for TraitPredicate<'tcx> {
- fn references_error(&self) -> bool {
- self.trait_ref.references_error()
+impl<'tcx,T> HasTypeFlags for Option<T>
+ where T : HasTypeFlags
+{
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.iter().any(|t| t.has_type_flags(flags))
}
}
-impl<'tcx> ReferencesError for ProjectionPredicate<'tcx> {
- fn references_error(&self) -> bool {
- self.projection_ty.trait_ref.references_error() || self.ty.references_error()
+impl<'tcx,T> HasTypeFlags for Rc<T>
+ where T : HasTypeFlags
+{
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ (**self).has_type_flags(flags)
}
}
-impl<'tcx> ReferencesError for TraitRef<'tcx> {
- fn references_error(&self) -> bool {
- self.input_types().iter().any(|t| t.references_error())
+impl<'tcx,T> HasTypeFlags for Box<T>
+ where T : HasTypeFlags
+{
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ (**self).has_type_flags(flags)
}
}
-impl<'tcx> ReferencesError for Ty<'tcx> {
- fn references_error(&self) -> bool {
- type_is_error(*self)
+impl<T> HasTypeFlags for Binder<T>
+ where T : HasTypeFlags
+{
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.0.has_type_flags(flags)
}
}
-impl<'tcx> ReferencesError for Predicate<'tcx> {
- fn references_error(&self) -> bool {
+impl<'tcx> HasTypeFlags for FnOutput<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
match *self {
- Predicate::Trait(ref data) => data.references_error(),
- Predicate::Equate(ref data) => data.references_error(),
- Predicate::RegionOutlives(ref data) => data.references_error(),
- Predicate::TypeOutlives(ref data) => data.references_error(),
- Predicate::Projection(ref data) => data.references_error(),
+ FnConverging(t) => t.has_type_flags(flags),
+ FnDiverging => false,
}
}
}
-impl<A,B> ReferencesError for OutlivesPredicate<A,B>
- where A : ReferencesError, B : ReferencesError
-{
- fn references_error(&self) -> bool {
- self.0.references_error() || self.1.references_error()
+impl<'tcx> HasTypeFlags for FnSig<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.inputs.iter().any(|t| t.has_type_flags(flags)) ||
+ self.output.has_type_flags(flags)
}
}
-impl<'tcx> ReferencesError for EquatePredicate<'tcx>
-{
- fn references_error(&self) -> bool {
- self.0.references_error() || self.1.references_error()
+impl<'tcx> HasTypeFlags for Field<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.mt.ty.has_type_flags(flags)
}
}
-impl ReferencesError for Region
-{
- fn references_error(&self) -> bool {
- false
+impl<'tcx> HasTypeFlags for BareFnTy<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.sig.has_type_flags(flags)
+ }
+}
+
+impl<'tcx> HasTypeFlags for ClosureSubsts<'tcx> {
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.func_substs.has_type_flags(flags) ||
+ self.upvar_tys.iter().any(|t| t.has_type_flags(flags))
}
}
}
}
-impl<'tcx> fmt::Debug for field<'tcx> {
+impl<'tcx> fmt::Debug for Field<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "field({},{})", self.name, self.mt)
}
use middle::subst;
use middle::subst::VecPerParamSpace;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags, RegionEscape};
use middle::traits;
use std::fmt;
use syntax::abi;
use syntax::ast;
use syntax::owned_slice::OwnedSlice;
-use util::nodemap::FnvHashMap;
+use util::nodemap::{FnvHashMap, FnvHashSet};
///////////////////////////////////////////////////////////////////////////
// Two generic traits
super_fold_ty(self, t)
}
- fn fold_mt(&mut self, t: &ty::mt<'tcx>) -> ty::mt<'tcx> {
+ fn fold_mt(&mut self, t: &ty::TypeAndMut<'tcx>) -> ty::TypeAndMut<'tcx> {
super_fold_mt(self, t)
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::mt<'tcx> {
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::mt<'tcx> {
+impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> {
+ fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TypeAndMut<'tcx> {
folder.fold_mt(self)
}
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::field<'tcx> {
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::field<'tcx> {
- ty::field {
+impl<'tcx> TypeFoldable<'tcx> for ty::Field<'tcx> {
+ fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::Field<'tcx> {
+ ty::Field {
name: self.name,
mt: self.mt.fold_with(folder),
}
}
}
+impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> {
+ fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ClosureSubsts<'tcx> {
+ let func_substs = self.func_substs.fold_with(folder);
+ ty::ClosureSubsts {
+ func_substs: folder.tcx().mk_substs(func_substs),
+ upvar_tys: self.upvar_tys.fold_with(folder),
+ }
+ }
+}
+
impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> {
fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ItemSubsts<'tcx> {
ty::ItemSubsts {
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::MethodOrigin<'tcx> {
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::MethodOrigin<'tcx> {
- match *self {
- ty::MethodStatic(def_id) => {
- ty::MethodStatic(def_id)
- }
- ty::MethodStaticClosure(def_id) => {
- ty::MethodStaticClosure(def_id)
- }
- ty::MethodTypeParam(ref param) => {
- ty::MethodTypeParam(ty::MethodParam {
- trait_ref: param.trait_ref.fold_with(folder),
- method_num: param.method_num,
- impl_def_id: param.impl_def_id,
- })
- }
- ty::MethodTraitObject(ref object) => {
- ty::MethodTraitObject(ty::MethodObject {
- trait_ref: object.trait_ref.fold_with(folder),
- object_trait_id: object.object_trait_id,
- method_num: object.method_num,
- vtable_index: object.vtable_index,
- })
- }
- }
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds {
fn fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> ty::BuiltinBounds {
*self
space: self.space,
index: self.index,
default: self.default.fold_with(folder),
+ default_def_id: self.default_def_id,
object_lifetime_default: self.object_lifetime_default.fold_with(folder),
}
}
impl<'tcx> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx> {
fn fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::VtableObjectData<'tcx> {
traits::VtableObjectData {
- object_ty: self.object_ty.fold_with(folder),
upcast_trait_ref: self.upcast_trait_ref.fold_with(folder),
+ vtable_base: self.vtable_base
}
}
}
}
ty::TyClosure(did, ref substs) => {
let s = substs.fold_with(this);
- ty::TyClosure(did, this.tcx().mk_substs(s))
+ ty::TyClosure(did, s)
}
ty::TyProjection(ref data) => {
ty::TyProjection(data.fold_with(this))
ty.sty.clone()
}
};
- ty::mk_t(this.tcx(), sty)
+ this.tcx().mk_ty(sty)
}
pub fn super_fold_substs<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
}
pub fn super_fold_mt<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
- mt: &ty::mt<'tcx>)
- -> ty::mt<'tcx> {
- ty::mt {ty: mt.ty.fold_with(this),
+ mt: &ty::TypeAndMut<'tcx>)
+ -> ty::TypeAndMut<'tcx> {
+ ty::TypeAndMut {ty: mt.ty.fold_with(this),
mutbl: mt.mutbl}
}
region_bound: bounds.region_bound.fold_with(this),
builtin_bounds: bounds.builtin_bounds,
projection_bounds: bounds.projection_bounds.fold_with(this),
- region_bound_will_change: bounds.region_bound_will_change,
}
}
pub struct RegionFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
+ skipped_regions: &'a mut bool,
current_depth: u32,
fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
}
impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
- pub fn new<F>(tcx: &'a ty::ctxt<'tcx>, fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
+ pub fn new<F>(tcx: &'a ty::ctxt<'tcx>,
+ skipped_regions: &'a mut bool,
+ fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
where F : FnMut(ty::Region, u32) -> ty::Region
{
RegionFolder {
tcx: tcx,
+ skipped_regions: skipped_regions,
current_depth: 1,
fld_r: fld_r,
}
}
}
-pub fn collect_regions<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> Vec<ty::Region>
+/// Collects the free and escaping regions in `value` into `region_set`. Returns
+/// whether any late-bound regions were skipped.
+pub fn collect_regions<'tcx,T>(tcx: &ty::ctxt<'tcx>,
+ value: &T,
+ region_set: &mut FnvHashSet<ty::Region>) -> bool
where T : TypeFoldable<'tcx>
{
- let mut vec = Vec::new();
- fold_regions(tcx, value, |r, _| { vec.push(r); r });
- vec
+ let mut have_bound_regions = false;
+ fold_regions(tcx, value, &mut have_bound_regions,
+ |r, d| { region_set.insert(r.from_depth(d)); r });
+ have_bound_regions
}
+/// Folds the escaping and free regions in `value` using `f`, and
+/// sets `skipped_regions` to true if any late-bound region was found
+/// and skipped.
pub fn fold_regions<'tcx,T,F>(tcx: &ty::ctxt<'tcx>,
value: &T,
+ skipped_regions: &mut bool,
mut f: F)
-> T
where F : FnMut(ty::Region, u32) -> ty::Region,
T : TypeFoldable<'tcx>,
{
- value.fold_with(&mut RegionFolder::new(tcx, &mut f))
+ value.fold_with(&mut RegionFolder::new(tcx, skipped_regions, &mut f))
}
impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx>
ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => {
debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})",
r, self.current_depth);
+ *self.skipped_regions = true;
r
}
_ => {
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_escapes_depth(t, self.current_depth-1) {
+ if !t.has_regions_escaping_depth(self.current_depth-1) {
return t;
}
fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !ty::type_has_erasable_regions(t) {
+ if !t.has_erasable_regions() {
return t;
}
debug!("shift_regions(value={:?}, amount={})",
value, amount);
- value.fold_with(&mut RegionFolder::new(tcx, &mut |region, _current_depth| {
+ value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
shift_region(region, amount)
}))
}
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
- fn will_change(&mut self, _: bool, _: bool) -> bool {
- // we're ignoring regions in this code
- false
- }
-
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
(&ty::TyInfer(_), _) |
(_, &ty::TyInfer(_)) => {
- Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
+ Err(ty::TypeError::Sorts(ty_relate::expected_found(self, &a, &b)))
}
(&ty::TyError, _) | (_, &ty::TyError) => {
//! type equality, etc.
use middle::subst::{ErasedRegions, NonerasedRegions, ParamSpace, Substs};
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, TypeError};
use middle::ty_fold::TypeFoldable;
use std::rc::Rc;
use syntax::abi;
use syntax::ast;
-pub type RelateResult<'tcx, T> = Result<T, ty::type_err<'tcx>>;
+pub type RelateResult<'tcx, T> = Result<T, ty::TypeError<'tcx>>;
#[derive(Clone, Debug)]
pub enum Cause {
- ExistentialRegionBound(bool), // if true, this is a default, else explicit
+ ExistentialRegionBound, // relating an existential region bound
}
pub trait TypeRelation<'a,'tcx> : Sized {
f(self)
}
- /// Hack for deciding whether the lifetime bound defaults change
- /// will be a breaking change or not. The bools indicate whether
- /// `a`/`b` have a default that will change to `'static`; the
- /// result is true if this will potentially affect the affect of
- /// relating `a` and `b`.
- fn will_change(&mut self, a: bool, b: bool) -> bool;
-
/// Generic relation routine suitable for most anything.
fn relate<T:Relate<'a,'tcx>>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> {
Relate::relate(self, a, b)
}
+    /// Relate elements of two slices pairwise.
+ fn relate_zip<T:Relate<'a,'tcx>>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec<T>> {
+ assert_eq!(a.len(), b.len());
+ a.iter().zip(b).map(|(a, b)| self.relate(a, b)).collect()
+ }
+
/// Switch variance for the purpose of relating `a` and `b`.
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
variance: ty::Variance,
///////////////////////////////////////////////////////////////////////////
// Relate impls
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::mt<'tcx> {
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TypeAndMut<'tcx> {
fn relate<R>(relation: &mut R,
- a: &ty::mt<'tcx>,
- b: &ty::mt<'tcx>)
- -> RelateResult<'tcx, ty::mt<'tcx>>
+ a: &ty::TypeAndMut<'tcx>,
+ b: &ty::TypeAndMut<'tcx>)
+ -> RelateResult<'tcx, ty::TypeAndMut<'tcx>>
where R: TypeRelation<'a,'tcx>
{
debug!("{}.mts({:?}, {:?})",
a,
b);
if a.mutbl != b.mutbl {
- Err(ty::terr_mutability)
+ Err(TypeError::Mutability)
} else {
let mutbl = a.mutbl;
let variance = match mutbl {
ast::MutMutable => ty::Invariant,
};
let ty = try!(relation.relate_with_variance(variance, &a.ty, &b.ty));
- Ok(ty::mt {ty: ty, mutbl: mutbl})
+ Ok(ty::TypeAndMut {ty: ty, mutbl: mutbl})
}
}
}
let variances;
let opt_variances = if relation.tcx().variance_computed.get() {
- variances = ty::item_variances(relation.tcx(), item_def_id);
+ variances = relation.tcx().item_variances(item_def_id);
Some(&*variances)
} else {
None
where R: TypeRelation<'a,'tcx>
{
if a_tys.len() != b_tys.len() {
- return Err(ty::terr_ty_param_size(expected_found(relation,
+ return Err(TypeError::TyParamSize(expected_found(relation,
&a_tys.len(),
&b_tys.len())));
}
where R: TypeRelation<'a,'tcx>
{
if a.variadic != b.variadic {
- return Err(ty::terr_variadic_mismatch(
+ return Err(TypeError::VariadicMismatch(
expected_found(relation, &a.variadic, &b.variadic)));
}
(ty::FnDiverging, ty::FnDiverging) =>
Ok(ty::FnDiverging),
(a, b) =>
- Err(ty::terr_convergence_mismatch(
+ Err(TypeError::ConvergenceMismatch(
expected_found(relation, &(a != ty::FnDiverging), &(b != ty::FnDiverging)))),
});
where R: TypeRelation<'a,'tcx>
{
if a_args.len() != b_args.len() {
- return Err(ty::terr_arg_count);
+ return Err(TypeError::ArgCount);
}
a_args.iter().zip(b_args)
where R: TypeRelation<'a,'tcx>
{
if a != b {
- Err(ty::terr_unsafety_mismatch(expected_found(relation, a, b)))
+ Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
} else {
Ok(*a)
}
if a == b {
Ok(*a)
} else {
- Err(ty::terr_abi_mismatch(expected_found(relation, a, b)))
+ Err(TypeError::AbiMismatch(expected_found(relation, a, b)))
}
}
}
where R: TypeRelation<'a,'tcx>
{
if a.item_name != b.item_name {
- Err(ty::terr_projection_name_mismatched(
+ Err(TypeError::ProjectionNameMismatched(
expected_found(relation, &a.item_name, &b.item_name)))
} else {
let trait_ref = try!(relation.relate(&a.trait_ref, &b.trait_ref));
// so we can just iterate through the lists pairwise, so long as they are the
// same length.
if a.len() != b.len() {
- Err(ty::terr_projection_bounds_length(expected_found(relation, &a.len(), &b.len())))
+ Err(TypeError::ProjectionBoundsLength(expected_found(relation, &a.len(), &b.len())))
} else {
a.iter().zip(b)
.map(|(a, b)| relation.relate(a, b))
-> RelateResult<'tcx, ty::ExistentialBounds<'tcx>>
where R: TypeRelation<'a,'tcx>
{
- let will_change = relation.will_change(a.region_bound_will_change,
- b.region_bound_will_change);
-
let r =
try!(relation.with_cause(
- Cause::ExistentialRegionBound(will_change),
+ Cause::ExistentialRegionBound,
|relation| relation.relate_with_variance(ty::Contravariant,
&a.region_bound,
&b.region_bound)));
let pb = try!(relation.relate(&a.projection_bounds, &b.projection_bounds));
Ok(ty::ExistentialBounds { region_bound: r,
builtin_bounds: nb,
- projection_bounds: pb,
- region_bound_will_change: will_change })
+ projection_bounds: pb })
}
}
// Two sets of builtin bounds are only relatable if they are
// precisely the same (but see the coercion code).
if a != b {
- Err(ty::terr_builtin_bounds(expected_found(relation, a, b)))
+ Err(TypeError::BuiltinBoundsMismatch(expected_found(relation, a, b)))
} else {
Ok(*a)
}
{
// Different traits cannot be related
if a.def_id != b.def_id {
- Err(ty::terr_traits(expected_found(relation, &a.def_id, &b.def_id)))
+ Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
} else {
let substs = try!(relate_item_substs(relation, a.def_id, a.substs, b.substs));
Ok(ty::TraitRef { def_id: a.def_id, substs: relation.tcx().mk_substs(substs) })
if a_id == b_id =>
{
let substs = try!(relate_item_substs(relation, a_id, a_substs, b_substs));
- Ok(ty::mk_enum(tcx, a_id, tcx.mk_substs(substs)))
+ Ok(tcx.mk_enum(a_id, tcx.mk_substs(substs)))
}
(&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) =>
{
let principal = try!(relation.relate(&a_.principal, &b_.principal));
let bounds = try!(relation.relate(&a_.bounds, &b_.bounds));
- Ok(ty::mk_trait(tcx, principal, bounds))
+ Ok(tcx.mk_trait(principal, bounds))
}
(&ty::TyStruct(a_id, a_substs), &ty::TyStruct(b_id, b_substs))
if a_id == b_id =>
{
let substs = try!(relate_item_substs(relation, a_id, a_substs, b_substs));
- Ok(ty::mk_struct(tcx, a_id, tcx.mk_substs(substs)))
+ Ok(tcx.mk_struct(a_id, tcx.mk_substs(substs)))
}
- (&ty::TyClosure(a_id, a_substs),
- &ty::TyClosure(b_id, b_substs))
+ (&ty::TyClosure(a_id, ref a_substs),
+ &ty::TyClosure(b_id, ref b_substs))
if a_id == b_id =>
{
// All TyClosure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
- let substs = try!(relate_substs(relation, None, a_substs, b_substs));
- Ok(ty::mk_closure(tcx, a_id, tcx.mk_substs(substs)))
+ let substs = try!(relation.relate(a_substs, b_substs));
+ Ok(tcx.mk_closure_from_closure_substs(a_id, substs))
}
(&ty::TyBox(a_inner), &ty::TyBox(b_inner)) =>
{
let typ = try!(relation.relate(&a_inner, &b_inner));
- Ok(ty::mk_uniq(tcx, typ))
+ Ok(tcx.mk_box(typ))
}
(&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) =>
{
let mt = try!(relation.relate(a_mt, b_mt));
- Ok(ty::mk_ptr(tcx, mt))
+ Ok(tcx.mk_ptr(mt))
}
(&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) =>
{
let r = try!(relation.relate_with_variance(ty::Contravariant, a_r, b_r));
let mt = try!(relation.relate(a_mt, b_mt));
- Ok(ty::mk_rptr(tcx, tcx.mk_region(r), mt))
+ Ok(tcx.mk_ref(tcx.mk_region(r), mt))
}
(&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
{
let t = try!(relation.relate(&a_t, &b_t));
if sz_a == sz_b {
- Ok(ty::mk_vec(tcx, t, Some(sz_a)))
+ Ok(tcx.mk_array(t, sz_a))
} else {
- Err(ty::terr_fixed_array_size(expected_found(relation, &sz_a, &sz_b)))
+ Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b)))
}
}
(&ty::TySlice(a_t), &ty::TySlice(b_t)) =>
{
let t = try!(relation.relate(&a_t, &b_t));
- Ok(ty::mk_vec(tcx, t, None))
+ Ok(tcx.mk_slice(t))
}
(&ty::TyTuple(ref as_), &ty::TyTuple(ref bs)) =>
let ts = try!(as_.iter().zip(bs)
.map(|(a, b)| relation.relate(a, b))
.collect::<Result<_, _>>());
- Ok(ty::mk_tup(tcx, ts))
+ Ok(tcx.mk_tup(ts))
} else if !(as_.is_empty() || bs.is_empty()) {
- Err(ty::terr_tuple_size(
+ Err(TypeError::TupleSize(
expected_found(relation, &as_.len(), &bs.len())))
} else {
- Err(ty::terr_sorts(expected_found(relation, &a, &b)))
+ Err(TypeError::Sorts(expected_found(relation, &a, &b)))
}
}
if a_opt_def_id == b_opt_def_id =>
{
let fty = try!(relation.relate(a_fty, b_fty));
- Ok(ty::mk_bare_fn(tcx, a_opt_def_id, tcx.mk_bare_fn(fty)))
+ Ok(tcx.mk_fn(a_opt_def_id, tcx.mk_bare_fn(fty)))
}
(&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) =>
{
let projection_ty = try!(relation.relate(a_data, b_data));
- Ok(ty::mk_projection(tcx, projection_ty.trait_ref, projection_ty.item_name))
+ Ok(tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name))
}
_ =>
{
- Err(ty::terr_sorts(expected_found(relation, &a, &b)))
+ Err(TypeError::Sorts(expected_found(relation, &a, &b)))
}
}
}
+/// Relates two `ClosureSubsts` component-wise: the function substitutions
+/// are related via `relate_substs` (with no variance information, hence
+/// `None`), and the captured upvar types are related pairwise.
+impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ClosureSubsts<'tcx> {
+ fn relate<R>(relation: &mut R,
+ a: &ty::ClosureSubsts<'tcx>,
+ b: &ty::ClosureSubsts<'tcx>)
+ -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>>
+ where R: TypeRelation<'a,'tcx>
+ {
+ let func_substs = try!(relate_substs(relation, None, a.func_substs, b.func_substs));
+ // relate_zip pairs up the upvar type lists; both closures share the
+ // same def-id when this is reached, so the lists have equal length.
+ let upvar_tys = try!(relation.relate_zip(&a.upvar_tys, &b.upvar_tys));
+ Ok(ty::ClosureSubsts { func_substs: relation.tcx().mk_substs(func_substs),
+ upvar_tys: upvar_tys })
+ }
+}
+
impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::Region {
fn relate<R>(relation: &mut R,
a: &ty::Region,
pub fn expected_found<'a,'tcx:'a,R,T>(relation: &mut R,
a: &T,
b: &T)
- -> ty::expected_found<T>
+ -> ty::ExpectedFound<T>
where R: TypeRelation<'a,'tcx>, T: Clone
{
expected_found_bool(relation.a_is_expected(), a, b)
pub fn expected_found_bool<T>(a_is_expected: bool,
a: &T,
b: &T)
- -> ty::expected_found<T>
+ -> ty::ExpectedFound<T>
where T: Clone
{
let a = a.clone();
let b = b.clone();
if a_is_expected {
- ty::expected_found {expected: a, found: b}
+ ty::ExpectedFound {expected: a, found: b}
} else {
- ty::expected_found {expected: b, found: a}
+ ty::ExpectedFound {expected: b, found: a}
}
}
// except according to those terms.
//! An iterator over the type substructure.
+//! WARNING: this does not keep track of the region depth.
use middle::ty::{self, Ty};
use std::iter::Iterator;
}).collect::<Vec<_>>());
}
ty::TyEnum(_, ref substs) |
- ty::TyStruct(_, ref substs) |
- ty::TyClosure(_, ref substs) => {
+ ty::TyStruct(_, ref substs) => {
push_reversed(stack, substs.types.as_slice());
}
+ ty::TyClosure(_, ref substs) => {
+ push_reversed(stack, substs.func_substs.types.as_slice());
+ push_reversed(stack, &substs.upvar_tys);
+ }
ty::TyTuple(ref ts) => {
push_reversed(stack, ts);
}
if items.eh_personality().is_none() {
items.missing.push(lang_items::EhPersonalityLangItem);
}
+// Targets with a custom unwind-resume mechanism additionally need the
+// `eh_unwind_resume` lang item; record it as missing when it is absent.
+// Use lazy `&&` (not bitwise `&`) so the lang-item lookup is only
+// performed for targets that actually require it.
+ if sess.target.target.options.custom_unwind_resume &&
+ items.eh_unwind_resume().is_none() {
+ items.missing.push(lang_items::EhUnwindResumeLangItem);
+ }
{
let mut cx = Context { sess: sess, items: items };
) }
weak_lang_items! {
- panic_fmt, PanicFmtLangItem, rust_begin_unwind;
+ panic_fmt, PanicFmtLangItem, rust_begin_unwind;
stack_exhausted, StackExhaustedLangItem, rust_stack_exhausted;
eh_personality, EhPersonalityLangItem, rust_eh_personality;
+ // New weak lang item for targets with a custom unwind-resume routine
+ // (checked above against `custom_unwind_resume`).
+ eh_unwind_resume, EhUnwindResumeLangItem, rust_eh_unwind_resume;
}
pub debug_assertions: bool,
pub debuginfo: DebugInfoLevel,
pub lint_opts: Vec<(String, lint::Level)>,
+ pub lint_cap: Option<lint::Level>,
pub describe_lints: bool,
pub output_types: Vec<OutputType>,
// This was mutable for rustpkg, which updates search paths based on the
optimize: No,
debuginfo: NoDebugInfo,
lint_opts: Vec::new(),
+ lint_cap: None,
describe_lints: false,
output_types: Vec::new(),
search_paths: SearchPaths::new(),
"Force drop flag checks on or off"),
trace_macros: bool = (false, parse_bool,
"For every macro invocation, print its name and arguments"),
+ enable_nonzeroing_move_hints: bool = (false, parse_bool,
+ "Force nonzeroing move optimization on"),
}
pub fn default_lib_output() -> CrateType {
opt::multi("A", "allow", "Set lint allowed", "OPT"),
opt::multi("D", "deny", "Set lint denied", "OPT"),
opt::multi("F", "forbid", "Set lint forbidden", "OPT"),
+ opt::multi("", "cap-lints", "Set the most restrictive lint level. \
+ More restrictive lints are capped at this \
+ level", "LEVEL"),
opt::multi("C", "codegen", "Set a codegen option", "OPT[=VALUE]"),
opt::flag("V", "version", "Print version info and exit"),
opt::flag("v", "verbose", "Use verbose output"),
`typed` (crates expanded, with type annotations), or
`expanded,identified` (fully parenthesized, AST nodes with IDs).",
"TYPE"),
- opt::flagopt_u("", "xpretty",
- "Pretty-print the input instead of compiling, unstable variants;
+ opt::flagopt_u("", "unpretty",
+ "Present the input source, unstable (and less-pretty) variants;
valid types are any of the types for `--pretty`, as well as:
`flowgraph=<nodeid>` (graphviz formatted flowgraph for node), or
`everybody_loops` (all function bodies replaced with `loop {}`).",
}
}
+ let lint_cap = matches.opt_str("cap-lints").map(|cap| {
+ lint::Level::from_str(&cap).unwrap_or_else(|| {
+ early_error(&format!("unknown lint level: `{}`", cap))
+ })
+ });
+
let debugging_opts = build_debugging_options(matches);
let parse_only = debugging_opts.parse_only;
optimize: opt_level,
debuginfo: debuginfo,
lint_opts: lint_opts,
+ lint_cap: lint_cap,
describe_lints: describe_lints,
output_types: output_types,
search_paths: search_paths,
pub fn print_enum_sizes(&self) -> bool {
self.opts.debugging_opts.print_enum_sizes
}
+ pub fn nonzeroing_move_hints(&self) -> bool {
+ self.opts.debugging_opts.enable_nonzeroing_move_hints
+ }
pub fn sysroot<'a>(&'a self) -> &'a Path {
match self.opts.maybe_sysroot {
Some (ref sysroot) => sysroot,
!msg.contains("if and else have incompatible types") &&
!msg.contains("if may be missing an else clause") &&
!msg.contains("match arms have incompatible types") &&
- !msg.contains("structure constructor specifies a structure of type") {
+ !msg.contains("structure constructor specifies a structure of type") &&
+ !msg.contains("has an incompatible type for trait") {
return None
}
let first = msg.match_indices("expected").filter(|s| {
// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
const NANOS_PER_SEC: f64 = 1_000_000_000.0;
- let secs = dur.secs() as f64;
- let secs = secs + dur.extra_nanos() as f64 / NANOS_PER_SEC;
- println!("{}time: {:.3} \t{}", repeat(" ").take(old).collect::<String>(),
- secs, what);
+ let secs = dur.as_secs() as f64;
+ let secs = secs + dur.subsec_nanos() as f64 / NANOS_PER_SEC;
+
+ let mem_string = match get_resident() {
+ Some(n) => {
+ let mb = n as f64 / 1_000_000.0;
+ format!("; rss: {}MB", mb.round() as usize)
+ }
+ None => "".to_owned(),
+ };
+ println!("{}time: {:.3}{}\t{}", repeat(" ").take(old).collect::<String>(),
+ secs, mem_string, what);
DEPTH.with(|slot| slot.set(old));
rv
}
+// Memory reporting
+// Best-effort resident-set-size queries used for the `-Z time-passes`
+// "rss:" annotation; each returns None when the OS query fails.
+#[cfg(unix)]
+fn get_resident() -> Option<usize> {
+ get_proc_self_statm_field(1)
+}
+
+#[cfg(windows)]
+fn get_resident() -> Option<usize> {
+ get_working_set_size()
+}
+
+// Like std::macros::try!, but for Option<>.
+macro_rules! option_try(
+ ($e:expr) => (match $e { Some(e) => e, None => return None })
+);
+
+// Queries the process working-set size via the PSAPI
+// `GetProcessMemoryInfo` call.
+#[cfg(windows)]
+fn get_working_set_size() -> Option<usize> {
+ use libc::{BOOL, DWORD, HANDLE, SIZE_T, GetCurrentProcess};
+ use std::mem;
+ // Local mirror of the Win32 PROCESS_MEMORY_COUNTERS layout.
+ #[repr(C)] #[allow(non_snake_case)]
+ struct PROCESS_MEMORY_COUNTERS {
+ cb: DWORD,
+ PageFaultCount: DWORD,
+ PeakWorkingSetSize: SIZE_T,
+ WorkingSetSize: SIZE_T,
+ QuotaPeakPagedPoolUsage: SIZE_T,
+ QuotaPagedPoolUsage: SIZE_T,
+ QuotaPeakNonPagedPoolUsage: SIZE_T,
+ QuotaNonPagedPoolUsage: SIZE_T,
+ PagefileUsage: SIZE_T,
+ PeakPagefileUsage: SIZE_T,
+ }
+ type PPROCESS_MEMORY_COUNTERS = *mut PROCESS_MEMORY_COUNTERS;
+ #[link(name = "psapi")]
+ extern "system" {
+ fn GetProcessMemoryInfo(Process: HANDLE,
+ ppsmemCounters: PPROCESS_MEMORY_COUNTERS,
+ cb: DWORD) -> BOOL;
+ }
+ // SAFETY: PROCESS_MEMORY_COUNTERS is plain-old-data, so an all-zero
+ // value is valid; `cb` is then set to the true struct size as the API
+ // requires.
+ let mut pmc: PROCESS_MEMORY_COUNTERS = unsafe { mem::zeroed() };
+ pmc.cb = mem::size_of_val(&pmc) as DWORD;
+ // SAFETY: `pmc` is a valid, writable struct whose size is recorded in
+ // `pmc.cb`; a zero return signals failure per the Win32 convention.
+ match unsafe { GetProcessMemoryInfo(GetCurrentProcess(), &mut pmc, pmc.cb) } {
+ 0 => None,
+ _ => Some(pmc.WorkingSetSize as usize),
+ }
+}
+
+// Reads one whitespace-separated field of /proc/self/statm and converts
+// it from pages to bytes. Field index 1 is the resident set size
+// (see proc(5)).
+#[cfg_attr(windows, allow(dead_code))]
+#[allow(deprecated)]
+fn get_proc_self_statm_field(field: usize) -> Option<usize> {
+ use std::fs::File;
+ use std::io::Read;
+
+ assert!(cfg!(unix));
+
+ let mut f = option_try!(File::open("/proc/self/statm").ok());
+ let mut contents = String::new();
+ option_try!(f.read_to_string(&mut contents).ok());
+ let s = option_try!(contents.split_whitespace().nth(field));
+ let npages = option_try!(s.parse::<usize>().ok());
+ Some(npages * ::std::env::page_size())
+}
+
pub fn indent<R, F>(op: F) -> R where
R: Debug,
F: FnOnce() -> R,
use middle::ty::{ReEarlyBound, BrFresh, ctxt};
use middle::ty::{ReFree, ReScope, ReInfer, ReStatic, Region, ReEmpty};
use middle::ty::{ReSkolemized, ReVar, BrEnv};
-use middle::ty::{mt, Ty};
use middle::ty::{TyBool, TyChar, TyStruct, TyEnum};
use middle::ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyBareFn};
use middle::ty::{TyParam, TyRawPtr, TyRef, TyTuple};
use middle::ty::TyClosure;
use middle::ty::{TyBox, TyTrait, TyInt, TyUint, TyInfer};
-use middle::ty;
+use middle::ty::{self, TypeAndMut, Ty, HasTypeFlags};
use middle::ty_fold::{self, TypeFoldable};
use std::fmt;
match output {
ty::FnConverging(ty) => {
- if !ty::type_is_nil(ty) {
+ if !ty.is_nil() {
try!(write!(f, " -> {}", ty));
}
Ok(())
where GG: for<'tcx> FnOnce(&ty::ctxt<'tcx>) -> ty::Generics<'tcx>
{
let (fn_trait_kind, verbose) = try!(ty::tls::with(|tcx| {
- try!(write!(f, "{}", ty::item_path_str(tcx, did)));
+ try!(write!(f, "{}", tcx.item_path_str(did)));
Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose()))
}));
ty_params.iter().zip(tps).rev().take_while(|&(def, &actual)| {
match def.default {
Some(default) => {
- if !has_self && ty::type_has_self(default) {
+ if !has_self && default.has_self_ty() {
// In an object type, there is no `Self`, and
// thus if the default value references Self,
// the user will be required to give an
parameterized(f, trait_ref.substs,
trait_ref.def_id,
projection_bounds,
- |tcx| ty::lookup_trait_def(tcx, trait_ref.def_id).generics.clone())
+ |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone())
}
}
.expect("could not lift TraitRef for printing");
let projections = tcx.lift(&bounds.projection_bounds[..])
.expect("could not lift projections for printing");
- let projections = projections.map_in_place(|p| p.0);
+ let projections = projections.into_iter().map(|p| p.0).collect();
let tap = ty::Binder(TraitAndProjections(principal, projections));
in_binder(f, tcx, &ty::Binder(""), Some(tap))
try!(write!(f, " + {}", bound));
}
- if bounds.region_bound_will_change && verbose() {
- try!(write!(f, " [WILL-CHANGE]"));
- }
-
Ok(())
}
}
}
}
-impl<'tcx> fmt::Display for ty::mt<'tcx> {
+impl<'tcx> fmt::Display for ty::TypeAndMut<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}{}",
if self.mutbl == ast::MutMutable { "mut " } else { "" },
}
}
-impl<'tcx> fmt::Debug for ty::MethodOrigin<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- ty::MethodStatic(def_id) => {
- write!(f, "MethodStatic({:?})", def_id)
- }
- ty::MethodStaticClosure(def_id) => {
- write!(f, "MethodStaticClosure({:?})", def_id)
- }
- ty::MethodTypeParam(ref p) => write!(f, "{:?}", p),
- ty::MethodTraitObject(ref p) => write!(f, "{:?}", p)
- }
- }
-}
-
-impl<'tcx> fmt::Debug for ty::MethodParam<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "MethodParam({:?},{})",
- self.trait_ref,
- self.method_num)
- }
-}
-
-impl<'tcx> fmt::Debug for ty::MethodObject<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "MethodObject({:?},{},{})",
- self.trait_ref,
- self.method_num,
- self.vtable_index)
- }
-}
-
impl<'tcx> fmt::Debug for ty::ExistentialBounds<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut empty = true;
impl<'tcx> fmt::Display for ty::TraitRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
parameterized(f, self.substs, self.def_id, &[],
- |tcx| ty::lookup_trait_def(tcx, self.def_id).generics.clone())
+ |tcx| tcx.lookup_trait_def(self.def_id).generics.clone())
}
}
if let Some(def_id) = opt_def_id {
try!(write!(f, " {{{}}}", ty::tls::with(|tcx| {
- ty::item_path_str(tcx, def_id)
+ tcx.item_path_str(def_id)
})));
}
Ok(())
TyError => write!(f, "[type error]"),
TyParam(ref param_ty) => write!(f, "{}", param_ty),
TyEnum(did, substs) | TyStruct(did, substs) => {
- parameterized(f, substs, did, &[],
- |tcx| ty::lookup_item_type(tcx, did).generics)
+ ty::tls::with(|tcx| {
+ if did.krate == ast::LOCAL_CRATE &&
+ !tcx.tcache.borrow().contains_key(&did) {
+ write!(f, "{}<..>", tcx.item_path_str(did))
+ } else {
+ parameterized(f, substs, did, &[],
+ |tcx| tcx.lookup_item_type(did).generics)
+ }
+ })
}
TyTrait(ref data) => write!(f, "{}", data),
ty::TyProjection(ref data) => write!(f, "{}", data),
TyStr => write!(f, "str"),
- TyClosure(ref did, substs) => ty::tls::with(|tcx| {
+ TyClosure(ref did, ref substs) => ty::tls::with(|tcx| {
try!(write!(f, "[closure"));
- let closure_tys = tcx.closure_tys.borrow();
- try!(closure_tys.get(did).map(|cty| &cty.sig).and_then(|sig| {
- tcx.lift(&substs).map(|substs| sig.subst(tcx, substs))
- }).map(|sig| {
- fn_sig(f, &sig.0.inputs, false, sig.0.output)
- }).unwrap_or_else(|| {
- if did.krate == ast::LOCAL_CRATE {
- try!(write!(f, " {:?}", tcx.map.span(did.node)));
+
+ if did.krate == ast::LOCAL_CRATE {
+ try!(write!(f, "@{:?}", tcx.map.span(did.node)));
+ let mut sep = " ";
+ try!(tcx.with_freevars(did.node, |freevars| {
+ for (freevar, upvar_ty) in freevars.iter().zip(&substs.upvar_tys) {
+ let node_id = freevar.def.local_node_id();
+ try!(write!(f,
+ "{}{}:{}",
+ sep,
+ tcx.local_var_name_str(node_id),
+ upvar_ty));
+ sep = ", ";
+ }
+ Ok(())
+ }))
+ } else {
+ // cross-crate closure types should only be
+ // visible in trans bug reports, I imagine.
+ try!(write!(f, "@{:?}", did));
+ let mut sep = " ";
+ for (index, upvar_ty) in substs.upvar_tys.iter().enumerate() {
+ try!(write!(f, "{}{}:{}", sep, index, upvar_ty));
+ sep = ", ";
}
- Ok(())
- }));
- if verbose() {
- try!(write!(f, " id={:?}", did));
}
+
write!(f, "]")
}),
TyArray(ty, sz) => write!(f, "[{}; {}]", ty, sz),
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "UpvarId({};`{}`;{})",
self.var_id,
- ty::tls::with(|tcx| ty::local_var_name_str(tcx, self.var_id)),
+ ty::tls::with(|tcx| tcx.local_var_name_str(self.var_id)),
self.closure_expr_id)
}
}
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A helper class for dealing with static archives
-
-use std::env;
-use std::ffi::OsString;
-use std::fs::{self, File};
-use std::io::prelude::*;
-use std::io;
-use std::path::{Path, PathBuf};
-use std::process::{Command, Output, Stdio};
-use std::str;
-use syntax::diagnostic::Handler as ErrorHandler;
-use rustc_llvm::archive_ro::ArchiveRO;
-
-use tempdir::TempDir;
-
-pub const METADATA_FILENAME: &'static str = "rust.metadata.bin";
-
-pub struct ArchiveConfig<'a> {
- pub handler: &'a ErrorHandler,
- pub dst: PathBuf,
- pub lib_search_paths: Vec<PathBuf>,
- pub slib_prefix: String,
- pub slib_suffix: String,
- pub ar_prog: String,
- pub command_path: OsString,
-}
-
-pub struct Archive<'a> {
- config: ArchiveConfig<'a>,
-}
-
-/// Helper for adding many files to an archive with a single invocation of
-/// `ar`.
-#[must_use = "must call build() to finish building the archive"]
-pub struct ArchiveBuilder<'a> {
- archive: Archive<'a>,
- work_dir: TempDir,
- /// Filename of each member that should be added to the archive.
- members: Vec<PathBuf>,
- should_update_symbols: bool,
-}
-
-enum Action<'a> {
- Remove(&'a Path),
- AddObjects(&'a [&'a PathBuf], bool),
- UpdateSymbols,
-}
-
-pub fn find_library(name: &str, osprefix: &str, ossuffix: &str,
- search_paths: &[PathBuf],
- handler: &ErrorHandler) -> PathBuf {
- // On Windows, static libraries sometimes show up as libfoo.a and other
- // times show up as foo.lib
- let oslibname = format!("{}{}{}", osprefix, name, ossuffix);
- let unixlibname = format!("lib{}.a", name);
-
- for path in search_paths {
- debug!("looking for {} inside {:?}", name, path);
- let test = path.join(&oslibname[..]);
- if test.exists() { return test }
- if oslibname != unixlibname {
- let test = path.join(&unixlibname[..]);
- if test.exists() { return test }
- }
- }
- handler.fatal(&format!("could not find native static library `{}`, \
- perhaps an -L flag is missing?",
- name));
-}
-
-impl<'a> Archive<'a> {
- fn new(config: ArchiveConfig<'a>) -> Archive<'a> {
- Archive { config: config }
- }
-
- /// Opens an existing static archive
- pub fn open(config: ArchiveConfig<'a>) -> Archive<'a> {
- let archive = Archive::new(config);
- assert!(archive.config.dst.exists());
- archive
- }
-
- /// Removes a file from this archive
- pub fn remove_file(&mut self, file: &str) {
- self.run(None, Action::Remove(Path::new(file)));
- }
-
- /// Lists all files in an archive
- pub fn files(&self) -> Vec<String> {
- let archive = match ArchiveRO::open(&self.config.dst) {
- Some(ar) => ar,
- None => return Vec::new(),
- };
- let ret = archive.iter().filter_map(|child| child.name())
- .map(|name| name.to_string())
- .collect();
- return ret;
- }
-
- /// Creates an `ArchiveBuilder` for adding files to this archive.
- pub fn extend(self) -> ArchiveBuilder<'a> {
- ArchiveBuilder::new(self)
- }
-
- fn run(&self, cwd: Option<&Path>, action: Action) -> Output {
- let abs_dst = env::current_dir().unwrap().join(&self.config.dst);
- let ar = &self.config.ar_prog;
- let mut cmd = Command::new(ar);
- cmd.env("PATH", &self.config.command_path);
- cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
- self.prepare_ar_action(&mut cmd, &abs_dst, action);
- info!("{:?}", cmd);
-
- if let Some(p) = cwd {
- cmd.current_dir(p);
- info!("inside {:?}", p.display());
- }
-
- let handler = &self.config.handler;
- match cmd.spawn() {
- Ok(prog) => {
- let o = prog.wait_with_output().unwrap();
- if !o.status.success() {
- handler.err(&format!("{:?} failed with: {}", cmd, o.status));
- handler.note(&format!("stdout ---\n{}",
- str::from_utf8(&o.stdout).unwrap()));
- handler.note(&format!("stderr ---\n{}",
- str::from_utf8(&o.stderr).unwrap()));
- handler.abort_if_errors();
- }
- o
- },
- Err(e) => {
- handler.err(&format!("could not exec `{}`: {}",
- self.config.ar_prog, e));
- handler.abort_if_errors();
- panic!("rustc::back::archive::run() should not reach this point");
- }
- }
- }
-
- fn prepare_ar_action(&self, cmd: &mut Command, dst: &Path, action: Action) {
- match action {
- Action::Remove(file) => {
- cmd.arg("d").arg(dst).arg(file);
- }
- Action::AddObjects(objs, update_symbols) => {
- cmd.arg(if update_symbols {"crs"} else {"crS"})
- .arg(dst)
- .args(objs);
- }
- Action::UpdateSymbols => {
- cmd.arg("s").arg(dst);
- }
- }
- }
-}
-
-impl<'a> ArchiveBuilder<'a> {
- fn new(archive: Archive<'a>) -> ArchiveBuilder<'a> {
- ArchiveBuilder {
- archive: archive,
- work_dir: TempDir::new("rsar").unwrap(),
- members: vec![],
- should_update_symbols: false,
- }
- }
-
- /// Create a new static archive, ready for adding files.
- pub fn create(config: ArchiveConfig<'a>) -> ArchiveBuilder<'a> {
- let archive = Archive::new(config);
- ArchiveBuilder::new(archive)
- }
-
- /// Adds all of the contents of a native library to this archive. This will
- /// search in the relevant locations for a library named `name`.
- pub fn add_native_library(&mut self, name: &str) -> io::Result<()> {
- let location = find_library(name,
- &self.archive.config.slib_prefix,
- &self.archive.config.slib_suffix,
- &self.archive.config.lib_search_paths,
- self.archive.config.handler);
- self.add_archive(&location, name, |_| false)
- }
-
- /// Adds all of the contents of the rlib at the specified path to this
- /// archive.
- ///
- /// This ignores adding the bytecode from the rlib, and if LTO is enabled
- /// then the object file also isn't added.
- pub fn add_rlib(&mut self, rlib: &Path, name: &str,
- lto: bool) -> io::Result<()> {
- // Ignoring obj file starting with the crate name
- // as simple comparison is not enough - there
- // might be also an extra name suffix
- let obj_start = format!("{}", name);
- let obj_start = &obj_start[..];
- // Ignoring all bytecode files, no matter of
- // name
- let bc_ext = ".bytecode.deflate";
-
- self.add_archive(rlib, &name[..], |fname: &str| {
- let skip_obj = lto && fname.starts_with(obj_start)
- && fname.ends_with(".o");
- skip_obj || fname.ends_with(bc_ext) || fname == METADATA_FILENAME
- })
- }
-
- /// Adds an arbitrary file to this archive
- pub fn add_file(&mut self, file: &Path) -> io::Result<()> {
- let filename = Path::new(file.file_name().unwrap());
- let new_file = self.work_dir.path().join(&filename);
- try!(fs::copy(file, &new_file));
- self.members.push(filename.to_path_buf());
- Ok(())
- }
-
- /// Indicate that the next call to `build` should updates all symbols in
- /// the archive (run 'ar s' over it).
- pub fn update_symbols(&mut self) {
- self.should_update_symbols = true;
- }
-
- /// Combine the provided files, rlibs, and native libraries into a single
- /// `Archive`.
- pub fn build(self) -> Archive<'a> {
- // Get an absolute path to the destination, so `ar` will work even
- // though we run it from `self.work_dir`.
- let mut objects = Vec::new();
- let mut total_len = self.archive.config.dst.to_string_lossy().len();
-
- if self.members.is_empty() {
- if self.should_update_symbols {
- self.archive.run(Some(self.work_dir.path()),
- Action::UpdateSymbols);
- }
- return self.archive;
- }
-
- // Don't allow the total size of `args` to grow beyond 32,000 bytes.
- // Windows will raise an error if the argument string is longer than
- // 32,768, and we leave a bit of extra space for the program name.
- const ARG_LENGTH_LIMIT: usize = 32_000;
-
- for member_name in &self.members {
- let len = member_name.to_string_lossy().len();
-
- // `len + 1` to account for the space that's inserted before each
- // argument. (Windows passes command-line arguments as a single
- // string, not an array of strings.)
- if total_len + len + 1 > ARG_LENGTH_LIMIT {
- // Add the archive members seen so far, without updating the
- // symbol table.
- self.archive.run(Some(self.work_dir.path()),
- Action::AddObjects(&objects, false));
-
- objects.clear();
- total_len = self.archive.config.dst.to_string_lossy().len();
- }
-
- objects.push(member_name);
- total_len += len + 1;
- }
-
- // Add the remaining archive members, and update the symbol table if
- // necessary.
- self.archive.run(Some(self.work_dir.path()),
- Action::AddObjects(&objects, self.should_update_symbols));
-
- self.archive
- }
-
- fn add_archive<F>(&mut self, archive: &Path, name: &str,
- mut skip: F) -> io::Result<()>
- where F: FnMut(&str) -> bool,
- {
- let archive = match ArchiveRO::open(archive) {
- Some(ar) => ar,
- None => return Err(io::Error::new(io::ErrorKind::Other,
- "failed to open archive")),
- };
-
- // Next, we must rename all of the inputs to "guaranteed unique names".
- // We write each file into `self.work_dir` under its new unique name.
- // The reason for this renaming is that archives are keyed off the name
- // of the files, so if two files have the same name they will override
- // one another in the archive (bad).
- //
- // We skip any files explicitly desired for skipping, and we also skip
- // all SYMDEF files as these are just magical placeholders which get
- // re-created when we make a new archive anyway.
- for file in archive.iter() {
- let filename = match file.name() {
- Some(s) => s,
- None => continue,
- };
- if filename.contains(".SYMDEF") { continue }
- if skip(filename) { continue }
- let filename = Path::new(filename).file_name().unwrap()
- .to_str().unwrap();
-
- // Archives on unix systems typically do not have slashes in
- // filenames as the `ar` utility generally only uses the last
- // component of a path for the filename list in the archive. On
- // Windows, however, archives assembled with `lib.exe` will preserve
- // the full path to the file that was placed in the archive,
- // including path separators.
- //
- // The code below is munging paths so it'll go wrong pretty quickly
- // if there's some unexpected slashes in the filename, so here we
- // just chop off everything but the filename component. Note that
- // this can cause duplicate filenames, but that's also handled below
- // as well.
- let filename = Path::new(filename).file_name().unwrap()
- .to_str().unwrap();
-
- // An archive can contain files of the same name multiple times, so
- // we need to be sure to not have them overwrite one another when we
- // extract them. Consequently we need to find a truly unique file
- // name for us!
- let mut new_filename = String::new();
- for n in 0.. {
- let n = if n == 0 {String::new()} else {format!("-{}", n)};
- new_filename = format!("r{}-{}-{}", n, name, filename);
-
- // LLDB (as mentioned in back::link) crashes on filenames of
- // exactly
- // 16 bytes in length. If we're including an object file with
- // exactly 16-bytes of characters, give it some prefix so
- // that it's not 16 bytes.
- new_filename = if new_filename.len() == 16 {
- format!("lldb-fix-{}", new_filename)
- } else {
- new_filename
- };
-
- let present = self.members.iter().filter_map(|p| {
- p.file_name().and_then(|f| f.to_str())
- }).any(|s| s == new_filename);
- if !present {
- break
- }
- }
- let dst = self.work_dir.path().join(&new_filename);
- try!(try!(File::create(&dst)).write_all(file.data()));
- self.members.push(PathBuf::from(new_filename));
- }
-
- Ok(())
- }
-}
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use target_strs;
-use syntax::abi;
-
-pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
- let cc_args = if target_triple.contains("thumb") {
- vec!("-mthumb".to_string())
- } else {
- vec!("-marm".to_string())
- };
- return target_strs::t {
- module_asm: "".to_string(),
-
- data_layout: match target_os {
- abi::OsMacos => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsiOS => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsWindows => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsLinux => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsAndroid => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
- },
-
- target_triple: target_triple,
-
- cc_args: cc_args,
- };
-}
#[macro_use] extern crate log;
pub mod abi;
-pub mod archive;
pub mod tempdir;
-pub mod arm;
-pub mod mips;
-pub mod mipsel;
pub mod rpath;
pub mod sha2;
pub mod svh;
-pub mod target_strs;
-pub mod x86;
-pub mod x86_64;
pub mod target;
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use target_strs;
-use syntax::abi;
-
-pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
- return target_strs::t {
- module_asm: "".to_string(),
-
- data_layout: match target_os {
- abi::OsMacos => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsiOS => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsWindows => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsLinux => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsAndroid => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
- "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
- },
-
- target_triple: target_triple,
-
- cc_args: Vec::new(),
- };
-}
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use target_strs;
-use syntax::abi;
-
-pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
- return target_strs::t {
- module_asm: "".to_string(),
-
- data_layout: match target_os {
- abi::OsMacos => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsiOS => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsWindows => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsLinux => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsAndroid => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
-
- abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
- "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string()
- }
- },
-
- target_triple: target_triple,
-
- cc_args: Vec::new(),
- };
-}
//! use. This implementation is not intended for external use or for any use where security is
//! important.
-use std::iter::repeat;
use std::slice::bytes::{MutableByteVector, copy_memory};
use serialize::hex::ToHex;
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
- let mut buf: Vec<u8> = repeat(0).take((self.output_bits()+7)/8).collect();
+ let mut buf = vec![0; (self.output_bits()+7)/8];
self.result(&mut buf);
buf
}
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
- use std::iter::repeat;
use std::u64;
use super::{Digest, Sha256, FixedBuffer};
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
let total_size = 1000000;
- let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
+ let buffer = vec![b'a'; blocksize * 2];
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
ExprCast(..) => SawExprCast,
ExprIf(..) => SawExprIf,
ExprWhile(..) => SawExprWhile,
- ExprLoop(_, id) => SawExprLoop(id.map(content)),
+ ExprLoop(_, id) => SawExprLoop(id.map(|id| id.name.as_str())),
ExprMatch(..) => SawExprMatch,
ExprClosure(..) => SawExprClosure,
ExprBlock(..) => SawExprBlock,
ExprAssign(..) => SawExprAssign,
ExprAssignOp(op, _, _) => SawExprAssignOp(op.node),
- ExprField(_, id) => SawExprField(content(id.node)),
+ ExprField(_, id) => SawExprField(id.node.name.as_str()),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
ExprRange(..) => SawExprRange,
ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)),
ExprAddrOf(m, _) => SawExprAddrOf(m),
- ExprBreak(id) => SawExprBreak(id.map(content)),
- ExprAgain(id) => SawExprAgain(id.map(content)),
+ ExprBreak(id) => SawExprBreak(id.map(|id| id.name.as_str())),
+ ExprAgain(id) => SawExprAgain(id.map(|id| id.name.as_str())),
ExprRet(..) => SawExprRet,
ExprInlineAsm(ref asm) => SawExprInlineAsm(asm),
ExprStruct(..) => SawExprStruct,
}
}
- // Ad-hoc overloading between Ident and Name to their intern table lookups.
- trait InternKey { fn get_content(self) -> token::InternedString; }
- impl InternKey for Ident {
- fn get_content(self) -> token::InternedString { token::get_ident(self) }
- }
- impl InternKey for Name {
- fn get_content(self) -> token::InternedString { token::get_name(self) }
- }
- fn content<K:InternKey>(k: K) -> token::InternedString { k.get_content() }
-
impl<'a, 'v> Visitor<'v> for StrictVersionHashVisitor<'a> {
fn visit_mac(&mut self, mac: &Mac) {
&MacInvocTT(ref path, ref _tts, ref _stx_ctxt) => {
let s = &path.segments;
assert_eq!(s.len(), 1);
- content(s[0].identifier)
+ s[0].identifier.name.as_str()
}
}
}
fn visit_struct_def(&mut self, s: &StructDef, ident: Ident,
g: &Generics, _: NodeId) {
- SawStructDef(content(ident)).hash(self.st);
+ SawStructDef(ident.name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
// pattern, please move that method up above this comment.)
fn visit_ident(&mut self, _: Span, ident: Ident) {
- SawIdent(content(ident)).hash(self.st);
+ SawIdent(ident.name.as_str()).hash(self.st);
}
fn visit_lifetime_ref(&mut self, l: &Lifetime) {
- SawLifetimeRef(content(l.name)).hash(self.st);
+ SawLifetimeRef(l.name.as_str()).hash(self.st);
}
fn visit_lifetime_def(&mut self, l: &LifetimeDef) {
- SawLifetimeDef(content(l.lifetime.name)).hash(self.st);
+ SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// We do recursively walk the bodies of functions/methods
pub fn target() -> Target {
Target {
- // reference layout: e-m:o-i64:64-i128:128-n32:64-S128
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- i128:128-f32:32:32-f64:64:64-v64:64:64-v128:128:128-\
- a:0:64-n32:64-S128".to_string(),
llvm_target: "arm64-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
pub fn target() -> Target {
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- n32:64-S128".to_string(),
llvm_target: "aarch64-linux-android".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
pub fn target() -> Target {
let base = super::linux_base::opts();
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- n32:64-S128".to_string(),
llvm_target: "aarch64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
has_rpath: true,
dll_prefix: "lib".to_string(),
dll_suffix: ".dylib".to_string(),
+ archive_format: "bsd".to_string(),
pre_link_args: Vec::new(),
.. Default::default()
}
base.features = "+v7".to_string();
Target {
- data_layout: "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:64:128-a:0:64-\
- n32".to_string(),
llvm_target: "arm-linux-androideabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
pub fn target() -> Target {
let base = super::linux_base::opts();
Target {
- data_layout: "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string(),
llvm_target: "arm-unknown-linux-gnueabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
pub fn target() -> Target {
let base = super::linux_base::opts();
Target {
- data_layout: "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string(),
llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
pub fn target() -> Target {
Target {
- data_layout: "e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
llvm_target: "armv7-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
pub fn target() -> Target {
Target {
- data_layout: "e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
llvm_target: "armv7s-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
linker_is_gnu: true,
has_rpath: true,
position_independent_executables: true,
- pre_link_args: vec!(
- ),
+ archive_format: "gnu".to_string(),
.. Default::default()
}
"-Wl,--as-needed".to_string(),
),
position_independent_executables: true,
+ archive_format: "bsd".to_string(),
.. Default::default()
}
}
executables: true,
morestack: true,
has_rpath: true,
+ archive_format: "gnu".to_string(),
.. Default::default()
}
pub fn target() -> Target {
Target {
- data_layout: "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
- -i32:32:32-i64:32:64\
- -f32:32:32-f64:32:64-v64:64:64\
- -v128:128:128-a:0:64-f80:128:128\
- -n8:16:32".to_string(),
llvm_target: "i386-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
base.pre_link_args.push("-m32".to_string());
Target {
- data_layout: "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
- -i32:32:32-i64:32:64\
- -f32:32:32-f64:32:64-v64:64:64\
- -v128:128:128-a:0:64-f80:128:128\
- -n8:16:32".to_string(),
llvm_target: "i686-apple-darwin".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
options.pre_link_args.push("-shared-libgcc".to_string());
Target {
- data_layout: "e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-pc-windows-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_msvc_base::opts();
+ base.cpu = "i686".to_string();
+
+ Target {
+ llvm_target: "i686-pc-windows-msvc".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ arch: "x86".to_string(),
+ target_os: "windows".to_string(),
+ target_env: "msvc".to_string(),
+ options: base,
+ }
+}
base.pre_link_args.push("-m32".to_string());
Target {
- data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-unknown-dragonfly".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::Target;
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+ base.cpu = "pentium4".to_string();
+ base.pre_link_args.push("-m32".to_string());
+ base.morestack = false;
+
+ Target {
+ llvm_target: "i686-unknown-freebsd".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ arch: "x86".to_string(),
+ target_os: "freebsd".to_string(),
+ target_env: "".to_string(),
+ options: base,
+ }
+}
base.pre_link_args.push("-m32".to_string());
Target {
- data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
"-Wl,--as-needed".to_string(),
],
position_independent_executables: true,
+ archive_format: "gnu".to_string(),
.. Default::default()
}
}
pub fn target() -> Target {
Target {
- data_layout: "E-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string(),
llvm_target: "mips-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
pub fn target() -> Target {
Target {
- data_layout: "e-p:32:32:32\
- -i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
- -f32:32:32-f64:64:64\
- -v64:64:64-v128:64:128\
- -a:0:64-n32".to_string(),
llvm_target: "mipsel-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
mod freebsd_base;
mod linux_base;
mod openbsd_base;
+mod netbsd_base;
mod windows_base;
mod windows_msvc_base;
/// Every field here must be specified, and has no default value.
#[derive(Clone, Debug)]
pub struct Target {
- /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
- pub data_layout: String,
/// Target triple to pass to LLVM.
pub llvm_target: String,
/// String to use as the `target_endian` `cfg` variable.
/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
#[derive(Clone, Debug)]
pub struct TargetOptions {
+ /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
+ pub data_layout: String,
/// Linker to invoke. Defaults to "cc".
pub linker: String,
/// Archive utility to use when managing archives. Defaults to "ar".
/// the functions in the executable are not randomized and can be used
/// during an exploit of a vulnerability in any code.
pub position_independent_executables: bool,
+ /// Format that archives should be emitted in. This affects whether we use
+ /// LLVM to assemble an archive or fall back to the system linker, and
+ /// currently only "gnu" is used to fall into LLVM. Unknown strings cause
+ /// the system linker to be used.
+ pub archive_format: String,
+ /// Whether the target uses a custom unwind resumption routine.
+ /// By default LLVM lowers `resume` instructions into calls to `_Unwind_Resume`
+ /// defined in libgcc. If this option is enabled, the target must provide
+ /// `eh_unwind_resume` lang item.
+ pub custom_unwind_resume: bool,
}
impl Default for TargetOptions {
/// incomplete, and if used for compilation, will certainly not work.
fn default() -> TargetOptions {
TargetOptions {
+ data_layout: String::new(),
linker: "cc".to_string(),
ar: "ar".to_string(),
pre_link_args: Vec::new(),
position_independent_executables: false,
pre_link_objects: Vec::new(),
post_link_objects: Vec::new(),
+ archive_format: String::new(),
+ custom_unwind_resume: false,
}
}
}
};
let mut base = Target {
- data_layout: get_req_field("data-layout"),
llvm_target: get_req_field("llvm-target"),
target_endian: get_req_field("target-endian"),
target_pointer_width: get_req_field("target-pointer-width"),
key!(staticlib_prefix);
key!(staticlib_suffix);
key!(features);
+ key!(data_layout);
key!(dynamic_linking, bool);
key!(executables, bool);
key!(morestack, bool);
arm_linux_androideabi,
aarch64_linux_android,
+ i686_unknown_freebsd,
x86_64_unknown_freebsd,
i686_unknown_dragonfly,
x86_64_unknown_bitrig,
x86_64_unknown_openbsd,
+ x86_64_unknown_netbsd,
x86_64_apple_darwin,
i686_apple_darwin,
x86_64_pc_windows_gnu,
i686_pc_windows_gnu,
- x86_64_pc_windows_msvc
+ x86_64_pc_windows_msvc,
+ i686_pc_windows_msvc
);
--- /dev/null
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::TargetOptions;
+use std::default::Default;
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ linker: "cc".to_string(),
+ dynamic_linking: true,
+ executables: true,
+ morestack: false,
+ linker_is_gnu: true,
+ has_rpath: true,
+ pre_link_args: vec!(
+ // GNU-style linkers will use this to omit linking to libraries
+ // which don't actually fulfill any relocations, but only for
+ // libraries which follow this flag. Thus, use it before
+ // specifying libraries to link to.
+ "-Wl,--as-needed".to_string(),
+ ),
+ position_independent_executables: true,
+ archive_format: "bsd".to_string(),
+ .. Default::default()
+ }
+}
"-Wl,--as-needed".to_string(),
),
position_independent_executables: true,
+ archive_format: "gnu".to_string(),
.. Default::default()
}
}
base.pre_link_args.push("-m32".to_string());
Target {
- data_layout: "E-S8-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "powerpc-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
+ archive_format: "gnu".to_string(),
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64".to_string(),
llvm_target: "x86_64-apple-darwin".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
pub fn target() -> Target {
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64".to_string(),
llvm_target: "x86_64-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
// On Win64 unwinding is handled by the OS, so we can link libgcc statically.
base.pre_link_args.push("-static-libgcc".to_string());
base.pre_link_args.push("-m64".to_string());
+ base.custom_unwind_resume = true;
Target {
- // FIXME: Test this. Copied from linux (#2398)
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-pc-windows-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.cpu = "x86-64".to_string();
Target {
- // This is currently in sync with the specification for
- // x86_64-pc-windows-gnu but there's a comment in that file questioning
- // whether this is valid or not. Sounds like the two should stay in sync
- // at least for now.
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-bitrig".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-dragonfly".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-freebsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
base.position_independent_executables = false;
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
--- /dev/null
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::Target;
+
+pub fn target() -> Target {
+ let mut base = super::netbsd_base::opts();
+ base.pre_link_args.push("-m64".to_string());
+
+ Target {
+ llvm_target: "x86_64-unknown-netbsd".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "64".to_string(),
+ arch: "x86_64".to_string(),
+ target_os: "netbsd".to_string(),
+ target_env: "".to_string(),
+ options: base,
+ }
+}
base.pre_link_args.push("-m64".to_string());
Target {
- data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-openbsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(non_camel_case_types)]
-
-pub struct t {
- pub module_asm: String,
- pub data_layout: String,
- pub target_triple: String,
- pub cc_args: Vec<String> ,
-}
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-use target_strs;
-use syntax::abi;
-
-pub fn get_target_strs(target_triple: String, target_os: abi::Os)
- -> target_strs::t {
- return target_strs::t {
- module_asm: "".to_string(),
-
- data_layout: match target_os {
- abi::OsMacos => {
- "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
- -i32:32:32-i64:32:64\
- -f32:32:32-f64:32:64-v64:64:64\
- -v128:128:128-a:0:64-f80:128:128\
- -n8:16:32".to_string()
- }
-
- abi::OsiOS => {
- "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
- -i32:32:32-i64:32:64\
- -f32:32:32-f64:32:64-v64:64:64\
- -v128:128:128-a:0:64-f80:128:128\
- -n8:16:32".to_string()
- }
-
- abi::OsWindows => {
- "e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_string()
- }
-
- abi::OsLinux => {
- "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string()
- }
- abi::OsAndroid => {
- "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string()
- }
-
- abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
- "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string()
- }
-
- },
-
- target_triple: target_triple,
-
- cc_args: vec!("-m32".to_string()),
- };
-}
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-use target_strs;
-use syntax::abi;
-
-pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
- return target_strs::t {
- module_asm: "".to_string(),
-
- data_layout: match target_os {
- abi::OsMacos => {
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64".to_string()
- }
-
- abi::OsiOS => {
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64".to_string()
- }
-
- abi::OsWindows => {
- // FIXME: Test this. Copied from Linux (#2398)
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string()
- }
-
- abi::OsLinux => {
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string()
- }
- abi::OsAndroid => {
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string()
- }
-
- abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
- "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
- f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\
- s0:64:64-f80:128:128-n8:16:32:64-S128".to_string()
- }
-
- },
-
- target_triple: target_triple,
-
- cc_args: vec!("-m64".to_string()),
- };
-}
/// # Examples
///
/// ```{.rust}
-/// # #![feature(rustc_private)]
-/// # #![feature(associated_consts)]
+/// #![feature(rustc_private)]
+/// #![feature(associated_consts)]
/// #[macro_use] extern crate rustc_bitflags;
///
/// bitflags! {
/// The generated `struct`s can also be extended with type and trait implementations:
///
/// ```{.rust}
-/// # #![feature(rustc_private)]
+/// #![feature(rustc_private)]
/// #[macro_use] extern crate rustc_bitflags;
///
/// use std::fmt;
use borrowck::*;
use borrowck::InteriorKind::{InteriorElement, InteriorField};
use rustc::middle::expr_use_visitor as euv;
+use rustc::middle::infer;
use rustc::middle::mem_categorization as mc;
use rustc::middle::region;
use rustc::middle::ty;
struct CheckLoanCtxt<'a, 'tcx: 'a> {
bccx: &'a BorrowckCtxt<'a, 'tcx>,
dfcx_loans: &'a LoanDataFlow<'a, 'tcx>,
- move_data: move_data::FlowedMoveData<'a, 'tcx>,
+ move_data: &'a move_data::FlowedMoveData<'a, 'tcx>,
all_loans: &'a [Loan<'tcx>],
param_env: &'a ty::ParameterEnvironment<'a, 'tcx>,
}
None => { }
}
- self.check_assignment(assignment_id, assignment_span, assignee_cmt, mode);
+ self.check_assignment(assignment_id, assignment_span, assignee_cmt);
}
fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) { }
pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
dfcx_loans: &LoanDataFlow<'b, 'tcx>,
- move_data: move_data::FlowedMoveData<'c, 'tcx>,
+ move_data: &move_data::FlowedMoveData<'c, 'tcx>,
all_loans: &[Loan<'tcx>],
fn_id: ast::NodeId,
decl: &ast::FnDecl,
debug!("check_loans(body id={})", body.id);
let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
+ let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env), false);
let mut clcx = CheckLoanCtxt {
bccx: bccx,
dfcx_loans: dfcx_loans,
move_data: move_data,
all_loans: all_loans,
- param_env: ¶m_env,
+ param_env: &infcx.parameter_environment
};
{
- let mut euv = euv::ExprUseVisitor::new(&mut clcx, ¶m_env);
+ let mut euv = euv::ExprUseVisitor::new(&mut clcx, &infcx);
euv.walk_fn(decl, body);
}
}
true
}
- fn is_local_variable_or_arg(&self, cmt: mc::cmt<'tcx>) -> bool {
- match cmt.cat {
- mc::cat_local(_) => true,
- _ => false
- }
- }
-
fn consume_common(&self,
id: ast::NodeId,
span: Span,
LpExtend(ref lp_base, _, LpInterior(InteriorField(_))) => {
match lp_base.to_type().sty {
ty::TyStruct(def_id, _) | ty::TyEnum(def_id, _) => {
- if ty::has_dtor(self.tcx(), def_id) {
+ if self.tcx().has_dtor(def_id) {
// In the case where the owner implements drop, then
// the path must be initialized to prevent a case of
// partial reinitialization
fn check_assignment(&self,
assignment_id: ast::NodeId,
assignment_span: Span,
- assignee_cmt: mc::cmt<'tcx>,
- mode: euv::MutateMode) {
+ assignee_cmt: mc::cmt<'tcx>) {
debug!("check_assignment(assignee_cmt={:?})", assignee_cmt);
- // Mutable values can be assigned, as long as they obey loans
- // and aliasing restrictions:
- if assignee_cmt.mutbl.is_mutable() {
- if check_for_aliasable_mutable_writes(self, assignment_span, assignee_cmt.clone()) {
- if mode != euv::Init {
- check_for_assignment_to_borrowed_path(
- self, assignment_id, assignment_span, assignee_cmt.clone());
- mark_variable_as_used_mut(self, assignee_cmt);
- }
- }
-
- return;
- }
-
- // Initializations are OK if and only if they aren't partial
- // reinitialization of a partially-uninitialized structure.
- if mode == euv::Init {
- return
- }
-
- // For immutable local variables, assignments are legal
- // if they cannot already have been assigned
- if self.is_local_variable_or_arg(assignee_cmt.clone()) {
- assert!(assignee_cmt.mutbl.is_immutable()); // no "const" locals
- let lp = opt_loan_path(&assignee_cmt).unwrap();
- self.move_data.each_assignment_of(assignment_id, &lp, |assign| {
- self.bccx.report_reassigned_immutable_variable(
- assignment_span,
- &*lp,
- assign);
+ // Check that we don't invalidate any outstanding loans
+ if let Some(loan_path) = opt_loan_path(&assignee_cmt) {
+ let scope = region::CodeExtent::from_node_id(assignment_id);
+ self.each_in_scope_loan_affecting_path(scope, &*loan_path, |loan| {
+ self.report_illegal_mutation(assignment_span, &*loan_path, loan);
false
});
- return;
}
- // Otherwise, just a plain error.
- match assignee_cmt.note {
- mc::NoteClosureEnv(upvar_id) => {
- // If this is an `Fn` closure, it simply can't mutate upvars.
- // If it's an `FnMut` closure, the original variable was declared immutable.
- // We need to determine which is the case here.
- let kind = match assignee_cmt.upvar().unwrap().cat {
- mc::cat_upvar(mc::Upvar { kind, .. }) => kind,
- _ => unreachable!()
- };
- if kind == ty::FnClosureKind {
- self.bccx.span_err(
- assignment_span,
- &format!("cannot assign to {}",
- self.bccx.cmt_to_string(&*assignee_cmt)));
- self.bccx.span_help(
- self.tcx().map.span(upvar_id.closure_expr_id),
- "consider changing this closure to take self by mutable reference");
+ // Check for reassignments to (immutable) local variables. This
+ // needs to be done here instead of in check_loans because we
+ // depend on move data.
+ if let mc::cat_local(local_id) = assignee_cmt.cat {
+ let lp = opt_loan_path(&assignee_cmt).unwrap();
+ self.move_data.each_assignment_of(assignment_id, &lp, |assign| {
+ if assignee_cmt.mutbl.is_mutable() {
+ self.tcx().used_mut_nodes.borrow_mut().insert(local_id);
} else {
- self.bccx.span_err(
+ self.bccx.report_reassigned_immutable_variable(
assignment_span,
- &format!("cannot assign to {} {}",
- assignee_cmt.mutbl.to_user_str(),
- self.bccx.cmt_to_string(&*assignee_cmt)));
- }
- }
- _ => match opt_loan_path(&assignee_cmt) {
- Some(lp) => {
- self.bccx.span_err(
- assignment_span,
- &format!("cannot assign to {} {} `{}`",
- assignee_cmt.mutbl.to_user_str(),
- self.bccx.cmt_to_string(&*assignee_cmt),
- self.bccx.loan_path_to_string(&*lp)));
- }
- None => {
- self.bccx.span_err(
- assignment_span,
- &format!("cannot assign to {} {}",
- assignee_cmt.mutbl.to_user_str(),
- self.bccx.cmt_to_string(&*assignee_cmt)));
- }
- }
- }
- return;
-
- fn mark_variable_as_used_mut<'a, 'tcx>(this: &CheckLoanCtxt<'a, 'tcx>,
- mut cmt: mc::cmt<'tcx>) {
- //! If the mutability of the `cmt` being written is inherited
- //! from a local variable, liveness will
- //! not have been able to detect that this variable's mutability
- //! is important, so we must add the variable to the
- //! `used_mut_nodes` table here.
-
- loop {
- debug!("mark_variable_as_used_mut(cmt={:?})", cmt);
- match cmt.cat.clone() {
- mc::cat_upvar(mc::Upvar { id: ty::UpvarId { var_id: id, .. }, .. }) |
- mc::cat_local(id) => {
- this.tcx().used_mut_nodes.borrow_mut().insert(id);
- return;
- }
-
- mc::cat_rvalue(..) |
- mc::cat_static_item |
- mc::cat_deref(_, _, mc::UnsafePtr(..)) |
- mc::cat_deref(_, _, mc::Implicit(..)) => {
- assert_eq!(cmt.mutbl, mc::McDeclared);
- return;
- }
-
- mc::cat_deref(_, _, mc::BorrowedPtr(..)) => {
- assert_eq!(cmt.mutbl, mc::McDeclared);
- // We need to drill down to upvar if applicable
- match cmt.upvar() {
- Some(b) => cmt = b,
- None => return
- }
- }
-
- mc::cat_deref(b, _, mc::Unique) => {
- assert_eq!(cmt.mutbl, mc::McInherited);
- cmt = b;
- }
-
- mc::cat_downcast(b, _) |
- mc::cat_interior(b, _) => {
- assert_eq!(cmt.mutbl, mc::McInherited);
- cmt = b;
- }
- }
- }
- }
-
- fn check_for_aliasable_mutable_writes<'a, 'tcx>(this: &CheckLoanCtxt<'a, 'tcx>,
- span: Span,
- cmt: mc::cmt<'tcx>) -> bool {
- //! Safety checks related to writes to aliasable, mutable locations
-
- let guarantor = cmt.guarantor();
- debug!("check_for_aliasable_mutable_writes(cmt={:?}, guarantor={:?})",
- cmt, guarantor);
- if let mc::cat_deref(ref b, _, mc::BorrowedPtr(ty::MutBorrow, _)) = guarantor.cat {
- // Statically prohibit writes to `&mut` when aliasable
- check_for_aliasability_violation(this, span, b.clone());
- }
-
- return true; // no errors reported
- }
-
- fn check_for_aliasability_violation<'a, 'tcx>(this: &CheckLoanCtxt<'a, 'tcx>,
- span: Span,
- cmt: mc::cmt<'tcx>)
- -> bool {
- match cmt.freely_aliasable(this.tcx()) {
- mc::Aliasability::NonAliasable => {
- return true;
- }
- mc::Aliasability::FreelyAliasable(mc::AliasableStaticMut(..)) => {
- return true;
- }
- mc::Aliasability::ImmutableUnique(_) => {
- this.bccx.report_aliasability_violation(
- span,
- MutabilityViolation,
- mc::AliasableReason::UnaliasableImmutable);
- return false;
- }
- mc::Aliasability::FreelyAliasable(cause) => {
- this.bccx.report_aliasability_violation(
- span,
- MutabilityViolation,
- cause);
- return false;
+ &*lp,
+ assign);
}
- }
- }
-
- fn check_for_assignment_to_borrowed_path<'a, 'tcx>(
- this: &CheckLoanCtxt<'a, 'tcx>,
- assignment_id: ast::NodeId,
- assignment_span: Span,
- assignee_cmt: mc::cmt<'tcx>)
- {
- //! Check for assignments that violate the terms of an
- //! outstanding loan.
-
- let loan_path = match opt_loan_path(&assignee_cmt) {
- Some(lp) => lp,
- None => { return; /* no loan path, can't be any loans */ }
- };
-
- let scope = region::CodeExtent::from_node_id(assignment_id);
- this.each_in_scope_loan_affecting_path(scope, &*loan_path, |loan| {
- this.report_illegal_mutation(assignment_span, &*loan_path, loan);
false
});
+ return
}
}
use self::Fragment::*;
use borrowck::InteriorKind::{InteriorField, InteriorElement};
-use borrowck::LoanPath;
+use borrowck::{self, LoanPath};
use borrowck::LoanPathKind::{LpVar, LpUpvar, LpDowncast, LpExtend};
use borrowck::LoanPathElem::{LpDeref, LpInterior};
use borrowck::move_data::InvalidMovePathIndex;
}
}
+pub fn build_unfragmented_map(this: &mut borrowck::BorrowckCtxt,
+ move_data: &MoveData,
+ id: ast::NodeId) {
+ let fr = &move_data.fragments.borrow();
+
+    // For now, don't care about other kinds of fragments; the precise
+    // classification of all paths for non-zeroing *drop* needs them,
+    // but the loose approximation used by non-zeroing moves does not.
+ let moved_leaf_paths = fr.moved_leaf_paths();
+ let assigned_leaf_paths = fr.assigned_leaf_paths();
+
+ let mut fragment_infos = Vec::with_capacity(moved_leaf_paths.len());
+
+ let find_var_id = |move_path_index: MovePathIndex| -> Option<ast::NodeId> {
+ let lp = move_data.path_loan_path(move_path_index);
+ match lp.kind {
+ LpVar(var_id) => Some(var_id),
+ LpUpvar(ty::UpvarId { var_id, closure_expr_id }) => {
+ // The `var_id` is unique *relative to* the current function.
+ // (Check that we are indeed talking about the same function.)
+ assert_eq!(id, closure_expr_id);
+ Some(var_id)
+ }
+ LpDowncast(..) | LpExtend(..) => {
+ // This simple implementation of non-zeroing move does
+ // not attempt to deal with tracking substructure
+ // accurately in the general case.
+ None
+ }
+ }
+ };
+
+ let moves = move_data.moves.borrow();
+ for &move_path_index in moved_leaf_paths {
+ let var_id = match find_var_id(move_path_index) {
+ None => continue,
+ Some(var_id) => var_id,
+ };
+
+ move_data.each_applicable_move(move_path_index, |move_index| {
+ let info = ty::FragmentInfo::Moved {
+ var: var_id,
+ move_expr: moves[move_index.get()].id,
+ };
+ debug!("fragment_infos push({:?} \
+ due to move_path_index: {} move_index: {}",
+ info, move_path_index.get(), move_index.get());
+ fragment_infos.push(info);
+ true
+ });
+ }
+
+ for &move_path_index in assigned_leaf_paths {
+ let var_id = match find_var_id(move_path_index) {
+ None => continue,
+ Some(var_id) => var_id,
+ };
+
+ let var_assigns = move_data.var_assignments.borrow();
+ for var_assign in var_assigns.iter()
+ .filter(|&assign| assign.path == move_path_index)
+ {
+ let info = ty::FragmentInfo::Assigned {
+ var: var_id,
+ assign_expr: var_assign.id,
+ assignee_id: var_assign.assignee_id,
+ };
+ debug!("fragment_infos push({:?} due to var_assignment", info);
+ fragment_infos.push(info);
+ }
+ }
+
+ let mut fraginfo_map = this.tcx.fragment_infos.borrow_mut();
+ let fn_did = ast::DefId { krate: ast::LOCAL_CRATE, node: id };
+ let prev = fraginfo_map.insert(fn_did, fragment_infos);
+ assert!(prev.is_none());
+}
+
pub struct FragmentSets {
/// During move_data construction, `moved_leaf_paths` tracks paths
/// that have been used directly by being moved out of. When
}
}
+ pub fn moved_leaf_paths(&self) -> &[MovePathIndex] {
+ &self.moved_leaf_paths
+ }
+
+ pub fn assigned_leaf_paths(&self) -> &[MovePathIndex] {
+ &self.assigned_leaf_paths
+ }
+
pub fn add_move(&mut self, path_index: MovePathIndex) {
self.moved_leaf_paths.push(path_index);
}
}
(&ty::TyStruct(def_id, ref _substs), None) => {
- let fields = ty::lookup_struct_fields(tcx, def_id);
+ let fields = tcx.lookup_struct_fields(def_id);
match *origin_field_name {
mc::NamedField(ast_name) => {
for f in &fields {
(&ty::TyEnum(enum_def_id, substs), ref enum_variant_info) => {
let variant_info = {
- let mut variants = ty::substd_enum_variants(tcx, enum_def_id, substs);
+ let mut variants = tcx.substd_enum_variants(enum_def_id, substs);
match *enum_variant_info {
Some((variant_def_id, ref _lp2)) =>
variants.iter()
let loan_path_elem = LpInterior(InteriorField(new_field_name));
let new_lp_type = match new_field_name {
mc::NamedField(ast_name) =>
- ty::named_element_ty(tcx, parent.to_type(), ast_name, opt_variant_did),
+ tcx.named_element_ty(parent.to_type(), ast_name, opt_variant_did),
mc::PositionalField(idx) =>
- ty::positional_element_ty(tcx, parent.to_type(), idx, opt_variant_did),
+ tcx.positional_element_ty(parent.to_type(), idx, opt_variant_did),
};
let new_lp_variant = LpExtend(parent, mc, loan_path_elem);
let new_lp = LoanPath::new(new_lp_variant, new_lp_type.unwrap());
decl_id: ast::NodeId,
_decl_span: Span,
var_id: ast::NodeId) {
- let ty = ty::node_id_to_type(bccx.tcx, var_id);
+ let ty = bccx.tcx.node_id_to_type(var_id);
let loan_path = Rc::new(LoanPath::new(LpVar(var_id), ty));
move_data.add_move(bccx.tcx, loan_path, decl_id, Declared);
}
mc::cat_interior(ref b, mc::InteriorElement(Kind::Pattern, _)) => {
match b.ty.sty {
ty::TyStruct(did, _) | ty::TyEnum(did, _) => {
- if ty::has_dtor(bccx.tcx, did) {
+ if bccx.tcx.has_dtor(did) {
Some(cmt.clone())
} else {
check_and_get_illegal_move_origin(bccx, b)
fn report_error(&self, code: bckerr_code) {
self.bccx.report(BckError { cmt: self.cmt_original.clone(),
span: self.span,
- cause: self.cause,
+ cause: BorrowViolation(self.cause),
code: code });
}
}
use borrowck::*;
use borrowck::move_data::MoveData;
use rustc::middle::expr_use_visitor as euv;
+use rustc::middle::infer;
use rustc::middle::mem_categorization as mc;
use rustc::middle::region;
use rustc::middle::ty;
};
let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
-
+ let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env), false);
{
- let mut euv = euv::ExprUseVisitor::new(&mut glcx, ¶m_env);
+ let mut euv = euv::ExprUseVisitor::new(&mut glcx, &infcx);
euv.walk_fn(decl, body);
}
assignee_cmt: mc::cmt<'tcx>,
mode: euv::MutateMode)
{
- let opt_lp = opt_loan_path(&assignee_cmt);
- debug!("mutate(assignment_id={}, assignee_cmt={:?}) opt_lp={:?}",
- assignment_id, assignee_cmt, opt_lp);
-
- match opt_lp {
- Some(lp) => {
- gather_moves::gather_assignment(self.bccx, &self.move_data,
- assignment_id, assignment_span,
- lp, assignee_cmt.id, mode);
- }
- None => {
- // This can occur with e.g. `*foo() = 5`. In such
- // cases, there is no need to check for conflicts
- // with moves etc, just ignore.
- }
- }
+ self.guarantee_assignment_valid(assignment_id,
+ assignment_span,
+ assignee_cmt,
+ mode);
}
fn decl_without_init(&mut self, id: ast::NodeId, span: Span) {
/// Implements the A-* rules in README.md.
fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
borrow_span: Span,
- loan_cause: euv::LoanCause,
+ loan_cause: AliasableViolationKind,
cmt: mc::cmt<'tcx>,
req_kind: ty::BorrowKind)
-> Result<(),()> {
(mc::Aliasability::ImmutableUnique(_), ty::MutBorrow) => {
bccx.report_aliasability_violation(
borrow_span,
- BorrowViolation(loan_cause),
+ loan_cause,
mc::AliasableReason::UnaliasableImmutable);
Err(())
}
(mc::Aliasability::FreelyAliasable(alias_cause), ty::MutBorrow) => {
bccx.report_aliasability_violation(
borrow_span,
- BorrowViolation(loan_cause),
+ loan_cause,
alias_cause);
Err(())
}
}
}
+/// Implements the M-* rules in README.md.
+fn check_mutability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
+ borrow_span: Span,
+ cause: AliasableViolationKind,
+ cmt: mc::cmt<'tcx>,
+ req_kind: ty::BorrowKind)
+ -> Result<(),()> {
+ debug!("check_mutability(cause={:?} cmt={:?} req_kind={:?}",
+ cause, cmt, req_kind);
+ match req_kind {
+ ty::UniqueImmBorrow | ty::ImmBorrow => {
+ match cmt.mutbl {
+ // I am intentionally leaving this here to help
+ // refactoring if, in the future, we should add new
+ // kinds of mutability.
+ mc::McImmutable | mc::McDeclared | mc::McInherited => {
+ // both imm and mut data can be lent as imm;
+ // for mutable data, this is a freeze
+ Ok(())
+ }
+ }
+ }
+
+ ty::MutBorrow => {
+ // Only mutable data can be lent as mutable.
+ if !cmt.mutbl.is_mutable() {
+ Err(bccx.report(BckError { span: borrow_span,
+ cause: cause,
+ cmt: cmt,
+ code: err_mutbl }))
+ } else {
+ Ok(())
+ }
+ }
+ }
+}
+
impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
+ /// Guarantees that `cmt` is assignable, or reports an error.
+ fn guarantee_assignment_valid(&mut self,
+ assignment_id: ast::NodeId,
+ assignment_span: Span,
+ cmt: mc::cmt<'tcx>,
+ mode: euv::MutateMode) {
+
+ let opt_lp = opt_loan_path(&cmt);
+ debug!("guarantee_assignment_valid(assignment_id={}, cmt={:?}) opt_lp={:?}",
+ assignment_id, cmt, opt_lp);
+
+ if let mc::cat_local(..) = cmt.cat {
+ // Only re-assignments to locals require it to be
+ // mutable - this is checked in check_loans.
+ } else {
+ // Check that we don't allow assignments to non-mutable data.
+ if check_mutability(self.bccx, assignment_span, MutabilityViolation,
+ cmt.clone(), ty::MutBorrow).is_err() {
+ return; // reported an error, no sense in reporting more.
+ }
+ }
+
+ // Check that we don't allow assignments to aliasable data
+ if check_aliasability(self.bccx, assignment_span, MutabilityViolation,
+ cmt.clone(), ty::MutBorrow).is_err() {
+ return; // reported an error, no sense in reporting more.
+ }
+
+ match opt_lp {
+ Some(lp) => {
+ if let mc::cat_local(..) = cmt.cat {
+ // Only re-assignments to locals require it to be
+ // mutable - this is checked in check_loans.
+ } else {
+ self.mark_loan_path_as_mutated(&lp);
+ }
+ gather_moves::gather_assignment(self.bccx, &self.move_data,
+ assignment_id, assignment_span,
+ lp, cmt.id, mode);
+ }
+ None => {
+ // This can occur with e.g. `*foo() = 5`. In such
+ // cases, there is no need to check for conflicts
+ // with moves etc, just ignore.
+ }
+ }
+ }
+
/// Guarantees that `addr_of(cmt)` will be valid for the duration of `static_scope_r`, or
/// reports an error. This may entail taking out loans, which will be added to the
/// `req_loan_map`.
}
// Check that we don't allow mutable borrows of non-mutable data.
- if check_mutability(self.bccx, borrow_span, cause,
+ if check_mutability(self.bccx, borrow_span, BorrowViolation(cause),
cmt.clone(), req_kind).is_err() {
return; // reported an error, no sense in reporting more.
}
// Check that we don't allow mutable borrows of aliasable data.
- if check_aliasability(self.bccx, borrow_span, cause,
+ if check_aliasability(self.bccx, borrow_span, BorrowViolation(cause),
cmt.clone(), req_kind).is_err() {
return; // reported an error, no sense in reporting more.
}
// restrictions: restrictions
// }
// }
-
- fn check_mutability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
- borrow_span: Span,
- cause: euv::LoanCause,
- cmt: mc::cmt<'tcx>,
- req_kind: ty::BorrowKind)
- -> Result<(),()> {
- //! Implements the M-* rules in README.md.
- debug!("check_mutability(cause={:?} cmt={:?} req_kind={:?}",
- cause, cmt, req_kind);
- match req_kind {
- ty::UniqueImmBorrow | ty::ImmBorrow => {
- match cmt.mutbl {
- // I am intentionally leaving this here to help
- // refactoring if, in the future, we should add new
- // kinds of mutability.
- mc::McImmutable | mc::McDeclared | mc::McInherited => {
- // both imm and mut data can be lent as imm;
- // for mutable data, this is a freeze
- Ok(())
- }
- }
- }
-
- ty::MutBorrow => {
- // Only mutable data can be lent as mutable.
- if !cmt.mutbl.is_mutable() {
- Err(bccx.report(BckError { span: borrow_span,
- cause: cause,
- cmt: cmt,
- code: err_mutbl }))
- } else {
- Ok(())
- }
- }
- }
- }
}
pub fn mark_loan_path_as_mutated(&self, loan_path: &LoanPath) {
impl<'a, 'tcx, 'v> Visitor<'v> for StaticInitializerCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr) {
if let ast::ExprAddrOf(mutbl, ref base) = ex.node {
- let param_env = ty::empty_parameter_environment(self.bccx.tcx);
- let mc = mc::MemCategorizationContext::new(¶m_env);
+ let infcx = infer::new_infer_ctxt(self.bccx.tcx, &self.bccx.tcx.tables, None, false);
+ let mc = mc::MemCategorizationContext::new(&infcx);
let base_cmt = mc.cat_expr(&**base).unwrap();
let borrow_kind = ty::BorrowKind::from_mutbl(mutbl);
// Check that we don't allow borrows of unsafe static items.
- if check_aliasability(self.bccx, ex.span, euv::AddrOf,
+ if check_aliasability(self.bccx, ex.span,
+ BorrowViolation(euv::AddrOf),
base_cmt, borrow_kind).is_err() {
return; // reported an error, no sense in reporting more.
}
mc::cat_interior(ref b, mc::InteriorField(_)) => {
match b.ty.sty {
ty::TyStruct(did, _) |
- ty::TyEnum(did, _) if ty::has_dtor(bccx.tcx, did) => {
+ ty::TyEnum(did, _) if bccx.tcx.has_dtor(did) => {
bccx.span_err(
move_from.span,
&format!("cannot move out of type `{}`, \
self.bccx.report(
BckError {
span: self.span,
- cause: self.cause,
+ cause: BorrowViolation(self.cause),
cmt: cmt_base,
code: err_borrowed_pointer_too_short(
self.loan_region, lt)});
use rustc::middle::dataflow::KillFrom;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::free_region::FreeRegionMap;
-use rustc::middle::infer::error_reporting::note_and_explain_region;
use rustc::middle::mem_categorization as mc;
use rustc::middle::region;
use rustc::middle::ty::{self, Ty};
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::Span;
-use syntax::parse::token;
use syntax::visit;
use syntax::visit::{Visitor, FnKind};
use syntax::ast::{FnDecl, Block, NodeId};
this.tcx,
sp,
id);
+ move_data::fragments::build_unfragmented_map(this,
+ &flowed_moves.move_data,
+ id);
check_loans::check_loans(this,
&loan_dfcx,
- flowed_moves,
+ &flowed_moves,
&all_loans[..],
id,
decl,
#[derive(PartialEq)]
pub struct BckError<'tcx> {
span: Span,
- cause: euv::LoanCause,
+ cause: AliasableViolationKind,
cmt: mc::cmt<'tcx>,
code: bckerr_code
}
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AliasableViolationKind {
MutabilityViolation,
BorrowViolation(euv::LoanCause)
pub fn report(&self, err: BckError<'tcx>) {
// Catch and handle some particular cases.
match (&err.code, &err.cause) {
- (&err_out_of_scope(ty::ReScope(_), ty::ReStatic), &euv::ClosureCapture(span)) |
- (&err_out_of_scope(ty::ReScope(_), ty::ReFree(..)), &euv::ClosureCapture(span)) => {
+ (&err_out_of_scope(ty::ReScope(_), ty::ReStatic),
+ &BorrowViolation(euv::ClosureCapture(span))) |
+ (&err_out_of_scope(ty::ReScope(_), ty::ReFree(..)),
+ &BorrowViolation(euv::ClosureCapture(span))) => {
return self.report_out_of_scope_escaping_closure_capture(&err, span);
}
_ => { }
.map
.find(the_move.id) {
Some(ast_map::NodeExpr(expr)) => {
- (ty::expr_ty_adjusted(self.tcx, &*expr), expr.span)
+ (self.tcx.expr_ty_adjusted(&*expr), expr.span)
}
r => {
self.tcx.sess.bug(&format!("MoveExpr({}) maps to \
}
move_data::MovePat => {
- let pat_ty = ty::node_id_to_type(self.tcx, the_move.id);
+ let pat_ty = self.tcx.node_id_to_type(the_move.id);
let span = self.tcx.map.span(the_move.id);
self.tcx.sess.span_note(span,
&format!("`{}` moved here{} because it has type `{}`, \
ol,
moved_lp_msg,
pat_ty));
- self.tcx.sess.fileline_help(span,
- "use `ref` to override");
+ match self.tcx.sess.codemap().span_to_snippet(span) {
+ Ok(string) => {
+ self.tcx.sess.span_suggestion(
+ span,
+ &format!("if you would like to borrow the value instead, \
+ use a `ref` binding as shown:"),
+ format!("ref {}", string));
+ },
+ Err(_) => {
+ self.tcx.sess.fileline_help(span,
+ "use `ref` to override");
+ },
+ }
}
move_data::Captured => {
.map
.find(the_move.id) {
Some(ast_map::NodeExpr(expr)) => {
- (ty::expr_ty_adjusted(self.tcx, &*expr), expr.span)
+ (self.tcx.expr_ty_adjusted(&*expr), expr.span)
}
r => {
self.tcx.sess.bug(&format!("Captured({}) maps to \
has type `{}`, which is {}",
ol,
moved_lp_msg,
- expr_ty,
+ moved_lp.ty,
suggestion));
self.tcx.sess.fileline_help(expr_span, help);
}
-> (&'static str, &'static str) {
match ty.sty {
_ => {
- if ty::type_moves_by_default(param_env, span, ty) {
+ if ty.moves_by_default(param_env, span) {
("non-copyable",
"perhaps you meant to use `clone()`?")
} else {
self.tcx.sess.span_end_note(s, m);
}
- pub fn span_help(&self, s: Span, m: &str) {
- self.tcx.sess.span_help(s, m);
- }
-
pub fn fileline_help(&self, s: Span, m: &str) {
self.tcx.sess.fileline_help(s, m);
}
};
match err.cause {
- euv::ClosureCapture(_) => {
+ MutabilityViolation => {
+ format!("cannot assign to {}", descr)
+ }
+ BorrowViolation(euv::ClosureCapture(_)) => {
format!("closure cannot assign to {}", descr)
}
- euv::OverloadedOperator |
- euv::AddrOf |
- euv::RefBinding |
- euv::AutoRef |
- euv::AutoUnsafe |
- euv::ForLoop |
- euv::MatchDiscriminant => {
+ BorrowViolation(euv::OverloadedOperator) |
+ BorrowViolation(euv::AddrOf) |
+ BorrowViolation(euv::RefBinding) |
+ BorrowViolation(euv::AutoRef) |
+ BorrowViolation(euv::AutoUnsafe) |
+ BorrowViolation(euv::ForLoop) |
+ BorrowViolation(euv::MatchDiscriminant) => {
format!("cannot borrow {} as mutable", descr)
}
- euv::ClosureInvocation => {
+ BorrowViolation(euv::ClosureInvocation) => {
self.tcx.sess.span_bug(err.span,
"err_mutbl with a closure invocation");
}
}
err_out_of_scope(super_scope, sub_scope) => {
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"reference must be valid for ",
sub_scope,
"...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
"...but borrowed value is only valid for ",
super_scope,
"");
}
None => self.cmt_to_string(&*err.cmt),
};
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
&format!("{} would have to be valid for ",
descr),
loan_scope,
"...");
- note_and_explain_region(
- self.tcx,
+ self.tcx.note_and_explain_region(
&format!("...but {} is only valid for ", descr),
ptr_scope,
"");
match loan_path.kind {
LpUpvar(ty::UpvarId{ var_id: id, closure_expr_id: _ }) |
LpVar(id) => {
- out.push_str(&ty::local_var_name_str(self.tcx, id));
+ out.push_str(&self.tcx.local_var_name_str(id));
}
LpDowncast(ref lp_base, variant_def_id) => {
out.push('(');
self.append_loan_path_to_string(&**lp_base, out);
out.push_str(DOWNCAST_PRINTED_OPERATOR);
- out.push_str(&ty::item_path_str(self.tcx, variant_def_id));
+ out.push_str(&self.tcx.item_path_str(variant_def_id));
out.push(')');
}
match fname {
mc::NamedField(fname) => {
out.push('.');
- out.push_str(&token::get_name(fname));
+ out.push_str(&fname.as_str());
}
mc::PositionalField(idx) => {
out.push('.');
out.push('(');
self.append_autoderefd_loan_path_to_string(&**lp_base, out);
out.push(':');
- out.push_str(&ty::item_path_str(self.tcx, variant_def_id));
+ out.push_str(&self.tcx.item_path_str(variant_def_id));
out.push(')');
}
LpDowncast(ref lp, variant_def_id) => {
let variant_str = if variant_def_id.krate == ast::LOCAL_CRATE {
- ty::tls::with(|tcx| ty::item_path_str(tcx, variant_def_id))
+ ty::tls::with(|tcx| tcx.item_path_str(variant_def_id))
} else {
format!("{:?}", variant_def_id)
};
LpDowncast(ref lp, variant_def_id) => {
let variant_str = if variant_def_id.krate == ast::LOCAL_CRATE {
- ty::tls::with(|tcx| ty::item_path_str(tcx, variant_def_id))
+ ty::tls::with(|tcx| tcx.item_path_str(variant_def_id))
} else {
format!("{:?}", variant_def_id)
};
/// span of node where assignment occurs
pub span: Span,
+
+ /// id for l-value expression on lhs of assignment
+ pub assignee_id: ast::NodeId,
}
#[derive(Copy, Clone)]
path: path_index,
id: assign_id,
span: span,
+ assignee_id: assignee_id,
};
if self.is_var_path(path_index) {
register_long_diagnostics! {
+E0373: r##"
+This error occurs when an attempt is made to use data captured by a closure,
+when that data may no longer exist. It's most commonly seen when attempting to
+return a closure:
+
+```
+fn foo() -> Box<Fn(u32) -> u32> {
+ let x = 0u32;
+ Box::new(|y| x + y)
+}
+```
+
+Notice that `x` is stack-allocated by `foo()`. By default, Rust captures
+closed-over data by reference. This means that once `foo()` returns, `x` no
+longer exists. An attempt to access `x` within the closure would thus be unsafe.
+
+Another situation where this might be encountered is when spawning threads:
+
+```
+fn foo() {
+ let x = 0u32;
+ let y = 1u32;
+
+ let thr = std::thread::spawn(|| {
+ x + y
+ });
+}
+```
+
+Since our new thread runs in parallel, the stack frame containing `x` and `y`
+may well have disappeared by the time we try to use them. Even if we call
+`thr.join()` within `foo()` (which blocks until `thr` has completed, ensuring the
+stack frame won't disappear), we will not succeed: the compiler cannot prove
+that this behaviour is safe, and so won't let us do it.
+
+The solution to this problem is usually to switch to using a `move` closure.
+This approach moves (or copies, where possible) data into the closure, rather
+than taking references to it. For example:
+
+```
+fn foo() -> Box<Fn(u32) -> u32> {
+ let x = 0u32;
+ Box::new(move |y| x + y)
+}
+```
+
+Now that the closure has its own copy of the data, there's no need to worry
+about safety.
+"##,
+
E0381: r##"
It is not allowed to use or capture an uninitialized variable. For example:
To fix this, ensure that any declared variables are initialized before being
used.
+"##,
+
+E0384: r##"
+This error occurs when an attempt is made to reassign an immutable variable.
+For example:
+
+```
+fn main(){
+ let x = 3;
+ x = 5; // error, reassignment of immutable variable
+}
+```
+
+By default, variables in Rust are immutable. To fix this error, add the keyword
+`mut` after the keyword `let` when declaring the variable. For example:
+
+```
+fn main(){
+ let mut x = 3;
+ x = 5;
+}
+```
"##
}
register_diagnostics! {
- E0373, // closure may outlive current fn, but it borrows {}, which is owned by current fn
E0382, // use of partially/collaterally moved value
E0383, // partial reinitialization of uninitialized structure
- E0384, // reassignment of immutable variable
E0385, // {} in an aliasable location
E0386, // {} in an immutable container
E0387, // {} in a captured outer variable in an `Fn` closure
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::iter;
-
/// A very simple BitVector type.
pub struct BitVector {
data: Vec<u64>
impl BitVector {
pub fn new(num_bits: usize) -> BitVector {
let num_words = (num_bits + 63) / 64;
- BitVector { data: iter::repeat(0).take(num_words).collect() }
+ BitVector { data: vec![0; num_words] }
}
fn word_mask(&self, bit: usize) -> (usize, u64) {
pub fn probe(&mut self, a_id: K) -> Option<V> {
self.get(a_id).value.clone()
}
-}
+ pub fn unsolved_variables(&mut self) -> Vec<K> {
+ self.values
+ .iter()
+ .filter_map(|vv| if vv.value.is_some() { None } else { Some(vv.key()) })
+ .collect()
+ }
+}
sess.diagnostic()));
krate = time(time_passes, "prelude injection", krate, |krate|
- syntax::std_inject::maybe_inject_prelude(krate));
+ syntax::std_inject::maybe_inject_prelude(&sess.parse_sess, krate));
time(time_passes, "checking that all macro invocations are gone", &krate, |krate|
syntax::ext::expand::check_for_macros(&sess.parse_sess, krate));
make_glob_map: resolve::MakeGlobMap,
f: F)
-> (Session, R)
- where F: FnOnce(&ty::ctxt<'tcx>,
+ where F: for<'a> FnOnce(&'a ty::ctxt<'tcx>,
ty::CrateAnalysis) -> R
{
let time_passes = sess.time_passes();
time(time_passes, "static item recursion checking", (), |_|
middle::check_static_recursion::check_crate(&sess, krate, &def_map, &ast_map));
- ty::with_ctxt(sess,
- arenas,
- def_map,
- named_region_map,
- ast_map,
- freevars,
- region_map,
- lang_items,
- stability::Index::new(krate),
- |tcx| {
+ ty::ctxt::create_and_enter(sess,
+ arenas,
+ def_map,
+ named_region_map,
+ ast_map,
+ freevars,
+ region_map,
+ lang_items,
+ stability::Index::new(krate),
+ |tcx| {
// passes are timed inside typeck
typeck::check_crate(tcx, trait_map);
match *output_type {
config::OutputTypeExe => {
for output in sess.crate_types.borrow().iter() {
- let p = link::filename_for_input(sess, *output,
- id, &file);
+ let p = link::filename_for_input(sess, *output, id,
+ outputs);
out_filenames.push(p);
}
}
let mut file = try!(fs::File::create(&deps_filename));
for path in &out_filenames {
try!(write!(&mut file,
- "{}: {}\n\n", path.display(), files.connect(" ")));
+ "{}: {}\n\n", path.display(), files.join(" ")));
}
Ok(())
})();
// be called straight after options have been parsed but before anything
// else (e.g., selecting input and output).
fn early_callback(&mut self,
- &getopts::Matches,
- &diagnostics::registry::Registry)
- -> Compilation;
+ _: &getopts::Matches,
+ _: &diagnostics::registry::Registry)
+ -> Compilation {
+ Compilation::Continue
+ }
// Hook for a callback late in the process of handling arguments. This will
// be called just before actual compilation starts (and before build_controller
// is called), after all arguments etc. have been completely handled.
fn late_callback(&mut self,
- &getopts::Matches,
- &Session,
- &Input,
- &Option<PathBuf>,
- &Option<PathBuf>)
- -> Compilation;
+ _: &getopts::Matches,
+ _: &Session,
+ _: &Input,
+ _: &Option<PathBuf>,
+ _: &Option<PathBuf>)
+ -> Compilation {
+ Compilation::Continue
+ }
// Called after we extract the input from the arguments. Gives the implementer
// an opportunity to change the inputs or to add some custom input handling.
// emitting error messages. Returning None will cause compilation to stop
// at this point.
fn no_input(&mut self,
- &getopts::Matches,
- &config::Options,
- &Option<PathBuf>,
- &Option<PathBuf>,
- &diagnostics::registry::Registry)
- -> Option<(Input, Option<PathBuf>)>;
+ _: &getopts::Matches,
+ _: &config::Options,
+ _: &Option<PathBuf>,
+ _: &Option<PathBuf>,
+ _: &diagnostics::registry::Registry)
+ -> Option<(Input, Option<PathBuf>)> {
+ None
+ }
// Parse pretty printing information from the arguments. The implementer can
// choose to ignore this (the default will return None) which will skip pretty
None
};
if pretty.is_none() && sess.unstable_options() {
- matches.opt_str("xpretty").map(|a| {
+ matches.opt_str("unpretty").map(|a| {
// extended with unstable pretty-print variants
pretty::parse_pretty(sess, &a, true)
})
let metadata = driver::collect_crate_metadata(sess, attrs);
*sess.crate_metadata.borrow_mut() = metadata;
for &style in &crate_types {
- let fname = link::filename_for_input(sess,
- style,
- &id,
- &t_outputs.with_extension(""));
+ let fname = link::filename_for_input(sess, style, &id,
+ &t_outputs);
println!("{}", fname.file_name().unwrap()
.to_string_lossy());
}
option_env!("CFG_VER_DATE")
}
-/// Prints version information and returns None on success or an error
-/// message on panic.
+/// Prints version information
pub fn version(binary: &str, matches: &getopts::Matches) {
let verbose = matches.opt_present("verbose");
for (name, to) in lints {
let name = name.to_lowercase().replace("_", "-");
let desc = to.into_iter().map(|x| x.as_str().replace("_", "-"))
- .collect::<Vec<String>>().connect(", ");
+ .collect::<Vec<String>>().join(", ");
println!(" {} {}",
padded(&name[..]), desc);
}
pub enum PpFlowGraphMode {
Default,
/// Drops the labels from the edges in the flowgraph output. This
- /// is mostly for use in the --xpretty flowgraph run-make tests,
+ /// is mostly for use in the --unpretty flowgraph run-make tests,
/// since the labels are largely uninteresting in those cases and
/// have become a pain to maintain.
UnlabelledEdges,
_ => {
if extended {
sess.fatal(&format!(
- "argument to `xpretty` must be one of `normal`, \
+ "argument to `unpretty` must be one of `normal`, \
`expanded`, `flowgraph[,unlabelled]=<nodeid>`, `typed`, `identified`, \
`expanded,identified`, or `everybody_loops`; got {}", name));
} else {
try!(pp::word(&mut s.s, "as"));
try!(pp::space(&mut s.s));
try!(pp::word(&mut s.s,
- &ty::expr_ty(self.tcx, expr).to_string()));
+ &self.tcx.expr_ty(expr).to_string()));
s.pclose()
}
_ => Ok(())
fn reconstructed_input(&self) -> String {
match *self {
ItemViaNode(node_id) => node_id.to_string(),
- ItemViaPath(ref parts) => parts.connect("::"),
+ ItemViaPath(ref parts) => parts.join("::"),
}
}
use rustc_typeck::middle::stability;
use rustc_typeck::middle::subst;
use rustc_typeck::middle::subst::Subst;
-use rustc_typeck::middle::ty::{self, Ty};
+use rustc_typeck::middle::ty::{self, Ty, RegionEscape};
use rustc_typeck::middle::ty_relate::TypeRelation;
use rustc_typeck::middle::infer;
use rustc_typeck::middle::infer::lub::Lub;
resolve::resolve_crate(&sess, &ast_map, resolve::MakeGlobMap::No);
let named_region_map = resolve_lifetime::krate(&sess, krate, &def_map);
let region_map = region::resolve_crate(&sess, krate);
- ty::with_ctxt(sess,
- &arenas,
- def_map,
- named_region_map,
- ast_map,
- freevars,
- region_map,
- lang_items,
- stability::Index::new(krate),
- |tcx| {
- let infcx = infer::new_infer_ctxt(tcx);
+ ty::ctxt::create_and_enter(sess,
+ &arenas,
+ def_map,
+ named_region_map,
+ ast_map,
+ freevars,
+ region_map,
+ lang_items,
+ stability::Index::new(krate),
+ |tcx| {
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false);
body(Env { infcx: &infcx });
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, ast::CRATE_NODE_ID);
return match search_mod(self, &self.infcx.tcx.map.krate().module, 0, names) {
Some(id) => id,
None => {
- panic!("no item found: `{}`", names.connect("::"));
+ panic!("no item found: `{}`", names.join("::"));
}
};
-> Ty<'tcx>
{
let input_args = input_tys.iter().cloned().collect();
- ty::mk_bare_fn(self.infcx.tcx,
- None,
- self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
- unsafety: ast::Unsafety::Normal,
- abi: abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: input_args,
- output: ty::FnConverging(output_ty),
- variadic: false
- })
- }))
+ self.infcx.tcx.mk_fn(None,
+ self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: abi::Rust,
+ sig: ty::Binder(ty::FnSig {
+ inputs: input_args,
+ output: ty::FnConverging(output_ty),
+ variadic: false
+ })
+ }))
}
pub fn t_nil(&self) -> Ty<'tcx> {
- ty::mk_nil(self.infcx.tcx)
+ self.infcx.tcx.mk_nil()
}
pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> {
- ty::mk_tup(self.infcx.tcx, vec![ty1, ty2])
+ self.infcx.tcx.mk_tup(vec![ty1, ty2])
}
pub fn t_param(&self, space: subst::ParamSpace, index: u32) -> Ty<'tcx> {
let name = format!("T{}", index);
- ty::mk_param(self.infcx.tcx, space, index, token::intern(&name[..]))
+ self.infcx.tcx.mk_param(space, index, token::intern(&name[..]))
}
pub fn re_early_bound(&self,
}
pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx,
- self.infcx.tcx.mk_region(r),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r),
+ self.tcx().types.isize)
}
pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> {
let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1));
- ty::mk_imm_rptr(self.infcx.tcx,
- self.infcx.tcx.mk_region(r),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r),
+ self.tcx().types.isize)
}
pub fn t_rptr_late_bound_with_debruijn(&self,
debruijn: ty::DebruijnIndex)
-> Ty<'tcx> {
let r = self.re_late_bound_with_debruijn(id, debruijn);
- ty::mk_imm_rptr(self.infcx.tcx,
- self.infcx.tcx.mk_region(r),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r),
+ self.tcx().types.isize)
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> {
let r = ty::ReScope(CodeExtent::from_node_id(id));
- ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r),
+ self.tcx().types.isize)
}
pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region {
pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> {
let r = self.re_free(nid, id);
- ty::mk_imm_rptr(self.infcx.tcx,
- self.infcx.tcx.mk_region(r),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r),
+ self.tcx().types.isize)
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx,
- self.infcx.tcx.mk_region(ty::ReStatic),
- self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(ty::ReStatic),
+ self.tcx().types.isize)
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace<'tcx> {
// Situation:
// Theta = [A -> &'a foo]
- assert!(!ty::type_has_escaping_regions(env.t_nil()));
+ assert!(!env.t_nil().has_escaping_regions());
let t_rptr_free1 = env.t_rptr_free(0, 1);
- assert!(!ty::type_has_escaping_regions(t_rptr_free1));
+ assert!(!t_rptr_free1.has_escaping_regions());
let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(1));
- assert!(ty::type_has_escaping_regions(t_rptr_bound1));
+ assert!(t_rptr_bound1.has_escaping_regions());
let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2));
- assert!(ty::type_has_escaping_regions(t_rptr_bound2));
+ assert!(t_rptr_bound2.has_escaping_regions());
// t_fn = fn(A)
let t_param = env.t_param(subst::TypeSpace, 0);
- assert!(!ty::type_has_escaping_regions(t_param));
+ assert!(!t_param.has_escaping_regions());
let t_fn = env.t_fn(&[t_param], env.t_nil());
- assert!(!ty::type_has_escaping_regions(t_fn));
+ assert!(!t_fn.has_escaping_regions());
})
}
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
- let tup1_ty = ty::mk_tup(tcx, vec!(int_ty, uint_ty, int_ty, uint_ty));
- let tup2_ty = ty::mk_tup(tcx, vec!(tup1_ty, tup1_ty, uint_ty));
- let uniq_ty = ty::mk_uniq(tcx, tup2_ty);
+ let tup1_ty = tcx.mk_tup(vec!(int_ty, uint_ty, int_ty, uint_ty));
+ let tup2_ty = tcx.mk_tup(vec!(tup1_ty, tup1_ty, uint_ty));
+ let uniq_ty = tcx.mk_box(tup2_ty);
let walked: Vec<_> = uniq_ty.walk().collect();
assert_eq!(walked, [uniq_ty,
tup2_ty,
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
- let tup1_ty = ty::mk_tup(tcx, vec!(int_ty, uint_ty, int_ty, uint_ty));
- let tup2_ty = ty::mk_tup(tcx, vec!(tup1_ty, tup1_ty, uint_ty));
- let uniq_ty = ty::mk_uniq(tcx, tup2_ty);
+ let tup1_ty = tcx.mk_tup(vec!(int_ty, uint_ty, int_ty, uint_ty));
+ let tup2_ty = tcx.mk_tup(vec!(tup1_ty, tup1_ty, uint_ty));
+ let uniq_ty = tcx.mk_box(tup2_ty);
// types we expect to see (in order), plus a boolean saying
// whether to skip the subtree.
//! Use the former for unit-like structs and the latter for structs with
//! a `pub fn new()`.
+// BitSet
+#![allow(deprecated)]
+
use metadata::{csearch, decoder};
-use middle::def::*;
+use middle::{cfg, def, infer, pat_util, stability, traits};
use middle::subst::Substs;
use middle::ty::{self, Ty};
-use middle::{def, pat_util, stability};
use middle::const_eval::{eval_const_expr_partial, ConstVal};
-use middle::cfg;
+use middle::const_eval::EvalHint::ExprTypeChecked;
use rustc::ast_map;
-use util::nodemap::{FnvHashMap, NodeSet};
+use util::nodemap::{FnvHashMap, FnvHashSet, NodeSet};
use lint::{Level, Context, LintPass, LintArray, Lint};
use std::collections::{HashSet, BitSet};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::{cmp, slice};
use std::{i8, i16, i32, i64, u8, u16, u32, u64, f32, f64};
+use std::rc::Rc;
use syntax::{abi, ast};
use syntax::ast_util::{self, is_shift_binop, local_def};
use syntax::attr::{self, AttrMetaMethods};
use syntax::codemap::{self, Span};
use syntax::feature_gate::{KNOWN_ATTRIBUTES, AttributeType};
-use syntax::parse::token;
use syntax::ast::{TyIs, TyUs, TyI8, TyU8, TyI16, TyU16, TyI32, TyU32, TyI64, TyU64};
use syntax::ptr::P;
use syntax::visit::{self, Visitor};
}
}
-declare_lint! {
- UNSIGNED_NEGATION,
- Warn,
- "using an unary minus operator on unsigned type"
-}
-
declare_lint! {
UNUSED_COMPARISONS,
Warn,
impl LintPass for TypeLimits {
fn get_lints(&self) -> LintArray {
- lint_array!(UNSIGNED_NEGATION, UNUSED_COMPARISONS, OVERFLOWING_LITERALS,
- EXCEEDING_BITSHIFTS)
+ lint_array!(UNUSED_COMPARISONS, OVERFLOWING_LITERALS, EXCEEDING_BITSHIFTS)
}
fn check_expr(&mut self, cx: &Context, e: &ast::Expr) {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitInt(_, ast::UnsignedIntLit(_)) => {
- cx.span_lint(UNSIGNED_NEGATION, e.span,
- "negation of unsigned int literal may \
- be unintentional");
+ check_unsigned_negation_feature(cx, e.span);
+ },
+ ast::LitInt(_, ast::UnsuffixedIntLit(_)) => {
+ if let ty::TyUint(_) = cx.tcx.expr_ty(e).sty {
+ check_unsigned_negation_feature(cx, e.span);
+ }
},
_ => ()
}
},
_ => {
- let t = ty::expr_ty(cx.tcx, &**expr);
+ let t = cx.tcx.expr_ty(&**expr);
match t.sty {
ty::TyUint(_) => {
- cx.span_lint(UNSIGNED_NEGATION, e.span,
- "negation of unsigned int variable may \
- be unintentional");
+ check_unsigned_negation_feature(cx, e.span);
},
_ => ()
}
}
if is_shift_binop(binop.node) {
- let opt_ty_bits = match ty::expr_ty(cx.tcx, &**l).sty {
+ let opt_ty_bits = match cx.tcx.expr_ty(&**l).sty {
ty::TyInt(t) => Some(int_ty_bits(t, cx.sess().target.int_type)),
ty::TyUint(t) => Some(uint_ty_bits(t, cx.sess().target.uint_type)),
_ => None
if let ast::LitInt(shift, _) = lit.node { shift >= bits }
else { false }
} else {
- match eval_const_expr_partial(cx.tcx, &**r, Some(cx.tcx.types.usize)) {
+ match eval_const_expr_partial(cx.tcx, &**r, ExprTypeChecked) {
Ok(ConstVal::Int(shift)) => { shift as u64 >= bits },
Ok(ConstVal::Uint(shift)) => { shift >= bits },
_ => { false }
}
},
ast::ExprLit(ref lit) => {
- match ty::expr_ty(cx.tcx, e).sty {
+ match cx.tcx.expr_ty(e).sty {
ty::TyInt(t) => {
match lit.node {
ast::LitInt(v, ast::SignedIntLit(_, ast::Plus)) |
} else {
binop
};
- match ty::expr_ty(tcx, expr).sty {
+ match tcx.expr_ty(expr).sty {
ty::TyInt(int_ty) => {
let (min, max) = int_ty_range(int_ty);
let lit_val: i64 = match lit.node {
_ => false
}
}
+
+ fn check_unsigned_negation_feature(cx: &Context, span: Span) {
+ if !cx.sess().features.borrow().negate_unsigned {
+ // FIXME(#27141): change this to syntax::feature_gate::emit_feature_err…
+ cx.sess().span_warn(span,
+ "unary negation of unsigned integers will be feature gated in the future");
+ // …and remove following two expressions.
+ if option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some() { return; }
+ cx.sess().fileline_help(span, "add #![feature(negate_unsigned)] to the \
+ crate attributes to enable the gate in advance");
+ }
+ }
}
}
cx: &'a Context<'a, 'tcx>
}
+enum FfiResult {
+ FfiSafe,
+ FfiUnsafe(&'static str),
+ FfiBadStruct(ast::DefId, &'static str),
+ FfiBadEnum(ast::DefId, &'static str)
+}
+
+/// Check if this enum can be safely exported based on the
+/// "nullable pointer optimization". Currently restricted
+/// to function pointers and references, but could be
+/// expanded to cover NonZero raw pointers and newtypes.
+/// FIXME: This duplicates code in trans.
+fn is_repr_nullable_ptr<'tcx>(variants: &Vec<Rc<ty::VariantInfo<'tcx>>>) -> bool {
+ if variants.len() == 2 {
+ let mut data_idx = 0;
+
+ if variants[0].args.is_empty() {
+ data_idx = 1;
+ } else if !variants[1].args.is_empty() {
+ return false;
+ }
+
+ if variants[data_idx].args.len() == 1 {
+ match variants[data_idx].args[0].sty {
+ ty::TyBareFn(None, _) => { return true; }
+ ty::TyRef(..) => { return true; }
+ _ => { }
+ }
+ }
+ }
+ false
+}
+
+fn ast_ty_to_normalized<'tcx>(tcx: &ty::ctxt<'tcx>,
+ id: ast::NodeId)
+ -> Ty<'tcx> {
+ let tty = match tcx.ast_ty_to_ty_cache.borrow().get(&id) {
+ Some(&t) => t,
+ None => panic!("ast_ty_to_ty_cache was incomplete after typeck!")
+ };
+ infer::normalize_associated_type(tcx, &tty)
+}
+
impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
- fn check_def(&mut self, sp: Span, id: ast::NodeId) {
- match self.cx.tcx.def_map.borrow().get(&id).unwrap().full_def() {
- def::DefPrimTy(ast::TyInt(ast::TyIs)) => {
- self.cx.span_lint(IMPROPER_CTYPES, sp,
- "found rust type `isize` in foreign module, while \
- libc::c_int or libc::c_long should be used");
+ /// Check if the given type is "ffi-safe" (has a stable, well-defined
+ /// representation which can be exported to C code).
+ fn check_type_for_ffi(&self,
+ cache: &mut FnvHashSet<Ty<'tcx>>,
+ ty: Ty<'tcx>)
+ -> FfiResult {
+ use self::FfiResult::*;
+ let cx = &self.cx.tcx;
+
+ // Protect against infinite recursion, for example
+ // `struct S(*mut S);`.
+ // FIXME: A recursion limit is necessary as well, for irregular
+ // recursive types.
+ if !cache.insert(ty) {
+ return FfiSafe;
+ }
+
+ match ty.sty {
+ ty::TyStruct(did, substs) => {
+ if !cx.lookup_repr_hints(did).contains(&attr::ReprExtern) {
+ return FfiUnsafe(
+ "found struct without foreign-function-safe \
+ representation annotation in foreign module, \
+ consider adding a #[repr(C)] attribute to \
+ the type");
+ }
+
+ // We can't completely trust repr(C) markings; make sure the
+ // fields are actually safe.
+ let fields = cx.struct_fields(did, substs);
+
+ if fields.is_empty() {
+ return FfiUnsafe(
+ "found zero-size struct in foreign module, consider \
+ adding a member to this struct");
+ }
+
+ for field in fields {
+ let field_ty = infer::normalize_associated_type(cx, &field.mt.ty);
+ let r = self.check_type_for_ffi(cache, field_ty);
+ match r {
+ FfiSafe => {}
+ FfiBadStruct(..) | FfiBadEnum(..) => { return r; }
+ FfiUnsafe(s) => { return FfiBadStruct(did, s); }
+ }
+ }
+ FfiSafe
}
- def::DefPrimTy(ast::TyUint(ast::TyUs)) => {
- self.cx.span_lint(IMPROPER_CTYPES, sp,
- "found rust type `usize` in foreign module, while \
- libc::c_uint or libc::c_ulong should be used");
+ ty::TyEnum(did, substs) => {
+ let variants = cx.substd_enum_variants(did, substs);
+ if variants.is_empty() {
+ // Empty enums are okay... although sort of useless.
+ return FfiSafe
+ }
+
+ // Check for a repr() attribute to specify the size of the
+ // discriminant.
+ let repr_hints = cx.lookup_repr_hints(did);
+ match &**repr_hints {
+ [] => {
+ // Special-case types like `Option<extern fn()>`.
+ if !is_repr_nullable_ptr(&variants) {
+ return FfiUnsafe(
+ "found enum without foreign-function-safe \
+ representation annotation in foreign module, \
+ consider adding a #[repr(...)] attribute to \
+ the type")
+ }
+ }
+ [ref hint] => {
+ if !hint.is_ffi_safe() {
+ // FIXME: This shouldn't be reachable: we should check
+ // this earlier.
+ return FfiUnsafe(
+ "enum has unexpected #[repr(...)] attribute")
+ }
+
+ // Enum with an explicitly sized discriminant; either
+ // a C-style enum or a discriminated union.
+
+ // The layout of enum variants is implicitly repr(C).
+ // FIXME: Is that correct?
+ }
+ _ => {
+ // FIXME: This shouldn't be reachable: we should check
+ // this earlier.
+ return FfiUnsafe(
+ "enum has too many #[repr(...)] attributes");
+ }
+ }
+
+ // Check the contained variants.
+ for variant in variants {
+ for arg in &variant.args {
+ let arg = infer::normalize_associated_type(cx, arg);
+ let r = self.check_type_for_ffi(cache, arg);
+ match r {
+ FfiSafe => {}
+ FfiBadStruct(..) | FfiBadEnum(..) => { return r; }
+ FfiUnsafe(s) => { return FfiBadEnum(did, s); }
+ }
+ }
+ }
+ FfiSafe
}
- def::DefTy(..) => {
- let tty = match self.cx.tcx.ast_ty_to_ty_cache.borrow().get(&id) {
- Some(&t) => t,
- None => panic!("ast_ty_to_ty_cache was incomplete after typeck!")
- };
- if !ty::is_ffi_safe(self.cx.tcx, tty) {
- self.cx.span_lint(IMPROPER_CTYPES, sp,
- "found type without foreign-function-safe \
- representation annotation in foreign module, consider \
- adding a #[repr(...)] attribute to the type");
+ ty::TyInt(ast::TyIs) => {
+ FfiUnsafe("found Rust type `isize` in foreign module, while \
+ `libc::c_int` or `libc::c_long` should be used")
+ }
+ ty::TyUint(ast::TyUs) => {
+ FfiUnsafe("found Rust type `usize` in foreign module, while \
+ `libc::c_uint` or `libc::c_ulong` should be used")
+ }
+ ty::TyChar => {
+ FfiUnsafe("found Rust type `char` in foreign module, while \
+ `u32` or `libc::wchar_t` should be used")
+ }
+
+ // Primitive types with a stable representation.
+ ty::TyBool | ty::TyInt(..) | ty::TyUint(..) |
+ ty::TyFloat(..) => FfiSafe,
+
+ ty::TyBox(..) => {
+ FfiUnsafe("found Rust type Box<_> in foreign module, \
+ consider using a raw pointer instead")
+ }
+
+ ty::TySlice(_) => {
+ FfiUnsafe("found Rust slice type in foreign module, \
+ consider using a raw pointer instead")
+ }
+
+ ty::TyTrait(..) => {
+ FfiUnsafe("found Rust trait type in foreign module, \
+ consider using a raw pointer instead")
+ }
+
+ ty::TyStr => {
+ FfiUnsafe("found Rust type `str` in foreign module; \
+ consider using a `*const libc::c_char`")
+ }
+
+ ty::TyTuple(_) => {
+ FfiUnsafe("found Rust tuple type in foreign module; \
+ consider using a struct instead")
+ }
+
+ ty::TyRawPtr(ref m) | ty::TyRef(_, ref m) => {
+ self.check_type_for_ffi(cache, m.ty)
+ }
+
+ ty::TyArray(ty, _) => {
+ self.check_type_for_ffi(cache, ty)
+ }
+
+ ty::TyBareFn(None, bare_fn) => {
+ match bare_fn.abi {
+ abi::Rust |
+ abi::RustIntrinsic |
+ abi::RustCall => {
+ return FfiUnsafe(
+ "found function pointer with Rust calling \
+ convention in foreign module; consider using an \
+ `extern` function pointer")
+ }
+ _ => {}
+ }
+
+ let sig = cx.erase_late_bound_regions(&bare_fn.sig);
+ match sig.output {
+ ty::FnDiverging => {}
+ ty::FnConverging(output) => {
+ if !output.is_nil() {
+ let r = self.check_type_for_ffi(cache, output);
+ match r {
+ FfiSafe => {}
+ _ => { return r; }
+ }
+ }
+ }
}
+ for arg in sig.inputs {
+ let r = self.check_type_for_ffi(cache, arg);
+ match r {
+ FfiSafe => {}
+ _ => { return r; }
+ }
+ }
+ FfiSafe
+ }
+
+ ty::TyParam(..) | ty::TyInfer(..) | ty::TyError |
+ ty::TyClosure(..) | ty::TyProjection(..) |
+ ty::TyBareFn(Some(_), _) => {
+ panic!("Unexpected type in foreign function")
+ }
+ }
+ }
+
+ fn check_def(&mut self, sp: Span, id: ast::NodeId) {
+ let tty = ast_ty_to_normalized(self.cx.tcx, id);
+
+ match ImproperCTypesVisitor::check_type_for_ffi(self, &mut FnvHashSet(), tty) {
+ FfiResult::FfiSafe => {}
+ FfiResult::FfiUnsafe(s) => {
+ self.cx.span_lint(IMPROPER_CTYPES, sp, s);
+ }
+ FfiResult::FfiBadStruct(_, s) => {
+ // FIXME: This diagnostic is difficult to read, and doesn't
+ // point at the relevant field.
+ self.cx.span_lint(IMPROPER_CTYPES, sp,
+ &format!("found non-foreign-function-safe member in \
+ struct marked #[repr(C)]: {}", s));
+ }
+ FfiResult::FfiBadEnum(_, s) => {
+ // FIXME: This diagnostic is difficult to read, and doesn't
+ // point at the relevant variant.
+ self.cx.span_lint(IMPROPER_CTYPES, sp,
+ &format!("found non-foreign-function-safe member in \
+ enum: {}", s));
}
- _ => ()
}
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for ImproperCTypesVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty) {
- if let ast::TyPath(..) = ty.node {
- self.check_def(ty.span, ty.id);
+ match ty.node {
+ ast::TyPath(..) |
+ ast::TyBareFn(..) => self.check_def(ty.span, ty.id),
+ ast::TyVec(..) => {
+ self.cx.span_lint(IMPROPER_CTYPES, ty.span,
+ "found Rust slice type in foreign module, consider \
+ using a raw pointer instead");
+ }
+ ast::TyFixedLengthVec(ref ty, _) => self.visit_ty(ty),
+ ast::TyTup(..) => {
+ self.cx.span_lint(IMPROPER_CTYPES, ty.span,
+ "found Rust tuple type in foreign module; \
+ consider using a struct instead")
+ }
+ _ => visit::walk_ty(self, ty)
}
- visit::walk_ty(self, ty);
}
}
check_ty(cx, &*input.ty);
}
if let ast::Return(ref ret_ty) = decl.output {
- check_ty(cx, &**ret_ty);
+ let tty = ast_ty_to_normalized(cx.tcx, ret_ty.id);
+ if !tty.is_nil() {
+ check_ty(cx, &ret_ty);
+ }
}
}
impl BoxPointers {
fn check_heap_type<'a, 'tcx>(&self, cx: &Context<'a, 'tcx>,
span: Span, ty: Ty<'tcx>) {
- let mut n_uniq: usize = 0;
- ty::fold_ty(cx.tcx, ty, |t| {
- match t.sty {
- ty::TyBox(_) => {
- n_uniq += 1;
- }
- _ => ()
- };
- t
- });
-
- if n_uniq > 0 {
- let m = format!("type uses owned (Box type) pointers: {}", ty);
- cx.span_lint(BOX_POINTERS, span, &m[..]);
+ for leaf_ty in ty.walk() {
+ if let ty::TyBox(_) = leaf_ty.sty {
+ let m = format!("type uses owned (Box type) pointers: {}", ty);
+ cx.span_lint(BOX_POINTERS, span, &m);
+ }
}
}
}
ast::ItemEnum(..) |
ast::ItemStruct(..) =>
self.check_heap_type(cx, it.span,
- ty::node_id_to_type(cx.tcx, it.id)),
+ cx.tcx.node_id_to_type(it.id)),
_ => ()
}
ast::ItemStruct(ref struct_def, _) => {
for struct_field in &struct_def.fields {
self.check_heap_type(cx, struct_field.span,
- ty::node_id_to_type(cx.tcx, struct_field.node.id));
+ cx.tcx.node_id_to_type(struct_field.node.id));
}
}
_ => ()
}
fn check_expr(&mut self, cx: &Context, e: &ast::Expr) {
- let ty = ty::expr_ty(cx.tcx, e);
+ let ty = cx.tcx.expr_ty(e);
self.check_heap_type(cx, e.span, ty);
}
}
ast::ItemImpl(_, _, _, ref t_ref_opt, _, _) => {
// Deriving the Copy trait does not cause a warning
if let &Some(ref trait_ref) = t_ref_opt {
- let def_id = ty::trait_ref_to_def_id(cx.tcx, trait_ref);
+ let def_id = cx.tcx.trait_ref_to_def_id(trait_ref);
if Some(def_id) == cx.tcx.lang_items.copy_trait() {
return;
}
}
- match ty::node_id_to_type(cx.tcx, item.id).sty {
+ match cx.tcx.node_id_to_type(item.id).sty {
ty::TyEnum(did, _) => did,
ty::TyStruct(did, _) => did,
_ => return,
return;
}
- let t = ty::expr_ty(cx.tcx, expr);
+ let t = cx.tcx.expr_ty(expr);
let warned = match t.sty {
ty::TyTuple(ref tys) if tys.is_empty() => return,
ty::TyBool => return,
impl NonCamelCaseTypes {
fn check_case(&self, cx: &Context, sort: &str, ident: ast::Ident, span: Span) {
fn is_camel_case(ident: ast::Ident) -> bool {
- let ident = token::get_ident(ident);
+ let ident = ident.name.as_str();
if ident.is_empty() {
return true;
}
)).collect::<Vec<_>>().concat()
}
- let s = token::get_ident(ident);
+ let s = ident.name.as_str();
if !is_camel_case(ident) {
let c = to_camel_case(&s);
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
- let has_extern_repr = it.attrs.iter().any(|attr| {
+ let extern_repr_count = it.attrs.iter().filter(|attr| {
attr::find_repr_attrs(cx.tcx.sess.diagnostic(), attr).iter()
.any(|r| r == &attr::ReprExtern)
- });
+ }).count();
+ let has_extern_repr = extern_repr_count > 0;
+
if has_extern_repr {
return;
}
Some(item) => match item.container() {
ty::TraitContainer(..) => MethodContext::TraitDefaultImpl,
ty::ImplContainer(cid) => {
- match ty::impl_trait_ref(cx.tcx, cid) {
+ match cx.tcx.impl_trait_ref(cid) {
Some(_) => MethodContext::TraitImpl,
None => MethodContext::PlainImpl
}
}
words.push(buf);
}
- words.connect("_")
+ words.join("_")
}
fn check_snake_case(&self, cx: &Context, sort: &str, name: &str, span: Option<Span>) {
match fk {
visit::FkMethod(ident, _, _) => match method_context(cx, id, span) {
MethodContext::PlainImpl => {
- self.check_snake_case(cx, "method", &token::get_ident(ident), Some(span))
+ self.check_snake_case(cx, "method", &ident.name.as_str(), Some(span))
},
MethodContext::TraitDefaultImpl => {
- self.check_snake_case(cx, "trait method", &token::get_ident(ident), Some(span))
+ self.check_snake_case(cx, "trait method", &ident.name.as_str(), Some(span))
},
_ => (),
},
visit::FkItemFn(ident, _, _, _, _, _) => {
- self.check_snake_case(cx, "function", &token::get_ident(ident), Some(span))
+ self.check_snake_case(cx, "function", &ident.name.as_str(), Some(span))
},
_ => (),
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
if let ast::ItemMod(_) = it.node {
- self.check_snake_case(cx, "module", &token::get_ident(it.ident), Some(it.span));
+ self.check_snake_case(cx, "module", &it.ident.name.as_str(), Some(it.span));
}
}
fn check_trait_item(&mut self, cx: &Context, trait_item: &ast::TraitItem) {
if let ast::MethodTraitItem(_, None) = trait_item.node {
- self.check_snake_case(cx, "trait method", &token::get_ident(trait_item.ident),
+ self.check_snake_case(cx, "trait method", &trait_item.ident.name.as_str(),
Some(trait_item.span));
}
}
fn check_lifetime_def(&mut self, cx: &Context, t: &ast::LifetimeDef) {
- self.check_snake_case(cx, "lifetime", &token::get_ident(t.lifetime.name.ident()),
+ self.check_snake_case(cx, "lifetime", &t.lifetime.name.as_str(),
Some(t.lifetime.span));
}
if let &ast::PatIdent(_, ref path1, _) = &p.node {
let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
if let Some(def::DefLocal(_)) = def {
- self.check_snake_case(cx, "variable", &token::get_ident(path1.node), Some(p.span));
+ self.check_snake_case(cx, "variable", &path1.node.name.as_str(), Some(p.span));
}
}
}
_: ast::Ident, _: &ast::Generics, _: ast::NodeId) {
for sf in &s.fields {
if let ast::StructField_ { kind: ast::NamedField(ident, _), .. } = sf.node {
- self.check_snake_case(cx, "structure field", &token::get_ident(ident),
+ self.check_snake_case(cx, "structure field", &ident.name.as_str(),
Some(sf.span));
}
}
impl NonUpperCaseGlobals {
fn check_upper_case(cx: &Context, sort: &str, ident: ast::Ident, span: Span) {
- let s = token::get_ident(ident);
+ let s = ident.name.as_str();
if s.chars().any(|c| c.is_lowercase()) {
let uc = NonSnakeCase::to_snake_case(&s).to_uppercase();
if items.len() == 1 {
if let ast::PathListIdent {ref name, ..} = items[0].node {
let m = format!("braces around {} is unnecessary",
- &token::get_ident(*name));
+ name);
cx.span_lint(UNUSED_IMPORT_BRACES, item.span,
&m[..]);
}
});
for fieldpat in field_pats {
if let ast::PatIdent(_, ident, None) = fieldpat.node.pat.node {
- if ident.node.as_str() == fieldpat.node.ident.as_str() {
+ if ident.node.name == fieldpat.node.ident.name {
+ // FIXME: should this comparison really be done on the name?
+ // doing it on the ident will fail during compilation of libcore
cx.span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span,
&format!("the `{}:` in this pattern is redundant and can \
- be removed", ident.node.as_str()))
+ be removed", ident.node))
}
}
}
pat_util::pat_bindings(&cx.tcx.def_map, &**p, |mode, id, _, path1| {
let ident = path1.node;
if let ast::BindByValue(ast::MutMutable) = mode {
- if !token::get_ident(ident).starts_with("_") {
+ if !ident.name.as_str().starts_with("_") {
match mutables.entry(ident.name.usize()) {
Vacant(entry) => { entry.insert(vec![id]); },
Occupied(mut entry) => { entry.get_mut().push(id); },
_ => return
}
- if let Some(adjustment) = cx.tcx.adjustments.borrow().get(&e.id) {
+ if let Some(adjustment) = cx.tcx.tables.borrow().adjustments.get(&e.id) {
if let ty::AdjustDerefRef(ty::AutoDerefRef { ref autoref, .. }) = *adjustment {
match autoref {
&Some(ty::AutoPtr(_, ast::MutImmutable)) => {
ast::ItemImpl(_, _, _, Some(ref trait_ref), _, ref impl_items) => {
// If the trait is private, add the impl items to private_traits so they don't get
// reported for missing docs.
- let real_trait = ty::trait_ref_to_def_id(cx.tcx, trait_ref);
+ let real_trait = cx.tcx.trait_ref_to_def_id(trait_ref);
match cx.tcx.map.find(real_trait.node) {
Some(ast_map::NodeItem(item)) => if item.vis == ast::Visibility::Inherited {
for itm in impl_items {
if ast_generics.is_parameterized() {
return;
}
- ty::mk_struct(cx.tcx, local_def(item.id),
- cx.tcx.mk_substs(Substs::empty()))
+ cx.tcx.mk_struct(local_def(item.id),
+ cx.tcx.mk_substs(Substs::empty()))
}
ast::ItemEnum(_, ref ast_generics) => {
if ast_generics.is_parameterized() {
return;
}
- ty::mk_enum(cx.tcx, local_def(item.id),
- cx.tcx.mk_substs(Substs::empty()))
+ cx.tcx.mk_enum(local_def(item.id),
+ cx.tcx.mk_substs(Substs::empty()))
}
_ => return,
};
- let parameter_environment = ty::empty_parameter_environment(cx.tcx);
- if !ty::type_moves_by_default(¶meter_environment, item.span, ty) {
+ let parameter_environment = cx.tcx.empty_parameter_environment();
+ // FIXME (@jroesch) should probably invert this so that the parameter env still impls this
+ // method
+ if !ty.moves_by_default(¶meter_environment, item.span) {
return;
}
- if ty::can_type_implement_copy(¶meter_environment, item.span, ty).is_ok() {
+ if parameter_environment.can_type_implement_copy(ty, item.span).is_ok() {
cx.span_lint(MISSING_COPY_IMPLEMENTATIONS,
item.span,
"type could implement `Copy`; consider adding `impl \
};
if self.impling_types.is_none() {
- let debug_def = ty::lookup_trait_def(cx.tcx, debug);
+ let debug_def = cx.tcx.lookup_trait_def(debug);
let mut impls = NodeSet();
debug_def.for_each_impl(cx.tcx, |d| {
if d.krate == ast::LOCAL_CRATE {
- if let Some(ty_def) = ty::ty_to_def_id(ty::node_id_to_type(cx.tcx, d.node)) {
+ if let Some(ty_def) = cx.tcx.node_id_to_type(d.node).ty_to_def_id() {
impls.insert(ty_def.node);
}
}
fn check_fn(&mut self, cx: &Context, fn_kind: visit::FnKind, _: &ast::FnDecl,
blk: &ast::Block, sp: Span, id: ast::NodeId) {
- // FIXME(#23542) Replace with type ascription.
- #![allow(trivial_casts)]
-
type F = for<'tcx> fn(&ty::ctxt<'tcx>,
ast::NodeId, ast::NodeId, ast::Ident, ast::NodeId) -> bool;
- let (name, checker) = match fn_kind {
- visit::FkItemFn(name, _, _, _, _, _) => (name, id_refers_to_this_fn as F),
- visit::FkMethod(name, _, _) => (name, id_refers_to_this_method as F),
+ let method = match fn_kind {
+ visit::FkItemFn(..) => None,
+ visit::FkMethod(..) => {
+ cx.tcx.impl_or_trait_item(local_def(id)).as_opt_method()
+ }
// closures can't recur, so they don't matter.
visit::FkFnBlock => return
};
- let impl_def_id = ty::impl_of_method(cx.tcx, local_def(id))
- .unwrap_or(local_def(ast::DUMMY_NODE_ID));
- assert!(ast_util::is_local(impl_def_id));
- let impl_node_id = impl_def_id.node;
-
// Walk through this function (say `f`) looking to see if
// every possible path references itself, i.e. the function is
// called recursively unconditionally. This is done by trying
let node_id = cfg.graph.node_data(idx).id();
// is this a recursive call?
- if node_id != ast::DUMMY_NODE_ID && checker(cx.tcx, impl_node_id, id, name, node_id) {
+ let self_recursive = if node_id != ast::DUMMY_NODE_ID {
+ match method {
+ Some(ref method) => {
+ expr_refers_to_this_method(cx.tcx, method, node_id)
+ }
+ None => expr_refers_to_this_fn(cx.tcx, id, node_id)
+ }
+ } else {
+ false
+ };
+ if self_recursive {
self_call_spans.push(cx.tcx.map.span(node_id));
// this is a self call, so we shouldn't explore past
// this node in the CFG.
// all done
return;
- // Functions for identifying if the given NodeId `id`
- // represents a call to the function `fn_id`/method
- // `method_id`.
+ // Functions for identifying if the given Expr NodeId `id`
+ // represents a call to the function `fn_id`/method `method`.
+
+ fn expr_refers_to_this_fn(tcx: &ty::ctxt,
+ fn_id: ast::NodeId,
+ id: ast::NodeId) -> bool {
+ match tcx.map.get(id) {
+ ast_map::NodeExpr(&ast::Expr { node: ast::ExprCall(ref callee, _), .. }) => {
+ tcx.def_map.borrow().get(&callee.id)
+ .map_or(false, |def| def.def_id() == local_def(fn_id))
+ }
+ _ => false
+ }
+ }
- fn id_refers_to_this_fn<'tcx>(tcx: &ty::ctxt<'tcx>,
- _: ast::NodeId,
- fn_id: ast::NodeId,
- _: ast::Ident,
+ // Check if the expression `id` performs a call to `method`.
+ fn expr_refers_to_this_method(tcx: &ty::ctxt,
+ method: &ty::Method,
id: ast::NodeId) -> bool {
- tcx.def_map.borrow().get(&id)
- .map_or(false, |def| def.def_id() == local_def(fn_id))
- }
-
- // check if the method call `id` refers to method `method_id`
- // (with name `method_name` contained in impl `impl_id`).
- fn id_refers_to_this_method<'tcx>(tcx: &ty::ctxt<'tcx>,
- impl_id: ast::NodeId,
- method_id: ast::NodeId,
- method_name: ast::Ident,
- id: ast::NodeId) -> bool {
- let did = match tcx.method_map.borrow().get(&ty::MethodCall::expr(id)) {
- None => return false,
- Some(m) => match m.origin {
- // There's no way to know if a method call via a
- // vtable is recursion, so we assume it's not.
- ty::MethodTraitObject(_) => return false,
-
- // This `did` refers directly to the method definition.
- ty::MethodStatic(did) | ty::MethodStaticClosure(did) => did,
-
- // MethodTypeParam are methods from traits:
-
- // The `impl ... for ...` of this method call
- // isn't known, e.g. it might be a default method
- // in a trait, so we get the def-id of the trait
- // method instead.
- ty::MethodTypeParam(
- ty::MethodParam { ref trait_ref, method_num, impl_def_id: None, }) => {
- ty::trait_item(tcx, trait_ref.def_id, method_num).def_id()
- }
+ let tables = tcx.tables.borrow();
- // The `impl` is known, so we check that with a
- // special case:
- ty::MethodTypeParam(
- ty::MethodParam { impl_def_id: Some(impl_def_id), .. }) => {
+ // Check for method calls and overloaded operators.
+ if let Some(m) = tables.method_map.get(&ty::MethodCall::expr(id)) {
+ if method_call_refers_to_method(tcx, method, m.def_id, m.substs, id) {
+ return true;
+ }
+ }
- let name = match tcx.map.expect_expr(id).node {
- ast::ExprMethodCall(ref sp_ident, _, _) => sp_ident.node,
- _ => tcx.sess.span_bug(
- tcx.map.span(id),
- "non-method call expr behaving like a method call?")
- };
- // It matches if it comes from the same impl,
- // and has the same method name.
- return ast_util::is_local(impl_def_id)
- && impl_def_id.node == impl_id
- && method_name.name == name.name
+ // Check for overloaded autoderef method calls.
+ if let Some(&ty::AdjustDerefRef(ref adj)) = tables.adjustments.get(&id) {
+ for i in 0..adj.autoderefs {
+ let method_call = ty::MethodCall::autoderef(id, i as u32);
+ if let Some(m) = tables.method_map.get(&method_call) {
+ if method_call_refers_to_method(tcx, method, m.def_id, m.substs, id) {
+ return true;
+ }
}
}
- };
+ }
- ast_util::is_local(did) && did.node == method_id
+ // Check for calls to methods via explicit paths (e.g. `T::method()`).
+ match tcx.map.get(id) {
+ ast_map::NodeExpr(&ast::Expr { node: ast::ExprCall(ref callee, _), .. }) => {
+ match tcx.def_map.borrow().get(&callee.id).map(|d| d.full_def()) {
+ Some(def::DefMethod(def_id)) => {
+ let no_substs = &ty::ItemSubsts::empty();
+ let ts = tables.item_substs.get(&callee.id).unwrap_or(no_substs);
+ method_call_refers_to_method(tcx, method, def_id, &ts.substs, id)
+ }
+ _ => false
+ }
+ }
+ _ => false
+ }
+ }
+
+ // Check if the method call to the method with the ID `callee_id`
+ // and instantiated with `callee_substs` refers to method `method`.
+ fn method_call_refers_to_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+ method: &ty::Method,
+ callee_id: ast::DefId,
+ callee_substs: &Substs<'tcx>,
+ expr_id: ast::NodeId) -> bool {
+ let callee_item = tcx.impl_or_trait_item(callee_id);
+
+ match callee_item.container() {
+ // This is an inherent method, so the `def_id` refers
+ // directly to the method definition.
+ ty::ImplContainer(_) => {
+ callee_id == method.def_id
+ }
+
+ // A trait method, from any number of possible sources.
+ // Attempt to select a concrete impl before checking.
+ ty::TraitContainer(trait_def_id) => {
+ let trait_substs = callee_substs.clone().method_to_trait();
+ let trait_substs = tcx.mk_substs(trait_substs);
+ let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs);
+ let trait_ref = ty::Binder(trait_ref);
+ let span = tcx.map.span(expr_id);
+ let obligation =
+ traits::Obligation::new(traits::ObligationCause::misc(span, expr_id),
+ trait_ref.to_poly_trait_predicate());
+
+ let param_env = ty::ParameterEnvironment::for_item(tcx, method.def_id.node);
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env), false);
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ match selcx.select(&obligation) {
+ // The method comes from a `T: Trait` bound.
+ // If `T` is `Self`, then this call is inside
+ // a default method definition.
+ Ok(Some(traits::VtableParam(_))) => {
+ let self_ty = callee_substs.self_ty();
+ let on_self = self_ty.map_or(false, |t| t.is_self());
+ // We can only be recurring in a default
+ // method if we're being called literally
+ // on the `Self` type.
+ on_self && callee_id == method.def_id
+ }
+
+ // The `impl` is known, so we check that with a
+ // special case:
+ Ok(Some(traits::VtableImpl(vtable_impl))) => {
+ let container = ty::ImplContainer(vtable_impl.impl_def_id);
+ // It matches if it comes from the same impl,
+ // and has the same method name.
+ container == method.container
+ && callee_item.name() == method.name
+ }
+
+ // There's no way to know if this call is
+ // recursive, so we assume it's not.
+ _ => return false
+ }
+ }
+ }
}
}
}
ast::ExprPath(..) => (),
_ => return None
}
- if let DefFn(did, _) = ty::resolve_expr(cx.tcx, expr) {
+ if let def::DefFn(did, _) = cx.tcx.resolve_expr(expr) {
if !def_id_is_transmute(cx, did) {
return None;
}
- let typ = ty::node_id_to_type(cx.tcx, expr.id);
+ let typ = cx.tcx.node_id_to_type(expr.id);
match typ.sty {
ty::TyBareFn(_, ref bare_fn) if bare_fn.abi == RustIntrinsic => {
if let ty::FnConverging(to) = bare_fn.sig.0.output {
}
fn def_id_is_transmute(cx: &Context, def_id: DefId) -> bool {
- match ty::lookup_item_type(cx.tcx, def_id).ty.sty {
+ match cx.tcx.lookup_item_type(def_id).ty.sty {
ty::TyBareFn(_, ref bfty) if bfty.abi == RustIntrinsic => (),
_ => return false
}
- ty::with_path(cx.tcx, def_id, |path| match path.last() {
+ cx.tcx.with_path(def_id, |path| match path.last() {
Some(ref last) => last.name().as_str() == "transmute",
_ => false
})
let (drop_impl_did, dtor_self_type) =
if dtor_did.krate == ast::LOCAL_CRATE {
let impl_did = ctx.tcx.map.get_parent_did(dtor_did.node);
- let ty = ty::lookup_item_type(ctx.tcx, impl_did).ty;
+ let ty = ctx.tcx.lookup_item_type(impl_did).ty;
(impl_did, ty)
} else {
continue;
ty::TyEnum(self_type_did, _) |
ty::TyStruct(self_type_did, _) |
ty::TyClosure(self_type_did, _) => {
- let hints = ty::lookup_repr_hints(ctx.tcx, self_type_did);
+ let hints = ctx.tcx.lookup_repr_hints(self_type_did);
if hints.iter().any(|attr| *attr == attr::ReprExtern) &&
- ty::ty_dtor(ctx.tcx, self_type_did).has_drop_flag() {
+ ctx.tcx.ty_dtor(self_type_did).has_drop_flag() {
let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did,
codemap::DUMMY_SP);
let self_defn_span = ctx.tcx.map.def_id_span(self_type_did,
#![feature(ref_slice)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
+#![feature(slice_patterns)]
#![feature(staged_api)]
#![feature(str_char)]
store.register_renamed("raw_pointer_deriving", "raw_pointer_derive");
store.register_renamed("unknown_features", "unused_features");
+
+ store.register_removed("unsigned_negation", "replaced by negate_unsigned feature gate");
}
use ArchiveRef;
use std::ffi::CString;
+use std::marker;
use std::path::Path;
use std::slice;
use std::str;
}
pub struct Child<'a> {
- name: Option<&'a str>,
- data: &'a [u8],
+ ptr: ::ArchiveChildRef,
+ _data: marker::PhantomData<&'a ArchiveRO>,
}
impl ArchiveRO {
}
}
+ pub fn raw(&self) -> ArchiveRef { self.ptr }
+
pub fn iter(&self) -> Iter {
unsafe {
Iter { ptr: ::LLVMRustArchiveIteratorNew(self.ptr), archive: self }
type Item = Child<'a>;
fn next(&mut self) -> Option<Child<'a>> {
- unsafe {
- let ptr = ::LLVMRustArchiveIteratorCurrent(self.ptr);
- if ptr.is_null() {
- return None
- }
- let mut name_len = 0;
- let name_ptr = ::LLVMRustArchiveChildName(ptr, &mut name_len);
- let mut data_len = 0;
- let data_ptr = ::LLVMRustArchiveChildData(ptr, &mut data_len);
- let child = Child {
- name: if name_ptr.is_null() {
- None
- } else {
- let name = slice::from_raw_parts(name_ptr as *const u8,
- name_len as usize);
- str::from_utf8(name).ok().map(|s| s.trim())
- },
- data: slice::from_raw_parts(data_ptr as *const u8,
- data_len as usize),
- };
- ::LLVMRustArchiveIteratorNext(self.ptr);
- Some(child)
+ let ptr = unsafe { ::LLVMRustArchiveIteratorNext(self.ptr) };
+ if ptr.is_null() {
+ None
+ } else {
+ Some(Child { ptr: ptr, _data: marker::PhantomData })
}
}
}
}
impl<'a> Child<'a> {
- pub fn name(&self) -> Option<&'a str> { self.name }
- pub fn data(&self) -> &'a [u8] { self.data }
+ pub fn name(&self) -> Option<&'a str> {
+ unsafe {
+ let mut name_len = 0;
+ let name_ptr = ::LLVMRustArchiveChildName(self.ptr, &mut name_len);
+ if name_ptr.is_null() {
+ None
+ } else {
+ let name = slice::from_raw_parts(name_ptr as *const u8,
+ name_len as usize);
+ str::from_utf8(name).ok().map(|s| s.trim())
+ }
+ }
+ }
+
+ pub fn data(&self) -> &'a [u8] {
+ unsafe {
+ let mut data_len = 0;
+ let data_ptr = ::LLVMRustArchiveChildData(self.ptr, &mut data_len);
+ if data_ptr.is_null() {
+ panic!("failed to read data from archive child");
+ }
+ slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
+ }
+ }
+
+ pub fn raw(&self) -> ::ArchiveChildRef { self.ptr }
+}
+
+impl<'a> Drop for Child<'a> {
+ fn drop(&mut self) {
+ unsafe { ::LLVMRustArchiveChildFree(self.ptr); }
+ }
}
}
bitflags! {
- flags Attribute : u32 {
+ flags Attribute : u64 {
const ZExt = 1 << 0,
const SExt = 1 << 1,
const NoReturn = 1 << 2,
const ReturnsTwice = 1 << 29,
const UWTable = 1 << 30,
const NonLazyBind = 1 << 31,
+ const OptimizeNone = 1 << 42,
}
}
DK_OptimizationFailure,
}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub enum ArchiveKind {
+ K_GNU,
+ K_MIPS64,
+ K_BSD,
+ K_COFF,
+}
+
// Opaque pointer types
#[allow(missing_copy_implementations)]
pub enum Module_opaque {}
#[allow(missing_copy_implementations)]
pub enum SMDiagnostic_opaque {}
pub type SMDiagnosticRef = *mut SMDiagnostic_opaque;
+#[allow(missing_copy_implementations)]
+pub enum RustArchiveMember_opaque {}
+pub type RustArchiveMemberRef = *mut RustArchiveMember_opaque;
pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void);
pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint);
pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint;
pub fn LLVMGetPointerAddressSpace(PointerTy: TypeRef) -> c_uint;
pub fn LLVMGetPointerToGlobal(EE: ExecutionEngineRef, V: ValueRef)
- -> *const ();
+ -> *const c_void;
pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint;
/* Operations on other types */
Catch: BasicBlockRef,
Name: *const c_char)
-> ValueRef;
- pub fn LLVMBuildLandingPad(B: BuilderRef,
- Ty: TypeRef,
- PersFn: ValueRef,
- NumClauses: c_uint,
- Name: *const c_char)
- -> ValueRef;
+ pub fn LLVMRustBuildLandingPad(B: BuilderRef,
+ Ty: TypeRef,
+ PersFn: ValueRef,
+ NumClauses: c_uint,
+ Name: *const c_char,
+ F: ValueRef)
+ -> ValueRef;
pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef;
pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef;
pub fn LLVMRustOpenArchive(path: *const c_char) -> ArchiveRef;
pub fn LLVMRustArchiveIteratorNew(AR: ArchiveRef) -> ArchiveIteratorRef;
- pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef);
- pub fn LLVMRustArchiveIteratorCurrent(AIR: ArchiveIteratorRef) -> ArchiveChildRef;
+ pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef) -> ArchiveChildRef;
pub fn LLVMRustArchiveChildName(ACR: ArchiveChildRef,
size: *mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildData(ACR: ArchiveChildRef,
size: *mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildFree(ACR: ArchiveChildRef);
pub fn LLVMRustArchiveIteratorFree(AIR: ArchiveIteratorRef);
pub fn LLVMRustDestroyArchive(AR: ArchiveRef);
CX: *mut c_void);
pub fn LLVMWriteSMDiagnosticToString(d: SMDiagnosticRef, s: RustStringRef);
+
+ pub fn LLVMRustWriteArchive(Dst: *const c_char,
+ NumMembers: size_t,
+ Members: *const RustArchiveMemberRef,
+ WriteSymbtab: bool,
+ Kind: ArchiveKind) -> c_int;
+ pub fn LLVMRustArchiveMemberNew(Filename: *const c_char,
+ Name: *const c_char,
+ Child: ArchiveChildRef) -> RustArchiveMemberRef;
+ pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef);
+
+ pub fn LLVMRustSetDataLayoutFromTargetMachine(M: ModuleRef,
+ TM: TargetMachineRef);
+ pub fn LLVMRustGetModuleDataLayout(M: ModuleRef) -> TargetDataRef;
}
// LLVM requires symbols from this library, but apparently they're not printed
pub fn SetFunctionAttribute(fn_: ValueRef, attr: Attribute) {
unsafe {
- LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint, attr.bits() as uint64_t)
+ LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint,
+ attr.bits() as uint64_t)
}
}
use rustc::middle::privacy::LastPrivate::*;
use rustc::middle::privacy::PrivateDep::*;
use rustc::middle::privacy::{ExternalExports, ExportedItems, PublicItems};
-use rustc::middle::ty::{MethodTypeParam, MethodStatic};
-use rustc::middle::ty::{MethodCall, MethodMap, MethodOrigin, MethodParam};
-use rustc::middle::ty::{MethodStaticClosure, MethodObject};
-use rustc::middle::ty::MethodTraitObject;
use rustc::middle::ty::{self, Ty};
use rustc::util::nodemap::{NodeMap, NodeSet};
use syntax::ast;
use syntax::ast_util::{is_local, local_def};
use syntax::codemap::Span;
-use syntax::parse::token;
use syntax::visit::{self, Visitor};
-type Context<'a, 'tcx> = (&'a MethodMap<'tcx>, &'a def::ExportMap);
+type Context<'a, 'tcx> = (&'a ty::MethodMap<'tcx>, &'a def::ExportMap);
/// Result of a checking operation - None => no errors were found. Some => an
/// error and contains the span and message for reporting that error and
}
_ => true,
};
- let tr = ty::impl_trait_ref(self.tcx, local_def(item.id));
+ let tr = self.tcx.impl_trait_ref(local_def(item.id));
let public_trait = tr.clone().map_or(false, |tr| {
!is_local(tr.def_id) ||
self.exported_items.contains(&tr.def_id.node)
self.def_privacy(id)
}
ty::ImplContainer(id) => {
- match ty::impl_trait_ref(self.tcx, id) {
+ match self.tcx.impl_trait_ref(id) {
Some(t) => {
debug!("privacy - impl of trait {:?}", id);
self.def_privacy(t.def_id)
self.def_privacy(id)
}
ty::ImplContainer(id) => {
- match ty::impl_trait_ref(self.tcx, id) {
+ match self.tcx.impl_trait_ref(id) {
Some(t) => {
debug!("privacy - impl of trait {:?}", id);
self.def_privacy(t.def_id)
self.def_privacy(id)
}
ty::ImplContainer(id) => {
- match ty::impl_trait_ref(self.tcx, id) {
+ match self.tcx.impl_trait_ref(id) {
Some(t) => {
debug!("privacy - impl of trait {:?}", id);
self.def_privacy(t.def_id)
ast::MethodImplItem(..) => {
let imp = self.tcx.map
.get_parent_did(closest_private_id);
- match ty::impl_trait_ref(self.tcx, imp) {
+ match self.tcx.impl_trait_ref(imp) {
Some(..) => return Allowable,
_ if ii.vis == ast::Public => {
return Allowable
ast::ItemEnum(..) => "enum",
_ => return Some((err_span, err_msg, None))
};
- let msg = format!("{} `{}` is private", desc,
- token::get_ident(item.ident));
+ let msg = format!("{} `{}` is private", desc, item.ident);
Some((err_span, err_msg, Some((span, msg))))
}
span: Span,
id: ast::DefId,
name: FieldName) {
- let fields = ty::lookup_struct_fields(self.tcx, id);
+ let fields = self.tcx.lookup_struct_fields(id);
let field = match name {
NamedField(f_name) => {
debug!("privacy - check named field {} in struct {:?}", f_name, id);
return
}
- let struct_type = ty::lookup_item_type(self.tcx, id).ty;
+ let struct_type = self.tcx.lookup_item_type(id).ty;
let struct_desc = match struct_type.sty {
ty::TyStruct(_, _) =>
- format!("struct `{}`", ty::item_path_str(self.tcx, id)),
+ format!("struct `{}`", self.tcx.item_path_str(id)),
// struct variant fields have inherited visibility
ty::TyEnum(..) => return,
_ => self.tcx.sess.span_bug(span, "can't find struct for field")
};
let msg = match name {
NamedField(name) => format!("field `{}` of {} is private",
- token::get_name(name), struct_desc),
+ name, struct_desc),
UnnamedField(idx) => format!("field #{} of {} is private",
idx + 1, struct_desc),
};
name: ast::Name) {
// If the method is a default method, we need to use the def_id of
// the default implementation.
- let method_id = match ty::impl_or_trait_item(self.tcx, method_id) {
+ let method_id = match self.tcx.impl_or_trait_item(method_id) {
ty::MethodTraitItem(method_type) => {
method_type.provided_source.unwrap_or(method_id)
}
}
};
- let string = token::get_name(name);
self.report_error(self.ensure_public(span,
method_id,
None,
&format!("method `{}`",
- string)));
+ name)));
}
// Checks that a path is in scope.
let ck = |tyname: &str| {
let ck_public = |def: ast::DefId| {
debug!("privacy - ck_public {:?}", def);
- let name = token::get_name(last);
let origdid = path_res.def_id();
self.ensure_public(span,
def,
Some(origdid),
- &format!("{} `{}`", tyname, name))
+ &format!("{} `{}`", tyname, last))
};
match path_res.last_private {
}
// Checks that a method is in scope.
- fn check_method(&mut self, span: Span, origin: &MethodOrigin,
+ fn check_method(&mut self, span: Span, method_def_id: ast::DefId,
name: ast::Name) {
- match *origin {
- MethodStatic(method_id) => {
- self.check_static_method(span, method_id, name)
+ match self.tcx.impl_or_trait_item(method_def_id).container() {
+ ty::ImplContainer(_) => {
+ self.check_static_method(span, method_def_id, name)
}
- MethodStaticClosure(_) => {}
// Trait methods are always all public. The only controlling factor
// is whether the trait itself is accessible or not.
- MethodTypeParam(MethodParam { ref trait_ref, .. }) |
- MethodTraitObject(MethodObject { ref trait_ref, .. }) => {
- self.report_error(self.ensure_public(span, trait_ref.def_id,
+ ty::TraitContainer(trait_def_id) => {
+ self.report_error(self.ensure_public(span, trait_def_id,
None, "source trait"));
}
}
fn visit_expr(&mut self, expr: &ast::Expr) {
match expr.node {
ast::ExprField(ref base, ident) => {
- if let ty::TyStruct(id, _) = ty::expr_ty_adjusted(self.tcx, &**base).sty {
+ if let ty::TyStruct(id, _) = self.tcx.expr_ty_adjusted(&**base).sty {
self.check_field(expr.span, id, NamedField(ident.node.name));
}
}
ast::ExprTupField(ref base, idx) => {
- if let ty::TyStruct(id, _) = ty::expr_ty_adjusted(self.tcx, &**base).sty {
+ if let ty::TyStruct(id, _) = self.tcx.expr_ty_adjusted(&**base).sty {
self.check_field(expr.span, id, UnnamedField(idx.node));
}
}
ast::ExprMethodCall(ident, _, _) => {
- let method_call = MethodCall::expr(expr.id);
- match self.tcx.method_map.borrow().get(&method_call) {
- None => {
- self.tcx.sess.span_bug(expr.span,
- "method call not in \
- method map");
- }
- Some(method) => {
- debug!("(privacy checking) checking impl method");
- self.check_method(expr.span, &method.origin, ident.node.name);
- }
- }
+ let method_call = ty::MethodCall::expr(expr.id);
+ let method = self.tcx.tables.borrow().method_map[&method_call];
+ debug!("(privacy checking) checking impl method");
+ self.check_method(expr.span, method.def_id, ident.node.name);
}
ast::ExprStruct(_, ref fields, _) => {
- match ty::expr_ty(self.tcx, expr).sty {
+ match self.tcx.expr_ty(expr).sty {
ty::TyStruct(ctor_id, _) => {
// RFC 736: ensure all unmentioned fields are visible.
// Rather than computing the set of unmentioned fields
// (i.e. `all_fields - fields`), just check them all.
- let all_fields = ty::lookup_struct_fields(self.tcx, ctor_id);
+ let all_fields = self.tcx.lookup_struct_fields(ctor_id);
for field in all_fields {
self.check_field(expr.span, ctor_id,
NamedField(field.name));
}
ast::ExprPath(..) => {
let guard = |did: ast::DefId| {
- let fields = ty::lookup_struct_fields(self.tcx, did);
+ let fields = self.tcx.lookup_struct_fields(did);
let any_priv = fields.iter().any(|f| {
f.vis != ast::Public && (
!is_local(f.id) ||
match pattern.node {
ast::PatStruct(_, ref fields, _) => {
- match ty::pat_ty(self.tcx, pattern).sty {
+ match self.tcx.pat_ty(pattern).sty {
ty::TyStruct(id, _) => {
for field in fields {
self.check_field(pattern.span, id,
// Patterns which bind no fields are allowable (the path is check
// elsewhere).
ast::PatEnum(_, Some(ref fields)) => {
- match ty::pat_ty(self.tcx, pattern).sty {
+ match self.tcx.pat_ty(pattern).sty {
ty::TyStruct(id, _) => {
for (i, field) in fields.iter().enumerate() {
if let ast::PatWild(..) = field.node {
let not_private_trait =
trait_ref.as_ref().map_or(true, // no trait counts as public trait
|tr| {
- let did = ty::trait_ref_to_def_id(self.tcx, tr);
+ let did = self.tcx.trait_ref_to_def_id(tr);
!is_local(did) || self.trait_is_public(did.node)
});
use Resolver;
use resolve_imports::Shadowable;
use TypeNsDef;
+use {resolve_error, ResolutionError};
use self::DuplicateCheckingMode::*;
use self::NamespaceError::*;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::attr::AttrMetaMethods;
-use syntax::parse::token::{self, special_idents};
+use syntax::parse::token::special_idents;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::visit::{self, Visitor};
// Return an error here by looking up the namespace that
// had the duplicate.
let ns = ns.unwrap();
- self.resolve_error(sp,
- &format!("duplicate definition of {} `{}`",
- namespace_error_to_string(duplicate_type),
- token::get_name(name)));
+ resolve_error(
+ self,
+ sp,
+ ResolutionError::DuplicateDefinition(
+ namespace_error_to_string(duplicate_type),
+ name)
+ );
{
let r = child.span_for_namespace(ns);
if let Some(sp) = r {
self.session.span_note(sp,
&format!("first definition of {} `{}` here",
namespace_error_to_string(duplicate_type),
- token::get_name(name)));
+ name));
}
}
}
let module_path = match view_path.node {
ViewPathSimple(_, ref full_path) => {
full_path.segments
- .init()
+ .split_last().unwrap().1
.iter().map(|ident| ident.identifier.name)
.collect()
}
// Build up the import directives.
let shadowable = item.attrs.iter().any(|attr| {
- attr.name() == token::get_name(special_idents::prelude_import.name)
+ attr.name() == special_idents::prelude_import.name.as_str()
});
let shadowable = if shadowable {
Shadowable::Always
ViewPathSimple(binding, ref full_path) => {
let source_name =
full_path.segments.last().unwrap().identifier.name;
- if &token::get_name(source_name)[..] == "mod" ||
- &token::get_name(source_name)[..] == "self" {
- self.resolve_error(view_path.span,
- "`self` imports are only allowed within a { } list");
+ if source_name.as_str() == "mod" || source_name.as_str() == "self" {
+ resolve_error(self,
+ view_path.span,
+ ResolutionError::SelfImportsOnlyAllowedWithin);
}
let subclass = SingleImport(binding.name,
_ => None
}).collect::<Vec<Span>>();
if mod_spans.len() > 1 {
- self.resolve_error(mod_spans[0],
- "`self` import can only appear once in the list");
+ resolve_error(
+ self,
+ mod_spans[0],
+ ResolutionError::SelfImportCanOnlyAppearOnceInTheList
+ );
for other_span in mod_spans.iter().skip(1) {
self.session.span_note(*other_span,
"another `self` import appears here");
let name = match module_path.last() {
Some(name) => *name,
None => {
- self.resolve_error(source_item.span,
- "`self` import can only appear in an import list \
- with a non-empty prefix");
+ resolve_error(
+ self,
+ source_item.span,
+ ResolutionError::
+ SelfImportOnlyInImportListWithNonEmptyPrefix
+ );
continue;
}
};
- let module_path = module_path.init();
+ let module_path = module_path.split_last().unwrap().1;
(module_path.to_vec(), name)
}
};
match trait_item.node {
ast::ConstTraitItem(..) => {
- let def = DefAssociatedConst(local_def(trait_item.id),
- FromTrait(local_def(item.id)));
+ let def = DefAssociatedConst(local_def(trait_item.id));
// NB: not DefModifiers::IMPORTABLE
name_bindings.define_value(def, trait_item.span, DefModifiers::PUBLIC);
}
ast::MethodTraitItem(..) => {
- let def = DefMethod(local_def(trait_item.id),
- FromTrait(local_def(item.id)));
+ let def = DefMethod(local_def(trait_item.id));
// NB: not DefModifiers::IMPORTABLE
name_bindings.define_value(def, trait_item.span, DefModifiers::PUBLIC);
}
debug!("(building reduced graph for external crate) ... \
adding trait item '{}'",
- token::get_name(trait_item_name));
+ trait_item_name);
self.trait_item_map.insert((trait_item_name, def_id),
trait_item_def.def_id());
self.handle_external_def(def,
def_visibility,
&*child_name_bindings,
- &token::get_name(name),
+ &name.as_str(),
name,
root);
}
def_id,
|def_like, child_name, visibility| {
debug!("(populating external module) ... found ident: {}",
- token::get_name(child_name));
+ child_name);
self.build_reduced_graph_for_external_crate_def(module,
def_like,
child_name,
SingleImport(target, _) => {
debug!("(building import directive) building import directive: {}::{}",
names_to_string(&module_.imports.borrow().last().unwrap().module_path),
- token::get_name(target));
+ target);
let mut import_resolutions = module_.import_resolutions.borrow_mut();
match import_resolutions.get_mut(&target) {
```
"##,
+E0253: r##"
+An attempt was made to import an unimportable value. This can happen when
+trying to import a method from a trait. An example of this error:
+
+```
+mod foo {
+ pub trait MyTrait {
+ fn do_something();
+ }
+}
+use foo::MyTrait::do_something;
+```
+
+It's invalid to directly import methods belonging to a trait or concrete type.
+"##,
+
E0255: r##"
You can't import a value whose name is the same as another value defined in the
module.
types:
http://doc.rust-lang.org/reference.html#types
+"##,
+
+E0364: r##"
+Private items cannot be publicly re-exported. This error indicates that
+you attempted to `pub use` a type or value that was not itself public.
+
+Here is an example that demonstrates the error:
+
+```
+mod foo {
+ const X: u32 = 1;
+}
+pub use foo::X;
+```
+
+The solution to this problem is to ensure that the items that you are
+re-exporting are themselves marked with `pub`:
+
+```
+mod foo {
+ pub const X: u32 = 1;
+}
+pub use foo::X;
+```
+
+See the 'Use Declarations' section of the reference for more information
+on this topic:
+
+http://doc.rust-lang.org/reference.html#use-declarations
+"##,
+
+E0365: r##"
+Private modules cannot be publicly re-exported. This error indicates
+that you attempted to `pub use` a module that was not itself public.
+
+Here is an example that demonstrates the error:
+
+```
+mod foo {
+ pub const X: u32 = 1;
+}
+pub use foo as foo2;
+
+```
+The solution to this problem is to ensure that the module that you are
+re-exporting is itself marked with `pub`:
+
+```
+pub mod foo {
+ pub const X: u32 = 1;
+}
+pub use foo as foo2;
+```
+
+See the 'Use Declarations' section of the reference for more information
+on this topic:
+
+http://doc.rust-lang.org/reference.html#use-declarations
+"##,
+
+E0403: r##"
+Some type parameters have the same name. Example of erroneous code:
+
+```
+fn foo<T, T>(s: T, u: T) {} // error: the name `T` is already used for a type
+ // parameter in this type parameter list
+```
+
+Please verify that none of the type parameters are misspelled, and rename any
+clashing parameters. Example:
+
+```
+fn foo<T, Y>(s: T, u: Y) {} // ok!
+```
+"##,
+
+E0404: r##"
+You tried to implement something which was not a trait on an object. Example of
+erroneous code:
+
+```
+struct Foo;
+struct Bar;
+
+impl Foo for Bar {} // error: `Foo` is not a trait
+```
+
+Please verify that you didn't misspell the trait's name or otherwise use the
+wrong identifier. Example:
+
+```
+trait Foo {
+ // some functions
+}
+struct Bar;
+
+impl Foo for Bar { // ok!
+ // functions implementation
+}
+```
+"##,
+
+E0405: r##"
+An unknown trait was implemented. Example of erroneous code:
+
+```
+struct Foo;
+
+impl SomeTrait for Foo {} // error: use of undeclared trait name `SomeTrait`
+```
+
+Please verify that the name of the trait wasn't misspelled and ensure that it
+was imported. Example:
+
+```
+// solution 1:
+use some_file::SomeTrait;
+
+// solution 2:
+trait SomeTrait {
+ // some functions
+}
+
+struct Foo;
+
+impl SomeTrait for Foo { // ok!
+ // implements functions
+}
+```
+"##,
+
+E0407: r##"
+A definition of a method not in the implemented trait was given in a trait
+implementation. Example of erroneous code:
+
+```
+trait Foo {
+ fn a();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+ fn b() {} // error: method `b` is not a member of trait `Foo`
+}
+```
+
+Please verify you didn't misspell the method name and you used the correct
+trait. First example:
+
+```
+trait Foo {
+ fn a();
+ fn b();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+ fn b() {} // ok!
+}
+```
+
+Second example:
+
+```
+trait Foo {
+ fn a();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+}
+
+impl Bar {
+ fn b() {}
+}
+```
+"##,
+
+E0417: r##"
+A static variable was referenced in a pattern. Example of erroneous code:
+
+```
+static FOO : i32 = 0;
+
+match 0 {
+ FOO => {} // error: static variables cannot be referenced in a
+ // pattern, use a `const` instead
+ _ => {}
+}
+```
+
+The compiler needs to know the value of the pattern at compile time;
+compile-time patterns can be defined via const or enum items. Please verify
+that the identifier is spelled correctly, and if so, use a const instead
+of static to define it. Example:
+
+```
+const FOO : i32 = 0;
+
+match 0 {
+ FOO => {} // ok!
+ _ => {}
+}
+```
+"##,
+
+E0424: r##"
+The `self` keyword was used in a static method. Example of erroneous code:
+
+```
+struct Foo;
+
+impl Foo {
+ fn bar(self) {}
+
+ fn foo() {
+ self.bar(); // error: `self` is not available in a static method.
+ }
+}
+```
+
+Please check if the method's argument list should have contained `self`,
+`&self`, or `&mut self` (in case you didn't want to create a static
+method), and add it if so. Example:
+
+```
+struct Foo;
+
+impl Foo {
+ fn bar(self) {}
+
+ fn foo(self) {
+ self.bar(); // ok!
+ }
+}
+```
+"##,
+
+E0425: r##"
+An unresolved name was used. Examples of erroneous code:
+
+```
+something_that_doesnt_exist::foo;
+// error: unresolved name `something_that_doesnt_exist::foo`
+
+// or:
+trait Foo {
+ fn bar() {
+ Self; // error: unresolved name `Self`
+ }
+}
+```
+
+Please verify you didn't misspell the name or that you're not using an
+invalid object. Example:
+
+```
+enum something_that_does_exist {
+ foo
+}
+// or:
+mod something_that_does_exist {
+ pub static foo : i32 = 0i32;
+}
+
+something_that_does_exist::foo; // ok!
+```
+"##,
+
+E0426: r##"
+An undeclared label was used. Example of erroneous code:
+
+```
+loop {
+ break 'a; // error: use of undeclared label `'a`
+}
+```
+
+Please verify you spelled or declared the label correctly. Example:
+
+```
+'a: loop {
+ break 'a; // ok!
+}
+```
+"##,
+
+E0428: r##"
+A type or module has been defined more than once. Example of erroneous
+code:
+
+```
+struct Bar;
+struct Bar; // error: duplicate definition of value `Bar`
+```
+
+Please verify you didn't misspell the type/module's name or remove/rename the
+duplicated one. Example:
+
+```
+struct Bar;
+struct Bar2; // ok!
+```
+"##,
+
+E0430: r##"
+The `self` import appears more than once in the list. Erroneous code example:
+
+```
+use something::{self, self}; // error: `self` import can only appear once in
+ // the list
+```
+
+Please verify you didn't misspell the import name or remove the duplicated
+`self` import. Example:
+
+```
+use something::self; // ok!
+```
+"##,
+
+E0431: r##"
+An invalid `self` import was made. Erroneous code example:
+
+```
+use {self}; // error: `self` import can only appear in an import list with a
+ // non-empty prefix
+```
+
+You cannot import the current module into itself, please remove this import
+or verify you didn't misspell it.
+"##,
+
+E0432: r##"
+An import was unresolved. Erroneous code example:
+
+```
+use something::Foo; // error: unresolved import `something::Foo`.
+```
+
+Please verify you didn't misspell the import name and that the import exists
+in the module from which you tried to import it. Example:
+
+```
+use something::Foo; // ok!
+
+mod something {
+ pub struct Foo;
+}
+```
+"##,
+
+E0433: r##"
+Invalid import. Example of erroneous code:
+
+```
+use something_which_doesnt_exist;
+// error: unresolved import `something_which_doesnt_exist`
+```
+
+Please verify you didn't misspell the import's name.
+"##,
+
+E0437: r##"
+Trait implementations can only implement associated types that are members of
+the trait in question. This error indicates that you attempted to implement
+an associated type whose name does not match the name of any associated type
+in the trait.
+
+Here is an example that demonstrates the error:
+
+```
+trait Foo {}
+
+impl Foo for i32 {
+ type Bar = bool;
+}
+```
+
+The solution to this problem is to remove the extraneous associated type:
+
+```
+trait Foo {}
+
+impl Foo for i32 {}
+```
+"##,
+
+E0438: r##"
+Trait implementations can only implement associated constants that are
+members of the trait in question. This error indicates that you
+attempted to implement an associated constant whose name does not
+match the name of any associated constant in the trait.
+
+Here is an example that demonstrates the error:
+
+```
+#![feature(associated_consts)]
+
+trait Foo {}
+
+impl Foo for i32 {
+ const BAR: bool = true;
+}
+```
+
+The solution to this problem is to remove the extraneous associated constant:
+
+```
+trait Foo {}
+
+impl Foo for i32 {}
+```
"##
}
register_diagnostics! {
- E0157,
- E0153,
- E0253, // not directly importable
+ E0153, // called no where
+ E0157, // called from no where
E0254, // import conflicts with imported crate in this module
E0257,
E0258,
- E0364, // item is private
- E0365 // item is private
+ E0401, // can't use type parameters from outer function
+ E0402, // cannot use an outer type parameter in this context
+ E0406, // undeclared associated type
+ E0408, // variable from pattern #1 is not bound in pattern #
+ E0409, // variable is bound with different mode in pattern # than in
+ // pattern #1
+ E0410, // variable from pattern is not bound in pattern 1
+ E0411, // use of `Self` outside of an impl or trait
+ E0412, // use of undeclared
+ E0413, // declaration of shadows an enum variant or unit-like struct in
+ // scope
+ E0414, // only irrefutable patterns allowed here
+ E0415, // identifier is bound more than once in this parameter list
+ E0416, // identifier is bound more than once in the same pattern
+ E0418, // is not an enum variant, struct or const
+ E0419, // unresolved enum variant, struct or const
+ E0420, // is not an associated const
+ E0421, // unresolved associated const
+ E0422, // does not name a structure
+ E0423, // is a struct variant name, but this expression uses it like a
+ // function name
+ E0427, // cannot use `ref` binding mode with ...
+ E0429, // `self` imports are only allowed within a { } list
+ E0434, // can't capture dynamic environment in a fn item
+ E0435, // attempt to use a non-constant value in a constant
}
#![feature(rc_weak)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
-#![feature(slice_extras)]
+#![feature(slice_splits)]
#![feature(staged_api)]
#[macro_use] extern crate log;
use resolve_imports::{Target, ImportDirective, ImportResolution};
use resolve_imports::Shadowable;
-
// NB: This module needs to be declared first so diagnostics are
// registered before they are used.
pub mod diagnostics;
mod build_reduced_graph;
mod resolve_imports;
+pub enum ResolutionError<'a> {
+ /// error E0401: can't use type parameters from outer function
+ TypeParametersFromOuterFunction,
+ /// error E0402: cannot use an outer type parameter in this context
+ OuterTypeParameterContext,
+ /// error E0403: the name is already used for a type parameter in this type parameter list
+ NameAlreadyUsedInTypeParameterList(Name),
+ /// error E0404: is not a trait
+ IsNotATrait(&'a str),
+ /// error E0405: use of undeclared trait name
+ UndeclaredTraitName(&'a str),
+ /// error E0406: undeclared associated type
+ UndeclaredAssociatedType,
+ /// error E0407: method is not a member of trait
+ MethodNotMemberOfTrait(Name, &'a str),
+ /// error E0437: type is not a member of trait
+ TypeNotMemberOfTrait(Name, &'a str),
+ /// error E0438: const is not a member of trait
+ ConstNotMemberOfTrait(Name, &'a str),
+ /// error E0408: variable `{}` from pattern #1 is not bound in pattern #{}
+ VariableNotBoundInPattern(Name, usize),
+ /// error E0409: variable is bound with different mode in pattern #{} than in pattern #1
+ VariableBoundWithDifferentMode(Name, usize),
+ /// error E0410: variable `{}` from pattern #{} is not bound in pattern #1
+ VariableNotBoundInParentPattern(Name, usize),
+ /// error E0411: use of `Self` outside of an impl or trait
+ SelfUsedOutsideImplOrTrait,
+ /// error E0412: use of undeclared {kind} `{name}`
+ UseOfUndeclared(&'a str, &'a str),
+ /// error E0413: declaration shadows an enum variant or unit-like struct in scope
+ DeclarationShadowsEnumVariantOrUnitLikeStruct(Name),
+ /// error E0414: only irrefutable patterns allowed here
+ OnlyIrrefutablePatternsAllowedHere,
+ /// error E0415: identifier is bound more than once in this parameter list
+ IdentifierBoundMoreThanOnceInParameterList(&'a str),
+ /// error E0416: identifier is bound more than once in the same pattern
+ IdentifierBoundMoreThanOnceInSamePattern(&'a str),
+ /// error E0417: static variables cannot be referenced in a pattern
+ StaticVariableReference,
+ /// error E0418: is not an enum variant, struct or const
+ NotAnEnumVariantStructOrConst(&'a str),
+ /// error E0419: unresolved enum variant, struct or const
+ UnresolvedEnumVariantStructOrConst(&'a str),
+ /// error E0420: is not an associated const
+ NotAnAssociatedConst(&'a str),
+ /// error E0421: unresolved associated const
+ UnresolvedAssociatedConst(&'a str),
+ /// error E0422: does not name a structure
+ DoesNotNameAStruct(&'a str),
+ /// error E0423: is a struct variant name, but this expression uses it like a function name
+ StructVariantUsedAsFunction(&'a str),
+ /// error E0424: `self` is not available in a static method
+ SelfNotAvailableInStaticMethod,
+ /// error E0425: unresolved name
+ UnresolvedName(&'a str, &'a str),
+ /// error E0426: use of undeclared label
+ UndeclaredLabel(&'a str),
+ /// error E0427: cannot use `ref` binding mode with ...
+ CannotUseRefBindingModeWith(&'a str),
+ /// error E0428: duplicate definition
+ DuplicateDefinition(&'a str, Name),
+ /// error E0429: `self` imports are only allowed within a { } list
+ SelfImportsOnlyAllowedWithin,
+ /// error E0430: `self` import can only appear once in the list
+ SelfImportCanOnlyAppearOnceInTheList,
+ /// error E0431: `self` import can only appear in an import list with a non-empty prefix
+ SelfImportOnlyInImportListWithNonEmptyPrefix,
+ /// error E0432: unresolved import
+ UnresolvedImport(Option<(&'a str, Option<&'a str>)>),
+ /// error E0433: failed to resolve
+ FailedToResolve(&'a str),
+ /// error E0434: can't capture dynamic environment in a fn item
+ CannotCaptureDynamicEnvironmentInFnItem,
+ /// error E0435: attempt to use a non-constant value in a constant
+ AttemptToUseNonConstantValueInConstant,
+}
+
+fn resolve_error<'b, 'a:'b, 'tcx:'a>(resolver: &'b Resolver<'a, 'tcx>, span: syntax::codemap::Span,
+ resolution_error: ResolutionError<'b>) {
+ if !resolver.emit_errors {
+ return;
+ }
+ match resolution_error {
+ ResolutionError::TypeParametersFromOuterFunction => {
+ span_err!(resolver.session, span, E0401, "can't use type parameters from \
+ outer function; try using a local \
+ type parameter instead");
+ },
+ ResolutionError::OuterTypeParameterContext => {
+ span_err!(resolver.session, span, E0402,
+ "cannot use an outer type parameter in this context");
+ },
+ ResolutionError::NameAlreadyUsedInTypeParameterList(name) => {
+ span_err!(resolver.session, span, E0403,
+ "the name `{}` is already used for a type \
+ parameter in this type parameter list", name);
+ },
+ ResolutionError::IsNotATrait(name) => {
+ span_err!(resolver.session, span, E0404,
+ "`{}` is not a trait",
+ name);
+ },
+ ResolutionError::UndeclaredTraitName(name) => {
+ span_err!(resolver.session, span, E0405,
+ "use of undeclared trait name `{}`",
+ name);
+ },
+ ResolutionError::UndeclaredAssociatedType => {
+ span_err!(resolver.session, span, E0406, "undeclared associated type");
+ },
+ ResolutionError::MethodNotMemberOfTrait(method, trait_) => {
+ span_err!(resolver.session, span, E0407,
+ "method `{}` is not a member of trait `{}`",
+ method,
+ trait_);
+ },
+ ResolutionError::TypeNotMemberOfTrait(type_, trait_) => {
+ span_err!(resolver.session, span, E0437,
+ "type `{}` is not a member of trait `{}`",
+ type_,
+ trait_);
+ },
+ ResolutionError::ConstNotMemberOfTrait(const_, trait_) => {
+ span_err!(resolver.session, span, E0438,
+ "const `{}` is not a member of trait `{}`",
+ const_,
+ trait_);
+ },
+ ResolutionError::VariableNotBoundInPattern(variable_name, pattern_number) => {
+ span_err!(resolver.session, span, E0408,
+ "variable `{}` from pattern #1 is not bound in pattern #{}",
+ variable_name,
+ pattern_number);
+ },
+ ResolutionError::VariableBoundWithDifferentMode(variable_name, pattern_number) => {
+ span_err!(resolver.session, span, E0409,
+ "variable `{}` is bound with different \
+ mode in pattern #{} than in pattern #1",
+ variable_name,
+ pattern_number);
+ },
+ ResolutionError::VariableNotBoundInParentPattern(variable_name, pattern_number) => {
+ span_err!(resolver.session, span, E0410,
+ "variable `{}` from pattern #{} is not bound in pattern #1",
+ variable_name,
+ pattern_number);
+ },
+ ResolutionError::SelfUsedOutsideImplOrTrait => {
+ span_err!(resolver.session, span, E0411, "use of `Self` outside of an impl or trait");
+ },
+ ResolutionError::UseOfUndeclared(kind, name) => {
+ span_err!(resolver.session, span, E0412,
+ "use of undeclared {} `{}`",
+ kind,
+ name);
+ },
+ ResolutionError::DeclarationShadowsEnumVariantOrUnitLikeStruct(name) => {
+ span_err!(resolver.session, span, E0413,
+ "declaration of `{}` shadows an enum variant or unit-like struct in \
+ scope",
+ name);
+ },
+ ResolutionError::OnlyIrrefutablePatternsAllowedHere => {
+ span_err!(resolver.session, span, E0414, "only irrefutable patterns allowed here");
+ },
+ ResolutionError::IdentifierBoundMoreThanOnceInParameterList(identifier) => {
+ span_err!(resolver.session, span, E0415,
+ "identifier `{}` is bound more than once in this parameter list",
+ identifier);
+ },
+ ResolutionError::IdentifierBoundMoreThanOnceInSamePattern(identifier) => {
+ span_err!(resolver.session, span, E0416,
+ "identifier `{}` is bound more than once in the same pattern",
+ identifier);
+ },
+ ResolutionError::StaticVariableReference => {
+ span_err!(resolver.session, span, E0417, "static variables cannot be \
+ referenced in a pattern, \
+ use a `const` instead");
+ },
+ ResolutionError::NotAnEnumVariantStructOrConst(name) => {
+ span_err!(resolver.session, span, E0418,
+ "`{}` is not an enum variant, struct or const",
+ name);
+ },
+ ResolutionError::UnresolvedEnumVariantStructOrConst(name) => {
+ span_err!(resolver.session, span, E0419,
+ "unresolved enum variant, struct or const `{}`",
+ name);
+ },
+ ResolutionError::NotAnAssociatedConst(name) => {
+ span_err!(resolver.session, span, E0420,
+ "`{}` is not an associated const",
+ name);
+ },
+ ResolutionError::UnresolvedAssociatedConst(name) => {
+ span_err!(resolver.session, span, E0421,
+ "unresolved associated const `{}`",
+ name);
+ },
+ ResolutionError::DoesNotNameAStruct(name) => {
+ span_err!(resolver.session, span, E0422, "`{}` does not name a structure", name);
+ },
+ ResolutionError::StructVariantUsedAsFunction(path_name) => {
+ span_err!(resolver.session, span, E0423,
+ "`{}` is the name of a struct or struct variant, \
+ but this expression \
+ uses it like a function name",
+ path_name);
+ },
+ ResolutionError::SelfNotAvailableInStaticMethod => {
+ span_err!(resolver.session, span, E0424, "`self` is not available in a static method. \
+ Maybe a `self` argument is missing?");
+ },
+ ResolutionError::UnresolvedName(path, name) => {
+ span_err!(resolver.session, span, E0425,
+ "unresolved name `{}`{}",
+ path,
+ name);
+ },
+ ResolutionError::UndeclaredLabel(name) => {
+ span_err!(resolver.session, span, E0426,
+ "use of undeclared label `{}`",
+ name);
+ },
+ ResolutionError::CannotUseRefBindingModeWith(descr) => {
+ span_err!(resolver.session, span, E0427,
+ "cannot use `ref` binding mode with {}",
+ descr);
+ },
+ ResolutionError::DuplicateDefinition(namespace, name) => {
+ span_err!(resolver.session, span, E0428,
+ "duplicate definition of {} `{}`",
+ namespace,
+ name);
+ },
+ ResolutionError::SelfImportsOnlyAllowedWithin => {
+ span_err!(resolver.session, span, E0429, "{}",
+ "`self` imports are only allowed within a { } list");
+ },
+ ResolutionError::SelfImportCanOnlyAppearOnceInTheList => {
+ span_err!(resolver.session, span, E0430,
+ "`self` import can only appear once in the list");
+ },
+ ResolutionError::SelfImportOnlyInImportListWithNonEmptyPrefix => {
+ span_err!(resolver.session, span, E0431,
+ "`self` import can only appear in an import list with a \
+ non-empty prefix");
+ }
+ ResolutionError::UnresolvedImport(name) => {
+ let msg = match name {
+ Some((n, Some(p))) => format!("unresolved import `{}`{}", n, p),
+ Some((n, None)) => format!("unresolved import (maybe you meant `{}::*`?)", n),
+ None => "unresolved import".to_owned()
+ };
+ span_err!(resolver.session, span, E0432, "{}", msg);
+ },
+ ResolutionError::FailedToResolve(msg) => {
+ span_err!(resolver.session, span, E0433, "failed to resolve. {}", msg);
+ },
+ ResolutionError::CannotCaptureDynamicEnvironmentInFnItem => {
+ span_err!(resolver.session, span, E0434, "{}",
+ "can't capture dynamic environment in a fn item; \
+ use the || { ... } closure form instead");
+ },
+ ResolutionError::AttemptToUseNonConstantValueInConstant =>{
+ span_err!(resolver.session, span, E0435,
+ "attempt to use a non-constant value in a constant");
+ },
+ }
+}
+
#[derive(Copy, Clone)]
struct BindingInfo {
span: Span,
if module.external_module_children.borrow().contains_key(&name) {
span_err!(self.session, span, E0259,
"an external crate named `{}` has already \
- been imported into this module",
- &token::get_name(name));
+ been imported into this module",
+ name);
}
}
if module.external_module_children.borrow().contains_key(&name) {
span_err!(self.session, span, E0260,
"the name `{}` conflicts with an external \
- crate that has been imported into this \
- module",
- &token::get_name(name));
+ crate that has been imported into this \
+ module",
+ name);
}
}
name_search_type,
false) {
Failed(None) => {
- let segment_name = token::get_name(name);
+ let segment_name = name.as_str();
let module_name = module_to_string(&*search_module);
let mut span = span;
let msg = if "???" == &module_name[..] {
Indeterminate => {
debug!("(resolving module path for import) module \
resolution is indeterminate: {}",
- token::get_name(name));
+ name);
return Indeterminate;
}
Success((target, used_proxy)) => {
match type_def.module_def {
None => {
let msg = format!("Not a module `{}`",
- token::get_name(name));
+ name);
return Failed(Some((span, msg)));
}
None => {
// There are no type bindings at all.
let msg = format!("Not a module `{}`",
- token::get_name(name));
+ name);
return Failed(Some((span, msg)));
}
}
-> ResolveResult<(Target, bool)> {
debug!("(resolving item in lexical scope) resolving `{}` in \
namespace {:?} in `{}`",
- token::get_name(name),
+ name,
namespace,
module_to_string(&*module_));
namespace,
PathSearch,
true) {
- Failed(Some((span, msg))) =>
- self.resolve_error(span, &format!("failed to resolve. {}",
- msg)),
+ Failed(Some((span, msg))) => {
+ resolve_error(self, span, ResolutionError::FailedToResolve(&*msg));
+ },
Failed(None) => (), // Continue up the search chain.
Indeterminate => {
// We couldn't see through the higher scope because of an
-> ResolveResult<ModulePrefixResult> {
// Start at the current module if we see `self` or `super`, or at the
// top of the crate otherwise.
- let mut containing_module;
- let mut i;
- let first_module_path_string = token::get_name(module_path[0]);
- if "self" == &first_module_path_string[..] {
- containing_module =
- self.get_nearest_normal_module_parent_or_self(module_);
- i = 1;
- } else if "super" == &first_module_path_string[..] {
- containing_module =
- self.get_nearest_normal_module_parent_or_self(module_);
- i = 0; // We'll handle `super` below.
- } else {
- return Success(NoPrefixFound);
- }
+ let mut i = match &*module_path[0].as_str() {
+ "self" => 1,
+ "super" => 0,
+ _ => return Success(NoPrefixFound),
+ };
+ let mut containing_module = self.get_nearest_normal_module_parent_or_self(module_);
// Now loop through all the `super`s we find.
- while i < module_path.len() {
- let string = token::get_name(module_path[i]);
- if "super" != &string[..] {
- break
- }
+ while i < module_path.len() && "super" == module_path[i].as_str() {
debug!("(resolving module prefix) resolving `super` at {}",
module_to_string(&*containing_module));
match self.get_nearest_normal_module_parent(containing_module) {
allow_private_imports: bool)
-> ResolveResult<(Target, bool)> {
debug!("(resolving name in module) resolving `{}` in `{}`",
- &token::get_name(name),
+ name,
module_to_string(&*module_));
// First, check the direct children of the module.
// We're out of luck.
debug!("(resolving name in module) failed to resolve `{}`",
- &token::get_name(name));
+ name);
return Failed(None);
}
.span_to_snippet((*imports)[index].span)
.unwrap();
if sn.contains("::") {
- self.resolve_error((*imports)[index].span,
- "unresolved import");
+ resolve_error(self,
+ (*imports)[index].span,
+ ResolutionError::UnresolvedImport(None));
} else {
- let err = format!("unresolved import (maybe you meant `{}::*`?)",
- sn);
- self.resolve_error((*imports)[index].span, &err[..]);
+ resolve_error(self,
+ (*imports)[index].span,
+ ResolutionError::UnresolvedImport(Some((&*sn, None))));
}
}
match orig_module.children.borrow().get(&name) {
None => {
debug!("!!! (with scope) didn't find `{}` in `{}`",
- token::get_name(name),
+ name,
module_to_string(&*orig_module));
}
Some(name_bindings) => {
None => {
debug!("!!! (with scope) didn't find module \
for `{}` in `{}`",
- token::get_name(name),
+ name,
module_to_string(&*orig_module));
}
Some(module_) => {
// This was an attempt to access an upvar inside a
// named function item. This is not allowed, so we
// report an error.
-
- self.resolve_error(span,
- "can't capture dynamic environment in a fn item; \
- use the || { ... } closure form instead");
+ resolve_error(
+ self,
+ span,
+ ResolutionError::CannotCaptureDynamicEnvironmentInFnItem
+ );
return None;
}
ConstantItemRibKind => {
// Still doesn't deal with upvars
- self.resolve_error(span,
- "attempt to use a non-constant \
- value in a constant");
+ resolve_error(
+ self,
+ span,
+ ResolutionError::AttemptToUseNonConstantValueInConstant
+ );
return None;
}
}
// This was an attempt to use a type parameter outside
// its scope.
- self.resolve_error(span,
- "can't use type parameters from \
- outer function; try using a local \
- type parameter instead");
+ resolve_error(self,
+ span,
+ ResolutionError::TypeParametersFromOuterFunction);
return None;
}
ConstantItemRibKind => {
// see #9186
- self.resolve_error(span,
- "cannot use an outer type \
- parameter in this context");
+ resolve_error(self, span, ResolutionError::OuterTypeParameterContext);
return None;
}
}
let name = item.ident.name;
debug!("(resolving item) resolving {}",
- token::get_name(name));
+ name);
match item.node {
ItemEnum(_, ref generics) |
visit::walk_ty_param_bounds_helper(this, bounds);
for trait_item in trait_items {
- // Create a new rib for the trait_item-specific type
- // parameters.
- //
- // FIXME #4951: Do we need a node ID here?
-
match trait_item.node {
ast::ConstTraitItem(_, ref default) => {
// Only impose the restrictions of
debug!("with_type_parameter_rib: {}", type_parameter.id);
if seen_bindings.contains(&name) {
- self.resolve_error(type_parameter.span,
- &format!("the name `{}` is already \
- used for a type \
- parameter in this type \
- parameter list",
- token::get_name(name)))
+ resolve_error(self,
+ type_parameter.span,
+ ResolutionError::NameAlreadyUsedInTypeParameterList(
+ name)
+ );
}
seen_bindings.insert(name);
debug!("(resolving trait) found trait def: {:?}", path_res);
Ok(path_res)
} else {
- self.resolve_error(trait_path.span,
- &format!("`{}` is not a trait",
- path_names_to_string(trait_path, path_depth)));
+ resolve_error(self,
+ trait_path.span,
+ ResolutionError::IsNotATrait(&*path_names_to_string(trait_path,
+ path_depth))
+ );
// If it's a typedef, give a note
if let DefTy(..) = path_res.base_def {
Err(())
}
} else {
- let msg = format!("use of undeclared trait name `{}`",
- path_names_to_string(trait_path, path_depth));
- self.resolve_error(trait_path.span, &msg);
+ resolve_error(self,
+ trait_path.span,
+ ResolutionError::UndeclaredTraitName(
+ &*path_names_to_string(trait_path, path_depth))
+ );
Err(())
}
}
if let Some(PathResolution { base_def: DefTyParam(..), .. }) = path_res {
self.record_def(eq_pred.id, path_res.unwrap());
} else {
- self.resolve_error(eq_pred.path.span, "undeclared associated type");
+ resolve_error(self,
+ eq_pred.span,
+ ResolutionError::UndeclaredAssociatedType);
}
}
}
for impl_item in impl_items {
match impl_item.node {
ConstImplItem(..) => {
- // If this is a trait impl, ensure the method
+ // If this is a trait impl, ensure the const
// exists in trait
this.check_trait_item(impl_item.ident.name,
- impl_item.span);
+ impl_item.span,
+ |n, s| ResolutionError::ConstNotMemberOfTrait(n, s));
this.with_constant_rib(|this| {
visit::walk_impl_item(this, impl_item);
});
// If this is a trait impl, ensure the method
// exists in trait
this.check_trait_item(impl_item.ident.name,
- impl_item.span);
+ impl_item.span,
+ |n, s| ResolutionError::MethodNotMemberOfTrait(n, s));
// We also need a new scope for the method-
// specific type parameters.
});
}
TypeImplItem(ref ty) => {
- // If this is a trait impl, ensure the method
+ // If this is a trait impl, ensure the type
// exists in trait
this.check_trait_item(impl_item.ident.name,
- impl_item.span);
+ impl_item.span,
+ |n, s| ResolutionError::TypeNotMemberOfTrait(n, s));
this.visit_ty(ty);
}
});
}
- fn check_trait_item(&self, name: Name, span: Span) {
+ fn check_trait_item<F>(&self, name: Name, span: Span, err: F)
+ where F: FnOnce(Name, &str) -> ResolutionError {
// If there is a TraitRef in scope for an impl, then the method must be in the trait.
if let Some((did, ref trait_ref)) = self.current_trait_ref {
if !self.trait_item_map.contains_key(&(name, did)) {
let path_str = path_names_to_string(&trait_ref.path, 0);
- self.resolve_error(span,
- &format!("method `{}` is not a member of trait `{}`",
- token::get_name(name),
- path_str));
+ resolve_error(self,
+ span,
+ err(name, &*path_str));
}
}
}
for (&key, &binding_0) in &map_0 {
match map_i.get(&key) {
None => {
- self.resolve_error(
- p.span,
- &format!("variable `{}` from pattern #1 is \
- not bound in pattern #{}",
- token::get_name(key),
- i + 1));
+ resolve_error(self,
+ p.span,
+ ResolutionError::VariableNotBoundInPattern(key,
+ i + 1));
}
Some(binding_i) => {
if binding_0.binding_mode != binding_i.binding_mode {
- self.resolve_error(
- binding_i.span,
- &format!("variable `{}` is bound with different \
- mode in pattern #{} than in pattern #1",
- token::get_name(key),
- i + 1));
+ resolve_error(self,
+ binding_i.span,
+ ResolutionError::VariableBoundWithDifferentMode(key,
+ i + 1)
+ );
}
}
}
for (&key, &binding) in &map_i {
if !map_0.contains_key(&key) {
- self.resolve_error(
- binding.span,
- &format!("variable `{}` from pattern {}{} is \
- not bound in pattern {}1",
- token::get_name(key),
- "#", i + 1, "#"));
+ resolve_error(self,
+ binding.span,
+ ResolutionError::VariableNotBoundInParentPattern(key,
+ i + 1));
}
}
}
path.segments.len() > 0 &&
maybe_qself.is_none() &&
path.segments[0].identifier.name == self_type_name;
- let msg = if is_invalid_self_type_name {
- "use of `Self` outside of an impl or trait".to_string()
+ if is_invalid_self_type_name {
+ resolve_error(self,
+ ty.span,
+ ResolutionError::SelfUsedOutsideImplOrTrait);
} else {
- format!("use of undeclared {} `{}`",
- kind, path_names_to_string(path, 0))
- };
-
- self.resolve_error(ty.span, &msg[..]);
+ resolve_error(self,
+ ty.span,
+ ResolutionError::UseOfUndeclared(
+ kind,
+ &*path_names_to_string(path,
+ 0))
+ );
+ }
}
}
}
let pat_id = pattern.id;
walk_pat(pattern, |pattern| {
match pattern.node {
- PatIdent(binding_mode, ref path1, _) => {
-
- // The meaning of pat_ident with no type parameters
+ PatIdent(binding_mode, ref path1, ref at_rhs) => {
+ // The meaning of PatIdent with no type parameters
// depends on whether an enum variant or unit-like struct
// with that name is in scope. The probing lookup has to
// be careful not to emit spurious errors. Only matching
// patterns (match) can match nullary variants or
- // unit-like structs. For binding patterns (let), matching
- // such a value is simply disallowed (since it's rarely
- // what you want).
+ // unit-like structs. For binding patterns (let
+ // and the LHS of @-patterns), matching such a value is
+ // simply disallowed (since it's rarely what you want).
+ let const_ok = mode == RefutableMode && at_rhs.is_none();
let ident = path1.node;
let renamed = mtwt::resolve(ident);
match self.resolve_bare_identifier_pattern(ident.name, pattern.span) {
- FoundStructOrEnumVariant(def, lp)
- if mode == RefutableMode => {
+ FoundStructOrEnumVariant(def, lp) if const_ok => {
debug!("(resolving pattern) resolving `{}` to \
struct or enum variant",
- token::get_name(renamed));
+ renamed);
self.enforce_default_binding_mode(
pattern,
});
}
FoundStructOrEnumVariant(..) => {
- self.resolve_error(
+ resolve_error(
+ self,
pattern.span,
- &format!("declaration of `{}` shadows an enum \
- variant or unit-like struct in \
- scope",
- token::get_name(renamed)));
+ ResolutionError::DeclarationShadowsEnumVariantOrUnitLikeStruct(
+ renamed)
+ );
}
- FoundConst(def, lp) if mode == RefutableMode => {
+ FoundConst(def, lp) if const_ok => {
debug!("(resolving pattern) resolving `{}` to \
constant",
- token::get_name(renamed));
+ renamed);
self.enforce_default_binding_mode(
pattern,
});
}
FoundConst(..) => {
- self.resolve_error(pattern.span,
- "only irrefutable patterns \
- allowed here");
+ resolve_error(
+ self,
+ pattern.span,
+ ResolutionError::OnlyIrrefutablePatternsAllowedHere
+ );
}
BareIdentifierPatternUnresolved => {
debug!("(resolving pattern) binding `{}`",
- token::get_name(renamed));
+ renamed);
let def = DefLocal(pattern.id);
bindings_list.contains_key(&renamed) {
// Forbid duplicate bindings in the same
// parameter list.
- self.resolve_error(pattern.span,
- &format!("identifier `{}` \
- is bound more \
- than once in \
- this parameter \
- list",
- token::get_ident(
- ident))
- )
+ resolve_error(
+ self,
+ pattern.span,
+ ResolutionError::IdentifierBoundMoreThanOnceInParameterList(
+ &ident.name.as_str())
+ );
} else if bindings_list.get(&renamed) ==
Some(&pat_id) {
// Then this is a duplicate variable in the
// same disjunction, which is an error.
- self.resolve_error(pattern.span,
- &format!("identifier `{}` is bound \
- more than once in the same \
- pattern",
- token::get_ident(ident)));
+ resolve_error(
+ self,
+ pattern.span,
+ ResolutionError::IdentifierBoundMoreThanOnceInSamePattern(
+ &ident.name.as_str())
+ );
}
// Else, not bound in the same pattern: do
// nothing.
self.record_def(pattern.id, path_res);
}
DefStatic(..) => {
- self.resolve_error(path.span,
- "static variables cannot be \
- referenced in a pattern, \
- use a `const` instead");
+ resolve_error(&self,
+ path.span,
+ ResolutionError::StaticVariableReference);
}
_ => {
// If anything ends up here entirely resolved,
// partially resolved, that's OK, because it may
// be a `T::CONST` that typeck will resolve.
if path_res.depth == 0 {
- self.resolve_error(
+ resolve_error(
+ self,
path.span,
- &format!("`{}` is not an enum variant, struct or const",
- token::get_ident(
- path.segments.last().unwrap().identifier)));
+ ResolutionError::NotAnEnumVariantStructOrConst(
+ &path.segments
+ .last()
+ .unwrap()
+ .identifier
+ .name
+ .as_str())
+ );
} else {
let const_name = path.segments.last().unwrap()
.identifier.name;
}
}
} else {
- self.resolve_error(path.span,
- &format!("unresolved enum variant, struct or const `{}`",
- token::get_ident(path.segments.last().unwrap().identifier)));
+ resolve_error(
+ self,
+ path.span,
+ ResolutionError::UnresolvedEnumVariantStructOrConst(
+ &path.segments.last().unwrap().identifier.name.as_str())
+ );
}
visit::walk_path(self, path);
}
self.record_def(pattern.id, path_res);
}
_ => {
- self.resolve_error(path.span,
- &format!("`{}` is not an associated const",
- token::get_ident(
- path.segments.last().unwrap().identifier)));
+ resolve_error(
+ self,
+ path.span,
+ ResolutionError::NotAnAssociatedConst(
+ &path.segments.last().unwrap().identifier.name.as_str()
+ )
+ );
}
}
} else {
- self.resolve_error(path.span,
- &format!("unresolved associated const `{}`",
- token::get_ident(path.segments.last().unwrap().identifier)));
+ resolve_error(
+ self,
+ path.span,
+ ResolutionError::UnresolvedAssociatedConst(
+ &path.segments.last().unwrap().identifier.name.as_str()
+ )
+ );
}
visit::walk_pat(self, pattern);
}
result => {
debug!("(resolving pattern) didn't find struct \
def: {:?}", result);
- let msg = format!("`{}` does not name a structure",
- path_names_to_string(path, 0));
- self.resolve_error(path.span, &msg[..]);
+ resolve_error(
+ self,
+ path.span,
+ ResolutionError::DoesNotNameAStruct(
+ &*path_names_to_string(path, 0))
+ );
}
}
visit::walk_path(self, path);
Success((target, _)) => {
debug!("(resolve bare identifier pattern) succeeded in \
finding {} at {:?}",
- token::get_name(name),
+ name,
target.bindings.value_def.borrow());
match *target.bindings.value_def.borrow() {
None => {
return FoundConst(def, LastMod(AllPublic));
}
DefStatic(..) => {
- self.resolve_error(span,
- "static variables cannot be \
- referenced in a pattern, \
- use a `const` instead");
+ resolve_error(self,
+ span,
+ ResolutionError::StaticVariableReference);
return BareIdentifierPatternUnresolved;
}
_ => {
Failed(err) => {
match err {
Some((span, msg)) => {
- self.resolve_error(span, &format!("failed to resolve: {}",
- msg));
+ resolve_error(self, span, ResolutionError::FailedToResolve(&*msg));
}
None => ()
}
debug!("(resolve bare identifier pattern) failed to find {}",
- token::get_name(name));
+ name);
return BareIdentifierPatternUnresolved;
}
}
segments: &[ast::PathSegment],
namespace: Namespace)
-> Option<(Def, LastPrivate)> {
- let module_path = segments.init().iter()
+ let module_path = segments.split_last().unwrap().1.iter()
.map(|ps| ps.identifier.name)
.collect::<Vec<_>>();
}
};
- self.resolve_error(span, &format!("failed to resolve. {}",
- msg));
+ resolve_error(self, span, ResolutionError::FailedToResolve(&*msg));
return None;
}
Indeterminate => panic!("indeterminate unexpected"),
segments: &[ast::PathSegment],
namespace: Namespace)
-> Option<(Def, LastPrivate)> {
- let module_path = segments.init().iter()
+ let module_path = segments.split_last().unwrap().1.iter()
.map(|ps| ps.identifier.name)
.collect::<Vec<_>>();
}
};
- self.resolve_error(span, &format!("failed to resolve. {}",
- msg));
+ resolve_error(self, span, ResolutionError::FailedToResolve(&*msg));
return None;
}
match search_result {
Some(DlDef(def)) => {
debug!("(resolving path in local ribs) resolved `{}` to local: {:?}",
- token::get_ident(ident),
+ ident,
def);
Some(def)
}
// found a module instead. Modules don't have defs.
debug!("(resolving item path by identifier in lexical \
scope) failed to resolve {} after success...",
- token::get_name(name));
+ name);
return None;
}
Some(def) => {
debug!("(resolving item path in lexical scope) \
resolved `{}` to item",
- token::get_name(name));
+ name);
// This lookup is "all public" because it only searched
// for one identifier in the current module (couldn't
// have passed through reexports or anything like that.
}
Failed(err) => {
debug!("(resolving item path by identifier in lexical scope) \
- failed to resolve {}", token::get_name(name));
+ failed to resolve {}", name);
if let Some((span, msg)) = err {
- self.resolve_error(span, &format!("failed to resolve. {}", msg))
+ resolve_error(self, span, ResolutionError::FailedToResolve(&*msg))
}
return None;
rs
}
- fn resolve_error(&self, span: Span, s: &str) {
- if self.emit_errors {
- self.session.span_err(span, s);
- }
- }
-
fn find_fallback_in_self_type(&mut self, name: Name) -> FallbackSuggestion {
fn extract_path_and_node_id(t: &Ty, allow: FallbackChecks)
-> Option<(Path, NodeId, FallbackChecks)> {
// Look for a method in the current self type's impl module.
if let Some(module) = get_module(self, path.span, &name_path) {
if let Some(binding) = module.children.borrow().get(&name) {
- if let Some(DefMethod(did, _)) = binding.def_for_namespace(ValueNS) {
+ if let Some(DefMethod(did)) = binding.def_for_namespace(ValueNS) {
if is_static_method(self, did) {
return StaticMethod(path_names_to_string(&path, 0))
}
for rib in self.value_ribs.iter().rev() {
for (&k, _) in &rib.bindings {
- maybes.push(token::get_name(k));
+ maybes.push(k.as_str());
values.push(usize::MAX);
}
}
// Check if struct variant
if let DefVariant(_, _, true) = path_res.base_def {
let path_name = path_names_to_string(path, 0);
- self.resolve_error(expr.span,
- &format!("`{}` is a struct variant name, but \
- this expression \
- uses it like a function name",
- path_name));
+
+ resolve_error(self,
+ expr.span,
+ ResolutionError::StructVariantUsedAsFunction(&*path_name));
let msg = format!("did you mean to write: \
`{} {{ /* fields */ }}`?",
match type_res.map(|r| r.base_def) {
Some(DefTy(struct_id, _))
if self.structs.contains_key(&struct_id) => {
- self.resolve_error(expr.span,
- &format!("`{}` is a structure name, but \
- this expression \
- uses it like a function name",
- path_name));
+ resolve_error(
+ self,
+ expr.span,
+ ResolutionError::StructVariantUsedAsFunction(
+ &*path_name)
+ );
let msg = format!("did you mean to write: \
`{} {{ /* fields */ }}`?",
false // Stop advancing
});
- if method_scope &&
- &token::get_name(special_names::self_)[..] == path_name {
- self.resolve_error(
- expr.span,
- "`self` is not available \
- in a static method. Maybe a \
- `self` argument is missing?");
+ if method_scope && special_names::self_ == path_name {
+ resolve_error(
+ self,
+ expr.span,
+ ResolutionError::SelfNotAvailableInStaticMethod
+ );
} else {
let last_name = path.segments.last().unwrap().identifier.name;
let mut msg = match self.find_fallback_in_self_type(last_name) {
msg = format!(". Did you mean {}?", msg)
}
- self.resolve_error(
- expr.span,
- &format!("unresolved name `{}`{}",
- path_name, msg));
+ resolve_error(self,
+ expr.span,
+ ResolutionError::UnresolvedName(&*path_name,
+ &*msg));
}
}
}
Some(definition) => self.record_def(expr.id, definition),
None => {
debug!("(resolving expression) didn't find struct def",);
- let msg = format!("`{}` does not name a structure",
- path_names_to_string(path, 0));
- self.resolve_error(path.span, &msg[..]);
+
+ resolve_error(self,
+ path.span,
+ ResolutionError::DoesNotNameAStruct(
+ &*path_names_to_string(path, 0))
+ );
}
}
let renamed = mtwt::resolve(label);
match self.search_label(renamed) {
None => {
- self.resolve_error(
- expr.span,
- &format!("use of undeclared label `{}`",
- token::get_ident(label)))
+ resolve_error(self,
+ expr.span,
+ ResolutionError::UndeclaredLabel(&label.name.as_str()))
}
Some(DlDef(def @ DefLabel(_))) => {
// Since this def is a label, it is never read.
fn get_traits_containing_item(&mut self, name: Name) -> Vec<DefId> {
debug!("(getting traits containing item) looking for '{}'",
- token::get_name(name));
+ name);
fn add_trait_info(found_traits: &mut Vec<DefId>,
trait_def_id: DefId,
debug!("(adding trait info) found trait {}:{} for method '{}'",
trait_def_id.krate,
trait_def_id.node,
- token::get_name(name));
+ name);
found_traits.push(trait_def_id);
}
match pat_binding_mode {
BindByValue(_) => {}
BindByRef(..) => {
- self.resolve_error(pat.span,
- &format!("cannot use `ref` binding mode \
- with {}",
- descr));
+ resolve_error(self,
+ pat.span,
+ ResolutionError::CannotUseRefBindingModeWith(descr));
}
}
}
debug!("Children:");
build_reduced_graph::populate_module_if_necessary(self, &module_);
for (&name, _) in module_.children.borrow().iter() {
- debug!("* {}", token::get_name(name));
+ debug!("* {}", name);
}
debug!("Import resolutions:");
}
}
- debug!("* {}:{}{}", token::get_name(name), value_repr, type_repr);
+ debug!("* {}:{}{}", name, value_repr, type_repr);
}
}
}
} else {
result.push_str("::")
}
- result.push_str(&token::get_name(*name));
+ result.push_str(&name.as_str());
};
result
}
use rustc::middle::def::Export;
use syntax::ast;
-use syntax::parse::token;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
match import_resolution.target_for_namespace(ns) {
Some(target) => {
debug!("(computing exports) maybe export '{}'",
- token::get_name(*name));
+ name);
self.add_exports_of_namebindings(exports,
*name,
&*target.bindings,
use Resolver;
use UseLexicalScopeFlag;
use {names_to_string, module_to_string};
+use {resolve_error, ResolutionError};
use build_reduced_graph;
use syntax::ast::{DefId, NodeId, Name};
use syntax::attr::AttrMetaMethods;
-use syntax::parse::token;
use syntax::codemap::Span;
use std::mem::replace;
Some((span, msg)) => (span, format!(". {}", msg)),
None => (import_directive.span, String::new())
};
- let msg = format!("unresolved import `{}`{}",
- import_path_to_string(
- &import_directive.module_path,
- import_directive.subclass),
- help);
- self.resolver.resolve_error(span, &msg[..]);
+ resolve_error(self.resolver,
+ span,
+ ResolutionError::UnresolvedImport(
+ Some((&*import_path_to_string(
+ &import_directive.module_path,
+ import_directive.subclass),
+ Some(&*help))))
+ );
}
ResolveResult::Indeterminate => break, // Bail out. We'll come around next time.
ResolveResult::Success(()) => () // Good. Continue.
-> ResolveResult<()> {
debug!("(resolving single import) resolving `{}` = `{}::{}` from \
`{}` id {}, last private {:?}",
- token::get_name(target),
+ target,
module_to_string(&*target_module),
- token::get_name(source),
+ source,
module_to_string(module_),
directive.id,
lp);
value_result = BoundResult(target_module.clone(),
(*child_name_bindings).clone());
if directive.is_public && !child_name_bindings.is_public(ValueNS) {
- let msg = format!("`{}` is private", token::get_name(source));
+ let msg = format!("`{}` is private, and cannot be reexported",
+ source);
+ let note_msg =
+ format!("Consider marking `{}` as `pub` in the imported module",
+ source);
span_err!(self.resolver.session, directive.span, E0364, "{}", &msg);
+ self.resolver.session.span_note(directive.span, ¬e_msg);
pub_err = true;
}
}
type_result = BoundResult(target_module.clone(),
(*child_name_bindings).clone());
if !pub_err && directive.is_public && !child_name_bindings.is_public(TypeNS) {
- let msg = format!("`{}` is private", token::get_name(source));
+ let msg = format!("`{}` is private, and cannot be reexported",
+ source);
+ let note_msg = format!("Consider declaring module `{}` as a `pub mod`",
+ source);
span_err!(self.resolver.session, directive.span, E0365, "{}", &msg);
+ self.resolver.session.span_note(directive.span, ¬e_msg);
}
}
}
if value_result.is_unbound() && type_result.is_unbound() {
let msg = format!("There is no `{}` in `{}`",
- token::get_name(source),
+ source,
module_to_string(&target_module));
return ResolveResult::Failed(Some((directive.span, msg)));
}
for (ident, target_import_resolution) in import_resolutions.iter() {
debug!("(resolving glob import) writing module resolution \
{} into `{}`",
- token::get_name(*ident),
+ *ident,
module_to_string(module_));
if !target_import_resolution.is_public {
debug!("(resolving glob import) writing resolution `{}` in `{}` \
to `{}`",
- &token::get_name(name),
+ name,
module_to_string(&*containing_module),
module_to_string(module_));
let msg = format!("a {} named `{}` has already been imported \
in this module",
namespace_name,
- &token::get_name(name));
+ name);
span_err!(self.resolver.session, import_directive.span, E0251, "{}", msg);
} else {
let target = Target::new(containing_module.clone(),
namespace: Namespace) {
let target = import_resolution.target_for_namespace(namespace);
debug!("check_for_conflicting_import: {}; target exists: {}",
- &token::get_name(name),
+ name,
target.is_some());
match target {
span_err!(self.resolver.session, import_span, E0252,
"a {} named `{}` has already been imported \
in this module", ns_word,
- &token::get_name(name));
+ name);
let use_id = import_resolution.id(namespace);
let item = self.resolver.ast_map.expect_item(use_id);
// item is syntax::ast::Item;
span_note!(self.resolver.session, item.span,
"previous import of `{}` here",
- token::get_name(name));
+ name);
}
Some(_) | None => {}
}
namespace: Namespace) {
if !name_bindings.defined_in_namespace_with(namespace, DefModifiers::IMPORTABLE) {
let msg = format!("`{}` is not directly importable",
- token::get_name(name));
+ name);
span_err!(self.resolver.session, import_span, E0253, "{}", &msg[..]);
}
}
let msg = format!("import `{0}` conflicts with imported \
crate in this module \
(maybe you meant `use {0}::*`?)",
- &token::get_name(name));
+ name);
span_err!(self.resolver.session, import_span, E0254, "{}", &msg[..]);
}
Some(_) | None => {}
if let Some(ref value) = *name_bindings.value_def.borrow() {
span_err!(self.resolver.session, import_span, E0255,
"import `{}` conflicts with value in this module",
- &token::get_name(name));
+ name);
if let Some(span) = value.value_span {
self.resolver.session.span_note(span, "conflicting value here");
}
};
span_err!(self.resolver.session, import_span, E0256,
"import `{}` conflicts with {}",
- &token::get_name(name), what);
+ name, what);
if let Some(span) = ty.type_span {
self.resolver.session.span_note(span, note);
}
fn import_directive_subclass_to_string(subclass: ImportDirectiveSubclass) -> String {
match subclass {
- SingleImport(_, source) => {
- token::get_name(source).to_string()
- }
+ SingleImport(_, source) => source.to_string(),
GlobImport => "*".to_string()
}
}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A helper class for dealing with static archives
+
+use std::env;
+use std::ffi::{CString, CStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::process::{Command, Output, Stdio};
+use std::str;
+
+use libc;
+use llvm::archive_ro::{ArchiveRO, Child};
+use llvm::{self, ArchiveKind};
+use rustc::metadata::loader::METADATA_FILENAME;
+use rustc::session::Session;
+use rustc_back::tempdir::TempDir;
+
+pub struct ArchiveConfig<'a> {
+ pub sess: &'a Session,
+ pub dst: PathBuf,
+ pub src: Option<PathBuf>,
+ pub lib_search_paths: Vec<PathBuf>,
+ pub ar_prog: String,
+ pub command_path: OsString,
+}
+
+/// Helper for adding many files to an archive with a single invocation of
+/// `ar`.
+#[must_use = "must call build() to finish building the archive"]
+pub struct ArchiveBuilder<'a> {
+ config: ArchiveConfig<'a>,
+ work_dir: TempDir,
+ removals: Vec<String>,
+ additions: Vec<Addition>,
+ should_update_symbols: bool,
+ src_archive: Option<Option<ArchiveRO>>,
+}
+
+enum Addition {
+ File {
+ path: PathBuf,
+ name_in_archive: String,
+ },
+ Archive {
+ archive: ArchiveRO,
+ archive_name: String,
+ skip: Box<FnMut(&str) -> bool>,
+ },
+}
+
+enum Action<'a> {
+ Remove(&'a [String]),
+ AddObjects(&'a [&'a PathBuf], bool),
+ UpdateSymbols,
+}
+
+pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
+ -> PathBuf {
+ // On Windows, static libraries sometimes show up as libfoo.a and other
+ // times show up as foo.lib
+ let oslibname = format!("{}{}{}",
+ sess.target.target.options.staticlib_prefix,
+ name,
+ sess.target.target.options.staticlib_suffix);
+ let unixlibname = format!("lib{}.a", name);
+
+ for path in search_paths {
+ debug!("looking for {} inside {:?}", name, path);
+ let test = path.join(&oslibname[..]);
+ if test.exists() { return test }
+ if oslibname != unixlibname {
+ let test = path.join(&unixlibname[..]);
+ if test.exists() { return test }
+ }
+ }
+ sess.fatal(&format!("could not find native static library `{}`, \
+ perhaps an -L flag is missing?", name));
+}
+
+fn is_relevant_child(c: &Child) -> bool {
+ match c.name() {
+ Some(name) => !name.contains("SYMDEF"),
+ None => false,
+ }
+}
+
+impl<'a> ArchiveBuilder<'a> {
+ /// Create a new static archive, ready for modifying the archive specified
+ /// by `config`.
+ pub fn new(config: ArchiveConfig<'a>) -> ArchiveBuilder<'a> {
+ ArchiveBuilder {
+ config: config,
+ work_dir: TempDir::new("rsar").unwrap(),
+ removals: Vec::new(),
+ additions: Vec::new(),
+ should_update_symbols: false,
+ src_archive: None,
+ }
+ }
+
+ /// Removes a file from this archive
+ pub fn remove_file(&mut self, file: &str) {
+ self.removals.push(file.to_string());
+ }
+
+ /// Lists all files in an archive
+ pub fn src_files(&mut self) -> Vec<String> {
+ if self.src_archive().is_none() {
+ return Vec::new()
+ }
+ let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap();
+ let ret = archive.iter()
+ .filter(is_relevant_child)
+ .filter_map(|child| child.name())
+ .filter(|name| !self.removals.iter().any(|x| x == name))
+ .map(|name| name.to_string())
+ .collect();
+ return ret;
+ }
+
+ fn src_archive(&mut self) -> Option<&ArchiveRO> {
+ if let Some(ref a) = self.src_archive {
+ return a.as_ref()
+ }
+ let src = match self.config.src {
+ Some(ref src) => src,
+ None => return None,
+ };
+ self.src_archive = Some(ArchiveRO::open(src));
+ self.src_archive.as_ref().unwrap().as_ref()
+ }
+
+ /// Adds all of the contents of a native library to this archive. This will
+ /// search in the relevant locations for a library named `name`.
+ pub fn add_native_library(&mut self, name: &str) -> io::Result<()> {
+ let location = find_library(name, &self.config.lib_search_paths,
+ self.config.sess);
+ self.add_archive(&location, name, |_| false)
+ }
+
+ /// Adds all of the contents of the rlib at the specified path to this
+ /// archive.
+ ///
+ /// This ignores adding the bytecode from the rlib, and if LTO is enabled
+ /// then the object file also isn't added.
+ pub fn add_rlib(&mut self, rlib: &Path, name: &str, lto: bool)
+ -> io::Result<()> {
+ // Ignoring obj file starting with the crate name
+ // as simple comparison is not enough - there
+ // might be also an extra name suffix
+ let obj_start = format!("{}", name);
+
+ // Ignoring all bytecode files, no matter of
+ // name
+ let bc_ext = ".bytecode.deflate";
+
+ self.add_archive(rlib, &name[..], move |fname: &str| {
+ let skip_obj = lto && fname.starts_with(&obj_start)
+ && fname.ends_with(".o");
+ skip_obj || fname.ends_with(bc_ext) || fname == METADATA_FILENAME
+ })
+ }
+
+ fn add_archive<F>(&mut self, archive: &Path, name: &str, skip: F)
+ -> io::Result<()>
+ where F: FnMut(&str) -> bool + 'static
+ {
+ let archive = match ArchiveRO::open(archive) {
+ Some(ar) => ar,
+ None => return Err(io::Error::new(io::ErrorKind::Other,
+ "failed to open archive")),
+ };
+ self.additions.push(Addition::Archive {
+ archive: archive,
+ archive_name: name.to_string(),
+ skip: Box::new(skip),
+ });
+ Ok(())
+ }
+
+ /// Adds an arbitrary file to this archive
+ pub fn add_file(&mut self, file: &Path) {
+ let name = file.file_name().unwrap().to_str().unwrap();
+ self.additions.push(Addition::File {
+ path: file.to_path_buf(),
+ name_in_archive: name.to_string(),
+ });
+ }
+
+ /// Indicate that the next call to `build` should updates all symbols in
+ /// the archive (run 'ar s' over it).
+ pub fn update_symbols(&mut self) {
+ self.should_update_symbols = true;
+ }
+
+ /// Combine the provided files, rlibs, and native libraries into a single
+ /// `Archive`.
+ pub fn build(&mut self) {
+ let res = match self.llvm_archive_kind() {
+ Some(kind) => self.build_with_llvm(kind),
+ None => self.build_with_ar_cmd(),
+ };
+ if let Err(e) = res {
+ self.config.sess.fatal(&format!("failed to build archive: {}", e));
+ }
+ }
+
+ pub fn llvm_archive_kind(&self) -> Option<ArchiveKind> {
+ if unsafe { llvm::LLVMVersionMinor() < 7 } {
+ return None
+ }
+
+ // Currently LLVM only supports writing archives in the 'gnu' format.
+ match &self.config.sess.target.target.options.archive_format[..] {
+ "gnu" => Some(ArchiveKind::K_GNU),
+ "mips64" => Some(ArchiveKind::K_MIPS64),
+ "bsd" => Some(ArchiveKind::K_BSD),
+ "coff" => Some(ArchiveKind::K_COFF),
+ _ => None,
+ }
+ }
+
+ pub fn using_llvm(&self) -> bool {
+ self.llvm_archive_kind().is_some()
+ }
+
+ fn build_with_ar_cmd(&mut self) -> io::Result<()> {
+ let removals = mem::replace(&mut self.removals, Vec::new());
+ let additions = mem::replace(&mut self.additions, Vec::new());
+ let should_update_symbols = mem::replace(&mut self.should_update_symbols,
+ false);
+
+ // Don't use fs::copy because libs may be installed as read-only and we
+ // want to modify this archive, so we use `io::copy` to not preserve
+ // permission bits.
+ if let Some(ref s) = self.config.src {
+ try!(io::copy(&mut try!(File::open(s)),
+ &mut try!(File::create(&self.config.dst))));
+ }
+
+ if removals.len() > 0 {
+ self.run(None, Action::Remove(&removals));
+ }
+
+ let mut members = Vec::new();
+ for addition in additions {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let dst = self.work_dir.path().join(&name_in_archive);
+ try!(fs::copy(&path, &dst));
+ members.push(PathBuf::from(name_in_archive));
+ }
+ Addition::Archive { archive, archive_name, mut skip } => {
+ try!(self.add_archive_members(&mut members, archive,
+ &archive_name, &mut *skip));
+ }
+ }
+ }
+
+ // Get an absolute path to the destination, so `ar` will work even
+ // though we run it from `self.work_dir`.
+ let mut objects = Vec::new();
+ let mut total_len = self.config.dst.to_string_lossy().len();
+
+ if members.is_empty() {
+ if should_update_symbols {
+ self.run(Some(self.work_dir.path()), Action::UpdateSymbols);
+ }
+ return Ok(())
+ }
+
+ // Don't allow the total size of `args` to grow beyond 32,000 bytes.
+ // Windows will raise an error if the argument string is longer than
+ // 32,768, and we leave a bit of extra space for the program name.
+ const ARG_LENGTH_LIMIT: usize = 32_000;
+
+ for member_name in &members {
+ let len = member_name.to_string_lossy().len();
+
+ // `len + 1` to account for the space that's inserted before each
+ // argument. (Windows passes command-line arguments as a single
+ // string, not an array of strings.)
+ if total_len + len + 1 > ARG_LENGTH_LIMIT {
+ // Add the archive members seen so far, without updating the
+ // symbol table.
+ self.run(Some(self.work_dir.path()),
+ Action::AddObjects(&objects, false));
+
+ objects.clear();
+ total_len = self.config.dst.to_string_lossy().len();
+ }
+
+ objects.push(member_name);
+ total_len += len + 1;
+ }
+
+ // Add the remaining archive members, and update the symbol table if
+ // necessary.
+ self.run(Some(self.work_dir.path()),
+ Action::AddObjects(&objects, should_update_symbols));
+ Ok(())
+ }
+
+ fn add_archive_members(&mut self, members: &mut Vec<PathBuf>,
+ archive: ArchiveRO, name: &str,
+ skip: &mut FnMut(&str) -> bool) -> io::Result<()> {
+ // Next, we must rename all of the inputs to "guaranteed unique names".
+ // We write each file into `self.work_dir` under its new unique name.
+ // The reason for this renaming is that archives are keyed off the name
+ // of the files, so if two files have the same name they will override
+ // one another in the archive (bad).
+ //
+ // We skip any files explicitly desired for skipping, and we also skip
+ // all SYMDEF files as these are just magical placeholders which get
+ // re-created when we make a new archive anyway.
+ for file in archive.iter().filter(is_relevant_child) {
+ let filename = file.name().unwrap();
+ if skip(filename) { continue }
+ let filename = Path::new(filename).file_name().unwrap()
+ .to_str().unwrap();
+
+ // Archives on unix systems typically do not have slashes in
+ // filenames as the `ar` utility generally only uses the last
+ // component of a path for the filename list in the archive. On
+ // Windows, however, archives assembled with `lib.exe` will preserve
+ // the full path to the file that was placed in the archive,
+ // including path separators.
+ //
+ // The code below is munging paths so it'll go wrong pretty quickly
+ // if there's some unexpected slashes in the filename, so here we
+ // just chop off everything but the filename component. Note that
+ // this can cause duplicate filenames, but that's also handled below
+ // as well.
+ let filename = Path::new(filename).file_name().unwrap()
+ .to_str().unwrap();
+
+ // An archive can contain files of the same name multiple times, so
+ // we need to be sure to not have them overwrite one another when we
+ // extract them. Consequently we need to find a truly unique file
+ // name for us!
+ let mut new_filename = String::new();
+ for n in 0.. {
+ let n = if n == 0 {String::new()} else {format!("-{}", n)};
+ new_filename = format!("r{}-{}-{}", n, name, filename);
+
+ // LLDB (as mentioned in back::link) crashes on filenames of
+ // exactly
+ // 16 bytes in length. If we're including an object file with
+ // exactly 16-bytes of characters, give it some prefix so
+ // that it's not 16 bytes.
+ new_filename = if new_filename.len() == 16 {
+ format!("lldb-fix-{}", new_filename)
+ } else {
+ new_filename
+ };
+
+ let present = members.iter().filter_map(|p| {
+ p.file_name().and_then(|f| f.to_str())
+ }).any(|s| s == new_filename);
+ if !present {
+ break
+ }
+ }
+ let dst = self.work_dir.path().join(&new_filename);
+ try!(try!(File::create(&dst)).write_all(file.data()));
+ members.push(PathBuf::from(new_filename));
+ }
+ Ok(())
+ }
+
+ fn run(&self, cwd: Option<&Path>, action: Action) -> Output {
+ let abs_dst = env::current_dir().unwrap().join(&self.config.dst);
+ let ar = &self.config.ar_prog;
+ let mut cmd = Command::new(ar);
+ cmd.env("PATH", &self.config.command_path);
+ cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
+ self.prepare_ar_action(&mut cmd, &abs_dst, action);
+ info!("{:?}", cmd);
+
+ if let Some(p) = cwd {
+ cmd.current_dir(p);
+ info!("inside {:?}", p.display());
+ }
+
+ let sess = &self.config.sess;
+ match cmd.spawn() {
+ Ok(prog) => {
+ let o = prog.wait_with_output().unwrap();
+ if !o.status.success() {
+ sess.err(&format!("{:?} failed with: {}", cmd, o.status));
+ sess.note(&format!("stdout ---\n{}",
+ str::from_utf8(&o.stdout).unwrap()));
+ sess.note(&format!("stderr ---\n{}",
+ str::from_utf8(&o.stderr).unwrap()));
+ sess.abort_if_errors();
+ }
+ o
+ },
+ Err(e) => {
+ sess.fatal(&format!("could not exec `{}`: {}",
+ self.config.ar_prog, e));
+ }
+ }
+ }
+
+ fn prepare_ar_action(&self, cmd: &mut Command, dst: &Path, action: Action) {
+ match action {
+ Action::Remove(files) => {
+ cmd.arg("d").arg(dst).args(files);
+ }
+ Action::AddObjects(objs, update_symbols) => {
+ cmd.arg(if update_symbols {"crs"} else {"crS"})
+ .arg(dst)
+ .args(objs);
+ }
+ Action::UpdateSymbols => {
+ cmd.arg("s").arg(dst);
+ }
+ }
+ }
+
+ fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> {
+ let mut archives = Vec::new();
+ let mut strings = Vec::new();
+ let mut members = Vec::new();
+ let removals = mem::replace(&mut self.removals, Vec::new());
+
+ unsafe {
+ if let Some(archive) = self.src_archive() {
+ for child in archive.iter() {
+ let child_name = match child.name() {
+ Some(s) => s,
+ None => continue,
+ };
+ if removals.iter().any(|r| r == child_name) {
+ continue
+ }
+
+ let name = try!(CString::new(child_name));
+ members.push(llvm::LLVMRustArchiveMemberNew(0 as *const _,
+ name.as_ptr(),
+ child.raw()));
+ strings.push(name);
+ }
+ }
+ for addition in mem::replace(&mut self.additions, Vec::new()) {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let path = try!(CString::new(path.to_str().unwrap()));
+ let name = try!(CString::new(name_in_archive));
+ members.push(llvm::LLVMRustArchiveMemberNew(path.as_ptr(),
+ name.as_ptr(),
+ 0 as *mut _));
+ strings.push(path);
+ strings.push(name);
+ }
+ Addition::Archive { archive, archive_name: _, mut skip } => {
+ for child in archive.iter().filter(is_relevant_child) {
+ let child_name = child.name().unwrap();
+ if skip(child_name) { continue }
+
+ let name = try!(CString::new(child_name));
+ let m = llvm::LLVMRustArchiveMemberNew(0 as *const _,
+ name.as_ptr(),
+ child.raw());
+ members.push(m);
+ strings.push(name);
+ }
+ archives.push(archive);
+ }
+ }
+ }
+
+ let dst = self.config.dst.to_str().unwrap().as_bytes();
+ let dst = try!(CString::new(dst));
+ let r = llvm::LLVMRustWriteArchive(dst.as_ptr(),
+ members.len() as libc::size_t,
+ members.as_ptr(),
+ self.should_update_symbols,
+ kind);
+ let ret = if r != 0 {
+ let err = llvm::LLVMRustGetLastError();
+ let msg = if err.is_null() {
+ "failed to write archive".to_string()
+ } else {
+ String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+ .into_owned()
+ };
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ } else {
+ Ok(())
+ };
+ for member in members {
+ llvm::LLVMRustArchiveMemberFree(member);
+ }
+ return ret
+ }
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::archive::{Archive, ArchiveBuilder, ArchiveConfig, METADATA_FILENAME};
+use super::archive::{ArchiveBuilder, ArchiveConfig};
use super::linker::{Linker, GnuLinker, MsvcLinker};
use super::rpath::RPathConfig;
use super::rpath;
+use super::msvc;
use super::svh::Svh;
use session::config;
use session::config::NoDebugInfo;
use session::search_paths::PathKind;
use session::Session;
use metadata::common::LinkMeta;
-use metadata::{encoder, cstore, filesearch, csearch, creader};
use metadata::filesearch::FileDoesntMatch;
+use metadata::loader::METADATA_FILENAME;
+use metadata::{encoder, cstore, filesearch, csearch, creader};
use middle::ty::{self, Ty};
use rustc::ast_map::{PathElem, PathElems, PathName};
use trans::{CrateContext, CrateTranslation, gensym_name};
// First, connect each component with <len, name> pairs.
for e in path {
- push(&mut n, &token::get_name(e.name()))
+ push(&mut n, &e.name().as_str())
}
match hash {
mangle(path.chain(Some(gensym_name(flav))), None)
}
-pub fn get_cc_prog(sess: &Session) -> String {
- match sess.opts.cg.linker {
- Some(ref linker) => return linker.to_string(),
- None => sess.target.target.options.linker.clone(),
+pub fn get_linker(sess: &Session) -> (String, Command) {
+ if let Some(ref linker) = sess.opts.cg.linker {
+ (linker.clone(), Command::new(linker))
+ } else if sess.target.target.options.is_like_msvc {
+ ("link.exe".to_string(), msvc::link_exe_cmd(sess))
+ } else {
+ (sess.target.target.options.linker.clone(),
+ Command::new(&sess.target.target.options.linker))
}
}
// Remove the temporary object file and metadata if we aren't saving temps
if !sess.opts.cg.save_temps {
- let obj_filename = outputs.temp_path(OutputTypeObject);
- if !sess.opts.output_types.contains(&OutputTypeObject) {
- remove(sess, &obj_filename);
+ for obj in object_filenames(sess, outputs) {
+ remove(sess, &obj);
}
- remove(sess, &obj_filename.with_extension("metadata.o"));
+ remove(sess, &outputs.with_extension("metadata.o"));
}
out_filenames
pub fn filename_for_input(sess: &Session,
crate_type: config::CrateType,
- name: &str,
- out_filename: &Path) -> PathBuf {
- let libname = format!("{}{}", name, sess.opts.cg.extra_filename);
+ crate_name: &str,
+ outputs: &OutputFilenames) -> PathBuf {
+ let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename);
match crate_type {
config::CrateTypeRlib => {
- out_filename.with_file_name(&format!("lib{}.rlib", libname))
+ outputs.out_directory.join(&format!("lib{}.rlib", libname))
}
config::CrateTypeDylib => {
let (prefix, suffix) = (&sess.target.target.options.dll_prefix,
&sess.target.target.options.dll_suffix);
- out_filename.with_file_name(&format!("{}{}{}",
- prefix,
- libname,
- suffix))
+ outputs.out_directory.join(&format!("{}{}{}", prefix, libname,
+ suffix))
}
config::CrateTypeStaticlib => {
- out_filename.with_file_name(&format!("lib{}.a", libname))
+ outputs.out_directory.join(&format!("lib{}.a", libname))
}
config::CrateTypeExecutable => {
let suffix = &sess.target.target.options.exe_suffix;
+ let out_filename = outputs.path(OutputTypeExe);
if suffix.is_empty() {
out_filename.to_path_buf()
} else {
crate_type: config::CrateType,
outputs: &OutputFilenames,
crate_name: &str) -> PathBuf {
- let obj_filename = outputs.temp_path(OutputTypeObject);
+ let objects = object_filenames(sess, outputs);
let out_filename = match outputs.single_output_file {
Some(ref file) => file.clone(),
- None => {
- let out_filename = outputs.path(OutputTypeExe);
- filename_for_input(sess, crate_type, crate_name, &out_filename)
- }
+ None => filename_for_input(sess, crate_type, crate_name, outputs),
};
- // Make sure the output and obj_filename are both writeable.
- // Mac, FreeBSD, and Windows system linkers check this already --
- // however, the Linux linker will happily overwrite a read-only file.
- // We should be consistent.
- let obj_is_writeable = is_writeable(&obj_filename);
- let out_is_writeable = is_writeable(&out_filename);
- if !out_is_writeable {
- sess.fatal(&format!("output file {} is not writeable -- check its \
- permissions.",
- out_filename.display()));
- }
- else if !obj_is_writeable {
- sess.fatal(&format!("object file {} is not writeable -- check its \
- permissions.",
- obj_filename.display()));
+ // Make sure files are writeable. Mac, FreeBSD, and Windows system linkers
+ // check this already -- however, the Linux linker will happily overwrite a
+ // read-only file. We should be consistent.
+ for file in objects.iter().chain(Some(&out_filename)) {
+ if !is_writeable(file) {
+ sess.fatal(&format!("output file {} is not writeable -- check its \
+ permissions", file.display()));
+ }
}
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
match crate_type {
config::CrateTypeRlib => {
- link_rlib(sess, Some(trans), &obj_filename, &out_filename).build();
+ link_rlib(sess, Some(trans), &objects, &out_filename,
+ tmpdir.path()).build();
}
config::CrateTypeStaticlib => {
- link_staticlib(sess, &obj_filename, &out_filename);
+ link_staticlib(sess, &objects, &out_filename, tmpdir.path());
}
config::CrateTypeExecutable => {
- link_natively(sess, trans, false, &obj_filename, &out_filename);
+ link_natively(sess, trans, false, &objects, &out_filename, outputs,
+ tmpdir.path());
}
config::CrateTypeDylib => {
- link_natively(sess, trans, true, &obj_filename, &out_filename);
+ link_natively(sess, trans, true, &objects, &out_filename, outputs,
+ tmpdir.path());
}
}
out_filename
}
+fn object_filenames(sess: &Session, outputs: &OutputFilenames) -> Vec<PathBuf> {
+ (0..sess.opts.cg.codegen_units).map(|i| {
+ let ext = format!("{}.o", i);
+ outputs.temp_path(OutputTypeObject).with_extension(&ext)
+ }).collect()
+}
+
fn archive_search_paths(sess: &Session) -> Vec<PathBuf> {
let mut search = Vec::new();
sess.target_filesearch(PathKind::Native).for_each_lib_search_path(|path, _| {
return search;
}
+fn archive_config<'a>(sess: &'a Session,
+ output: &Path,
+ input: Option<&Path>) -> ArchiveConfig<'a> {
+ ArchiveConfig {
+ sess: sess,
+ dst: output.to_path_buf(),
+ src: input.map(|p| p.to_path_buf()),
+ lib_search_paths: archive_search_paths(sess),
+ ar_prog: get_ar_prog(sess),
+ command_path: command_path(sess),
+ }
+}
+
// Create an 'rlib'
//
// An rlib in its current incarnation is essentially a renamed .a file. The
// native libraries and inserting all of the contents into this archive.
fn link_rlib<'a>(sess: &'a Session,
trans: Option<&CrateTranslation>, // None == no metadata/bytecode
- obj_filename: &Path,
- out_filename: &Path) -> ArchiveBuilder<'a> {
- info!("preparing rlib from {:?} to {:?}", obj_filename, out_filename);
- let handler = &sess.diagnostic().handler;
- let config = ArchiveConfig {
- handler: handler,
- dst: out_filename.to_path_buf(),
- lib_search_paths: archive_search_paths(sess),
- slib_prefix: sess.target.target.options.staticlib_prefix.clone(),
- slib_suffix: sess.target.target.options.staticlib_suffix.clone(),
- ar_prog: get_ar_prog(sess),
- command_path: command_path(sess),
- };
- let mut ab = ArchiveBuilder::create(config);
- ab.add_file(obj_filename).unwrap();
+ objects: &[PathBuf],
+ out_filename: &Path,
+ tmpdir: &Path) -> ArchiveBuilder<'a> {
+ info!("preparing rlib from {:?} to {:?}", objects, out_filename);
+ let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None));
+ for obj in objects {
+ ab.add_file(obj);
+ }
for &(ref l, kind) in sess.cstore.get_used_libraries().borrow().iter() {
match kind {
// symbol table of the archive.
ab.update_symbols();
- let mut ab = match sess.target.target.options.is_like_osx {
- // For OSX/iOS, we must be careful to update symbols only when adding
- // object files. We're about to start adding non-object files, so run
- // `ar` now to process the object files.
- true => ab.build().extend(),
- false => ab,
- };
+ // For OSX/iOS, we must be careful to update symbols only when adding
+ // object files. We're about to start adding non-object files, so run
+ // `ar` now to process the object files.
+ if sess.target.target.options.is_like_osx && !ab.using_llvm() {
+ ab.build();
+ }
// Note that it is important that we add all of our non-object "magical
// files" *after* all of the object files in the archive. The reason for
// this is as follows:
//
// * When performing LTO, this archive will be modified to remove
- // obj_filename from above. The reason for this is described below.
+ // objects from above. The reason for this is described below.
//
// * When the system linker looks at an archive, it will attempt to
// determine the architecture of the archive in order to see whether its
// contain the metadata in a separate file. We use a temp directory
// here so concurrent builds in the same directory don't try to use
// the same filename for metadata (stomping over one another)
- let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
- let metadata = tmpdir.path().join(METADATA_FILENAME);
+ let metadata = tmpdir.join(METADATA_FILENAME);
match fs::File::create(&metadata).and_then(|mut f| {
f.write_all(&trans.metadata)
}) {
metadata.display(), e));
}
}
- ab.add_file(&metadata).unwrap();
- remove(sess, &metadata);
+ ab.add_file(&metadata);
// For LTO purposes, the bytecode of this library is also inserted
// into the archive. If codegen_units > 1, we insert each of the
// bitcode files.
- for i in 0..sess.opts.cg.codegen_units {
+ for obj in objects {
// Note that we make sure that the bytecode filename in the
// archive is never exactly 16 bytes long by adding a 16 byte
// extension to it. This is to work around a bug in LLDB that
// would cause it to crash if the name of a file in an archive
// was exactly 16 bytes.
- let bc_filename = obj_filename.with_extension(&format!("{}.bc", i));
- let bc_deflated_filename = obj_filename.with_extension(
- &format!("{}.bytecode.deflate", i));
+ let bc_filename = obj.with_extension("bc");
+ let bc_deflated_filename = tmpdir.join({
+ obj.with_extension("bytecode.deflate").file_name().unwrap()
+ });
let mut bc_data = Vec::new();
match fs::File::open(&bc_filename).and_then(|mut f| {
}
};
- ab.add_file(&bc_deflated_filename).unwrap();
- remove(sess, &bc_deflated_filename);
+ ab.add_file(&bc_deflated_filename);
// See the bottom of back::write::run_passes for an explanation
// of when we do and don't keep .0.bc files around.
// After adding all files to the archive, we need to update the
// symbol table of the archive. This currently dies on OSX (see
// #11162), and isn't necessary there anyway
- if !sess.target.target.options.is_like_osx {
+ if !sess.target.target.options.is_like_osx || ab.using_llvm() {
ab.update_symbols();
}
}
// There's no need to include metadata in a static archive, so ensure to not
// link in the metadata object file (and also don't prepare the archive with a
// metadata file).
-fn link_staticlib(sess: &Session, obj_filename: &Path, out_filename: &Path) {
- let ab = link_rlib(sess, None, obj_filename, out_filename);
- let mut ab = match sess.target.target.options.is_like_osx {
- true => ab.build().extend(),
- false => ab,
- };
+fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path,
+ tempdir: &Path) {
+ let mut ab = link_rlib(sess, None, objects, out_filename, tempdir);
+ if sess.target.target.options.is_like_osx && !ab.using_llvm() {
+ ab.build();
+ }
if sess.target.target.options.morestack {
ab.add_native_library("morestack").unwrap();
}
}
ab.update_symbols();
- let _ = ab.build();
+ ab.build();
if !all_native_libs.is_empty() {
sess.note("link against the following native artifacts when linking against \
// This will invoke the system linker/cc to create the resulting file. This
// links to all upstream files as well.
fn link_natively(sess: &Session, trans: &CrateTranslation, dylib: bool,
- obj_filename: &Path, out_filename: &Path) {
- info!("preparing dylib? ({}) from {:?} to {:?}", dylib, obj_filename,
+ objects: &[PathBuf], out_filename: &Path,
+ outputs: &OutputFilenames,
+ tmpdir: &Path) {
+ info!("preparing dylib? ({}) from {:?} to {:?}", dylib, objects,
out_filename);
- let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
// The invocations of cc share some flags across platforms
- let pname = get_cc_prog(sess);
- let mut cmd = Command::new(&pname);
+ let (pname, mut cmd) = get_linker(sess);
cmd.env("PATH", command_path(sess));
let root = sess.target_filesearch(PathKind::Native).get_lib_path();
} else {
Box::new(GnuLinker { cmd: &mut cmd, sess: &sess }) as Box<Linker>
};
- link_args(&mut *linker, sess, dylib, tmpdir.path(),
- trans, obj_filename, out_filename);
+ link_args(&mut *linker, sess, dylib, tmpdir,
+ trans, objects, out_filename, outputs);
if !sess.target.target.options.no_compiler_rt {
linker.link_staticlib("compiler-rt");
}
dylib: bool,
tmpdir: &Path,
trans: &CrateTranslation,
- obj_filename: &Path,
- out_filename: &Path) {
+ objects: &[PathBuf],
+ out_filename: &Path,
+ outputs: &OutputFilenames) {
// The default library location, we need this to find the runtime.
// The location of crates will be determined as needed.
let t = &sess.target.target;
cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
- cmd.add_object(obj_filename);
+ for obj in objects {
+ cmd.add_object(obj);
+ }
cmd.output_filename(out_filename);
// Stack growth requires statically linking a __morestack function. Note
// executable. This metadata is in a separate object file from the main
// object file, so we link that in here.
if dylib {
- cmd.add_object(&obj_filename.with_extension("metadata.o"));
+ cmd.add_object(&outputs.with_extension("metadata.o"));
}
// Try to strip as much out of the generated object by removing unused
// Pass optimization flags down to the linker.
cmd.optimize();
+ // Pass debuginfo flags down to the linker.
+ cmd.debuginfo();
+
// We want to prevent the compiler from accidentally leaking in any system
// libraries, so we explicitly ask gcc to not link to any libraries by
// default. Note that this does not happen for windows because windows pulls
add_dynamic_crate(cmd, sess, &src.dylib.unwrap().0)
}
cstore::RequireStatic => {
- add_static_crate(cmd, sess, tmpdir, &src.rlib.unwrap().0)
+ add_static_crate(cmd, sess, tmpdir, dylib, &src.rlib.unwrap().0)
}
}
}
// Adds the static "rlib" versions of all crates to the command line.
+ // There's a bit of magic which happens here specifically related to LTO and
+ // dynamic libraries. Specifically:
+ //
+ // * For LTO, we remove upstream object files.
+ // * For dylibs we remove metadata and bytecode from upstream rlibs
+ //
+ // When performing LTO, all of the bytecode from the upstream libraries has
+ // already been included in our object file output. As a result we need to
+ // remove the object files in the upstream libraries so the linker doesn't
+ // try to include them twice (or whine about duplicate symbols). We must
+ // continue to include the rest of the rlib, however, as it may contain
+ // static native libraries which must be linked in.
+ //
+ // When making a dynamic library, linkers by default don't include any
+ // object files in an archive if they're not necessary to resolve the link.
+ // We basically want to convert the archive (rlib) to a dylib, though, so we
+ // *do* want everything included in the output, regardless of whether the
+ // linker thinks it's needed or not. As a result we must use the
+ // --whole-archive option (or the platform equivalent). When using this
+ // option the linker will fail if there are non-objects in the archive (such
+ // as our own metadata and/or bytecode). All in all, for rlibs to be
+ // entirely included in dylibs, we need to remove all non-object files.
+ //
+ // Note, however, that if we're not doing LTO or we're not producing a dylib
+ // (aka we're making an executable), we can just pass the rlib blindly to
+ // the linker (fast) because it's fine if it's not actually included as
+ // we're at the end of the dependency chain.
fn add_static_crate(cmd: &mut Linker, sess: &Session, tmpdir: &Path,
- cratepath: &Path) {
- // When performing LTO on an executable output, all of the
- // bytecode from the upstream libraries has already been
- // included in our object file output. We need to modify all of
- // the upstream archives to remove their corresponding object
- // file to make sure we don't pull the same code in twice.
- //
- // We must continue to link to the upstream archives to be sure
- // to pull in native static dependencies. As the final caveat,
- // on Linux it is apparently illegal to link to a blank archive,
- // so if an archive no longer has any object files in it after
- // we remove `lib.o`, then don't link against it at all.
- //
- // If we're not doing LTO, then our job is simply to just link
- // against the archive.
- if sess.lto() {
- let name = cratepath.file_name().unwrap().to_str().unwrap();
- let name = &name[3..name.len() - 5]; // chop off lib/.rlib
- time(sess.time_passes(),
- &format!("altering {}.rlib", name),
- (), |()| {
- let dst = tmpdir.join(cratepath.file_name().unwrap());
- match fs::copy(&cratepath, &dst) {
- Ok(..) => {}
- Err(e) => {
- sess.fatal(&format!("failed to copy {} to {}: {}",
- cratepath.display(),
- dst.display(), e));
- }
+ dylib: bool, cratepath: &Path) {
+ if !sess.lto() && !dylib {
+ cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath));
+ return
+ }
+
+ let dst = tmpdir.join(cratepath.file_name().unwrap());
+ let name = cratepath.file_name().unwrap().to_str().unwrap();
+ let name = &name[3..name.len() - 5]; // chop off lib/.rlib
+
+ time(sess.time_passes(), &format!("altering {}.rlib", name), (), |()| {
+ let cfg = archive_config(sess, &dst, Some(cratepath));
+ let mut archive = ArchiveBuilder::new(cfg);
+ archive.remove_file(METADATA_FILENAME);
+ archive.update_symbols();
+
+ let mut any_objects = false;
+ for f in archive.src_files() {
+ if f.ends_with("bytecode.deflate") {
+ archive.remove_file(&f);
+ continue
}
- // Fix up permissions of the copy, as fs::copy() preserves
- // permissions, but the original file may have been installed
- // by a package manager and may be read-only.
- match fs::metadata(&dst).and_then(|m| {
- let mut perms = m.permissions();
- perms.set_readonly(false);
- fs::set_permissions(&dst, perms)
- }) {
- Ok(..) => {}
- Err(e) => {
- sess.fatal(&format!("failed to chmod {} when preparing \
- for LTO: {}", dst.display(), e));
+ let canonical = f.replace("-", "_");
+ let canonical_name = name.replace("-", "_");
+ if sess.lto() && canonical.starts_with(&canonical_name) &&
+ canonical.ends_with(".o") {
+ let num = &f[name.len()..f.len() - 2];
+ if num.len() > 0 && num[1..].parse::<u32>().is_ok() {
+ archive.remove_file(&f);
+ continue
}
}
- let handler = &sess.diagnostic().handler;
- let config = ArchiveConfig {
- handler: handler,
- dst: dst.clone(),
- lib_search_paths: archive_search_paths(sess),
- slib_prefix: sess.target.target.options.staticlib_prefix.clone(),
- slib_suffix: sess.target.target.options.staticlib_suffix.clone(),
- ar_prog: get_ar_prog(sess),
- command_path: command_path(sess),
- };
- let mut archive = Archive::open(config);
- archive.remove_file(&format!("{}.o", name));
- let files = archive.files();
- if files.iter().any(|s| s.ends_with(".o")) {
- cmd.link_rlib(&dst);
- }
- });
- } else {
- cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath));
- }
+ any_objects = true;
+ }
+
+ if any_objects {
+ archive.build();
+ cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst));
+ }
+ });
}
// Same thing as above, but for dynamic crates instead of static crates.
// Just need to tell the linker about where the library lives and
// what its name is
- if let Some(dir) = cratepath.parent() {
+ let parent = cratepath.parent();
+ if let Some(dir) = parent {
cmd.include_path(&fix_windows_verbatim_for_gcc(dir));
}
let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
- cmd.link_dylib(&unlib(&sess.target, filestem));
+ cmd.link_rust_dylib(&unlib(&sess.target, filestem),
+ parent.unwrap_or(Path::new("")));
}
}
use std::ffi::OsString;
use std::path::{Path, PathBuf};
use std::process::Command;
+use std::fs;
-use rustc_back::archive;
+use back::archive;
use session::Session;
use session::config;
+use session::config::DebugInfoLevel::{NoDebugInfo, LimitedDebugInfo, FullDebugInfo};
/// Linker abstraction used by back::link to build up the command to invoke a
/// linker.
/// MSVC linker (e.g. `link.exe`) is being used.
pub trait Linker {
fn link_dylib(&mut self, lib: &str);
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path);
fn link_framework(&mut self, framework: &str);
fn link_staticlib(&mut self, lib: &str);
fn link_rlib(&mut self, lib: &Path);
+ fn link_whole_rlib(&mut self, lib: &Path);
fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]);
fn include_path(&mut self, path: &Path);
fn framework_path(&mut self, path: &Path);
fn gc_sections(&mut self, is_dylib: bool);
fn position_independent_executable(&mut self);
fn optimize(&mut self);
+ fn debuginfo(&mut self);
fn no_default_libraries(&mut self);
fn build_dylib(&mut self, out_filename: &Path);
fn args(&mut self, args: &[String]);
fn position_independent_executable(&mut self) { self.cmd.arg("-pie"); }
fn args(&mut self, args: &[String]) { self.cmd.args(args); }
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
fn link_framework(&mut self, framework: &str) {
self.cmd.arg("-framework").arg(framework);
}
// -force_load is the OSX equivalent of --whole-archive, but it
// involves passing the full path to the library to link.
let mut v = OsString::from("-Wl,-force_load,");
- v.push(&archive::find_library(lib,
- &target.options.staticlib_prefix,
- &target.options.staticlib_suffix,
- search_path,
- &self.sess.diagnostic().handler));
+ v.push(&archive::find_library(lib, search_path, &self.sess));
+ self.cmd.arg(&v);
+ }
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ if self.sess.target.target.options.is_like_osx {
+ let mut v = OsString::from("-Wl,-force_load,");
+ v.push(lib);
self.cmd.arg(&v);
+ } else {
+ self.cmd.arg("-Wl,--whole-archive").arg(lib)
+ .arg("-Wl,--no-whole-archive");
}
}
}
}
+ fn debuginfo(&mut self) {
+ // Don't do anything special here for GNU-style linkers.
+ }
+
fn no_default_libraries(&mut self) {
// Unfortunately right now passing -nodefaultlibs to gcc on windows
// doesn't work so hot (in terms of native dependencies). This if
fn link_dylib(&mut self, lib: &str) {
self.cmd.arg(&format!("{}.lib", lib));
}
+
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
+ // When producing a dll, the MSVC linker may not actually emit a
+ // `foo.lib` file if the dll doesn't actually export any symbols, so we
+ // check to see if the file is there and just omit linking to it if it's
+ // not present.
+ let name = format!("{}.lib", lib);
+ if fs::metadata(&path.join(&name)).is_ok() {
+ self.cmd.arg(name);
+ }
+ }
+
fn link_staticlib(&mut self, lib: &str) {
self.cmd.arg(&format!("{}.lib", lib));
}
// not supported?
self.link_staticlib(lib);
}
+ fn link_whole_rlib(&mut self, path: &Path) {
+ // not supported?
+ self.link_rlib(path);
+ }
fn optimize(&mut self) {
// Needs more investigation of `/OPT` arguments
}
+
+ fn debuginfo(&mut self) {
+ match self.sess.opts.debuginfo {
+ NoDebugInfo => {
+ // Do nothing if debuginfo is disabled
+ },
+ LimitedDebugInfo |
+ FullDebugInfo => {
+ // This will cause the Microsoft linker to generate a PDB file
+ // from the CodeView line tables in the object files.
+ self.cmd.arg("/DEBUG");
+ }
+ }
+ }
+
fn whole_archives(&mut self) {
// hints not supported?
}
use llvm::{ModuleRef, TargetMachineRef, True, False};
use rustc::metadata::cstore;
use rustc::util::common::time;
+use back::write::{ModuleConfig, with_llvm_pmb};
use libc;
use flate;
use std::ffi::CString;
pub fn run(sess: &session::Session, llmod: ModuleRef,
- tm: TargetMachineRef, reachable: &[String]) {
+ tm: TargetMachineRef, reachable: &[String],
+ config: &ModuleConfig) {
if sess.opts.cg.prefer_dynamic {
sess.err("cannot prefer dynamic linking when performing LTO");
sess.note("only 'staticlib' and 'bin' outputs are supported with LTO");
};
let archive = ArchiveRO::open(&path).expect("wanted an rlib");
- let file = path.file_name().unwrap().to_str().unwrap();
- let file = &file[3..file.len() - 5]; // chop off lib/.rlib
- debug!("reading {}", file);
- for i in 0.. {
- let filename = format!("{}.{}.bytecode.deflate", file, i);
- let msg = format!("check for {}", filename);
- let bc_encoded = time(sess.time_passes(), &msg, (), |_| {
- archive.iter().find(|section| {
- section.name() == Some(&filename[..])
- })
- });
- let bc_encoded = match bc_encoded {
- Some(data) => data,
- None => {
- if i == 0 {
- // No bitcode was found at all.
- sess.fatal(&format!("missing compressed bytecode in {}",
- path.display()));
- }
- // No more bitcode files to read.
- break
- }
- };
- let bc_encoded = bc_encoded.data();
+ let bytecodes = archive.iter().filter_map(|child| {
+ child.name().map(|name| (name, child))
+ }).filter(|&(name, _)| name.ends_with("bytecode.deflate"));
+ for (name, data) in bytecodes {
+ let bc_encoded = data.data();
let bc_decoded = if is_versioned_bytecode_format(bc_encoded) {
- time(sess.time_passes(), &format!("decode {}.{}.bc", file, i), (), |_| {
+ time(sess.time_passes(), &format!("decode {}", name), (), |_| {
// Read the version
let version = extract_bytecode_format_version(bc_encoded);
}
})
} else {
- time(sess.time_passes(), &format!("decode {}.{}.bc", file, i), (), |_| {
+ time(sess.time_passes(), &format!("decode {}", name), (), |_| {
// the object must be in the old, pre-versioning format, so simply
// inflate everything and let LLVM decide if it can make sense of it
match flate::inflate_bytes(bc_encoded) {
};
let ptr = bc_decoded.as_ptr();
- debug!("linking {}, part {}", name, i);
- time(sess.time_passes(),
- &format!("ll link {}.{}", name, i),
- (),
+ debug!("linking {}", name);
+ time(sess.time_passes(), &format!("ll link {}", name), (),
|()| unsafe {
if !llvm::LLVMRustLinkInExternalBitcode(llmod,
ptr as *const libc::c_char,
llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod);
llvm::LLVMRustAddPass(pm, "verify\0".as_ptr() as *const _);
- let opt = match sess.opts.optimize {
- config::No => 0,
- config::Less => 1,
- config::Default => 2,
- config::Aggressive => 3,
- };
-
- let builder = llvm::LLVMPassManagerBuilderCreate();
- llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt);
- llvm::LLVMPassManagerBuilderPopulateLTOPassManager(builder, pm,
- /* Internalize = */ False,
- /* RunInliner = */ True);
- llvm::LLVMPassManagerBuilderDispose(builder);
+ with_llvm_pmb(llmod, config, &mut |b| {
+ llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm,
+ /* Internalize = */ False,
+ /* RunInliner = */ True);
+ });
llvm::LLVMRustAddPass(pm, "verify\0".as_ptr() as *const _);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! MSVC-specific logic for linkers and such.
+//!
+//! This module contains a cross-platform interface but has a blank unix
+//! implementation. The Windows implementation builds on top of Windows native
+//! libraries (reading registry keys), so it otherwise wouldn't link on unix.
+//!
+//! Note that we don't have much special logic for finding the system linker on
+//! any other platforms, so it may seem a little odd to single out MSVC to have
+//! a good deal of code just to find the linker. Unlike Unix systems, however,
+//! the MSVC linker is not in the system PATH by default. It also additionally
+//! needs a few environment variables or command line flags to be able to link
+//! against system libraries.
+//!
+//! In order to have a nice smooth experience on Windows, the logic in this file
+//! is here to find the MSVC linker and set it up in the default configuration
+//! one would need to set up anyway. This means that the Rust compiler can be
+//! run not only in the developer shells of MSVC but also the standard cmd.exe
+//! shell or MSYS shells.
+//!
+//! As a high-level note, all logic in this module for looking up various
+//! paths/files is copied over from Clang in its MSVCToolChain.cpp file, but
+//! comments can also be found below leading through the various code paths.
+
+use std::process::Command;
+use session::Session;
+
+#[cfg(windows)]
+mod registry;
+
+#[cfg(windows)]
+pub fn link_exe_cmd(sess: &Session) -> Command {
+ use std::env;
+ use std::ffi::OsString;
+ use std::fs;
+ use std::io;
+ use std::path::{Path, PathBuf};
+ use self::registry::{RegistryKey, LOCAL_MACHINE};
+
+ // When finding the link.exe binary the 32-bit version is at the top level
+ // but the versions to cross to other architectures are stored in
+ // sub-folders. Unknown architectures also just bail out early to return the
+ // standard `link.exe` command.
+ let extra = match &sess.target.target.arch[..] {
+ "x86" => "",
+ "x86_64" => "amd64",
+ "arm" => "arm",
+ _ => return Command::new("link.exe"),
+ };
+
+ let vs_install_dir = get_vs_install_dir();
+
+ // First up, we need to find the `link.exe` binary itself, and there's a few
+ // locations that we can look. First up is the standard VCINSTALLDIR
+ // environment variable which is normally set by the vcvarsall.bat file. If
+ // an environment is set up manually by whoever's driving the compiler then
+ // we shouldn't muck with that decision and should instead respect that.
+ //
+ // Next up is looking in PATH itself. Here we look for `cl.exe` and then
+ // assume that `link.exe` is next to it if we find it. Note that we look for
+ // `cl.exe` because MinGW ships /usr/bin/link.exe which is normally found in
+ // PATH but we're not interested in finding that.
+ //
+ // Finally we read the Windows registry to discover the VS install root.
+ // From here we probe for `link.exe` just to make sure that it exists.
+ let mut cmd = env::var_os("VCINSTALLDIR").and_then(|dir| {
+ let mut p = PathBuf::from(dir);
+ p.push("bin");
+ p.push(extra);
+ p.push("link.exe");
+ if fs::metadata(&p).is_ok() {Some(p)} else {None}
+ }).or_else(|| {
+ env::var_os("PATH").and_then(|path| {
+ env::split_paths(&path).find(|path| {
+ fs::metadata(&path.join("cl.exe")).is_ok()
+ }).map(|p| {
+ p.join("link.exe")
+ })
+ })
+ }).or_else(|| {
+ vs_install_dir.as_ref().and_then(|p| {
+ let mut p = p.join("VC/bin");
+ p.push(extra);
+ p.push("link.exe");
+ if fs::metadata(&p).is_ok() {Some(p)} else {None}
+ })
+ }).map(|linker| {
+ Command::new(linker)
+ }).unwrap_or_else(|| {
+ Command::new("link.exe")
+ });
+
+ // The MSVC linker uses the LIB environment variable as the default lookup
+ // path for libraries. This environment variable is normally set up by the
+ // VS shells, so we only want to start adding our own pieces if it's not
+ // set.
+ //
+ // If we're adding our own pieces, then we need to add a few primary
+ // directories to the default search path for the linker. The first is in
+ // the VS install directory, the next is the Windows SDK directory, and the
+ // last is the possible UCRT installation directory.
+ //
+ // The UCRT is a recent addition to Visual Studio installs (2015 at the time
+ // of this writing), and it's in the normal windows SDK folder, but there
+ // apparently aren't registry keys pointing to it. As a result we detect the
+ // installation and then add it manually. This logic will probably need to
+ // be tweaked over time...
+ if env::var_os("LIB").is_none() {
+ if let Some(mut vs_install_dir) = vs_install_dir {
+ vs_install_dir.push("VC/lib");
+ vs_install_dir.push(extra);
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(&vs_install_dir);
+ cmd.arg(arg);
+
+ if let Some((ucrt_root, vers)) = ucrt_install_dir(&vs_install_dir) {
+ if let Some(arch) = windows_sdk_v8_subdir(sess) {
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(ucrt_root.join("Lib").join(vers)
+ .join("ucrt").join(arch));
+ cmd.arg(arg);
+ }
+ }
+ }
+ if let Some(path) = get_windows_sdk_lib_path(sess) {
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(&path);
+ cmd.arg(arg);
+ }
+ }
+
+ return cmd;
+
+ // When looking for the Visual Studio installation directory we look in a
+ // number of locations in varying degrees of precedence:
+ //
+ // 1. The Visual Studio registry keys
+ // 2. The Visual Studio Express registry keys
+ // 3. A number of somewhat standard environment variables
+ //
+ // If we find a hit from any of these keys then we strip off the IDE/Tools
+ // folders which are typically found at the end.
+ //
+ // As a final note, when we take a look at the registry keys they're
+ // typically found underneath the version of what's installed, but we don't
+ // quite know what's installed. As a result we probe all sub-keys of the two
+ // keys we're looking at to find out the maximum version of what's installed
+ // and we use that root directory.
+ fn get_vs_install_dir() -> Option<PathBuf> {
+ LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\VisualStudio".as_ref()).or_else(|_| {
+ LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\VCExpress".as_ref())
+ }).ok().and_then(|key| {
+ max_version(&key).and_then(|(_vers, key)| {
+ key.query_str("InstallDir").ok()
+ })
+ }).or_else(|| {
+ env::var_os("VS120COMNTOOLS")
+ }).or_else(|| {
+ env::var_os("VS100COMNTOOLS")
+ }).or_else(|| {
+ env::var_os("VS90COMNTOOLS")
+ }).or_else(|| {
+ env::var_os("VS80COMNTOOLS")
+ }).map(PathBuf::from).and_then(|mut dir| {
+ if dir.ends_with("Common7/IDE") || dir.ends_with("Common7/Tools") {
+ dir.pop();
+ dir.pop();
+ Some(dir)
+ } else {
+ None
+ }
+ })
+ }
+
+ // Given a registry key, look at all the sub keys and find the one which has
+ // the maximal numeric value.
+ //
+ // Returns the name of the maximal key as well as the opened maximal key.
+ fn max_version(key: &RegistryKey) -> Option<(OsString, RegistryKey)> {
+ let mut max_vers = 0;
+ let mut max_key = None;
+ for subkey in key.iter().filter_map(|k| k.ok()) {
+ let val = subkey.to_str().and_then(|s| {
+ s.trim_left_matches("v").replace(".", "").parse().ok()
+ });
+ let val = match val {
+ Some(s) => s,
+ None => continue,
+ };
+ if val > max_vers {
+ if let Ok(k) = key.open(&subkey) {
+ max_vers = val;
+ max_key = Some((subkey, k));
+ }
+ }
+ }
+ return max_key
+ }
+
+ fn get_windows_sdk_path() -> Option<(PathBuf, usize, Option<OsString>)> {
+ let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows";
+ let key = LOCAL_MACHINE.open(key.as_ref());
+ let (n, k) = match key.ok().as_ref().and_then(max_version) {
+ Some(p) => p,
+ None => return None,
+ };
+ let mut parts = n.to_str().unwrap().trim_left_matches("v").splitn(2, ".");
+ let major = parts.next().unwrap().parse::<usize>().unwrap();
+ let _minor = parts.next().unwrap().parse::<usize>().unwrap();
+ k.query_str("InstallationFolder").ok().map(|folder| {
+ let ver = k.query_str("ProductVersion");
+ (PathBuf::from(folder), major, ver.ok())
+ })
+ }
+
+ fn get_windows_sdk_lib_path(sess: &Session) -> Option<PathBuf> {
+ let (mut path, major, ver) = match get_windows_sdk_path() {
+ Some(p) => p,
+ None => return None,
+ };
+ path.push("Lib");
+ if major <= 7 {
+ // In Windows SDK 7.x, x86 libraries are directly in the Lib folder,
+ // x64 libraries are inside, and it's not necessary to link against
+ // the SDK 7.x when targeting ARM or other architectures.
+ let x86 = match &sess.target.target.arch[..] {
+ "x86" => true,
+ "x86_64" => false,
+ _ => return None,
+ };
+ Some(if x86 {path} else {path.join("x64")})
+ } else if major <= 8 {
+ // Windows SDK 8.x installs libraries in a folder whose names
+ // depend on the version of the OS you're targeting. By default
+ // choose the newest, which usually corresponds to the version of
+ // the OS you've installed the SDK on.
+ let extra = match windows_sdk_v8_subdir(sess) {
+ Some(e) => e,
+ None => return None,
+ };
+ ["winv6.3", "win8", "win7"].iter().map(|p| path.join(p)).find(|part| {
+ fs::metadata(part).is_ok()
+ }).map(|path| {
+ path.join("um").join(extra)
+ })
+ } else if let Some(mut ver) = ver {
+ // Windows SDK 10 splits the libraries into architectures the same
+ // as Windows SDK 8.x, except for the addition of arm64.
+ // Additionally, the SDK 10 is split by Windows 10 build numbers
+ // rather than the OS version like the SDK 8.x does.
+ let extra = match windows_sdk_v10_subdir(sess) {
+ Some(e) => e,
+ None => return None,
+ };
+ // To get the correct directory we need to get the Windows SDK 10
+ // version, and so far it looks like the "ProductVersion" of the SDK
+ // corresponds to the folder name that the libraries are located in
+ // except that the folder contains an extra ".0". For now just
+ // append a ".0" to find the directory we're in. This logic
+ // will likely want to be refactored one day.
+ ver.push(".0");
+ let p = path.join(ver).join("um").join(extra);
+ fs::metadata(&p).ok().map(|_| p)
+ } else { None }
+ }
+
+ fn windows_sdk_v8_subdir(sess: &Session) -> Option<&'static str> {
+ match &sess.target.target.arch[..] {
+ "x86" => Some("x86"),
+ "x86_64" => Some("x64"),
+ "arm" => Some("arm"),
+ _ => return None,
+ }
+ }
+
+ fn windows_sdk_v10_subdir(sess: &Session) -> Option<&'static str> {
+ match &sess.target.target.arch[..] {
+ "x86" => Some("x86"),
+ "x86_64" => Some("x64"),
+ "arm" => Some("arm"),
+ "aarch64" => Some("arm64"), // FIXME - Check if aarch64 is correct
+ _ => return None,
+ }
+ }
+
+ fn ucrt_install_dir(vs_install_dir: &Path) -> Option<(PathBuf, String)> {
+ let is_vs_14 = vs_install_dir.iter().filter_map(|p| p.to_str()).any(|s| {
+ s == "Microsoft Visual Studio 14.0"
+ });
+ if !is_vs_14 {
+ return None
+ }
+ let key = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots";
+ let sdk_dir = LOCAL_MACHINE.open(key.as_ref()).and_then(|p| {
+ p.query_str("KitsRoot10")
+ }).map(PathBuf::from);
+ let sdk_dir = match sdk_dir {
+ Ok(p) => p,
+ Err(..) => return None,
+ };
+ (move || -> io::Result<_> {
+ let mut max = None;
+ let mut max_s = None;
+ for entry in try!(fs::read_dir(&sdk_dir.join("Lib"))) {
+ let entry = try!(entry);
+ if let Ok(s) = entry.file_name().into_string() {
+ if let Ok(u) = s.replace(".", "").parse::<usize>() {
+ if Some(u) > max {
+ max = Some(u);
+ max_s = Some(s);
+ }
+ }
+ }
+ }
+ Ok(max_s.map(|m| (sdk_dir, m)))
+ })().ok().and_then(|x| x)
+ }
+}
+
+#[cfg(not(windows))]
+pub fn link_exe_cmd(_sess: &Session) -> Command {
+ Command::new("link.exe")
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::io;
+use std::ffi::{OsString, OsStr};
+use std::os::windows::prelude::*;
+use std::ops::RangeFrom;
+use libc::{DWORD, LPCWSTR, LONG, LPDWORD, LPBYTE, ERROR_SUCCESS};
+use libc::c_void;
+
+const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY;
+const KEY_WOW64_32KEY: REGSAM = 0x0200;
+const KEY_READ: REGSAM = (STANDARD_RIGTS_READ | KEY_QUERY_VALUE |
+ KEY_ENUMERATE_SUB_KEYS | KEY_NOTIFY) & !SYNCHRONIZE;
+const STANDARD_RIGTS_READ: REGSAM = READ_CONTROL;
+const READ_CONTROL: REGSAM = 0x00020000;
+const KEY_QUERY_VALUE: REGSAM = 0x0001;
+const KEY_ENUMERATE_SUB_KEYS: REGSAM = 0x0008;
+const KEY_NOTIFY: REGSAM = 0x0010;
+const SYNCHRONIZE: REGSAM = 0x00100000;
+const REG_SZ: DWORD = 1;
+const ERROR_NO_MORE_ITEMS: DWORD = 259;
+
+enum __HKEY__ {}
+pub type HKEY = *mut __HKEY__;
+pub type PHKEY = *mut HKEY;
+pub type REGSAM = DWORD;
+pub type LPWSTR = *mut u16;
+pub type PFILETIME = *mut c_void;
+
+#[link(name = "advapi32")]
+extern "system" {
+ fn RegOpenKeyExW(hKey: HKEY,
+ lpSubKey: LPCWSTR,
+ ulOptions: DWORD,
+ samDesired: REGSAM,
+ phkResult: PHKEY) -> LONG;
+ fn RegQueryValueExW(hKey: HKEY,
+ lpValueName: LPCWSTR,
+ lpReserved: LPDWORD,
+ lpType: LPDWORD,
+ lpData: LPBYTE,
+ lpcbData: LPDWORD) -> LONG;
+ fn RegEnumKeyExW(hKey: HKEY,
+ dwIndex: DWORD,
+ lpName: LPWSTR,
+ lpcName: LPDWORD,
+ lpReserved: LPDWORD,
+ lpClass: LPWSTR,
+ lpcClass: LPDWORD,
+ lpftLastWriteTime: PFILETIME) -> LONG;
+ fn RegCloseKey(hKey: HKEY) -> LONG;
+}
+
+pub struct RegistryKey(Repr);
+
+struct OwnedKey(HKEY);
+
+enum Repr {
+ Const(HKEY),
+ Owned(OwnedKey),
+}
+
+pub struct Iter<'a> {
+ idx: RangeFrom<DWORD>,
+ key: &'a RegistryKey,
+}
+
+unsafe impl Sync for RegistryKey {}
+unsafe impl Send for RegistryKey {}
+
+pub static LOCAL_MACHINE: RegistryKey = RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE));
+
+impl RegistryKey {
+ fn raw(&self) -> HKEY {
+ match self.0 {
+ Repr::Const(val) => val,
+ Repr::Owned(ref val) => val.0,
+ }
+ }
+
+ pub fn open(&self, key: &OsStr) -> io::Result<RegistryKey> {
+ let key = key.encode_wide().chain(Some(0)).collect::<Vec<_>>();
+ let mut ret = 0 as *mut _;
+ let err = unsafe {
+ RegOpenKeyExW(self.raw(), key.as_ptr(), 0,
+ KEY_READ | KEY_WOW64_32KEY, &mut ret)
+ };
+ if err == ERROR_SUCCESS {
+ Ok(RegistryKey(Repr::Owned(OwnedKey(ret))))
+ } else {
+ Err(io::Error::from_raw_os_error(err as i32))
+ }
+ }
+
+ pub fn iter(&self) -> Iter {
+ Iter { idx: 0.., key: self }
+ }
+
+ pub fn query_str(&self, name: &str) -> io::Result<OsString> {
+ let name: &OsStr = name.as_ref();
+ let name = name.encode_wide().chain(Some(0)).collect::<Vec<_>>();
+ let mut len = 0;
+ let mut kind = 0;
+ unsafe {
+ let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _,
+ &mut kind, 0 as *mut _, &mut len);
+ if err != ERROR_SUCCESS {
+ return Err(io::Error::from_raw_os_error(err as i32))
+ }
+ if kind != REG_SZ {
+ return Err(io::Error::new(io::ErrorKind::Other,
+ "registry key wasn't a string"))
+ }
+
+ // The length here is the length in bytes, but we're using wide
+ // characters so we need to be sure to halve it for the capacity
+ // passed in.
+ let mut v = Vec::with_capacity(len as usize / 2);
+ let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _,
+ 0 as *mut _, v.as_mut_ptr() as *mut _,
+ &mut len);
+ if err != ERROR_SUCCESS {
+ return Err(io::Error::from_raw_os_error(err as i32))
+ }
+ v.set_len(len as usize / 2);
+
+ // Some registry keys may have a terminating nul character, but
+ // we're not interested in that, so chop it off if it's there.
+ if v[v.len() - 1] == 0 {
+ v.pop();
+ }
+ Ok(OsString::from_wide(&v))
+ }
+ }
+}
+
+impl Drop for OwnedKey {
+ fn drop(&mut self) {
+ unsafe { RegCloseKey(self.0); }
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = io::Result<OsString>;
+
+ fn next(&mut self) -> Option<io::Result<OsString>> {
+ self.idx.next().and_then(|i| unsafe {
+ let mut v = Vec::with_capacity(256);
+ let mut len = v.capacity() as DWORD;
+ let ret = RegEnumKeyExW(self.key.raw(), i, v.as_mut_ptr(), &mut len,
+ 0 as *mut _, 0 as *mut _, 0 as *mut _,
+ 0 as *mut _);
+ if ret == ERROR_NO_MORE_ITEMS as LONG {
+ None
+ } else if ret != ERROR_SUCCESS {
+ Some(Err(io::Error::from_raw_os_error(ret as i32)))
+ } else {
+ v.set_len(len as usize);
+ Some(Ok(OsString::from_wide(&v)))
+ }
+ })
+ }
+}
// except according to those terms.
use back::lto;
-use back::link::{get_cc_prog, remove};
+use back::link::{get_linker, remove};
use session::config::{OutputFilenames, Passes, SomePasses, AllPasses};
use session::Session;
use session::config;
use std::fs;
use std::mem;
use std::path::Path;
-use std::process::{Command, Stdio};
use std::ptr;
use std::str;
use std::sync::{Arc, Mutex};
}
}
-fn create_target_machine(sess: &Session) -> TargetMachineRef {
+pub fn create_target_machine(sess: &Session) -> TargetMachineRef {
let reloc_model_arg = match sess.opts.cg.relocation_model {
Some(ref s) => &s[..],
None => &sess.target.target.options.relocation_model[..],
/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Clone)]
-struct ModuleConfig {
+pub struct ModuleConfig {
/// LLVM TargetMachine to use for codegen.
tm: TargetMachineRef,
/// Names of additional optimization passes to run.
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
- match config.opt_level {
- Some(opt_level) => {
- // Create the two optimizing pass managers. These mirror what clang
- // does, and are by populated by LLVM's default PassManagerBuilder.
- // Each manager has a different set of passes, but they also share
- // some common passes.
- let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
- let mpm = llvm::LLVMCreatePassManager();
-
- // If we're verifying or linting, add them to the function pass
- // manager.
- let addpass = |pass: &str| {
- let pass = CString::new(pass).unwrap();
- llvm::LLVMRustAddPass(fpm, pass.as_ptr())
- };
+ if config.opt_level.is_some() {
+ // Create the two optimizing pass managers. These mirror what clang
+ // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ // If we're verifying or linting, add them to the function pass
+ // manager.
+ let addpass = |pass: &str| {
+ let pass = CString::new(pass).unwrap();
+ llvm::LLVMRustAddPass(fpm, pass.as_ptr())
+ };
- if !config.no_verify { assert!(addpass("verify")); }
- if !config.no_prepopulate_passes {
- llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
- llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
- populate_llvm_passes(fpm, mpm, llmod, opt_level, &config);
- }
+ if !config.no_verify { assert!(addpass("verify")); }
+ if !config.no_prepopulate_passes {
+ llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
+ llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
+ with_llvm_pmb(llmod, &config, &mut |b| {
+ llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+ llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
+ })
+ }
- for pass in &config.passes {
- if !addpass(pass) {
- cgcx.handler.warn(&format!("unknown pass `{}`, ignoring",
- pass));
- }
+ for pass in &config.passes {
+ if !addpass(pass) {
+ cgcx.handler.warn(&format!("unknown pass `{}`, ignoring",
+ pass));
}
+ }
- for pass in &cgcx.plugin_passes {
- if !addpass(pass) {
- cgcx.handler.err(&format!("a plugin asked for LLVM pass \
- `{}` but LLVM does not \
- recognize it", pass));
- }
+ for pass in &cgcx.plugin_passes {
+ if !addpass(pass) {
+ cgcx.handler.err(&format!("a plugin asked for LLVM pass \
+ `{}` but LLVM does not \
+ recognize it", pass));
}
+ }
- cgcx.handler.abort_if_errors();
+ cgcx.handler.abort_if_errors();
- // Finally, run the actual optimization passes
- time(config.time_passes, "llvm function passes", (), |()|
- llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
- time(config.time_passes, "llvm module passes", (), |()|
- llvm::LLVMRunPassManager(mpm, llmod));
+ // Finally, run the actual optimization passes
+ time(config.time_passes, "llvm function passes", (), |()|
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
+ time(config.time_passes, "llvm module passes", (), |()|
+ llvm::LLVMRunPassManager(mpm, llmod));
- // Deallocate managers that we're now done with
- llvm::LLVMDisposePassManager(fpm);
- llvm::LLVMDisposePassManager(mpm);
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
- match cgcx.lto_ctxt {
- Some((sess, reachable)) if sess.lto() => {
- time(sess.time_passes(), "all lto passes", (), |()|
- lto::run(sess, llmod, tm, reachable));
+ match cgcx.lto_ctxt {
+ Some((sess, reachable)) if sess.lto() => {
+ time(sess.time_passes(), "all lto passes", (), |()|
+ lto::run(sess, llmod, tm, reachable, &config));
- if config.emit_lto_bc {
- let name = format!("{}.lto.bc", name_extra);
- let out = output_names.with_extension(&name);
- let out = path2cstr(&out);
- llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
- }
- },
- _ => {},
- }
- },
- None => {},
+ if config.emit_lto_bc {
+ let name = format!("{}.lto.bc", name_extra);
+ let out = output_names.with_extension(&name);
+ let out = path2cstr(&out);
+ llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+ }
+ },
+ _ => {},
+ }
}
// A codegen-specific pass manager is used to generate object
// Sanity check
assert!(trans.modules.len() == sess.opts.cg.codegen_units);
- unsafe {
- configure_llvm(sess);
- }
-
let tm = create_target_machine(sess);
// Figure out what we actually need to build.
let needs_crate_bitcode =
sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
sess.opts.output_types.contains(&config::OutputTypeExe);
+ let needs_crate_object =
+ sess.opts.output_types.contains(&config::OutputTypeExe);
if needs_crate_bitcode {
modules_config.emit_bc = true;
}
if sess.opts.cg.codegen_units == 1 {
// 1) Only one codegen unit. In this case it's no difficulty
// to copy `foo.0.x` to `foo.x`.
- copy_gracefully(&crate_output.with_extension(ext), &crate_output.path(output_type));
+ copy_gracefully(&crate_output.with_extension(ext),
+ &crate_output.path(output_type));
if !sess.opts.cg.save_temps && !keep_numbered {
// The user just wants `foo.x`, not `foo.0.x`.
remove(sess, &crate_output.with_extension(ext));
}
};
- let link_obj = |output_path: &Path| {
- // Running `ld -r` on a single input is kind of pointless.
- if sess.opts.cg.codegen_units == 1 {
- copy_gracefully(&crate_output.with_extension("0.o"), output_path);
- // Leave the .0.o file around, to mimic the behavior of the normal
- // code path.
- return;
- }
-
- // Some builds of MinGW GCC will pass --force-exe-suffix to ld, which
- // will automatically add a .exe extension if the extension is not
- // already .exe or .dll. To ensure consistent behavior on Windows, we
- // add the .exe suffix explicitly and then rename the output file to
- // the desired path. This will give the correct behavior whether or
- // not GCC adds --force-exe-suffix.
- let windows_output_path =
- if sess.target.target.options.is_like_windows {
- Some(output_path.with_extension("o.exe"))
- } else {
- None
- };
-
- let pname = get_cc_prog(sess);
- let mut cmd = Command::new(&pname[..]);
-
- cmd.args(&sess.target.target.options.pre_link_args);
- cmd.arg("-nostdlib");
-
- for index in 0..trans.modules.len() {
- cmd.arg(&crate_output.with_extension(&format!("{}.o", index)));
- }
-
- cmd.arg("-r").arg("-o")
- .arg(windows_output_path.as_ref().map(|s| &**s).unwrap_or(output_path));
-
- cmd.args(&sess.target.target.options.post_link_args);
-
- if sess.opts.debugging_opts.print_link_args {
- println!("{:?}", &cmd);
- }
-
- cmd.stdin(Stdio::null());
- match cmd.status() {
- Ok(status) => {
- if !status.success() {
- sess.err(&format!("linking of {} with `{:?}` failed",
- output_path.display(), cmd));
- sess.abort_if_errors();
- }
- },
- Err(e) => {
- sess.err(&format!("could not exec the linker `{}`: {}",
- pname,
- e));
- sess.abort_if_errors();
- },
- }
-
- match windows_output_path {
- Some(ref windows_path) => {
- fs::rename(windows_path, output_path).unwrap();
- },
- None => {
- // The file is already named according to `output_path`.
- }
- }
- };
-
// Flag to indicate whether the user explicitly requested bitcode.
// Otherwise, we produced it only as a temporary output, and will need
// to get rid of it.
let mut user_wants_bitcode = false;
+ let mut user_wants_objects = false;
for output_type in output_types {
match *output_type {
config::OutputTypeBitcode => {
copy_if_one_unit("0.s", config::OutputTypeAssembly, false);
}
config::OutputTypeObject => {
- link_obj(&crate_output.path(config::OutputTypeObject));
- }
- config::OutputTypeExe => {
- // If config::OutputTypeObject is already in the list, then
- // `crate.o` will be handled by the config::OutputTypeObject case.
- // Otherwise, we need to create the temporary object so we
- // can run the linker.
- if !sess.opts.output_types.contains(&config::OutputTypeObject) {
- link_obj(&crate_output.temp_path(config::OutputTypeObject));
- }
+ user_wants_objects = true;
+ copy_if_one_unit("0.o", config::OutputTypeObject, true);
}
+ config::OutputTypeExe |
config::OutputTypeDepInfo => {}
}
}
let keep_numbered_bitcode = needs_crate_bitcode ||
(user_wants_bitcode && sess.opts.cg.codegen_units > 1);
+ let keep_numbered_objects = needs_crate_object ||
+ (user_wants_objects && sess.opts.cg.codegen_units > 1);
+
for i in 0..trans.modules.len() {
- if modules_config.emit_obj {
+ if modules_config.emit_obj && !keep_numbered_objects {
let ext = format!("{}.o", i);
- remove(sess, &crate_output.with_extension(&ext[..]));
+ remove(sess, &crate_output.with_extension(&ext));
}
if modules_config.emit_bc && !keep_numbered_bitcode {
let ext = format!("{}.bc", i);
- remove(sess, &crate_output.with_extension(&ext[..]));
+ remove(sess, &crate_output.with_extension(&ext));
}
}
}
pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
- let pname = get_cc_prog(sess);
- let mut cmd = Command::new(&pname[..]);
+ let (pname, mut cmd) = get_linker(sess);
cmd.arg("-c").arg("-o").arg(&outputs.path(config::OutputTypeObject))
.arg(&outputs.temp_path(config::OutputTypeAssembly));
}
},
Err(e) => {
- sess.err(&format!("could not exec the linker `{}`: {}",
- pname,
- e));
+ sess.err(&format!("could not exec the linker `{}`: {}", pname, e));
sess.abort_if_errors();
}
}
}
-unsafe fn configure_llvm(sess: &Session) {
- use std::sync::Once;
- static INIT: Once = Once::new();
-
+pub unsafe fn configure_llvm(sess: &Session) {
let mut llvm_c_strs = Vec::new();
let mut llvm_args = Vec::new();
}
}
- INIT.call_once(|| {
- llvm::LLVMInitializePasses();
-
- // Only initialize the platforms supported by Rust here, because
- // using --llvm-root will have multiple platforms that rustllvm
- // doesn't actually link to and it's pointless to put target info
- // into the registry that Rust cannot generate machine code for.
- llvm::LLVMInitializeX86TargetInfo();
- llvm::LLVMInitializeX86Target();
- llvm::LLVMInitializeX86TargetMC();
- llvm::LLVMInitializeX86AsmPrinter();
- llvm::LLVMInitializeX86AsmParser();
-
- llvm::LLVMInitializeARMTargetInfo();
- llvm::LLVMInitializeARMTarget();
- llvm::LLVMInitializeARMTargetMC();
- llvm::LLVMInitializeARMAsmPrinter();
- llvm::LLVMInitializeARMAsmParser();
-
- llvm::LLVMInitializeAArch64TargetInfo();
- llvm::LLVMInitializeAArch64Target();
- llvm::LLVMInitializeAArch64TargetMC();
- llvm::LLVMInitializeAArch64AsmPrinter();
- llvm::LLVMInitializeAArch64AsmParser();
-
- llvm::LLVMInitializeMipsTargetInfo();
- llvm::LLVMInitializeMipsTarget();
- llvm::LLVMInitializeMipsTargetMC();
- llvm::LLVMInitializeMipsAsmPrinter();
- llvm::LLVMInitializeMipsAsmParser();
-
- llvm::LLVMInitializePowerPCTargetInfo();
- llvm::LLVMInitializePowerPCTarget();
- llvm::LLVMInitializePowerPCTargetMC();
- llvm::LLVMInitializePowerPCAsmPrinter();
- llvm::LLVMInitializePowerPCAsmParser();
-
- llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
- llvm_args.as_ptr());
- });
+ llvm::LLVMInitializePasses();
+
+ // Only initialize the platforms supported by Rust here, because
+ // using --llvm-root will have multiple platforms that rustllvm
+ // doesn't actually link to and it's pointless to put target info
+ // into the registry that Rust cannot generate machine code for.
+ llvm::LLVMInitializeX86TargetInfo();
+ llvm::LLVMInitializeX86Target();
+ llvm::LLVMInitializeX86TargetMC();
+ llvm::LLVMInitializeX86AsmPrinter();
+ llvm::LLVMInitializeX86AsmParser();
+
+ llvm::LLVMInitializeARMTargetInfo();
+ llvm::LLVMInitializeARMTarget();
+ llvm::LLVMInitializeARMTargetMC();
+ llvm::LLVMInitializeARMAsmPrinter();
+ llvm::LLVMInitializeARMAsmParser();
+
+ llvm::LLVMInitializeAArch64TargetInfo();
+ llvm::LLVMInitializeAArch64Target();
+ llvm::LLVMInitializeAArch64TargetMC();
+ llvm::LLVMInitializeAArch64AsmPrinter();
+ llvm::LLVMInitializeAArch64AsmParser();
+
+ llvm::LLVMInitializeMipsTargetInfo();
+ llvm::LLVMInitializeMipsTarget();
+ llvm::LLVMInitializeMipsTargetMC();
+ llvm::LLVMInitializeMipsAsmPrinter();
+ llvm::LLVMInitializeMipsAsmParser();
+
+ llvm::LLVMInitializePowerPCTargetInfo();
+ llvm::LLVMInitializePowerPCTarget();
+ llvm::LLVMInitializePowerPCTargetMC();
+ llvm::LLVMInitializePowerPCAsmPrinter();
+ llvm::LLVMInitializePowerPCAsmParser();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
+ llvm_args.as_ptr());
}
-unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
- mpm: llvm::PassManagerRef,
- llmod: ModuleRef,
- opt: llvm::CodeGenOptLevel,
- config: &ModuleConfig) {
+pub unsafe fn with_llvm_pmb(llmod: ModuleRef,
+ config: &ModuleConfig,
+ f: &mut FnMut(llvm::PassManagerBuilderRef)) {
// Create the PassManagerBuilder for LLVM. We configure it with
// reasonable defaults and prepare it to actually populate the pass
// manager.
let builder = llvm::LLVMPassManagerBuilderCreate();
+ let opt = config.opt_level.unwrap_or(llvm::CodeGenLevelNone);
llvm::LLVMRustConfigurePassManagerBuilder(builder, opt,
config.merge_functions,
}
}
- // Use the builder to populate the function/module pass managers.
- llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
- llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
+ f(builder);
llvm::LLVMPassManagerBuilderDispose(builder);
}
extern crate libc;
extern crate rustc;
extern crate rustc_back;
-extern crate serialize;
extern crate rustc_llvm as llvm;
+extern crate serialize;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
pub mod back {
pub use rustc_back::abi;
- pub use rustc_back::archive;
- pub use rustc_back::arm;
- pub use rustc_back::mips;
- pub use rustc_back::mipsel;
pub use rustc_back::rpath;
pub use rustc_back::svh;
- pub use rustc_back::target_strs;
- pub use rustc_back::x86;
- pub use rustc_back::x86_64;
+ pub mod archive;
pub mod linker;
pub mod link;
pub mod lto;
pub mod write;
-
+ pub mod msvc;
}
pub mod trans;
//! DumpCsvVisitor walks the AST and processes it.
-use super::{escape, generated_code, recorder, SaveContext, PathCollector};
+use super::{escape, generated_code, recorder, SaveContext, PathCollector, Data};
use session::Session;
use middle::def;
use middle::ty::{self, Ty};
-use rustc::ast_map::NodeItem;
-use std::cell::Cell;
use std::fs::File;
use std::path::Path;
-use syntax::ast_util;
use syntax::ast::{self, NodeId, DefId};
use syntax::codemap::*;
-use syntax::parse::token::{self, get_ident, keywords};
+use syntax::parse::token::{self, keywords};
use syntax::owned_slice::OwnedSlice;
use syntax::visit::{self, Visitor};
use syntax::print::pprust::{path_to_string, ty_to_string};
pub fn new(tcx: &'l ty::ctxt<'tcx>,
analysis: &'l ty::CrateAnalysis,
output_file: Box<File>) -> DumpCsvVisitor<'l, 'tcx> {
- let span_utils = SpanUtils {
- sess: &tcx.sess,
- err_count: Cell::new(0)
- };
+ let span_utils = SpanUtils::new(&tcx.sess);
DumpCsvVisitor {
sess: &tcx.sess,
tcx: tcx,
- save_ctxt: SaveContext::new(tcx, span_utils.clone()),
+ save_ctxt: SaveContext::from_span_utils(tcx, span_utils.clone()),
analysis: analysis,
span: span_utils.clone(),
fmt: FmtStrs::new(box Recorder {
}
}
- fn process_method(&mut self, sig: &ast::MethodSig,
+ fn process_method(&mut self,
+ sig: &ast::MethodSig,
body: Option<&ast::Block>,
- id: ast::NodeId, name: ast::Name,
+ id: ast::NodeId,
+ name: ast::Name,
span: Span) {
if generated_code(span) {
return;
}
- debug!("process_method: {}:{}", id, token::get_name(name));
-
- let mut scope_id;
- // The qualname for a method is the trait name or name of the struct in an impl in
- // which the method is declared in, followed by the method's name.
- let qualname = match ty::impl_of_method(self.tcx, ast_util::local_def(id)) {
- Some(impl_id) => match self.tcx.map.get(impl_id.node) {
- NodeItem(item) => {
- scope_id = item.id;
- match item.node {
- ast::ItemImpl(_, _, _, _, ref ty, _) => {
- let mut result = String::from("<");
- result.push_str(&ty_to_string(&**ty));
-
- match ty::trait_of_item(self.tcx, ast_util::local_def(id)) {
- Some(def_id) => {
- result.push_str(" as ");
- result.push_str(
- &ty::item_path_str(self.tcx, def_id));
- },
- None => {}
- }
- result.push_str(">");
- result
- }
- _ => {
- self.sess.span_bug(span,
- &format!("Container {} for method {} not an impl?",
- impl_id.node, id));
- },
- }
- },
- _ => {
- self.sess.span_bug(span,
- &format!("Container {} for method {} is not a node item {:?}",
- impl_id.node, id, self.tcx.map.get(impl_id.node)));
- },
- },
- None => match ty::trait_of_item(self.tcx, ast_util::local_def(id)) {
- Some(def_id) => {
- scope_id = def_id.node;
- match self.tcx.map.get(def_id.node) {
- NodeItem(_) => {
- format!("::{}", ty::item_path_str(self.tcx, def_id))
- }
- _ => {
- self.sess.span_bug(span,
- &format!("Could not find container {} for method {}",
- def_id.node, id));
- }
- }
- },
- None => {
- self.sess.span_bug(span,
- &format!("Could not find container for method {}", id));
- },
- },
- };
+ debug!("process_method: {}:{}", id, name);
- let qualname = &format!("{}::{}", qualname, &token::get_name(name));
+ let method_data = self.save_ctxt.get_method_data(id, name, span);
- // record the decl for this def (if it has one)
- let decl_id = ty::trait_item_of_item(self.tcx, ast_util::local_def(id))
- .and_then(|new_id| {
- let def_id = new_id.def_id();
- if def_id.node != 0 && def_id != ast_util::local_def(id) {
- Some(def_id)
- } else {
- None
- }
- });
-
- let sub_span = self.span.sub_span_after_keyword(span, keywords::Fn);
if body.is_some() {
self.fmt.method_str(span,
- sub_span,
- id,
- qualname,
- decl_id,
- scope_id);
- self.process_formals(&sig.decl.inputs, qualname);
+ Some(method_data.span),
+ method_data.id,
+ &method_data.qualname,
+ method_data.declaration,
+ method_data.scope);
+ self.process_formals(&sig.decl.inputs, &method_data.qualname);
} else {
self.fmt.method_decl_str(span,
- sub_span,
- id,
- qualname,
- scope_id);
+ Some(method_data.span),
+ method_data.id,
+ &method_data.qualname,
+ method_data.scope);
}
// walk arg and return types
self.process_generic_params(&sig.generics,
span,
- qualname,
+ &method_data.qualname,
id);
}
parent_id: NodeId) {
let field_data = self.save_ctxt.get_field_data(field, parent_id);
if let Some(field_data) = field_data {
- down_cast_data!(field_data, VariableData, self, field.span);
self.fmt.field_str(field.span,
Some(field_data.span),
field_data.id,
self.fmt.static_str(span,
sub_span,
id,
- &get_ident((*ident).clone()),
+ &ident.name.as_str(),
&qualname,
&self.span.snippet(expr.span),
&ty_to_string(&*typ),
let ctor_id = match def.ctor_id {
Some(node_id) => node_id,
- None => -1,
+ None => ast::DUMMY_NODE_ID,
};
let val = self.span.snippet(item.span);
let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Struct);
&enum_data.value);
for variant in &enum_definition.variants {
- let name = &get_ident(variant.node.name);
+ let name = &variant.node.name.name.as_str();
let mut qualname = enum_data.qualname.clone();
qualname.push_str("::");
qualname.push_str(name);
ast::StructVariantKind(ref struct_def) => {
let ctor_id = match struct_def.ctor_id {
Some(node_id) => node_id,
- None => -1,
+ None => ast::DUMMY_NODE_ID,
};
self.fmt.struct_variant_str(variant.span,
self.span.span_for_first_ident(variant.span),
fn process_path(&mut self,
id: NodeId,
- span: Span,
path: &ast::Path,
ref_kind: Option<recorder::Row>) {
- if generated_code(span) {
- return
+ if generated_code(path.span) {
+ return;
}
- let def_map = self.tcx.def_map.borrow();
- if !def_map.contains_key(&id) {
- self.sess.span_bug(span,
- &format!("def_map has no key for {} in visit_expr", id));
- }
- let def = def_map.get(&id).unwrap().full_def();
- let sub_span = self.span.span_for_last_ident(span);
- match def {
- def::DefUpvar(..) |
- def::DefLocal(..) |
- def::DefStatic(..) |
- def::DefConst(..) |
- def::DefAssociatedConst(..) |
- def::DefVariant(..) => self.fmt.ref_str(ref_kind.unwrap_or(recorder::VarRef),
- span,
- sub_span,
- def.def_id(),
- self.cur_scope),
- def::DefStruct(def_id) => self.fmt.ref_str(recorder::TypeRef,
- span,
- sub_span,
- def_id,
- self.cur_scope),
- def::DefTy(def_id, _) => self.fmt.ref_str(recorder::TypeRef,
- span,
- sub_span,
- def_id,
- self.cur_scope),
- def::DefMethod(declid, provenence) => {
- let sub_span = self.span.sub_span_for_meth_name(span);
- let defid = if declid.krate == ast::LOCAL_CRATE {
- let ti = ty::impl_or_trait_item(self.tcx, declid);
- match provenence {
- def::FromTrait(def_id) => {
- Some(ty::trait_items(self.tcx, def_id)
- .iter()
- .find(|mr| {
- mr.name() == ti.name()
- })
- .unwrap()
- .def_id())
- }
- def::FromImpl(def_id) => {
- let impl_items = self.tcx.impl_items.borrow();
- Some(impl_items.get(&def_id)
- .unwrap()
- .iter()
- .find(|mr| {
- ty::impl_or_trait_item(
- self.tcx,
- mr.def_id()
- ).name() == ti.name()
- })
- .unwrap()
- .def_id())
- }
- }
- } else {
- None
- };
- self.fmt.meth_call_str(span,
- sub_span,
- defid,
- Some(declid),
- self.cur_scope);
- },
- def::DefFn(def_id, _) => {
- self.fmt.fn_call_str(span,
- sub_span,
- def_id,
- self.cur_scope)
+ let path_data = self.save_ctxt.get_path_data(id, path);
+ let path_data = match path_data {
+ Some(pd) => pd,
+ None => {
+ self.tcx.sess.span_bug(path.span,
+ &format!("Unexpected def kind while looking \
+ up path in `{}`",
+ self.span.snippet(path.span)))
+ }
+ };
+ match path_data {
+ Data::VariableRefData(ref vrd) => {
+ self.fmt.ref_str(ref_kind.unwrap_or(recorder::VarRef),
+ path.span,
+ Some(vrd.span),
+ vrd.ref_id,
+ vrd.scope);
+
+ }
+ Data::TypeRefData(ref trd) => {
+ self.fmt.ref_str(recorder::TypeRef,
+ path.span,
+ Some(trd.span),
+ trd.ref_id,
+ trd.scope);
+ }
+ Data::MethodCallData(ref mcd) => {
+ self.fmt.meth_call_str(path.span,
+ Some(mcd.span),
+ mcd.ref_id,
+ mcd.decl_id,
+ mcd.scope);
+ }
+ Data::FunctionCallData(fcd) => {
+ self.fmt.fn_call_str(path.span,
+ Some(fcd.span),
+ fcd.ref_id,
+ fcd.scope);
+ }
+ _ => {
+ self.sess.span_bug(path.span,
+ &format!("Unexpected data: {:?}", path_data));
}
- _ => self.sess.span_bug(span,
- &format!("Unexpected def kind while looking \
- up path in `{}`: `{:?}`",
- self.span.snippet(span),
- def)),
}
- // modules or types in the path prefix
+
+ // Modules or types in the path prefix.
+ let def_map = self.tcx.def_map.borrow();
+ let def = def_map.get(&id).unwrap().full_def();
match def {
- def::DefMethod(did, _) => {
- let ti = ty::impl_or_trait_item(self.tcx, did);
+ def::DefMethod(did) => {
+ let ti = self.tcx.impl_or_trait_item(did);
if let ty::MethodTraitItem(m) = ti {
if m.explicit_self == ty::StaticExplicitSelfCategory {
self.write_sub_path_trait_truncated(path);
struct_lit_data.ref_id,
struct_lit_data.scope);
let struct_def = struct_lit_data.ref_id;
+ let scope = self.save_ctxt.enclosing_scope(ex.id);
for field in fields {
if generated_code(field.ident.span) {
let field_data = self.save_ctxt.get_field_ref_data(field,
struct_def,
- self.cur_scope);
+ scope);
self.fmt.ref_str(recorder::VarRef,
field.ident.span,
Some(field_data.span),
fn process_method_call(&mut self,
ex: &ast::Expr,
args: &Vec<P<ast::Expr>>) {
- let method_map = self.tcx.method_map.borrow();
- let method_callee = method_map.get(&ty::MethodCall::expr(ex.id)).unwrap();
- let (def_id, decl_id) = match method_callee.origin {
- ty::MethodStatic(def_id) |
- ty::MethodStaticClosure(def_id) => {
- // method invoked on an object with a concrete type (not a static method)
- let decl_id =
- match ty::trait_item_of_item(self.tcx, def_id) {
- None => None,
- Some(decl_id) => Some(decl_id.def_id()),
- };
-
- // This incantation is required if the method referenced is a
- // trait's default implementation.
- let def_id = match ty::impl_or_trait_item(self.tcx, def_id) {
- ty::MethodTraitItem(method) => {
- method.provided_source.unwrap_or(def_id)
- }
- _ => self.sess
- .span_bug(ex.span,
- "save::process_method_call: non-method \
- DefId in MethodStatic or MethodStaticClosure"),
- };
- (Some(def_id), decl_id)
- }
- ty::MethodTypeParam(ref mp) => {
- // method invoked on a type parameter
- let trait_item = ty::trait_item(self.tcx,
- mp.trait_ref.def_id,
- mp.method_num);
- (None, Some(trait_item.def_id()))
- }
- ty::MethodTraitObject(ref mo) => {
- // method invoked on a trait instance
- let trait_item = ty::trait_item(self.tcx,
- mo.trait_ref.def_id,
- mo.method_num);
- (None, Some(trait_item.def_id()))
- }
- };
- let sub_span = self.span.sub_span_for_meth_name(ex.span);
- self.fmt.meth_call_str(ex.span,
- sub_span,
- def_id,
- decl_id,
- self.cur_scope);
+ if let Some(call_data) = self.save_ctxt.get_expr_data(ex) {
+ down_cast_data!(call_data, MethodCallData, self, ex.span);
+ self.fmt.meth_call_str(ex.span,
+ Some(call_data.span),
+ call_data.ref_id,
+ call_data.decl_id,
+ call_data.scope);
+ }
// walk receiver and args
visit::walk_exprs(self, &args);
fn process_pat(&mut self, p:&ast::Pat) {
if generated_code(p.span) {
- return
+ return;
}
match p.node {
def::DefConst(..) | def::DefAssociatedConst(..) => None,
def::DefVariant(_, variant_id, _) => Some(variant_id),
_ => {
- match ty::ty_to_def_id(ty::node_id_to_type(self.tcx, p.id)) {
+ match self.tcx.node_id_to_type(p.id).ty_to_def_id() {
None => {
self.sess.span_bug(p.span,
&format!("Could not find struct_def for `{}`",
};
if let Some(struct_def) = struct_def {
- let struct_fields = ty::lookup_struct_fields(self.tcx, struct_def);
+ let struct_fields = self.tcx.lookup_struct_fields(struct_def);
for &Spanned { node: ref field, span } in fields {
+ if generated_code(span) {
+ continue;
+ }
+
let sub_span = self.span.span_for_first_ident(span);
for f in &struct_fields {
if f.name == field.ident.name {
break;
}
}
- self.visit_pat(&*field.pat);
+ self.visit_pat(&field.pat);
}
}
}
sub_span,
item.id,
mod_id,
- &get_ident(ident),
+ &ident.name.as_str(),
self.cur_scope);
self.write_sub_paths_truncated(path, true);
}
if !name_string.is_empty() {
name_string.push_str(", ");
}
- name_string.push_str(n.as_str());
+ name_string.push_str(&n.as_str());
}
}
}
}
ast::ItemExternCrate(ref s) => {
- let name = get_ident(item.ident);
- let name = &name;
let location = match *s {
Some(s) => s.to_string(),
- None => name.to_string(),
+ None => item.ident.to_string(),
};
let alias_span = self.span.span_for_last_ident(item.span);
let cnum = match self.sess.cstore.find_extern_mod_stmt_cnum(item.id) {
alias_span,
item.id,
cnum,
- name,
+ &item.ident.name.as_str(),
&location,
self.cur_scope);
}
trait_item.span, &*ty, &*expr);
}
ast::MethodTraitItem(ref sig, ref body) => {
- self.process_method(sig, body.as_ref().map(|x| &**x),
- trait_item.id, trait_item.ident.name, trait_item.span);
+ self.process_method(sig,
+ body.as_ref().map(|x| &**x),
+ trait_item.id,
+ trait_item.ident.name,
+ trait_item.span);
}
ast::ConstTraitItem(_, None) |
ast::TypeTraitItem(..) => {}
impl_item.span, &ty, &expr);
}
ast::MethodImplItem(ref sig, ref body) => {
- self.process_method(sig, Some(body), impl_item.id,
- impl_item.ident.name, impl_item.span);
+ self.process_method(sig,
+ Some(body),
+ impl_item.id,
+ impl_item.ident.name,
+ impl_item.span);
}
ast::TypeImplItem(_) |
ast::MacImplItem(_) => {}
visit::walk_expr(self, ex);
}
ast::ExprPath(_, ref path) => {
- self.process_path(ex.id, path.span, path, None);
+ self.process_path(ex.id, path, None);
visit::walk_expr(self, ex);
}
ast::ExprStruct(ref path, ref fields, ref base) =>
self.visit_expr(&**sub_ex);
- let ty = &ty::expr_ty_adjusted(self.tcx, &**sub_ex).sty;
+ let ty = &self.tcx.expr_ty_adjusted(&**sub_ex).sty;
match *ty {
ty::TyStruct(def_id, _) => {
- let fields = ty::lookup_struct_fields(self.tcx, def_id);
+ let fields = self.tcx.lookup_struct_fields(def_id);
for (i, f) in fields.iter().enumerate() {
if i == idx.node {
let sub_span = self.span.sub_span_after_token(ex.span, token::Dot);
// This is to get around borrow checking, because we need mut self to call process_path.
let mut paths_to_process = vec![];
+
// process collected paths
for &(id, ref p, immut, ref_kind) in &collector.collected_paths {
let def_map = self.tcx.def_map.borrow();
def)
}
}
+
for &(id, ref path, ref_kind) in &paths_to_process {
- self.process_path(id, path.span, path, ref_kind);
+ self.process_path(id, path, ref_kind);
}
visit::walk_expr_opt(self, &arm.guard);
- self.visit_expr(&*arm.body);
+ self.visit_expr(&arm.body);
}
fn visit_stmt(&mut self, s: &ast::Stmt) {
use std::fs::{self, File};
use std::path::{Path, PathBuf};
+use rustc::ast_map::NodeItem;
+
use syntax::{attr};
use syntax::ast::{self, NodeId, DefId};
use syntax::ast_util;
use syntax::codemap::*;
-use syntax::parse::token::{self, get_ident, keywords};
+use syntax::parse::token::{self, keywords};
use syntax::visit::{self, Visitor};
use syntax::print::pprust::ty_to_string;
VariableRefData(VariableRefData),
/// Data for a reference to a type or trait.
TypeRefData(TypeRefData),
+ /// Data for a reference to a module.
+ ModRefData(ModRefData),
+ /// Data about a function call.
+ FunctionCallData(FunctionCallData),
+ /// Data about a method call.
+ MethodCallData(MethodCallData),
}
/// Data for all kinds of functions and methods.
}
/// Data for the use of some item (e.g., the use of a local variable, which
-/// will refere to that variables declaration (by ref_id)).
+/// will refer to that variable's declaration (by ref_id).
#[derive(Debug)]
pub struct VariableRefData {
pub name: String,
pub ref_id: DefId,
}
+/// Data for a reference to a module.
+#[derive(Debug)]
+pub struct ModRefData {
+ pub span: Span,
+ pub scope: NodeId,
+ pub ref_id: DefId,
+}
+
+/// Data about a function call.
+#[derive(Debug)]
+pub struct FunctionCallData {
+ pub span: Span,
+ pub scope: NodeId,
+ pub ref_id: DefId,
+}
+
+/// Data about a method call.
+#[derive(Debug)]
+pub struct MethodCallData {
+ pub span: Span,
+ pub scope: NodeId,
+ pub ref_id: Option<DefId>,
+ pub decl_id: Option<DefId>,
+}
+
+
impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> {
- pub fn new(tcx: &'l ty::ctxt<'tcx>,
- span_utils: SpanUtils<'l>)
- -> SaveContext<'l, 'tcx> {
+ pub fn new(tcx: &'l ty::ctxt<'tcx>) -> SaveContext <'l, 'tcx> {
+ let span_utils = SpanUtils::new(&tcx.sess);
+ SaveContext::from_span_utils(tcx, span_utils)
+ }
+
+ pub fn from_span_utils(tcx: &'l ty::ctxt<'tcx>,
+ span_utils: SpanUtils<'l>)
+ -> SaveContext<'l, 'tcx> {
SaveContext {
tcx: tcx,
span_utils: span_utils,
qualname: qualname,
declaration: None,
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(item.id),
+ scope: self.enclosing_scope(item.id),
})
}
ast::ItemStatic(ref typ, mt, ref expr) => {
Data::VariableData(VariableData {
id: item.id,
- name: get_ident(item.ident).to_string(),
+ name: item.ident.to_string(),
qualname: qualname,
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(item.id),
+ scope: self.enclosing_scope(item.id),
value: value,
type_value: ty_to_string(&typ),
})
Data::VariableData(VariableData {
id: item.id,
- name: get_ident(item.ident).to_string(),
+ name: item.ident.to_string(),
qualname: qualname,
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(item.id),
+ scope: self.enclosing_scope(item.id),
value: self.span_utils.snippet(expr.span),
type_value: ty_to_string(&typ),
})
Data::ModData(ModData {
id: item.id,
- name: get_ident(item.ident).to_string(),
+ name: item.ident.to_string(),
qualname: qualname,
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(item.id),
+ scope: self.enclosing_scope(item.id),
filename: filename,
})
},
value: val,
span: sub_span.unwrap(),
qualname: enum_name,
- scope: self.tcx.map.get_parent(item.id),
+ scope: self.enclosing_scope(item.id),
})
},
ast::ItemImpl(_, _, _, ref trait_ref, ref typ, _) => {
let mut type_data = None;
let sub_span;
- let parent = self.tcx.map.get_parent(item.id);
+ let parent = self.enclosing_scope(item.id);
match typ.node {
// Common case impl for a struct or something basic.
}
}
- // FIXME: we ought to be able to get the parent id ourselves, but we can't
- // for now.
- pub fn get_field_data(&self, field: &ast::StructField, parent: NodeId) -> Option<Data> {
+ pub fn get_field_data(&self, field: &ast::StructField, scope: NodeId) -> Option<VariableData> {
match field.node.kind {
ast::NamedField(ident, _) => {
- let name = get_ident(ident);
let qualname = format!("::{}::{}",
- self.tcx.map.path_to_string(parent),
- name);
+ self.tcx.map.path_to_string(scope),
+ ident);
let typ = self.tcx.node_types().get(&field.node.id).unwrap()
.to_string();
let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon);
- Some(Data::VariableData(VariableData {
+ Some(VariableData {
id: field.node.id,
- name: get_ident(ident).to_string(),
+ name: ident.to_string(),
qualname: qualname,
span: sub_span.unwrap(),
- scope: parent,
+ scope: scope,
value: "".to_owned(),
type_value: typ,
- }))
+ })
},
_ => None,
}
}
- // FIXME: we ought to be able to get the parent id ourselves, but we can't
- // for now.
+ // FIXME would be nice to take a MethodItem here, but the ast provides both
+ // trait and impl flavours, so the caller must do the disassembly.
+ pub fn get_method_data(&self,
+ id: ast::NodeId,
+ name: ast::Name,
+ span: Span) -> FunctionData {
+ // The qualname for a method is the trait name or name of the struct in an impl in
+ // which the method is declared in, followed by the method's name.
+ let qualname = match self.tcx.impl_of_method(ast_util::local_def(id)) {
+ Some(impl_id) => match self.tcx.map.get(impl_id.node) {
+ NodeItem(item) => {
+ match item.node {
+ ast::ItemImpl(_, _, _, _, ref ty, _) => {
+ let mut result = String::from("<");
+ result.push_str(&ty_to_string(&**ty));
+
+ match self.tcx.trait_of_item(ast_util::local_def(id)) {
+ Some(def_id) => {
+ result.push_str(" as ");
+ result.push_str(
+ &self.tcx.item_path_str(def_id));
+ },
+ None => {}
+ }
+ result.push_str(">");
+ result
+ }
+ _ => {
+ self.tcx.sess.span_bug(span,
+ &format!("Container {} for method {} not an impl?",
+ impl_id.node, id));
+ },
+ }
+ },
+ _ => {
+ self.tcx.sess.span_bug(span,
+ &format!("Container {} for method {} is not a node item {:?}",
+ impl_id.node, id, self.tcx.map.get(impl_id.node)));
+ },
+ },
+ None => match self.tcx.trait_of_item(ast_util::local_def(id)) {
+ Some(def_id) => {
+ match self.tcx.map.get(def_id.node) {
+ NodeItem(_) => {
+ format!("::{}", self.tcx.item_path_str(def_id))
+ }
+ _ => {
+ self.tcx.sess.span_bug(span,
+ &format!("Could not find container {} for method {}",
+ def_id.node, id));
+ }
+ }
+ },
+ None => {
+ self.tcx.sess.span_bug(span,
+ &format!("Could not find container for method {}", id));
+ },
+ },
+ };
+
+ let qualname = format!("{}::{}", qualname, name);
+
+ let decl_id = self.tcx.trait_item_of_item(ast_util::local_def(id))
+ .and_then(|new_id| {
+ let def_id = new_id.def_id();
+ if def_id.node != 0 && def_id != ast_util::local_def(id) {
+ Some(def_id)
+ } else {
+ None
+ }
+ });
+
+ let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn);
+
+ FunctionData {
+ id: id,
+ name: name.to_string(),
+ qualname: qualname,
+ declaration: decl_id,
+ span: sub_span.unwrap(),
+ scope: self.enclosing_scope(id),
+ }
+ }
+
pub fn get_trait_ref_data(&self,
trait_ref: &ast::TraitRef,
parent: NodeId)
pub fn get_expr_data(&self, expr: &ast::Expr) -> Option<Data> {
match expr.node {
ast::ExprField(ref sub_ex, ident) => {
- let ty = &ty::expr_ty_adjusted(self.tcx, &sub_ex).sty;
+ let ty = &self.tcx.expr_ty_adjusted(&sub_ex).sty;
match *ty {
ty::TyStruct(def_id, _) => {
- let fields = ty::lookup_struct_fields(self.tcx, def_id);
+ let fields = self.tcx.lookup_struct_fields(def_id);
for f in &fields {
if f.name == ident.node.name {
let sub_span = self.span_utils.span_for_last_ident(expr.span);
return Some(Data::VariableRefData(VariableRefData {
- name: get_ident(ident.node).to_string(),
+ name: ident.node.to_string(),
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(expr.id),
+ scope: self.enclosing_scope(expr.id),
ref_id: f.id,
}));
}
self.tcx.sess.span_bug(expr.span,
&format!("Couldn't find field {} on {:?}",
- &get_ident(ident.node), ty))
+ ident.node, ty))
}
_ => {
debug!("Expected struct type, found {:?}", ty);
}
}
ast::ExprStruct(ref path, _, _) => {
- let ty = &ty::expr_ty_adjusted(&self.tcx, expr).sty;
+ let ty = &self.tcx.expr_ty_adjusted(expr).sty;
match *ty {
ty::TyStruct(def_id, _) => {
let sub_span = self.span_utils.span_for_last_ident(path.span);
Some(Data::TypeRefData(TypeRefData {
span: sub_span.unwrap(),
- scope: self.tcx.map.get_parent(expr.id),
+ scope: self.enclosing_scope(expr.id),
ref_id: def_id,
}))
}
}
}
}
+ ast::ExprMethodCall(..) => {
+ let method_call = ty::MethodCall::expr(expr.id);
+ let method_id = self.tcx.tables.borrow().method_map[&method_call].def_id;
+ let (def_id, decl_id) = match self.tcx.impl_or_trait_item(method_id).container() {
+ ty::ImplContainer(_) => (Some(method_id), None),
+ ty::TraitContainer(_) => (None, Some(method_id))
+ };
+ let sub_span = self.span_utils.sub_span_for_meth_name(expr.span);
+ let parent = self.enclosing_scope(expr.id);
+ Some(Data::MethodCallData(MethodCallData {
+ span: sub_span.unwrap(),
+ scope: parent,
+ ref_id: def_id,
+ decl_id: decl_id,
+ }))
+ }
+ ast::ExprPath(_, ref path) => {
+ self.get_path_data(expr.id, path)
+ }
_ => {
// FIXME
unimplemented!();
}
}
+ pub fn get_path_data(&self,
+ id: NodeId,
+ path: &ast::Path)
+ -> Option<Data> {
+ let def_map = self.tcx.def_map.borrow();
+ if !def_map.contains_key(&id) {
+ self.tcx.sess.span_bug(path.span,
+ &format!("def_map has no key for {} in visit_expr", id));
+ }
+ let def = def_map.get(&id).unwrap().full_def();
+ let sub_span = self.span_utils.span_for_last_ident(path.span);
+ match def {
+ def::DefUpvar(..) |
+ def::DefLocal(..) |
+ def::DefStatic(..) |
+ def::DefConst(..) |
+ def::DefAssociatedConst(..) |
+ def::DefVariant(..) => {
+ Some(Data::VariableRefData(VariableRefData {
+ name: self.span_utils.snippet(sub_span.unwrap()),
+ span: sub_span.unwrap(),
+ scope: self.enclosing_scope(id),
+ ref_id: def.def_id(),
+ }))
+ }
+ def::DefStruct(def_id) |
+ def::DefTy(def_id, _) |
+ def::DefTrait(def_id) |
+ def::DefTyParam(_, _, def_id, _) => {
+ Some(Data::TypeRefData(TypeRefData {
+ span: sub_span.unwrap(),
+ ref_id: def_id,
+ scope: self.enclosing_scope(id),
+ }))
+ }
+ def::DefMethod(decl_id) => {
+ let sub_span = self.span_utils.sub_span_for_meth_name(path.span);
+ let def_id = if decl_id.krate == ast::LOCAL_CRATE {
+ let ti = self.tcx.impl_or_trait_item(decl_id);
+ match ti.container() {
+ ty::TraitContainer(def_id) => {
+ self.tcx.trait_items(def_id)
+ .iter()
+ .find(|mr| {
+ mr.name() == ti.name() && self.trait_method_has_body(mr)
+ })
+ .map(|mr| mr.def_id())
+ }
+ ty::ImplContainer(def_id) => {
+ let impl_items = self.tcx.impl_items.borrow();
+ Some(impl_items.get(&def_id)
+ .unwrap()
+ .iter()
+ .find(|mr| {
+ self.tcx.impl_or_trait_item(mr.def_id()).name()
+ == ti.name()
+ })
+ .unwrap()
+ .def_id())
+ }
+ }
+ } else {
+ None
+ };
+ Some(Data::MethodCallData(MethodCallData {
+ span: sub_span.unwrap(),
+ scope: self.enclosing_scope(id),
+ ref_id: def_id,
+ decl_id: Some(decl_id),
+ }))
+ },
+ def::DefFn(def_id, _) => {
+ Some(Data::FunctionCallData(FunctionCallData {
+ ref_id: def_id,
+ span: sub_span.unwrap(),
+ scope: self.enclosing_scope(id),
+ }))
+ }
+ def::DefMod(def_id) => {
+ Some(Data::ModRefData(ModRefData {
+ ref_id: def_id,
+ span: sub_span.unwrap(),
+ scope: self.enclosing_scope(id),
+ }))
+ }
+ _ => None,
+ }
+ }
+
+ fn trait_method_has_body(&self, mr: &ty::ImplOrTraitItem) -> bool {
+ let def_id = mr.def_id();
+ if def_id.krate != ast::LOCAL_CRATE {
+ return false;
+ }
+
+ let trait_item = self.tcx.map.expect_trait_item(def_id.node);
+ if let ast::TraitItem_::MethodTraitItem(_, Some(_)) = trait_item.node {
+ true
+ } else {
+ false
+ }
+ }
+
pub fn get_field_ref_data(&self,
field_ref: &ast::Field,
struct_id: DefId,
parent: NodeId)
-> VariableRefData {
- let fields = ty::lookup_struct_fields(&self.tcx, struct_id);
- let field_name = get_ident(field_ref.ident.node).to_string();
+ let fields = self.tcx.lookup_struct_fields(struct_id);
+ let field_name = field_ref.ident.node.to_string();
for f in &fields {
if f.name == field_ref.ident.node.name {
// We don't really need a sub-span here, but no harm done
}
}
+ #[inline]
+ fn enclosing_scope(&self, id: NodeId) -> NodeId {
+ self.tcx.map.get_enclosing_scope(id).unwrap_or(0)
+ }
}
// An AST visitor for collecting paths from patterns.
}
ast::PatIdent(bm, ref path1, _) => {
debug!("PathCollector, visit ident in pat {}: {:?} {:?}",
- token::get_ident(path1.node),
+ path1.node,
p.span,
path1.span);
let immut = match bm {
// If the expression is a macro expansion or other generated code, run screaming
// and don't index.
-fn generated_code(span: Span) -> bool {
+pub fn generated_code(span: Span) -> bool {
span.expn_id != NO_EXPANSION || span == DUMMY_SP
}
}
impl<'a> SpanUtils<'a> {
+ pub fn new(sess: &'a Session) -> SpanUtils<'a> {
+ SpanUtils {
+ sess: sess,
+ err_count: Cell::new(0)
+ }
+ }
+
// Standard string for extents/location.
pub fn extent_str(&self, span: Span) -> String {
let lo_loc = self.sess.codemap().lookup_char_pos(span.lo);
use middle::const_eval;
use middle::def::{self, DefMap};
use middle::expr_use_visitor as euv;
+use middle::infer;
use middle::lang_items::StrEqFnLangItem;
use middle::mem_categorization as mc;
use middle::pat_util::*;
use trans::build::{Not, Store, Sub, add_comment};
use trans::build;
use trans::callee;
-use trans::cleanup::{self, CleanupMethods};
+use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::tvec;
use trans::type_of;
use middle::ty::{self, Ty};
-use session::config::{NoDebugInfo, FullDebugInfo};
+use session::config::NoDebugInfo;
use util::common::indenter;
use util::nodemap::FnvHashMap;
use util::ppaux;
impl<'a> ConstantExpr<'a> {
fn eq(self, other: ConstantExpr<'a>, tcx: &ty::ctxt) -> bool {
- match const_eval::compare_lit_exprs(tcx, self.0, other.0, None,
- |id| {ty::node_id_item_substs(tcx, id).substs}) {
+ match const_eval::compare_lit_exprs(tcx, self.0, other.0) {
Some(result) => result == Ordering::Equal,
None => panic!("compare_list_exprs: type mismatch"),
}
let ccx = bcx.ccx();
match *self {
ConstantValue(ConstantExpr(lit_expr), _) => {
- let lit_ty = ty::node_id_to_type(bcx.tcx(), lit_expr.id);
+ let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id);
let (llval, _) = consts::const_expr(ccx, &*lit_expr, bcx.fcx.param_substs, None);
let lit_datum = immediate_rvalue(llval, lit_ty);
let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
#[derive(Clone, Copy, PartialEq)]
pub enum TransBindingMode {
+ /// By-value binding for a copy type: copies from matched data
+ /// into a fresh LLVM alloca.
TrByCopy(/* llbinding */ ValueRef),
- TrByMove,
+
+ /// By-value binding for a non-copy type where we copy into a
+ /// fresh LLVM alloca; this most accurately reflects the language
+ /// semantics (e.g. it properly handles overwrites of the matched
+ /// input), but potentially injects an unwanted copy.
+ TrByMoveIntoCopy(/* llbinding */ ValueRef),
+
+ /// Binding a non-copy type by reference under the hood; this is
+ /// a codegen optimization to avoid unnecessary memory traffic.
+ TrByMoveRef,
+
+ /// By-ref binding exposed in the original source input.
TrByRef,
}
+impl TransBindingMode {
+ /// if binding by making a fresh copy; returns the alloca that it
+ /// will copy into; otherwise None.
+ fn alloca_if_copy(&self) -> Option<ValueRef> {
+ match *self {
+ TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding),
+ TrByMoveRef | TrByRef => None,
+ }
+ }
+}
+
/// Information about a pattern binding:
/// - `llmatch` is a pointer to a stack slot. The stack slot contains a
/// pointer into the value being matched. Hence, llmatch has type `T**`
return false;
}
+// As noted in `fn match_datum`, we should eventually pass around a
+// `Datum<Lvalue>` for the `val`; but until we get to that point, this
+// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>`
+// does except for the type field.
+#[derive(Copy, Clone)]
+pub struct MatchInput { val: ValueRef, lval: Lvalue }
+
+impl<'tcx> Datum<'tcx, Lvalue> {
+ pub fn match_input(&self) -> MatchInput {
+ MatchInput {
+ val: self.val,
+ lval: self.kind,
+ }
+ }
+}
+
+impl MatchInput {
+ fn from_val(val: ValueRef) -> MatchInput {
+ MatchInput {
+ val: val,
+ lval: Lvalue::new("MatchInput::from_val"),
+ }
+ }
+
+ fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
+ Datum::new(self.val, ty, self.lval)
+ }
+}
+
fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
m: &[Match<'a, 'p, 'blk, 'tcx>],
col: usize,
- val: ValueRef)
+ val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={})",
bcx.to_str(),
m,
col,
- bcx.val_to_string(val));
+ bcx.val_to_string(val.val));
let _indenter = indenter();
m.iter().map(|br| {
loop {
pat = match pat.node {
ast::PatIdent(_, ref path, Some(ref inner)) => {
- bound_ptrs.push((path.node, val));
+ bound_ptrs.push((path.node, val.val));
&**inner
},
_ => break
dm: &DefMap,
m: &[Match<'a, 'p, 'blk, 'tcx>],
col: usize,
- val: ValueRef,
+ val: MatchInput,
mut e: F)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> where
F: FnMut(&[&'p ast::Pat]) -> Option<Vec<&'p ast::Pat>>,
bcx.to_str(),
m,
col,
- bcx.val_to_string(val));
+ bcx.val_to_string(val.val));
let _indenter = indenter();
m.iter().filter_map(|br| {
match this.node {
ast::PatIdent(_, ref path, None) => {
if pat_is_binding(dm, &*this) {
- bound_ptrs.push((path.node, val));
+ bound_ptrs.push((path.node, val.val));
}
}
ast::PatVec(ref before, Some(ref slice), ref after) => {
dm: &DefMap,
m: &[Match<'a, 'p, 'blk, 'tcx>],
col: usize,
- val: ValueRef)
+ val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("enter_default(bcx={}, m={:?}, col={}, val={})",
bcx.to_str(),
m,
col,
- bcx.val_to_string(val));
+ bcx.val_to_string(val.val));
let _indenter = indenter();
// Collect all of the matches that can match against anything.
opt: &Opt,
col: usize,
variant_size: usize,
- val: ValueRef)
+ val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={})",
bcx.to_str(),
m,
*opt,
col,
- bcx.val_to_string(val));
+ bcx.val_to_string(val.val));
let _indenter = indenter();
let ctor = match opt {
check_match::Constructor::Variant(def_id)
};
- let param_env = ty::empty_parameter_environment(bcx.tcx());
+ let param_env = bcx.tcx().empty_parameter_environment();
let mcx = check_match::MatchCheckCtxt {
tcx: bcx.tcx(),
param_env: param_env,
let opt_def = tcx.def_map.borrow().get(&cur.id).map(|d| d.full_def());
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
- let variant = ty::enum_variant_with_id(tcx, enum_id, var_id);
+ let variant = tcx.enum_variant_with_id(enum_id, var_id);
Variant(variant.disr_val,
adt::represent_node(bcx, cur.id),
var_id,
fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
repr: &adt::Repr<'tcx>,
disr_val: ty::Disr,
- val: ValueRef)
+ val: MatchInput)
-> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_variant_args");
let args = (0..adt::num_args(repr, disr_val)).map(|i| {
- adt::trans_field_ptr(bcx, repr, val, disr_val, i)
+ adt::trans_field_ptr(bcx, repr, val.val, disr_val, i)
}).collect();
ExtractedBlock { vals: args, bcx: bcx }
/// Helper for converting from the ValueRef that we pass around in the match code, which is always
/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
-fn match_datum<'tcx>(val: ValueRef, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
- Datum::new(val, left_ty, Lvalue)
+fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
+ val.to_datum(left_ty)
}
fn bind_subslice_pat(bcx: Block,
pat_id: ast::NodeId,
- val: ValueRef,
+ val: MatchInput,
offset_left: usize,
offset_right: usize) -> ValueRef {
let _icx = push_ctxt("match::bind_subslice_pat");
let vec_ty = node_id_type(bcx, pat_id);
- let unit_ty = ty::sequence_element_type(bcx.tcx(), ty::type_content(vec_ty));
+ let vec_ty_contents = match vec_ty.sty {
+ ty::TyBox(ty) => ty,
+ ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty,
+ _ => vec_ty
+ };
+ let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx());
let vec_datum = match_datum(val, vec_ty);
let (base, len) = vec_datum.get_vec_base_and_len(bcx);
let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
- let slice_ty = ty::mk_slice(bcx.tcx(),
- bcx.tcx().mk_region(ty::ReStatic),
- ty::mt {ty: unit_ty, mutbl: ast::MutImmutable});
+ let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic),
+ bcx.tcx().mk_slice(unit_ty));
let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
Store(bcx, slice_begin,
GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
left_ty: Ty<'tcx>,
before: usize,
after: usize,
- val: ValueRef)
+ val: MatchInput)
-> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_vec_elems");
let vec_datum = match_datum(val, left_ty);
}
let _icx = push_ctxt("compare_values");
- if ty::type_is_scalar(rhs_t) {
+ if rhs_t.is_scalar() {
let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, ast::BiEq, debug_loc);
return Result::new(cx, cmp);
}
// NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item,
// which calls memcmp().
let pat_len = val_ty(rhs).element_type().array_length();
- let ty_str_slice = ty::mk_str_slice(cx.tcx(),
- cx.tcx().mk_region(ty::ReStatic),
- ast::MutImmutable);
+ let ty_str_slice = cx.tcx().mk_static_str();
let rhs_str = alloc_ty(cx, ty_str_slice, "rhs_str");
Store(cx, GEPi(cx, rhs, &[0, 0]), expr::get_dataptr(cx, rhs_str));
cs: Option<cleanup::ScopeId>)
-> Block<'blk, 'tcx> {
for (&ident, &binding_info) in bindings_map {
- let llval = match binding_info.trmode {
+ let (llval, aliases_other_state) = match binding_info.trmode {
// By value mut binding for a copy type: load from the ptr
// into the matched value and copy to our alloca
- TrByCopy(llbinding) => {
+ TrByCopy(llbinding) |
+ TrByMoveIntoCopy(llbinding) => {
let llval = Load(bcx, binding_info.llmatch);
- let datum = Datum::new(llval, binding_info.ty, Lvalue);
+ let lvalue = match binding_info.trmode {
+ TrByCopy(..) =>
+ Lvalue::new("_match::insert_lllocals"),
+ TrByMoveIntoCopy(..) => {
+ // match_input moves from the input into a
+ // separate stack slot.
+ //
+ // E.g. consider moving the value `D(A)` out
+ // of the tuple `(D(A), D(B))` and into the
+ // local variable `x` via the pattern `(x,_)`,
+ // leaving the remainder of the tuple `(_,
+ // D(B))` still to be dropped in the future.
+ //
+ // Thus, here we must must zero the place that
+ // we are moving *from*, because we do not yet
+ // track drop flags for a fragmented parent
+ // match input expression.
+ //
+ // Longer term we will be able to map the move
+ // into `(x, _)` up to the parent path that
+ // owns the whole tuple, and mark the
+ // corresponding stack-local drop-flag
+ // tracking the first component of the tuple.
+ let hint_kind = HintKind::ZeroAndMaintain;
+ Lvalue::new_with_hint("_match::insert_lllocals (match_input)",
+ bcx, binding_info.id, hint_kind)
+ }
+ _ => unreachable!(),
+ };
+ let datum = Datum::new(llval, binding_info.ty, lvalue);
call_lifetime_start(bcx, llbinding);
bcx = datum.store_to(bcx, llbinding);
if let Some(cs) = cs {
bcx.fcx.schedule_lifetime_end(cs, llbinding);
}
- llbinding
+ (llbinding, false)
},
// By value move bindings: load from the ptr into the matched value
- TrByMove => Load(bcx, binding_info.llmatch),
+ TrByMoveRef => (Load(bcx, binding_info.llmatch), true),
// By ref binding: use the ptr into the matched value
- TrByRef => binding_info.llmatch
+ TrByRef => (binding_info.llmatch, true),
};
- let datum = Datum::new(llval, binding_info.ty, Lvalue);
+
+ // A local that aliases some other state must be zeroed, since
+ // the other state (e.g. some parent data that we matched
+ // into) will still have its subcomponents (such as this
+ // local) destructed at the end of the parent's scope. Longer
+ // term, we will properly map such parents to the set of
+ // unique drop flags for its fragments.
+ let hint_kind = if aliases_other_state {
+ HintKind::ZeroAndMaintain
+ } else {
+ HintKind::DontZeroJustUse
+ };
+ let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)",
+ bcx,
+ binding_info.id,
+ hint_kind);
+ let datum = Datum::new(llval, binding_info.ty, lvalue);
if let Some(cs) = cs {
+ let opt_datum = lvalue.dropflag_hint(bcx);
bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
- bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty);
+ bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
}
debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval));
guard_expr: &ast::Expr,
data: &ArmData<'p, 'blk, 'tcx>,
m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[ValueRef],
+ vals: &[MatchInput],
chk: &FailureHandler,
has_genuine_default: bool)
-> Block<'blk, 'tcx> {
bcx.to_str(),
guard_expr,
m,
- vals.iter().map(|v| bcx.val_to_string(*v)).collect::<Vec<_>>().connect(", "));
+ vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
let _indenter = indenter();
let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
let val = val.to_llbool(bcx);
for (_, &binding_info) in &data.bindings_map {
- if let TrByCopy(llbinding) = binding_info.trmode {
- call_lifetime_end(bcx, llbinding);
+ if let Some(llbinding) = binding_info.trmode.alloca_if_copy() {
+ call_lifetime_end(bcx, llbinding)
}
}
fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[ValueRef],
+ vals: &[MatchInput],
chk: &FailureHandler,
has_genuine_default: bool) {
debug!("compile_submatch(bcx={}, m={:?}, vals=[{}])",
bcx.to_str(),
m,
- vals.iter().map(|v| bcx.val_to_string(*v)).collect::<Vec<_>>().connect(", "));
+ vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
let _indenter = indenter();
let _icx = push_ctxt("match::compile_submatch");
let mut bcx = bcx;
fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[ValueRef],
+ vals: &[MatchInput],
chk: &FailureHandler,
col: usize,
- val: ValueRef,
+ val: MatchInput,
has_genuine_default: bool) {
let fcx = bcx.fcx;
let tcx = bcx.tcx();
.unwrap_or(DUMMY_NODE_ID);
let left_ty = if pat_id == DUMMY_NODE_ID {
- ty::mk_nil(tcx)
+ tcx.mk_nil()
} else {
node_id_type(bcx, pat_id)
};
let mcx = check_match::MatchCheckCtxt {
tcx: bcx.tcx(),
- param_env: ty::empty_parameter_environment(bcx.tcx()),
+ param_env: bcx.tcx().empty_parameter_environment(),
};
let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
let repr = adt::represent_type(bcx.ccx(), left_ty);
let arg_count = adt::num_args(&*repr, 0);
let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) {
- (arg_count, val)
+ (arg_count, val.val)
} else {
// For an unsized ADT (i.e. DST struct), we need to treat
// the last field specially: instead of simply passing a
// ValueRef pointing to that field, as with all the others,
// we skip it and instead construct a 'fat ptr' below.
- (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val)))
+ (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
};
let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
adt::trans_field_ptr(bcx, &*repr, struct_val, 0, ix)
// The last field is technically unsized but
// since we can only ever match that field behind
// a reference we construct a fat ptr here.
- let fields = ty::lookup_struct_fields(bcx.tcx(), def_id);
+ let fields = bcx.tcx().lookup_struct_fields(def_id);
let unsized_ty = fields.iter().last().map(|field| {
- let fty = ty::lookup_field_type(bcx.tcx(), def_id, field.id, substs);
+ let fty = bcx.tcx().lookup_field_type(def_id, field.id, substs);
monomorphize::normalize_associated_type(bcx.tcx(), &fty)
}).unwrap();
let llty = type_of::type_of(bcx.ccx(), unsized_ty);
let scratch = alloca_no_lifetime(bcx, llty, "__struct_field_fat_ptr");
let data = adt::trans_field_ptr(bcx, &*repr, struct_val, 0, arg_count);
- let len = Load(bcx, expr::get_len(bcx, val));
+ let len = Load(bcx, expr::get_len(bcx, val.val));
Store(bcx, data, expr::get_dataptr(bcx, scratch));
Store(bcx, len, expr::get_len(bcx, scratch));
field_vals.push(scratch);
}
Some(field_vals)
} else if any_uniq_pat(m, col) || any_region_pat(m, col) {
- Some(vec!(Load(bcx, val)))
+ Some(vec!(Load(bcx, val.val)))
} else {
match left_ty.sty {
ty::TyArray(_, n) => {
&check_match::Single, col,
field_vals.len())
);
- let mut vals = field_vals;
+ let mut vals: Vec<_> = field_vals.into_iter()
+ .map(|v|MatchInput::from_val(v))
+ .collect();
vals.push_all(&vals_left);
compile_submatch(bcx, &pats, &vals, chk, has_genuine_default);
return;
let opts = get_branches(bcx, m, col);
debug!("options={:?}", opts);
let mut kind = NoBranch;
- let mut test_val = val;
+ let mut test_val = val.val;
debug!("test_val={}", bcx.val_to_string(test_val));
if !opts.is_empty() {
match opts[0] {
ConstantValue(..) | ConstantRange(..) => {
- test_val = load_if_immediate(bcx, val, left_ty);
- kind = if ty::type_is_integral(left_ty) {
+ test_val = load_if_immediate(bcx, val.val, left_ty);
+ kind = if left_ty.is_integral() {
Switch
} else {
Compare
};
}
Variant(_, ref repr, _, _) => {
- let (the_kind, val_opt) = adt::trans_switch(bcx, &**repr, val);
+ let (the_kind, val_opt) = adt::trans_switch(bcx, &**repr, val.val);
kind = the_kind;
if let Some(tval) = val_opt { test_val = tval; }
}
SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => {
- let (_, len) = tvec::get_base_and_len(bcx, val, left_ty);
+ let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty);
test_val = len;
kind = Switch;
}
ConstantValue(..) | ConstantRange(..) => ()
}
let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val);
- let mut opt_vals = unpacked;
+ let mut opt_vals: Vec<_> = unpacked.into_iter()
+ .map(|v|MatchInput::from_val(v))
+ .collect();
opt_vals.push_all(&vals_left[..]);
compile_submatch(opt_cx,
&opt_ms[..],
reassigned: false
};
{
- let mut visitor = euv::ExprUseVisitor::new(&mut rc, bcx);
+ let infcx = infer::normalizing_infer_ctxt(bcx.tcx(), &bcx.tcx().tables);
+ let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
visitor.walk_expr(body);
}
rc.reassigned
match base_cmt.cat {
mc::cat_upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
mc::cat_local(vid) => {
- self.reassigned |= self.node == vid && Some(field) == self.field
+ self.reassigned |= self.node == vid &&
+ (self.field.is_none() || Some(field) == self.field)
},
_ => {}
}
let variable_ty = node_id_type(bcx, p_id);
let llvariable_ty = type_of::type_of(ccx, variable_ty);
let tcx = bcx.tcx();
- let param_env = ty::empty_parameter_environment(tcx);
+ let param_env = tcx.empty_parameter_environment();
let llmatch;
let trmode;
+ let moves_by_default = variable_ty.moves_by_default(¶m_env, span);
match bm {
- ast::BindByValue(_)
- if !ty::type_moves_by_default(¶m_env, span, variable_ty) || reassigned =>
+ ast::BindByValue(_) if !moves_by_default || reassigned =>
{
llmatch = alloca_no_lifetime(bcx,
- llvariable_ty.ptr_to(),
- "__llmatch");
- trmode = TrByCopy(alloca_no_lifetime(bcx,
- llvariable_ty,
- &bcx.name(name)));
+ llvariable_ty.ptr_to(),
+ "__llmatch");
+ let llcopy = alloca_no_lifetime(bcx,
+ llvariable_ty,
+ &bcx.name(name));
+ trmode = if moves_by_default {
+ TrByMoveIntoCopy(llcopy)
+ } else {
+ TrByCopy(llcopy)
+ };
}
ast::BindByValue(_) => {
// in this case, the final type of the variable will be T,
// but during matching we need to store a *T as explained
// above
llmatch = alloca_no_lifetime(bcx,
- llvariable_ty.ptr_to(),
- &bcx.name(name));
- trmode = TrByMove;
+ llvariable_ty.ptr_to(),
+ &bcx.name(name));
+ trmode = TrByMoveRef;
}
ast::BindByRef(_) => {
llmatch = alloca_no_lifetime(bcx,
}
let t = node_id_type(bcx, discr_expr.id);
- let chk = if ty::type_is_empty(tcx, t) {
+ let chk = if t.is_empty(tcx) {
Unreachable
} else {
Infallible
&& arm.pats.last().unwrap().node == ast::PatWild(ast::PatWildSingle)
});
- compile_submatch(bcx, &matches[..], &[discr_datum.val], &chk, has_default);
+ compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
let mut arm_cxs = Vec::new();
for arm_data in &arm_datas {
let scope = cleanup::var_scope(tcx, p_id);
bcx = mk_binding_alloca(
bcx, p_id, path1.node.name, scope, (),
- |(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx });
+ "_match::store_local::create_dummy_locals",
+ |(), bcx, Datum { val: llval, ty, kind }| {
+ // Dummy-locals start out uninitialized, so set their
+ // drop-flag hints (if any) to "moved."
+ if let Some(hint) = kind.dropflag_hint(bcx) {
+ let moved_hint = adt::DTOR_MOVED_HINT as usize;
+ debug!("store moved_hint={} for hint={:?}, uninitialized dummy",
+ moved_hint, hint);
+ Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value());
+ }
+
+ if kind.drop_flag_info.must_zero() {
+ // if no drop-flag hint, or the hint requires
+ // we maintain the embedded drop-flag, then
+ // mark embedded drop-flag(s) as moved
+ // (i.e. "already dropped").
+ drop_done_fill_mem(bcx, llval, ty);
+ }
+ bcx
+ });
});
bcx
}
let var_scope = cleanup::var_scope(tcx, local.id);
return mk_binding_alloca(
bcx, pat.id, ident.name, var_scope, (),
- |(), bcx, v, _| expr::trans_into(bcx, &**init_expr,
- expr::SaveIn(v)));
+ "_match::store_local",
+ |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &**init_expr,
+ expr::SaveIn(v)));
}
None => {}
add_comment(bcx, "creating zeroable ref llval");
}
let var_scope = cleanup::var_scope(tcx, local.id);
- bind_irrefutable_pat(bcx, pat, init_datum.val, var_scope)
+ bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope)
}
None => {
create_dummy_locals(bcx, pat)
}
}
-/// Generates code for argument patterns like `fn foo(<pat>: T)`.
-/// Creates entries in the `lllocals` map for each of the bindings
-/// in `pat`.
-///
-/// # Arguments
-///
-/// - `pat` is the argument pattern
-/// - `llval` is a pointer to the argument value (in other words,
-/// if the argument type is `T`, then `llval` is a `T*`). In some
-/// cases, this code may zero out the memory `llval` points at.
-pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- pat: &ast::Pat,
- arg: Datum<'tcx, Rvalue>,
- arg_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::store_arg");
-
- match simple_identifier(&*pat) {
- Some(ident) => {
- // Generate nicer LLVM for the common case of fn a pattern
- // like `x: T`
- let arg_ty = node_id_type(bcx, pat.id);
- if type_of::arg_is_indirect(bcx.ccx(), arg_ty)
- && bcx.sess().opts.debuginfo != FullDebugInfo {
- // Don't copy an indirect argument to an alloca, the caller
- // already put it in a temporary alloca and gave it up, unless
- // we emit extra-debug-info, which requires local allocas :(.
- let arg_val = arg.add_clean(bcx.fcx, arg_scope);
- bcx.fcx.lllocals.borrow_mut()
- .insert(pat.id, Datum::new(arg_val, arg_ty, Lvalue));
- bcx
- } else {
- mk_binding_alloca(
- bcx, pat.id, ident.name, arg_scope, arg,
- |arg, bcx, llval, _| arg.store_to(bcx, llval))
- }
- }
-
- None => {
- // General path. Copy out the values that are used in the
- // pattern.
- let arg = unpack_datum!(
- bcx, arg.to_lvalue_datum_in_scope(bcx, "__arg", arg_scope));
- bind_irrefutable_pat(bcx, pat, arg.val, arg_scope)
- }
- }
-}
-
fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
p_id: ast::NodeId,
name: ast::Name,
cleanup_scope: cleanup::ScopeId,
arg: A,
+ caller_name: &'static str,
populate: F)
-> Block<'blk, 'tcx> where
- F: FnOnce(A, Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+ F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>,
{
let var_ty = node_id_type(bcx, p_id);
// Allocate memory on stack for the binding.
let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
+ let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
+ let datum = Datum::new(llval, var_ty, lvalue);
// Subtle: be sure that we *populate* the memory *before*
// we schedule the cleanup.
- let bcx = populate(arg, bcx, llval, var_ty);
+ let bcx = populate(arg, bcx, datum);
bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
- bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty);
+ bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx));
// Now that memory is initialized and has cleanup scheduled,
- // create the datum and insert into the local variable map.
- let datum = Datum::new(llval, var_ty, Lvalue);
+ // insert datum into the local variable map.
bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
bcx
}
/// - bcx: starting basic block context
/// - pat: the irrefutable pattern being matched.
/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pat: &ast::Pat,
- val: ValueRef,
+ val: MatchInput,
cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
debug!("bind_irrefutable_pat(bcx={}, pat={:?})",
// map.
bcx = mk_binding_alloca(
bcx, pat.id, path1.node.name, cleanup_scope, (),
- |(), bcx, llval, ty| {
+ "_match::bind_irrefutable_pat",
+ |(), bcx, Datum { val: llval, ty, kind: _ }| {
match pat_binding_mode {
ast::BindByValue(_) => {
// By value binding: move the value that `val`
// points at into the binding's stack slot.
- let d = Datum::new(val, ty, Lvalue);
+ let d = val.to_datum(ty);
d.store_to(bcx, llval)
}
// By ref binding: the value of the variable
// is the pointer `val` itself or fat pointer referenced by `val`
if type_is_fat_ptr(bcx.tcx(), ty) {
- expr::copy_fat_ptr(bcx, val, llval);
+ expr::copy_fat_ptr(bcx, val.val, llval);
}
else {
- Store(bcx, val, llval);
+ Store(bcx, val.val, llval);
}
bcx
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
let repr = adt::represent_node(bcx, pat.id);
- let vinfo = ty::enum_variant_with_id(ccx.tcx(),
- enum_id,
- var_id);
+ let vinfo = ccx.tcx().enum_variant_with_id(enum_id, var_id);
let args = extract_variant_args(bcx,
&*repr,
vinfo.disr_val,
val);
if let Some(ref sub_pat) = *sub_pats {
for (i, &argval) in args.vals.iter().enumerate() {
- bcx = bind_irrefutable_pat(bcx, &*sub_pat[i],
- argval, cleanup_scope);
+ bcx = bind_irrefutable_pat(
+ bcx,
+ &*sub_pat[i],
+ MatchInput::from_val(argval),
+ cleanup_scope);
}
}
}
let repr = adt::represent_node(bcx, pat.id);
for (i, elem) in elems.iter().enumerate() {
let fldptr = adt::trans_field_ptr(bcx, &*repr,
- val, 0, i);
- bcx = bind_irrefutable_pat(bcx, &**elem,
- fldptr, cleanup_scope);
+ val.val, 0, i);
+ bcx = bind_irrefutable_pat(
+ bcx,
+ &**elem,
+ MatchInput::from_val(fldptr),
+ cleanup_scope);
}
}
}
let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
expr::with_field_tys(tcx, pat_ty, Some(pat.id), |discr, field_tys| {
for f in fields {
- let ix = ty::field_idx_strict(tcx, f.node.ident.name, field_tys);
- let fldptr = adt::trans_field_ptr(bcx, &*pat_repr, val,
- discr, ix);
- bcx = bind_irrefutable_pat(bcx, &*f.node.pat, fldptr, cleanup_scope);
+ let ix = tcx.field_idx_strict(f.node.ident.name, field_tys);
+ let fldptr = adt::trans_field_ptr(
+ bcx,
+ &*pat_repr,
+ val.val,
+ discr,
+ ix);
+ bcx = bind_irrefutable_pat(bcx,
+ &*f.node.pat,
+ MatchInput::from_val(fldptr),
+ cleanup_scope);
}
})
}
ast::PatTup(ref elems) => {
let repr = adt::represent_node(bcx, pat.id);
for (i, elem) in elems.iter().enumerate() {
- let fldptr = adt::trans_field_ptr(bcx, &*repr, val, 0, i);
- bcx = bind_irrefutable_pat(bcx, &**elem, fldptr, cleanup_scope);
+ let fldptr = adt::trans_field_ptr(bcx, &*repr, val.val, 0, i);
+ bcx = bind_irrefutable_pat(
+ bcx,
+ &**elem,
+ MatchInput::from_val(fldptr),
+ cleanup_scope);
}
}
ast::PatBox(ref inner) => {
- let llbox = Load(bcx, val);
- bcx = bind_irrefutable_pat(bcx, &**inner, llbox, cleanup_scope);
+ let llbox = Load(bcx, val.val);
+ bcx = bind_irrefutable_pat(
+ bcx, &**inner, MatchInput::from_val(llbox), cleanup_scope);
}
ast::PatRegion(ref inner, _) => {
- let loaded_val = Load(bcx, val);
- bcx = bind_irrefutable_pat(bcx, &**inner, loaded_val, cleanup_scope);
+ let loaded_val = Load(bcx, val.val);
+ bcx = bind_irrefutable_pat(
+ bcx,
+ &**inner,
+ MatchInput::from_val(loaded_val),
+ cleanup_scope);
}
ast::PatVec(ref before, ref slice, ref after) => {
let pat_ty = node_id_type(bcx, pat.id);
.chain(slice.iter())
.chain(after.iter())
.zip(extracted.vals)
- .fold(bcx, |bcx, (inner, elem)|
- bind_irrefutable_pat(bcx, &**inner, elem, cleanup_scope)
- );
+ .fold(bcx, |bcx, (inner, elem)| {
+ bind_irrefutable_pat(
+ bcx,
+ &**inner,
+ MatchInput::from_val(elem),
+ cleanup_scope)
+ });
}
ast::PatMac(..) => {
bcx.sess().span_bug(pat.span, "unexpanded macro");
//! used unboxed and any field can have pointers (including mutable)
//! taken to it, implementing them for Rust seems difficult.
-#![allow(unsigned_negation)]
-
pub use self::Repr::*;
use std::rc::Rc;
use llvm::{ValueRef, True, IntEQ, IntNE};
use back::abi::FAT_PTR_ADDR;
use middle::subst;
-use middle::ty::{self, Ty, ClosureTyper};
+use middle::ty::{self, Ty};
use middle::ty::Disr;
use syntax::ast;
use syntax::attr;
(repeat_u8_as_u32!($name) as u64)) }
}
+/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
+/// "we do not know whether the destructor has run or not; check the
+/// drop-flag embedded in the value itself."
+pub const DTOR_NEEDED_HINT: u8 = 0x3d;
+
+/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
+/// definitely been moved; you do not need to run its destructor."
+///
+/// (However, for now, such values may still end up being explicitly
+/// zeroed by the generated code; this is the distinction between
+/// `datum::DropFlagInfo::ZeroAndMaintain` versus
+/// `datum::DropFlagInfo::DontZeroJustUse`.)
+pub const DTOR_MOVED_HINT: u8 = 0x2d;
+
pub const DTOR_NEEDED: u8 = 0xd4;
pub const DTOR_NEEDED_U32: u32 = repeat_u8_as_u32!(DTOR_NEEDED);
pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64!(DTOR_NEEDED);
Univariant(mk_struct(cx, &elems[..], false, t), 0)
}
ty::TyStruct(def_id, substs) => {
- let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
+ let fields = cx.tcx().lookup_struct_fields(def_id);
let mut ftys = fields.iter().map(|field| {
- let fty = ty::lookup_field_type(cx.tcx(), def_id, field.id, substs);
+ let fty = cx.tcx().lookup_field_type(def_id, field.id, substs);
monomorphize::normalize_associated_type(cx.tcx(), &fty)
}).collect::<Vec<_>>();
- let packed = ty::lookup_packed(cx.tcx(), def_id);
- let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
+ let packed = cx.tcx().lookup_packed(def_id);
+ let dtor = cx.tcx().ty_dtor(def_id).has_drop_flag();
if dtor {
ftys.push(cx.tcx().dtor_type());
}
Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
}
- ty::TyClosure(def_id, substs) => {
- let typer = NormalizingClosureTyper::new(cx.tcx());
- let upvars = typer.closure_upvars(def_id, substs).unwrap();
- let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
- Univariant(mk_struct(cx, &upvar_types[..], false, t), 0)
+ ty::TyClosure(_, ref substs) => {
+ Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
}
ty::TyEnum(def_id, substs) => {
let cases = get_cases(cx.tcx(), def_id, substs);
- let hint = *ty::lookup_repr_hints(cx.tcx(), def_id).get(0)
+ let hint = *cx.tcx().lookup_repr_hints(def_id).get(0)
.unwrap_or(&attr::ReprAny);
- let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
+ let dtor = cx.tcx().ty_dtor(def_id).has_drop_flag();
if cases.is_empty() {
// Uninhabitable; represent as unit
// been rejected by a checker before this point.
if !cases.iter().enumerate().all(|(i,c)| c.discr == (i as Disr)) {
cx.sess().bug(&format!("non-C-like enum {} with specified \
- discriminants",
- ty::item_path_str(cx.tcx(),
- def_id)));
+ discriminants",
+ cx.tcx().item_path_str(def_id)));
}
if cases.len() == 1 {
mut path: DiscrField) -> Option<DiscrField> {
match ty.sty {
// Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
- ty::TyRef(_, ty::mt { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
+ ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
path.push(FAT_PTR_ADDR);
Some(path)
},
// Is this the NonZero lang item wrapping a pointer or integer type?
ty::TyStruct(did, substs) if Some(did) == tcx.lang_items.non_zero() => {
- let nonzero_fields = ty::lookup_struct_fields(tcx, did);
+ let nonzero_fields = tcx.lookup_struct_fields(did);
assert_eq!(nonzero_fields.len(), 1);
- let nonzero_field = ty::lookup_field_type(tcx, did, nonzero_fields[0].id, substs);
+ let nonzero_field = tcx.lookup_field_type(did, nonzero_fields[0].id, substs);
match nonzero_field.sty {
- ty::TyRawPtr(ty::mt { ty, .. }) if !type_is_sized(tcx, ty) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => {
path.push_all(&[0, FAT_PTR_ADDR]);
Some(path)
},
// Perhaps one of the fields of this struct is non-zero
// let's recurse and find out
ty::TyStruct(def_id, substs) => {
- let fields = ty::lookup_struct_fields(tcx, def_id);
+ let fields = tcx.lookup_struct_fields(def_id);
for (j, field) in fields.iter().enumerate() {
- let field_ty = ty::lookup_field_type(tcx, def_id, field.id, substs);
+ let field_ty = tcx.lookup_field_type(def_id, field.id, substs);
if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
fpath.push(j);
return Some(fpath);
// Perhaps one of the upvars of this struct is non-zero
// Let's recurse and find out!
- ty::TyClosure(def_id, substs) => {
- let typer = NormalizingClosureTyper::new(tcx);
- let upvars = typer.closure_upvars(def_id, substs).unwrap();
- let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
-
- for (j, &ty) in upvar_types.iter().enumerate() {
+ ty::TyClosure(_, ref substs) => {
+ for (j, &ty) in substs.upvar_tys.iter().enumerate() {
if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
fpath.push(j);
return Some(fpath);
def_id: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Vec<Case<'tcx>> {
- ty::enum_variants(tcx, def_id).iter().map(|vi| {
+ tcx.enum_variants(def_id).iter().map(|vi| {
let arg_tys = vi.args.iter().map(|&raw_ty| {
monomorphize::apply_param_substs(tcx, substs, &raw_ty)
}).collect();
pub fn ty_of_inttype<'tcx>(tcx: &ty::ctxt<'tcx>, ity: IntType) -> Ty<'tcx> {
match ity {
- attr::SignedInt(t) => ty::mk_mach_int(tcx, t),
- attr::UnsignedInt(t) => ty::mk_mach_uint(tcx, t)
+ attr::SignedInt(t) => tcx.mk_mach_int(t),
+ attr::UnsignedInt(t) => tcx.mk_mach_uint(t)
}
}
-> datum::DatumBlock<'blk, 'tcx, datum::Expr>
{
let tcx = bcx.tcx();
- let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), tcx.dtor_type());
+ let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
match *r {
Univariant(ref st, dtor) if dtor_active(dtor) => {
let flag_ptr = GEPi(bcx, val, &[0, st.fields.len() - 1]);
));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false);
- datum::Datum::new(ptr, ptr_ty, datum::Lvalue)
+ datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
.store_to(variant_cx, scratch.val)
});
let expr_datum = scratch.to_expr_datum();
.chain(arch_clobbers.iter()
.map(|s| s.to_string()))
.collect::<Vec<String>>()
- .connect(",");
+ .join(",");
debug!("Asm Constraints: {}", &all_constraints[..]);
use libc::{c_uint, c_ulonglong};
use llvm::{self, ValueRef, AttrHelper};
-use middle::ty::{self, ClosureTyper};
+use middle::ty;
+use middle::infer;
use session::config::NoDebugInfo;
use syntax::abi;
use syntax::ast;
let function_type;
let (fn_sig, abi, env_ty) = match fn_type.sty {
ty::TyBareFn(_, ref f) => (&f.sig, f.abi, None),
- ty::TyClosure(closure_did, substs) => {
- let typer = common::NormalizingClosureTyper::new(ccx.tcx());
- function_type = typer.closure_type(closure_did, substs);
+ ty::TyClosure(closure_did, ref substs) => {
+ let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
+ function_type = infcx.closure_type(closure_did, substs);
let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
(&function_type.sig, abi::RustCall, Some(self_type))
}
_ => ccx.sess().bug("expected closure or function.")
};
- let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);
+ let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
let mut attrs = llvm::AttrBuilder::new();
let ret_ty = fn_sig.output;
// We can also mark the return value as `dereferenceable` in certain cases
match ret_ty.sty {
// These are not really pointers but pairs, (pointer, len)
- ty::TyRef(_, ty::mt { ty: inner, .. })
+ ty::TyRef(_, ty::TypeAndMut { ty: inner, .. })
| ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => {
let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
attrs.ret(llvm::DereferenceableAttribute(llret_sz));
attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
} else {
attrs.arg(idx, llvm::NonNullAttribute);
- if ty::type_is_trait(inner) {
+ if inner.is_trait() {
attrs.arg(idx + 1, llvm::NonNullAttribute);
}
}
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
// both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
// on memory dependencies rather than pointer equality
- let interior_unsafe = ty::type_contents(ccx.tcx(), mt.ty).interior_unsafe();
+ let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
if mt.mutbl == ast::MutMutable || !interior_unsafe {
attrs.arg(idx, llvm::Attribute::NoAlias);
attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
} else {
attrs.arg(idx, llvm::NonNullAttribute);
- if ty::type_is_trait(mt.ty) {
+ if mt.ty.is_trait() {
attrs.arg(idx + 1, llvm::NonNullAttribute);
}
}
use middle::cfg;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
+use middle::pat_util::simple_identifier;
use middle::subst::Substs;
-use middle::ty::{self, Ty, ClosureTyper, type_is_simd, simd_size};
+use middle::ty::{self, Ty, HasTypeFlags};
use rustc::ast_map;
-use session::config::{self, NoDebugInfo};
+use session::config::{self, NoDebugInfo, FullDebugInfo};
use session::Session;
use trans::_match;
use trans::adt;
use trans::build::*;
use trans::builder::{Builder, noname};
use trans::callee;
-use trans::cleanup::CleanupMethods;
-use trans::cleanup;
+use trans::cleanup::{self, CleanupMethods, DropHint};
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
-use trans::common::{CrateContext, FunctionContext};
+use trans::common::{CrateContext, DropFlagHintsMap, FunctionContext};
use trans::common::{Result, NodeIdAndSpan};
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
use libc::c_uint;
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
use std::mem;
use std::str;
use std::{i8, i16, i32, i64};
let closure_kind = ccx.tcx().closure_kind(closure_id);
match closure_kind {
ty::FnClosureKind => {
- ty::mk_imm_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
+ ccx.tcx().mk_imm_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnMutClosureKind => {
- ty::mk_mut_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
+ ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnOnceClosureKind => fn_ty
}
}
pub fn kind_for_closure(ccx: &CrateContext, closure_id: ast::DefId) -> ty::ClosureKind {
- *ccx.tcx().closure_kinds.borrow().get(&closure_id).unwrap()
+ *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
}
pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
// don't do this then linker errors can be generated where the linker
// complains that one object files has a thread local version of the
// symbol and another one doesn't.
- for attr in ty::get_attrs(ccx.tcx(), did).iter() {
+ for attr in ccx.tcx().get_attrs(did).iter() {
if attr.check_name("thread_local") {
llvm::set_thread_local(c, true);
}
}
})
}
- ty::TyClosure(def_id, substs) => {
+ ty::TyClosure(_, ref substs) => {
let repr = adt::represent_type(cx.ccx(), t);
- let typer = common::NormalizingClosureTyper::new(cx.tcx());
- let upvars = typer.closure_upvars(def_id, substs).unwrap();
- for (i, upvar) in upvars.iter().enumerate() {
+ for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
- cx = f(cx, llupvar, upvar.ty);
+ cx = f(cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
- let unit_ty = ty::sequence_element_type(cx.tcx(), t);
+ let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
}
ty::TySlice(_) | ty::TyStr => {
- let unit_ty = ty::sequence_element_type(cx.tcx(), t);
+ let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
}
ty::TyTuple(ref args) => {
let ccx = fcx.ccx;
let repr = adt::represent_type(ccx, t);
- let variants = ty::enum_variants(ccx.tcx(), tid);
+ let variants = ccx.tcx().enum_variants(tid);
let n_variants = (*variants).len();
// NB: we must hit the discriminant first so that structural
let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
(ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
}
- ty::TyStruct(_, _) if type_is_simd(cx.tcx(), rhs_t) => {
+ ty::TyStruct(_, _) if rhs_t.is_simd(cx.tcx()) => {
let mut res = C_bool(cx.ccx(), false);
- for i in 0 .. simd_size(cx.tcx(), rhs_t) {
+ for i in 0 .. rhs_t.simd_size(cx.tcx()) {
res = Or(cx, res,
IsNull(cx,
ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
}
}
- let val = if ty::type_is_bool(t) {
+ let val = if t.is_bool() {
LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
- } else if ty::type_is_char(t) {
+ } else if t.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
- } else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
+ } else if (t.is_region_ptr() || t.is_unique())
&& !common::type_is_fat_ptr(cx.tcx(), t) {
LoadNonNull(cx, ptr)
} else {
return;
}
- let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
- unsafe {
- llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
+ if common::type_is_fat_ptr(cx.tcx(), t) {
+ Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), expr::get_dataptr(cx, dst));
+ Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), expr::get_len(cx, dst));
+ } else {
+ let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
+ unsafe {
+ llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
+ }
}
}
pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
- if ty::type_is_bool(ty) {
+ if ty.is_bool() {
ZExt(bcx, val, Type::i8(bcx.ccx()))
} else {
val
}
pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
- if ty::type_is_bool(ty) {
+ if ty.is_bool() {
Trunc(bcx, val, Type::i1(bcx.ccx()))
} else {
val
t: Ty<'tcx>) {
let _icx = push_ctxt("memcpy_ty");
let ccx = bcx.ccx();
- if ty::type_is_structural(t) {
+ if t.is_structural() {
let llty = type_of::type_of(ccx, t);
let llsz = llsize_of(ccx, llty);
let llalign = type_of::align_of(ccx, t);
let _icx = push_ctxt("alloc_ty");
let ccx = bcx.ccx();
let ty = type_of::type_of(ccx, t);
- assert!(!ty::type_has_params(t));
+ assert!(!t.has_param_types());
let val = alloca(bcx, ty, name);
return val;
}
Alloca(cx, ty, name)
}
+pub fn set_value_name(val: ValueRef, name: &str) {
+ unsafe {
+ let name = CString::new(name).unwrap();
+ llvm::LLVMSetValueName(val, name.as_ptr());
+ }
+}
+
// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
output_type: Ty<'tcx>) -> ValueRef {
llfn: llfndecl,
llenv: None,
llretslotptr: Cell::new(None),
- param_env: ty::empty_parameter_environment(ccx.tcx()),
+ param_env: ccx.tcx().empty_parameter_environment(),
alloca_insert_pt: Cell::new(None),
llreturn: Cell::new(None),
needs_ret_allocas: nested_returns,
caller_expects_out_pointer: uses_outptr,
lllocals: RefCell::new(NodeMap()),
llupvars: RefCell::new(NodeMap()),
+ lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
id: id,
param_substs: param_substs,
span: sp,
}
}
+ // Create the drop-flag hints for every unfragmented path in the function.
+ let tcx = fcx.ccx.tcx();
+ let fn_did = ast::DefId { krate: ast::LOCAL_CRATE, node: fcx.id };
+ let mut hints = fcx.lldropflag_hints.borrow_mut();
+ let fragment_infos = tcx.fragment_infos.borrow();
+
+ // Intern table for drop-flag hint datums.
+ let mut seen = HashMap::new();
+
+ if let Some(fragment_infos) = fragment_infos.get(&fn_did) {
+ for &info in fragment_infos {
+
+ let make_datum = |id| {
+ let init_val = C_u8(fcx.ccx, adt::DTOR_NEEDED_HINT as usize);
+ let llname = &format!("dropflag_hint_{}", id);
+ debug!("adding hint {}", llname);
+ let ptr = alloc_ty(entry_bcx, tcx.types.u8, llname);
+ Store(entry_bcx, init_val, ptr);
+ let ty = tcx.mk_ptr(ty::TypeAndMut { ty: tcx.types.u8, mutbl: ast::MutMutable });
+ let flag = datum::Lvalue::new_dropflag_hint("base::init_function");
+ let datum = datum::Datum::new(ptr, ty, flag);
+ datum
+ };
+
+ let (var, datum) = match info {
+ ty::FragmentInfo::Moved { var, .. } |
+ ty::FragmentInfo::Assigned { var, .. } => {
+ let datum = seen.get(&var).cloned().unwrap_or_else(|| {
+ let datum = make_datum(var);
+ seen.insert(var, datum.clone());
+ datum
+ });
+ (var, datum)
+ }
+ };
+ match info {
+ ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
+ debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
+ hints.insert(expr_id, DropHint::new(var, datum));
+ }
+ ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
+ debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
+ hints.insert(expr_id, DropHint::new(var, datum));
+ }
+ }
+ }
+ }
+
entry_bcx
}
}
}
-// work around bizarre resolve errors
-pub type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
-
-// create_datums_for_fn_args: creates rvalue datums for each of the
-// incoming function arguments. These will later be stored into
-// appropriate lvalue datums.
-pub fn create_datums_for_fn_args<'a, 'tcx>(bcx: Block<'a, 'tcx>,
- arg_tys: &[Ty<'tcx>])
- -> Vec<RvalueDatum<'tcx>> {
+// create_datums_for_fn_args: creates lvalue datums for each of the
+// incoming function arguments.
+pub fn create_datums_for_fn_args<'a, 'tcx>(mut bcx: Block<'a, 'tcx>,
+ args: &[ast::Arg],
+ arg_tys: &[Ty<'tcx>],
+ has_tupled_arg: bool,
+ arg_scope: cleanup::CustomScopeIndex)
+ -> Block<'a, 'tcx> {
let _icx = push_ctxt("create_datums_for_fn_args");
let fcx = bcx.fcx;
+ let arg_scope_id = cleanup::CustomScope(arg_scope);
// Return an array wrapping the ValueRefs that we get from `get_param` for
// each argument into datums.
- let mut i = fcx.arg_offset() as c_uint;
- arg_tys.iter().map(|&arg_ty| {
- if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
- let llty = type_of::type_of(bcx.ccx(), arg_ty);
- let data = get_param(fcx.llfn, i);
- let extra = get_param(fcx.llfn, i + 1);
- let fat_ptr = expr::make_fat_ptr(bcx, llty, data, extra);
- i += 2;
- datum::Datum::new(fat_ptr, arg_ty, datum::Rvalue { mode: datum::ByValue })
- } else {
- let llarg = get_param(fcx.llfn, i);
- i += 1;
- datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))
- }
- }).collect()
-}
-
-/// Creates rvalue datums for each of the incoming function arguments and
-/// tuples the arguments. These will later be stored into appropriate lvalue
-/// datums.
-///
-/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
-fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>(
- mut bcx: Block<'blk, 'tcx>,
- arg_scope: cleanup::CustomScopeIndex,
- arg_tys: &[Ty<'tcx>])
- -> Vec<RvalueDatum<'tcx>> {
- let mut result = Vec::new();
- let mut idx = bcx.fcx.arg_offset() as c_uint;
+ //
+ // For certain mode/type combinations, the raw llarg values are passed
+ // by value. However, within the fn body itself, we want to always
+ // have all locals and arguments be by-ref so that we can cancel the
+ // cleanup and for better interaction with LLVM's debug info. So, if
+ // the argument would be passed by value, we store it into an alloca.
+ // This alloca should be optimized away by LLVM's mem-to-reg pass in
+ // the event it's not truly needed.
+ let mut idx = fcx.arg_offset() as c_uint;
for (i, &arg_ty) in arg_tys.iter().enumerate() {
- if i < arg_tys.len() - 1 {
- // Regular argument.
- result.push(if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
- let llty = type_of::type_of(bcx.ccx(), arg_ty);
- let data = get_param(bcx.fcx.llfn, idx);
- let extra = get_param(bcx.fcx.llfn, idx + 1);
+ let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 {
+ if type_of::arg_is_indirect(bcx.ccx(), arg_ty)
+ && bcx.sess().opts.debuginfo != FullDebugInfo {
+ // Don't copy an indirect argument to an alloca, the caller
+ // already put it in a temporary alloca and gave it up, unless
+ // we emit extra-debug-info, which requires local allocas :(.
+ let llarg = get_param(fcx.llfn, idx);
+ idx += 1;
+ bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg);
+ bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
+
+ datum::Datum::new(llarg, arg_ty, datum::Lvalue::new("create_datum_for_fn_args"))
+ } else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
+ let data = get_param(fcx.llfn, idx);
+ let extra = get_param(fcx.llfn, idx + 1);
idx += 2;
- let fat_ptr = expr::make_fat_ptr(bcx, llty, data, extra);
- datum::Datum::new(fat_ptr, arg_ty, datum::Rvalue { mode: datum::ByValue })
+ unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
+ arg_scope_id, (data, extra),
+ |(data, extra), bcx, dst| {
+ Store(bcx, data, expr::get_dataptr(bcx, dst));
+ Store(bcx, extra, expr::get_len(bcx, dst));
+ bcx
+ }))
} else {
- let val = get_param(bcx.fcx.llfn, idx);
+ let llarg = get_param(fcx.llfn, idx);
idx += 1;
- datum::Datum::new(val, arg_ty, arg_kind(bcx.fcx, arg_ty))
- });
-
- continue
- }
-
- // This is the last argument. Tuple it.
- match arg_ty.sty {
- ty::TyTuple(ref tupled_arg_tys) => {
- let tuple_args_scope_id = cleanup::CustomScope(arg_scope);
- let tuple =
+ let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty));
+ unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
+ arg_scope_id, tmp,
+ |tmp, bcx, dst| tmp.store_to(bcx, dst)))
+ }
+ } else {
+ // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
+ match arg_ty.sty {
+ ty::TyTuple(ref tupled_arg_tys) => {
unpack_datum!(bcx,
datum::lvalue_scratch_datum(bcx,
arg_ty,
"tupled_args",
- tuple_args_scope_id,
+ arg_scope_id,
(),
|(),
mut bcx,
};
}
bcx
- }));
- let tuple = unpack_datum!(bcx,
- tuple.to_expr_datum()
- .to_rvalue_datum(bcx,
- "argtuple"));
- result.push(tuple);
- }
- _ => {
- bcx.tcx().sess.bug("last argument of a function with \
- `rust-call` ABI isn't a tuple?!")
+ }))
+ }
+ _ => {
+ bcx.tcx().sess.bug("last argument of a function with \
+ `rust-call` ABI isn't a tuple?!")
+ }
}
};
- }
-
- result
-}
-
-fn copy_args_to_allocas<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- arg_scope: cleanup::CustomScopeIndex,
- args: &[ast::Arg],
- arg_datums: Vec<RvalueDatum<'tcx>>)
- -> Block<'blk, 'tcx> {
- debug!("copy_args_to_allocas");
-
- let _icx = push_ctxt("copy_args_to_allocas");
- let mut bcx = bcx;
-
- let arg_scope_id = cleanup::CustomScope(arg_scope);
-
- for (i, arg_datum) in arg_datums.into_iter().enumerate() {
- // For certain mode/type combinations, the raw llarg values are passed
- // by value. However, within the fn body itself, we want to always
- // have all locals and arguments be by-ref so that we can cancel the
- // cleanup and for better interaction with LLVM's debug info. So, if
- // the argument would be passed by value, we store it into an alloca.
- // This alloca should be optimized away by LLVM's mem-to-reg pass in
- // the event it's not truly needed.
-
- bcx = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id);
+ let pat = &*args[i].pat;
+ bcx = if let Some(ident) = simple_identifier(&*pat) {
+ // Generate nicer LLVM for the common case of fn a pattern
+ // like `x: T`
+ set_value_name(arg_datum.val, &bcx.name(ident.name));
+ bcx.fcx.lllocals.borrow_mut().insert(pat.id, arg_datum);
+ bcx
+ } else {
+ // General path. Copy out the values that are used in the
+ // pattern.
+ _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
+ };
debuginfo::create_argument_metadata(bcx, &args[i]);
}
decl.inputs.iter()
.map(|arg| node_id_type(bcx, arg.id))
.collect::<Vec<_>>();
- let monomorphized_arg_types = match closure_env {
- closure::ClosureEnv::NotClosure => {
- monomorphized_arg_types
- }
-
- // Tuple up closure argument types for the "rust-call" ABI.
- closure::ClosureEnv::Closure(_) => {
- vec![ty::mk_tup(ccx.tcx(), monomorphized_arg_types)]
- }
- };
for monomorphized_arg_type in &monomorphized_arg_types {
debug!("trans_closure: monomorphized_arg_type: {:?}",
monomorphized_arg_type);
debug!("trans_closure: function lltype: {}",
bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
- let arg_datums = match closure_env {
- closure::ClosureEnv::NotClosure if abi == RustCall => {
- create_datums_for_fn_args_under_call_abi(bcx, arg_scope, &monomorphized_arg_types[..])
- }
- _ => {
- let arg_tys = untuple_arguments_if_necessary(ccx, &monomorphized_arg_types, abi);
- create_datums_for_fn_args(bcx, &arg_tys)
- }
+ let has_tupled_arg = match closure_env {
+ closure::ClosureEnv::NotClosure => abi == RustCall,
+ _ => false
};
- bcx = copy_args_to_allocas(bcx, arg_scope, &decl.inputs, arg_datums);
+ bcx = create_datums_for_fn_args(bcx, &decl.inputs, &monomorphized_arg_types,
+ has_tupled_arg, arg_scope);
bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={:?})", param_substs);
let _icx = push_ctxt("trans_fn");
- let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
- let output_type = ty::erase_late_bound_regions(ccx.tcx(), &ty::ty_fn_ret(fn_ty));
- let abi = ty::ty_fn_abi(fn_ty);
+ let fn_ty = ccx.tcx().node_id_to_type(id);
+ let output_type = ccx.tcx().erase_late_bound_regions(&fn_ty.fn_ret());
+ let abi = fn_ty.fn_abi();
trans_closure(ccx, decl, body, llfndecl, param_substs, id, attrs, output_type, abi,
closure::ClosureEnv::NotClosure);
}
let result_ty = match ctor_ty.sty {
ty::TyBareFn(_, ref bft) => {
- ty::erase_late_bound_regions(bcx.tcx(), &bft.sig.output()).unwrap()
+ bcx.tcx().erase_late_bound_regions(&bft.sig.output()).unwrap()
}
_ => ccx.sess().bug(
&format!("trans_enum_variant_constructor: \
disr: ty::Disr,
param_substs: &'tcx Substs<'tcx>,
llfndecl: ValueRef) {
- let ctor_ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
+ let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
let result_ty = match ctor_ty.sty {
ty::TyBareFn(_, ref bft) => {
- ty::erase_late_bound_regions(ccx.tcx(), &bft.sig.output())
+ ccx.tcx().erase_late_bound_regions(&bft.sig.output())
}
_ => ccx.sess().bug(
&format!("trans_enum_variant_or_tuple_like_struct: \
assert!(!fcx.needs_ret_allocas);
- let arg_tys =
- ty::erase_late_bound_regions(
- ccx.tcx(), &ty::ty_fn_args(ctor_ty));
-
- let arg_datums = create_datums_for_fn_args(bcx, &arg_tys[..]);
+ let arg_tys = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_args());
if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
let repr = adt::represent_type(ccx, result_ty.unwrap());
- for (i, arg_datum) in arg_datums.into_iter().enumerate() {
+ let mut llarg_idx = fcx.arg_offset() as c_uint;
+ for (i, arg_ty) in arg_tys.into_iter().enumerate() {
let lldestptr = adt::trans_field_ptr(bcx,
&*repr,
dest,
disr,
i);
- arg_datum.store_to(bcx, lldestptr);
+ if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
+ Store(bcx, get_param(fcx.llfn, llarg_idx), expr::get_dataptr(bcx, lldestptr));
+ Store(bcx, get_param(fcx.llfn, llarg_idx + 1), expr::get_len(bcx, lldestptr));
+ llarg_idx += 2;
+ } else {
+ let arg = get_param(fcx.llfn, llarg_idx);
+ llarg_idx += 1;
+
+ if arg_is_indirect(ccx, arg_ty) {
+ memcpy_ty(bcx, lldestptr, arg, arg_ty);
+ } else {
+ store_ty(bcx, arg, lldestptr, arg_ty);
+ }
+ }
}
adt::trans_set_discr(bcx, &*repr, dest, disr);
}
return
}
- let ty = ty::node_id_to_type(ccx.tcx(), id);
+ let ty = ccx.tcx().node_id_to_type(id);
let avar = adt::represent_type(ccx, ty);
match *avar {
adt::General(_, ref variants, _) => {
}
}
+fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &ast::Item) {
+ match attr::first_attr_value_str_by_name(&i.attrs,
+ "link_section") {
+ Some(sect) => {
+ if contains_null(&sect) {
+ ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`",
+ &sect));
+ }
+ unsafe {
+ let buf = CString::new(sect.as_bytes()).unwrap();
+ llvm::LLVMSetSection(llval, buf.as_ptr());
+ }
+ },
+ None => ()
+ }
+}
+
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
let _icx = push_ctxt("trans_item");
} else {
trans_fn(ccx, &**decl, &**body, llfn, empty_substs, item.id, &item.attrs);
}
+ set_global_section(ccx, llfn, item);
update_linkage(ccx, llfn, Some(item.id),
if is_origin { OriginalTranslation } else { InlinedCopy });
// error in trans. This is used to write compile-fail tests
// that actually test that compilation succeeds without
// reporting an error.
- if ty::has_attr(ccx.tcx(), local_def(item.id), "rustc_error") {
+ if ccx.tcx().has_attr(local_def(item.id), "rustc_error") {
ccx.tcx().sess.span_fatal(item.span, "compilation successful");
}
}
let mut v = TransItemVisitor{ ccx: ccx };
v.visit_expr(&**expr);
- let g = consts::trans_static(ccx, m, item.id);
+ let g = consts::trans_static(ccx, m, expr, item.id, &item.attrs);
+ set_global_section(ccx, g, item);
update_linkage(ccx, g, Some(item.id), OriginalTranslation);
},
ast::ItemForeignMod(ref foreign_mod) => {
debug!("register_fn_llvmty id={} sym={}", node_id, sym);
let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
- ty::FnConverging(ty::mk_nil(ccx.tcx()))).unwrap_or_else(||{
+ ty::FnConverging(ccx.tcx().mk_nil())).unwrap_or_else(||{
ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
});
finish_register_fn(ccx, sym, node_id, llfn);
llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
}
}
+ if ccx.tcx().lang_items.eh_unwind_resume() == Some(def) {
+ llvm::SetLinkage(llfn, llvm::ExternalLinkage);
+ if ccx.use_dll_storage_attrs() {
+ llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
+ }
+ }
}
fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
&ccx.int_type());
let llfn = declare::define_cfn(ccx, "main", llfty,
- ty::mk_nil(ccx.tcx())).unwrap_or_else(||{
+ ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().span_err(sp, "entry symbol `main` defined multiple times");
// FIXME: We should be smart and show a better diagnostic here.
ccx.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
debug!("get_item_val: id={} item={:?}", id, item);
let val = match item {
ast_map::NodeItem(i) => {
- let ty = ty::node_id_to_type(ccx.tcx(), i.id);
+ let ty = ccx.tcx().node_id_to_type(i.id);
let sym = || exported_name(ccx, id, ty, &i.attrs);
let v = match i.node {
- ast::ItemStatic(_, _, ref expr) => {
+ ast::ItemStatic(..) => {
// If this static came from an external crate, then
// we need to get the symbol from csearch instead of
// using the current crate's name/version
let sym = sym();
debug!("making {}", sym);
- // We need the translated value here, because for enums the
- // LLVM type is not fully determined by the Rust type.
- let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
- let (v, ty) = consts::const_expr(ccx, &**expr, empty_substs, None);
- ccx.static_values().borrow_mut().insert(id, v);
- unsafe {
- // boolean SSA values are i1, but they have to be stored in i8 slots,
- // otherwise some LLVM optimization passes don't work as expected
- let llty = if ty::type_is_bool(ty) {
- llvm::LLVMInt8TypeInContext(ccx.llcx())
- } else {
- llvm::LLVMTypeOf(v)
- };
-
- // FIXME(nagisa): probably should be declare_global, because no definition
- // is happening here, but we depend on it being defined here from
- // const::trans_static. This all logic should be replaced.
- let g = declare::define_global(ccx, &sym[..],
- Type::from_ref(llty)).unwrap_or_else(||{
- ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
- sym))
- });
-
- if attr::contains_name(&i.attrs,
- "thread_local") {
- llvm::set_thread_local(g, true);
- }
- ccx.item_symbols().borrow_mut().insert(i.id, sym);
- g
- }
+ // Create the global before evaluating the initializer;
+ // this is necessary to allow recursive statics.
+ let llty = type_of(ccx, ty);
+ let g = declare::define_global(ccx, &sym[..],
+ llty).unwrap_or_else(|| {
+ ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
+ sym))
+ });
+
+ ccx.item_symbols().borrow_mut().insert(i.id, sym);
+ g
}
ast::ItemFn(_, _, _, abi, _, _) => {
_ => ccx.sess().bug("get_item_val: weird result in table")
};
- match attr::first_attr_value_str_by_name(&i.attrs,
- "link_section") {
- Some(sect) => {
- if contains_null(&sect) {
- ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`",
- &sect));
- }
- unsafe {
- let buf = CString::new(sect.as_bytes()).unwrap();
- llvm::LLVMSetSection(v, buf.as_ptr());
- }
- },
- None => ()
- }
-
v
}
match ni.node {
ast::ForeignItemFn(..) => {
let abi = ccx.tcx().map.get_foreign_abi(id);
- let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
+ let ty = ccx.tcx().node_id_to_type(ni.id);
let name = foreign::link_name(&*ni);
let llfn = foreign::register_foreign_item_fn(ccx, abi, ty, &name);
attributes::from_fn_attrs(ccx, &ni.attrs, llfn);
}
};
assert!(!args.is_empty());
- let ty = ty::node_id_to_type(ccx.tcx(), id);
+ let ty = ccx.tcx().node_id_to_type(id);
let parent = ccx.tcx().map.get_parent(id);
let enm = ccx.tcx().map.expect_item(parent);
let sym = exported_name(ccx,
};
let parent = ccx.tcx().map.get_parent(id);
let struct_item = ccx.tcx().map.expect_item(parent);
- let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
+ let ty = ccx.tcx().node_id_to_type(ctor_id);
let sym = exported_name(ccx,
id,
ty,
fn register_method(ccx: &CrateContext, id: ast::NodeId,
attrs: &[ast::Attribute], span: Span) -> ValueRef {
- let mty = ty::node_id_to_type(ccx.tcx(), id);
+ let mty = ccx.tcx().node_id_to_type(id);
let sym = exported_name(ccx, id, mty, &attrs);
// cannot proceed despite the Once not running more than once.
POISONED = true;
}
+
+ ::back::write::configure_llvm(&tcx.sess);
});
if POISONED {
if ccx.sess().opts.debuginfo != NoDebugInfo {
debuginfo::finalize(&ccx);
}
+ for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
+ unsafe {
+ let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
+ llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+ llvm::LLVMDeleteGlobal(old_g);
+ }
+ }
}
// Translate the metadata.
terminate(cx, "Invoke");
debug!("Invoke({} with arguments ({}))",
cx.val_to_string(fn_),
- args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().connect(", "));
+ args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().join(", "));
debug_loc.apply(cx.fcx);
B(cx).invoke(fn_, args, then, catch, attributes)
}
num_clauses: usize) -> ValueRef {
check_not_terminated(cx);
assert!(!cx.unreachable.get());
- B(cx).landing_pad(ty, pers_fn, num_clauses)
+ B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn)
+}
+
+pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) {
+ B(cx).add_clause(landing_pad, clause)
}
pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
args.iter()
.map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
- .connect(", "));
+ .join(", "));
unsafe {
let v = llvm::LLVMBuildInvoke(self.llbuilder,
args.iter()
.map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
- .connect(", "));
+ .join(", "));
unsafe {
let v = llvm::LLVMBuildCall(self.llbuilder, llfn, args.as_ptr(),
}
}
- pub fn landing_pad(&self, ty: Type, pers_fn: ValueRef, num_clauses: usize) -> ValueRef {
+ pub fn landing_pad(&self, ty: Type, pers_fn: ValueRef,
+ num_clauses: usize,
+ llfn: ValueRef) -> ValueRef {
self.count_insn("landingpad");
unsafe {
- llvm::LLVMBuildLandingPad(
- self.llbuilder, ty.to_ref(), pers_fn, num_clauses as c_uint, noname())
+ llvm::LLVMRustBuildLandingPad(self.llbuilder, ty.to_ref(), pers_fn,
+ num_clauses as c_uint, noname(), llfn)
+ }
+ }
+
+ pub fn add_clause(&self, landing_pad: ValueRef, clause: ValueRef) {
+ unsafe {
+ llvm::LLVMAddClause(landing_pad, clause);
}
}
use trans::type_::Type;
use std::cmp;
-use std::iter::repeat;
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
}
let words = (ty_size(ty) + 7) / 8;
- let mut cls: Vec<_> = repeat(NoClass).take(words).collect();
+ let mut cls = vec![NoClass; words];
if words > 4 {
all_mem(&mut cls);
return cls;
use back::link;
use session;
use llvm::{self, ValueRef, get_params};
-use metadata::csearch;
use middle::def;
use middle::subst;
use middle::subst::{Subst, Substs};
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
-use trans::closure;
use trans::common::{self, Block, Result, NodeIdAndSpan, ExprId, CrateContext,
ExprOrMethodCall, FunctionContext, MethodCallKey};
use trans::consts;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags, RegionEscape};
use middle::ty::MethodCall;
use rustc::ast_map;
pub enum CalleeData<'tcx> {
// Constructor for enum variant/tuple-like-struct
// i.e. Some, Ok
- NamedTupleConstructor(subst::Substs<'tcx>, ty::Disr),
+ NamedTupleConstructor(ty::Disr),
// Represents a (possibly monomorphized) top-level fn item or method
// item. Note that this is just the fn-ptr and is not a Rust closure
pub struct Callee<'blk, 'tcx: 'blk> {
pub bcx: Block<'blk, 'tcx>,
pub data: CalleeData<'tcx>,
+ pub ty: Ty<'tcx>
}
fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
let DatumBlock { bcx, datum, .. } = expr::trans(bcx, expr);
match datum.ty.sty {
ty::TyBareFn(..) => {
- let llval = datum.to_llscalarish(bcx);
- return Callee {
+ Callee {
bcx: bcx,
- data: Fn(llval),
- };
+ ty: datum.ty,
+ data: Fn(datum.to_llscalarish(bcx))
+ }
}
_ => {
bcx.tcx().sess.span_bug(
}
}
- fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef)
+ fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, Rvalue>)
-> Callee<'blk, 'tcx> {
- return Callee {
+ Callee {
bcx: bcx,
- data: Fn(llfn),
- };
+ data: Fn(datum.val),
+ ty: datum.ty
+ }
}
fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
_ => false
}
} => {
- let substs = common::node_id_substs(bcx.ccx(),
- ExprId(ref_expr.id),
- bcx.fcx.param_substs);
Callee {
bcx: bcx,
- data: NamedTupleConstructor(substs, 0)
+ data: NamedTupleConstructor(0),
+ ty: expr_ty
}
}
def::DefFn(did, _) if match expr_ty.sty {
ExprId(ref_expr.id),
bcx.fcx.param_substs);
let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did);
- Callee { bcx: bcx, data: Intrinsic(def_id.node, substs) }
+ Callee { bcx: bcx, data: Intrinsic(def_id.node, substs), ty: expr_ty }
}
- def::DefFn(did, _) | def::DefMethod(did, def::FromImpl(_)) => {
+ def::DefFn(did, _) => {
fn_callee(bcx, trans_fn_ref(bcx.ccx(), did, ExprId(ref_expr.id),
- bcx.fcx.param_substs).val)
+ bcx.fcx.param_substs))
}
- def::DefMethod(meth_did, def::FromTrait(trait_did)) => {
- fn_callee(bcx, meth::trans_static_method_callee(bcx.ccx(),
- meth_did,
- trait_did,
- ref_expr.id,
- bcx.fcx.param_substs).val)
+ def::DefMethod(meth_did) => {
+ let method_item = bcx.tcx().impl_or_trait_item(meth_did);
+ let fn_datum = match method_item.container() {
+ ty::ImplContainer(_) => {
+ trans_fn_ref(bcx.ccx(), meth_did,
+ ExprId(ref_expr.id),
+ bcx.fcx.param_substs)
+ }
+ ty::TraitContainer(trait_did) => {
+ meth::trans_static_method_callee(bcx.ccx(),
+ meth_did,
+ trait_did,
+ ref_expr.id,
+ bcx.fcx.param_substs)
+ }
+ };
+ fn_callee(bcx, fn_datum)
}
def::DefVariant(tid, vid, _) => {
- let vinfo = ty::enum_variant_with_id(bcx.tcx(), tid, vid);
- let substs = common::node_id_substs(bcx.ccx(),
- ExprId(ref_expr.id),
- bcx.fcx.param_substs);
+ let vinfo = bcx.tcx().enum_variant_with_id(tid, vid);
// Nullary variants are not callable
assert!(!vinfo.args.is_empty());
Callee {
bcx: bcx,
- data: NamedTupleConstructor(substs, vinfo.disr_val)
+ data: NamedTupleConstructor(vinfo.disr_val),
+ ty: expr_ty
}
}
def::DefStruct(_) => {
- let substs = common::node_id_substs(bcx.ccx(),
- ExprId(ref_expr.id),
- bcx.fcx.param_substs);
Callee {
bcx: bcx,
- data: NamedTupleConstructor(substs, 0)
+ data: NamedTupleConstructor(0),
+ ty: expr_ty
}
}
def::DefStatic(..) |
trans_fn_ref_with_substs(ccx, def_id, node, param_substs, substs)
}
-fn trans_fn_ref_with_substs_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- def_id: ast::DefId,
- ref_id: ast::NodeId,
- substs: subst::Substs<'tcx>)
- -> Callee<'blk, 'tcx> {
- Callee {
- bcx: bcx,
- data: Fn(trans_fn_ref_with_substs(bcx.ccx(),
- def_id,
- ExprId(ref_id),
- bcx.fcx.param_substs,
- substs).val),
- }
-}
-
/// Translates an adapter that implements the `Fn` trait for a fn
/// pointer. This is basically the equivalent of something like:
///
ty::FnOnceClosureKind => false,
};
let bare_fn_ty_maybe_ref = if is_by_ref {
- ty::mk_imm_rptr(tcx, tcx.mk_region(ty::ReStatic), bare_fn_ty)
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty)
} else {
bare_fn_ty
};
bare_fn_ty));
}
};
- let sig = ty::erase_late_bound_regions(tcx, sig);
- let tuple_input_ty = ty::mk_tup(tcx, sig.inputs.to_vec());
- let tuple_fn_ty = ty::mk_bare_fn(tcx,
- opt_def_id,
- tcx.mk_bare_fn(ty::BareFnTy {
- unsafety: ast::Unsafety::Normal,
- abi: synabi::RustCall,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![bare_fn_ty_maybe_ref,
- tuple_input_ty],
- output: sig.output,
- variadic: false
- })}));
+ let sig = tcx.erase_late_bound_regions(sig);
+ let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
+ let tuple_fn_ty = tcx.mk_fn(opt_def_id,
+ tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: synabi::RustCall,
+ sig: ty::Binder(ty::FnSig {
+ inputs: vec![bare_fn_ty_maybe_ref,
+ tuple_input_ty],
+ output: sig.output,
+ variadic: false
+ })}));
debug!("tuple_fn_ty: {:?}", tuple_fn_ty);
//
expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))
);
- bcx = trans_call_inner(bcx,
- DebugLoc::None,
- bare_fn_ty,
- |bcx, _| Callee { bcx: bcx, data: Fn(llfnpointer) },
- ArgVals(&llargs[(self_idx + 1)..]),
- dest).bcx;
+ bcx = trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
+ Callee {
+ bcx: bcx,
+ data: Fn(llfnpointer),
+ ty: bare_fn_ty
+ }
+ }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
param_substs,
substs);
- assert!(substs.types.all(|t| !ty::type_needs_infer(*t)));
- assert!(substs.types.all(|t| !ty::type_has_escaping_regions(*t)));
+ assert!(!substs.types.needs_infer());
+ assert!(!substs.types.has_escaping_regions());
let substs = substs.erase_regions();
// Load the info for the appropriate trait if necessary.
- match ty::trait_of_item(tcx, def_id) {
+ match tcx.trait_of_item(def_id) {
None => {}
Some(trait_id) => {
- ty::populate_implementations_for_trait_if_necessary(tcx, trait_id)
+ tcx.populate_implementations_for_trait_if_necessary(trait_id)
}
}
// We need to do a bunch of special handling for default methods.
// We need to modify the def_id and our substs in order to monomorphize
// the function.
- let (is_default, def_id, substs) = match ty::provided_source(tcx, def_id) {
+ let (is_default, def_id, substs) = match tcx.provided_source(def_id) {
None => {
(false, def_id, tcx.mk_substs(substs))
}
// So, what we need to do is find this substitution and
// compose it with the one we already have.
- let impl_id = ty::impl_or_trait_item(tcx, def_id).container()
+ let impl_id = tcx.impl_or_trait_item(def_id).container()
.id();
- let impl_or_trait_item = ty::impl_or_trait_item(tcx, source_id);
+ let impl_or_trait_item = tcx.impl_or_trait_item(source_id);
match impl_or_trait_item {
ty::MethodTraitItem(method) => {
- let trait_ref = ty::impl_trait_ref(tcx, impl_id).unwrap();
+ let trait_ref = tcx.impl_trait_ref(impl_id).unwrap();
// Compute the first substitution
let first_subst =
- ty::make_substs_for_receiver_types(tcx, &trait_ref, &*method)
+ tcx.make_substs_for_receiver_types(&trait_ref, &*method)
.erase_regions();
// And compose them
}
};
- // If this is a closure, redirect to it.
- match closure::get_or_create_declaration_if_closure(ccx, def_id, substs) {
- None => {}
- Some(llfn) => return llfn,
- }
-
// Check whether this fn has an inlined copy and, if so, redirect
// def_id to the local id of the inlined copy.
let def_id = inline::maybe_instantiate_inline(ccx, def_id);
// Monotype of the REFERENCE to the function (type params
// are subst'd)
let ref_ty = match node {
- ExprId(id) => ty::node_id_to_type(tcx, id),
+ ExprId(id) => tcx.node_id_to_type(id),
MethodCallKey(method_call) => {
- tcx.method_map.borrow().get(&method_call).unwrap().ty
+ tcx.tables.borrow().method_map[&method_call].ty
}
};
let ref_ty = monomorphize::apply_param_substs(tcx,
}
// Type scheme of the function item (may have type params)
- let fn_type_scheme = ty::lookup_item_type(tcx, def_id);
+ let fn_type_scheme = tcx.lookup_item_type(def_id);
let fn_type = monomorphize::normalize_associated_type(tcx, &fn_type_scheme.ty);
// Find the actual function pointer.
// ______________________________________________________________________
// Translating calls
-pub fn trans_call<'a, 'blk, 'tcx>(in_cx: Block<'blk, 'tcx>,
+pub fn trans_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_expr: &ast::Expr,
f: &ast::Expr,
args: CallArgs<'a, 'tcx>,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_call");
- trans_call_inner(in_cx,
+ trans_call_inner(bcx,
call_expr.debug_loc(),
- common::expr_ty_adjusted(in_cx, f),
- |cx, _| trans(cx, f),
+ |bcx, _| trans(bcx, f),
args,
Some(dest)).bcx
}
let _icx = push_ctxt("trans_method_call");
debug!("trans_method_call(call_expr={:?})", call_expr);
let method_call = MethodCall::expr(call_expr.id);
- let method_ty = match bcx.tcx().method_map.borrow().get(&method_call) {
- Some(method) => match method.origin {
- ty::MethodTraitObject(_) => match method.ty.sty {
- ty::TyBareFn(_, ref fty) => {
- ty::mk_bare_fn(bcx.tcx(), None, meth::opaque_method_ty(bcx.tcx(), fty))
- }
- _ => method.ty
- },
- _ => method.ty
- },
- None => panic!("method not found in trans_method_call")
- };
trans_call_inner(
bcx,
call_expr.debug_loc(),
- common::monomorphize_type(bcx, method_ty),
|cx, arg_cleanup_scope| {
meth::trans_method_callee(cx, method_call, Some(rcvr), arg_cleanup_scope)
},
dest: Option<expr::Dest>,
debug_loc: DebugLoc)
-> Result<'blk, 'tcx> {
- let fty = if did.krate == ast::LOCAL_CRATE {
- ty::node_id_to_type(bcx.tcx(), did.node)
- } else {
- csearch::get_type(bcx.tcx(), did).ty
- };
- callee::trans_call_inner(bcx,
- debug_loc,
- fty,
- |bcx, _| {
- trans_fn_ref_with_substs_to_callee(bcx,
- did,
- 0,
- subst::Substs::trans_empty())
- },
- ArgVals(args),
- dest)
+ callee::trans_call_inner(bcx, debug_loc, |bcx, _| {
+ let datum = trans_fn_ref_with_substs(bcx.ccx(),
+ did,
+ ExprId(0),
+ bcx.fcx.param_substs,
+ subst::Substs::trans_empty());
+ Callee {
+ bcx: bcx,
+ data: Fn(datum.val),
+ ty: datum.ty
+ }
+ }, ArgVals(args), dest)
}
-/// This behemoth of a function translates function calls. Unfortunately, in order to generate more
-/// efficient LLVM output at -O0, it has quite a complex signature (refactoring this into two
-/// functions seems like a good idea).
+/// This behemoth of a function translates function calls. Unfortunately, in
+/// order to generate more efficient LLVM output at -O0, it has quite a complex
+/// signature (refactoring this into two functions seems like a good idea).
///
-/// In particular, for lang items, it is invoked with a dest of None, and in that case the return
-/// value contains the result of the fn. The lang item must not return a structural type or else
-/// all heck breaks loose.
+/// In particular, for lang items, it is invoked with a dest of None, and in
+/// that case the return value contains the result of the fn. The lang item must
+/// not return a structural type or else all heck breaks loose.
///
-/// For non-lang items, `dest` is always Some, and hence the result is written into memory
-/// somewhere. Nonetheless we return the actual return value of the function.
+/// For non-lang items, `dest` is always Some, and hence the result is written
+/// into memory somewhere. Nonetheless we return the actual return value of the
+/// function.
pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
- callee_ty: Ty<'tcx>,
get_callee: F,
args: CallArgs<'a, 'tcx>,
dest: Option<expr::Dest>)
let callee = get_callee(bcx, cleanup::CustomScope(arg_cleanup_scope));
let mut bcx = callee.bcx;
- let (abi, ret_ty) = match callee_ty.sty {
+ let (abi, ret_ty) = match callee.ty.sty {
ty::TyBareFn(_, ref f) => {
- let output = ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output());
+ let output = bcx.tcx().erase_late_bound_regions(&f.sig.output());
(f.abi, output)
}
_ => panic!("expected bare rust fn or closure in trans_call_inner")
};
- let (llfn, llenv, llself) = match callee.data {
+ let (llfn, llself) = match callee.data {
Fn(llfn) => {
- (llfn, None, None)
+ (llfn, None)
}
TraitItem(d) => {
- (d.llfn, None, Some(d.llself))
+ (d.llfn, Some(d.llself))
}
Intrinsic(node, substs) => {
assert!(abi == synabi::RustIntrinsic);
}
};
- return intrinsic::trans_intrinsic_call(bcx, node, callee_ty,
+ return intrinsic::trans_intrinsic_call(bcx, node, callee.ty,
arg_cleanup_scope, args,
dest.unwrap(), substs,
call_info);
}
- NamedTupleConstructor(substs, disr) => {
+ NamedTupleConstructor(disr) => {
assert!(dest.is_some());
fcx.pop_custom_cleanup_scope(arg_cleanup_scope);
- let ctor_ty = callee_ty.subst(bcx.tcx(), &substs);
return base::trans_named_tuple_constructor(bcx,
- ctor_ty,
+ callee.ty,
disr,
args,
dest.unwrap(),
expr::Ignore => {
let ret_ty = match ret_ty {
ty::FnConverging(ret_ty) => ret_ty,
- ty::FnDiverging => ty::mk_nil(ccx.tcx())
+ ty::FnDiverging => ccx.tcx().mk_nil()
};
if !is_rust_fn ||
type_of::return_uses_outptr(ccx, ret_ty) ||
}
}
- // Push the environment (or a trait object's self).
- match (llenv, llself) {
- (Some(llenv), None) => llargs.push(llenv),
- (None, Some(llself)) => llargs.push(llself),
- _ => {}
+ // Push a trait object's self.
+ if let Some(llself) = llself {
+ llargs.push(llself);
}
// Push the arguments.
bcx = trans_args(bcx,
args,
- callee_ty,
+ callee.ty,
&mut llargs,
cleanup::CustomScope(arg_cleanup_scope),
llself.is_some(),
let (llret, b) = base::invoke(bcx,
llfn,
&llargs[..],
- callee_ty,
+ callee.ty,
debug_loc);
bcx = b;
llresult = llret;
};
bcx = trans_args(bcx,
args,
- callee_ty,
+ callee.ty,
&mut llargs,
cleanup::CustomScope(arg_cleanup_scope),
false,
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
bcx = foreign::trans_native_call(bcx,
- callee_ty,
+ callee.ty,
llfn,
opt_llretslot.unwrap(),
&llargs[..],
// value.
ArgVals(&'a [ValueRef]),
- // For overloaded operators: `(lhs, Vec(rhs, rhs_id), autoref)`. `lhs`
+ // For overloaded operators: `(lhs, Option(rhs, rhs_id), autoref)`. `lhs`
// is the left-hand-side and `rhs/rhs_id` is the datum/expr-id of
- // the right-hand-side arguments (if any). `autoref` indicates whether the `rhs`
+ // the right-hand-side argument (if any). `autoref` indicates whether the `rhs`
// arguments should be auto-referenced
- ArgOverloadedOp(Datum<'tcx, Expr>, Vec<(Datum<'tcx, Expr>, ast::NodeId)>, bool),
+ ArgOverloadedOp(Datum<'tcx, Expr>, Option<(Datum<'tcx, Expr>, ast::NodeId)>, bool),
// Supply value of arguments as a list of expressions that must be
// translated, for overloaded call operators.
ignore_self: bool)
-> Block<'blk, 'tcx>
{
- let args =
- ty::erase_late_bound_regions(
- bcx.tcx(), &ty::ty_fn_args(fn_ty));
+ let args = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_args());
// Translate the `self` argument first.
if !ignore_self {
ignore_self: bool)
-> Block<'blk, 'tcx> {
// Translate the `self` argument first.
- let arg_tys = ty::erase_late_bound_regions(bcx.tcx(), &ty::ty_fn_args(fn_ty));
+ let arg_tys = bcx.tcx().erase_late_bound_regions( &fn_ty.fn_args());
if !ignore_self {
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_exprs[0]));
bcx = trans_arg_datum(bcx,
debug!("trans_args(abi={})", abi);
let _icx = push_ctxt("trans_args");
- let arg_tys = ty::erase_late_bound_regions(cx.tcx(), &ty::ty_fn_args(fn_ty));
- let variadic = ty::fn_is_variadic(fn_ty);
+ let arg_tys = cx.tcx().erase_late_bound_regions(&fn_ty.fn_args());
+ let variadic = fn_ty.fn_sig().0.variadic;
let mut bcx = cx;
DontAutorefArg,
llargs);
- assert_eq!(arg_tys.len(), 1 + rhs.len());
- for (rhs, rhs_id) in rhs {
+ if let Some((rhs, rhs_id)) = rhs {
+ assert_eq!(arg_tys.len(), 2);
bcx = trans_arg_datum(bcx, arg_tys[1], rhs,
arg_cleanup_scope,
if autoref { DoAutorefArg(rhs_id) } else { DontAutorefArg },
llargs);
+ } else {
+ assert_eq!(arg_tys.len(), 1);
}
}
ArgVals(vs) => {
use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
-use trans::callee;
use trans::common;
-use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan};
+use trans::common::{Block, FunctionContext, NodeIdAndSpan};
+use trans::datum::{Datum, Lvalue};
use trans::debuginfo::{DebugLoc, ToDebugLoc};
-use trans::declare;
use trans::glue;
use middle::region;
use trans::type_::Type;
pub trait Cleanup<'tcx> {
fn must_unwind(&self) -> bool;
- fn clean_on_unwind(&self) -> bool;
fn is_lifetime_end(&self) -> bool;
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
CustomScope(CustomScopeIndex)
}
+#[derive(Copy, Clone, Debug)]
+pub struct DropHint<K>(pub ast::NodeId, pub K);
+
+pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
+pub type DropHintValue = DropHint<ValueRef>;
+
+impl<K> DropHint<K> {
+ pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
+}
+
+impl DropHint<ValueRef> {
+ pub fn value(&self) -> ValueRef { self.1 }
+}
+
+pub trait DropHintMethods {
+ type ValueKind;
+ fn to_value(&self) -> Self::ValueKind;
+}
+impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
+ type ValueKind = DropHintValue;
+ fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
+}
+
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Invoked when we start to trans the code contained within a new cleanup scope.
fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- ty: Ty<'tcx>) {
+ ty: Ty<'tcx>,
+ drop_hint: Option<DropHintDatum<'tcx>>) {
if !self.type_needs_drop(ty) { return; }
+ let drop_hint = drop_hint.map(|hint|hint.to_value());
let drop = box DropValue {
is_immediate: false,
- must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
+ drop_hint: drop_hint,
};
debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- ty: Ty<'tcx>) {
+ ty: Ty<'tcx>,
+ drop_hint: Option<DropHintDatum<'tcx>>) {
if !self.type_needs_drop(ty) { return; }
+ let drop_hint = drop_hint.map(|datum|datum.to_value());
let drop = box DropValue {
is_immediate: false,
- must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: true,
skip_dtor: false,
+ drop_hint: drop_hint,
};
- debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, fill_on_drop={}, skip_dtor={})",
+ debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?},
+ fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty,
drop.fill_on_drop,
- drop.skip_dtor);
+ drop.skip_dtor,
+ drop_hint.is_some());
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
let drop = box DropValue {
is_immediate: false,
- must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: true,
+ drop_hint: None,
};
debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
- let drop = box DropValue {
+ let drop = Box::new(DropValue {
is_immediate: true,
- must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
- };
+ drop_hint: None,
+ });
debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
//
// At this point, `popped_scopes` is empty, and so the final block
// that we return to the user is `Cleanup(AST 24)`.
- while !popped_scopes.is_empty() {
- let mut scope = popped_scopes.pop().unwrap();
-
- if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
- {
+ while let Some(mut scope) = popped_scopes.pop() {
+ if !scope.cleanups.is_empty() {
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
let bcx_in = self.new_block(label.is_unwind(),
None);
let mut bcx_out = bcx_in;
for cleanup in scope.cleanups.iter().rev() {
- if cleanup_is_suitable_for(&**cleanup, label) {
- bcx_out = cleanup.trans(bcx_out,
- scope.debug_loc);
- }
+ bcx_out = cleanup.trans(bcx_out,
+ scope.debug_loc);
}
build::Br(bcx_out, prev_llbb, DebugLoc::None);
prev_llbb = bcx_in.llbb;
- } else {
- debug!("no suitable cleanups in {}",
- scope.block_name("clean"));
- }
- scope.add_cached_early_exit(label, prev_llbb);
+ scope.add_cached_early_exit(label, prev_llbb);
+ }
self.push_scope(scope);
}
debug!("get_or_create_landing_pad");
+ self.inject_unwind_resume_hook();
+
// Check if a landing pad block exists; if not, create one.
{
let mut scopes = self.scopes.borrow_mut();
&[Type::i8p(self.ccx), Type::i32(self.ccx)],
false);
- // The exception handling personality function.
- //
- // If our compilation unit has the `eh_personality` lang item somewhere
- // within it, then we just need to translate that. Otherwise, we're
- // building an rlib which will depend on some upstream implementation of
- // this function, so we just codegen a generic reference to it. We don't
- // specify any of the types for the function, we just make it a symbol
- // that LLVM can later use.
- let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
- Some(def_id) => {
- callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
- pad_bcx.fcx.param_substs).val
- }
- None => {
- let mut personality = self.ccx.eh_personality().borrow_mut();
- match *personality {
- Some(llpersonality) => llpersonality,
- None => {
- let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
- let f = declare::declare_cfn(self.ccx, "rust_eh_personality", fty,
- self.ccx.tcx().types.i32);
- *personality = Some(f);
- f
- }
- }
- }
- };
+ let llpersonality = pad_bcx.fcx.eh_personality();
// The only landing pad clause will be 'cleanup'
let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
is_immediate: bool,
- must_unwind: bool,
val: ValueRef,
ty: Ty<'tcx>,
fill_on_drop: bool,
skip_dtor: bool,
+ drop_hint: Option<DropHintValue>,
}
impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
fn must_unwind(&self) -> bool {
- self.must_unwind
- }
-
- fn clean_on_unwind(&self) -> bool {
- self.must_unwind
+ true
}
fn is_lifetime_end(&self) -> bool {
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
- glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+ glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
};
if self.fill_on_drop {
base::drop_done_fill_mem(bcx, self.val, self.ty);
true
}
- fn clean_on_unwind(&self) -> bool {
- true
- }
-
fn is_lifetime_end(&self) -> bool {
false
}
false
}
- fn clean_on_unwind(&self) -> bool {
- true
- }
-
fn is_lifetime_end(&self) -> bool {
true
}
r
}
-fn cleanup_is_suitable_for(c: &Cleanup,
- label: EarlyExitLabel) -> bool {
- !label.is_unwind() || c.clean_on_unwind()
-}
-
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- ty: Ty<'tcx>);
+ ty: Ty<'tcx>,
+ drop_hint: Option<DropHintDatum<'tcx>>);
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- ty: Ty<'tcx>);
+ ty: Ty<'tcx>,
+ drop_hint: Option<DropHintDatum<'tcx>>);
fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
use arena::TypedArena;
use back::link::{self, mangle_internal_name_by_path_and_seq};
use llvm::{ValueRef, get_params};
-use middle::mem_categorization::Typer;
+use middle::infer;
use trans::adt;
use trans::attributes;
use trans::base::*;
use trans::debuginfo::{self, DebugLoc};
use trans::declare;
use trans::expr;
-use trans::monomorphize::{self, MonoId};
+use trans::monomorphize::{MonoId};
use trans::type_of::*;
-use middle::ty::{self, ClosureTyper};
-use middle::subst::Substs;
+use middle::ty;
use session::config::FullDebugInfo;
use syntax::abi::RustCall;
bcx.fcx.llupvars.borrow_mut().insert(def_id.node, upvar_ptr);
if kind == ty::FnOnceClosureKind && !captured_by_ref {
+ let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
bcx.fcx.schedule_drop_mem(arg_scope_id,
upvar_ptr,
- node_id_type(bcx, def_id.node))
+ node_id_type(bcx, def_id.node),
+ hint)
}
if let Some(env_pointer_alloca) = env_pointer_alloca {
/// Returns the LLVM function declaration for a closure, creating it if
/// necessary. If the ID does not correspond to a closure ID, returns None.
-pub fn get_or_create_declaration_if_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- closure_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Datum<'tcx, Rvalue>> {
- if !ccx.tcx().closure_kinds.borrow().contains_key(&closure_id) {
- // Not a closure.
- return None
- }
-
- let function_type = ty::node_id_to_type(ccx.tcx(), closure_id.node);
- let function_type = monomorphize::apply_param_substs(ccx.tcx(), substs, &function_type);
-
+pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ closure_id: ast::DefId,
+ substs: &ty::ClosureSubsts<'tcx>)
+ -> ValueRef {
// Normalize type so differences in regions and typedefs don't cause
// duplicate declarations
- let function_type = erase_regions(ccx.tcx(), &function_type);
- let params = match function_type.sty {
- ty::TyClosure(_, substs) => &substs.types,
- _ => unreachable!()
- };
+ let substs = erase_regions(ccx.tcx(), substs);
let mono_id = MonoId {
def: closure_id,
- params: params
+ params: &substs.func_substs.types
};
- match ccx.closure_vals().borrow().get(&mono_id) {
- Some(&llfn) => {
- debug!("get_or_create_declaration_if_closure(): found closure {:?}: {:?}",
- mono_id, ccx.tn().val_to_string(llfn));
- return Some(Datum::new(llfn, function_type, Rvalue::new(ByValue)))
- }
- None => {}
+ if let Some(&llfn) = ccx.closure_vals().borrow().get(&mono_id) {
+ debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}",
+ mono_id, ccx.tn().val_to_string(llfn));
+ return llfn;
}
let symbol = ccx.tcx().map.with_path(closure_id.node, |path| {
mangle_internal_name_by_path_and_seq(path, "closure")
});
- // Currently there’s only a single user of get_or_create_declaration_if_closure and it
- // unconditionally defines the function, therefore we use define_* here.
- let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type).unwrap_or_else(||{
- ccx.sess().bug(&format!("symbol `{}` already defined", symbol));
- });
+ let function_type = ccx.tcx().mk_closure_from_closure_substs(closure_id, Box::new(substs));
+ let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type);
// set an inline hint for all closures
attributes::inline(llfn, attributes::InlineAttr::Hint);
ccx.tn().val_to_string(llfn));
ccx.closure_vals().borrow_mut().insert(mono_id, llfn);
- Some(Datum::new(llfn, function_type, Rvalue::new(ByValue)))
+ llfn
}
pub enum Dest<'a, 'tcx: 'a> {
decl: &ast::FnDecl,
body: &ast::Block,
id: ast::NodeId,
- param_substs: &'tcx Substs<'tcx>)
+ closure_substs: &'tcx ty::ClosureSubsts<'tcx>)
-> Option<Block<'a, 'tcx>>
{
+ let param_substs = closure_substs.func_substs;
+
let ccx = match dest {
Dest::SaveIn(bcx, _) => bcx.ccx(),
Dest::Ignore(ccx) => ccx
debug!("trans_closure_expr()");
let closure_id = ast_util::local_def(id);
- let llfn = get_or_create_declaration_if_closure(
- ccx,
- closure_id,
- param_substs).unwrap();
+ let llfn = get_or_create_closure_declaration(ccx, closure_id, closure_substs);
// Get the type of this closure. Use the current `param_substs` as
// the closure substitutions. This makes sense because the closure
// takes the same set of type arguments as the enclosing fn, and
// this function (`trans_closure`) is invoked at the point
// of the closure expression.
- let typer = NormalizingClosureTyper::new(tcx);
- let function_type = typer.closure_type(closure_id, param_substs);
+
+ let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
+ let function_type = infcx.closure_type(closure_id, closure_substs);
let freevars: Vec<ty::Freevar> =
- ty::with_freevars(tcx, id, |fv| fv.iter().cloned().collect());
+ tcx.with_freevars(id, |fv| fv.iter().cloned().collect());
- let sig = ty::erase_late_bound_regions(tcx, &function_type.sig);
+ let sig = tcx.erase_late_bound_regions(&function_type.sig);
trans_closure(ccx,
decl,
body,
- llfn.val,
+ llfn,
param_substs,
id,
&[],
pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
closure_def_id: ast::DefId,
- substs: Substs<'tcx>,
- node: ExprOrMethodCall,
- param_substs: &'tcx Substs<'tcx>,
+ substs: ty::ClosureSubsts<'tcx>,
trait_closure_kind: ty::ClosureKind)
-> ValueRef
{
- // The substitutions should have no type parameters remaining
- // after passing through fulfill_obligation
- let llfn = callee::trans_fn_ref_with_substs(ccx,
- closure_def_id,
- node,
- param_substs,
- substs.clone()).val;
+ // If this is a closure, redirect to it.
+ let llfn = get_or_create_closure_declaration(ccx, closure_def_id, &substs);
// If the closure is a Fn closure, but a FnOnce is needed (etc),
// then adapt the self type
fn trans_closure_adapter_shim<'a, 'tcx>(
ccx: &'a CrateContext<'a, 'tcx>,
closure_def_id: ast::DefId,
- substs: Substs<'tcx>,
+ substs: ty::ClosureSubsts<'tcx>,
llfn_closure_kind: ty::ClosureKind,
trait_closure_kind: ty::ClosureKind,
llfn: ValueRef)
fn trans_fn_once_adapter_shim<'a, 'tcx>(
ccx: &'a CrateContext<'a, 'tcx>,
closure_def_id: ast::DefId,
- substs: Substs<'tcx>,
+ substs: ty::ClosureSubsts<'tcx>,
llreffn: ValueRef)
-> ValueRef
{
ccx.tn().val_to_string(llreffn));
let tcx = ccx.tcx();
- let typer = NormalizingClosureTyper::new(tcx);
+ let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
// Find a version of the closure type. Substitute static for the
// region since it doesn't really matter.
- let substs = tcx.mk_substs(substs);
- let closure_ty = ty::mk_closure(tcx, closure_def_id, substs);
- let ref_closure_ty = ty::mk_imm_rptr(tcx, tcx.mk_region(ty::ReStatic), closure_ty);
+ let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, Box::new(substs.clone()));
+ let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), closure_ty);
// Make a version with the type of by-ref closure.
- let ty::ClosureTy { unsafety, abi, mut sig } = typer.closure_type(closure_def_id, substs);
+ let ty::ClosureTy { unsafety, abi, mut sig } = infcx.closure_type(closure_def_id, &substs);
sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
let llref_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety,
abi: abi,
sig: sig.clone() });
- let llref_fn_ty = ty::mk_bare_fn(tcx, None, llref_bare_fn_ty);
+ let llref_fn_ty = tcx.mk_fn(None, llref_bare_fn_ty);
debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
llref_fn_ty);
let llonce_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety,
abi: abi,
sig: sig });
- let llonce_fn_ty = ty::mk_bare_fn(tcx, None, llonce_bare_fn_ty);
+ let llonce_fn_ty = tcx.mk_fn(None, llonce_bare_fn_ty);
// Create the by-value helper.
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim");
- let lloncefn = declare::define_internal_rust_fn(ccx, &function_name[..], llonce_fn_ty)
- .unwrap_or_else(||{
- ccx.sess().bug(&format!("symbol `{}` already defined", function_name));
- });
-
- let sig = ty::erase_late_bound_regions(tcx, &llonce_bare_fn_ty.sig);
+ let lloncefn = declare::define_internal_rust_fn(ccx, &function_name,
+ llonce_fn_ty);
+ let sig = tcx.erase_late_bound_regions(&llonce_bare_fn_ty.sig);
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
ast::DUMMY_NODE_ID,
false,
sig.output,
- substs,
+ substs.func_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, sig.output);
let callee_data = TraitItem(MethodData { llfn: llreffn,
llself: env_datum.val });
- bcx = callee::trans_call_inner(bcx,
- DebugLoc::None,
- llref_fn_ty,
- |bcx, _| Callee { bcx: bcx, data: callee_data },
- ArgVals(&llargs[(self_idx + 1)..]),
- dest).bcx;
+ bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
+ Callee {
+ bcx: bcx,
+ data: callee_data,
+ ty: llref_fn_ty
+ }
+ }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
fcx.pop_custom_cleanup_scope(self_scope);
use middle::def;
use middle::infer;
use middle::lang_items::LangItem;
-use middle::mem_categorization as mc;
-use middle::region;
-use middle::subst::{self, Subst, Substs};
+use middle::subst::{self, Substs};
use trans::base;
use trans::build;
+use trans::callee;
use trans::cleanup;
use trans::consts;
use trans::datum;
use trans::type_::Type;
use trans::type_of;
use middle::traits;
-use middle::ty::{self, HasProjectionTypes, Ty};
+use middle::ty::{self, HasTypeFlags, Ty};
use middle::ty_fold;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use rustc::ast_map::{PathElem, PathName};
use syntax::codemap::{DUMMY_SP, Span};
use syntax::parse::token::InternedString;
use syntax::parse::token;
-use util::common::memoized;
-use util::nodemap::FnvHashSet;
pub use trans::context::CrateContext;
return t_norm;
}
- fn fold_existential_bounds(&mut self, s: &ty::ExistentialBounds<'tcx>)
- -> ty::ExistentialBounds<'tcx> {
- let mut s = ty_fold::super_fold_existential_bounds(self, s);
-
- // this annoying flag messes up trans normalization
- s.region_bound_will_change = false;
-
- s
- }
-
fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
where T : TypeFoldable<'tcx>
{
- let u = ty::anonymize_late_bound_regions(self.tcx(), t);
+ let u = self.tcx().anonymize_late_bound_regions(t);
ty_fold::super_fold_binder(self, &u)
}
/// Is the type's representation size known at compile time?
pub fn type_is_sized<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
- ty::type_is_sized(None, tcx, DUMMY_SP, ty)
+ ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP)
}
pub fn type_is_fat_ptr<'tcx>(cx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyRawPtr(ty::mt{ty, ..}) |
- ty::TyRef(_, ty::mt{ty, ..}) |
+ ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
+ ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => {
!type_is_sized(cx, ty)
}
}
}
-// Some things don't need cleanups during unwinding because the
-// thread can free them all at once later. Currently only things
-// that only contain scalars and shared boxes can avoid unwind
-// cleanups.
-pub fn type_needs_unwind_cleanup<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
- return memoized(ccx.needs_unwind_cleanup_cache(), ty, |ty| {
- type_needs_unwind_cleanup_(ccx.tcx(), ty, &mut FnvHashSet())
- });
-
- fn type_needs_unwind_cleanup_<'tcx>(tcx: &ty::ctxt<'tcx>,
- ty: Ty<'tcx>,
- tycache: &mut FnvHashSet<Ty<'tcx>>)
- -> bool
- {
- // Prevent infinite recursion
- if !tycache.insert(ty) {
- return false;
- }
-
- let mut needs_unwind_cleanup = false;
- ty::maybe_walk_ty(ty, |ty| {
- needs_unwind_cleanup |= match ty.sty {
- ty::TyBool | ty::TyInt(_) | ty::TyUint(_) |
- ty::TyFloat(_) | ty::TyTuple(_) | ty::TyRawPtr(_) => false,
-
- ty::TyEnum(did, substs) =>
- ty::enum_variants(tcx, did).iter().any(|v|
- v.args.iter().any(|&aty| {
- let t = aty.subst(tcx, substs);
- type_needs_unwind_cleanup_(tcx, t, tycache)
- })
- ),
-
- _ => true
- };
- !needs_unwind_cleanup
- });
- needs_unwind_cleanup
- }
-}
-
/// If `type_needs_drop` returns true, then `ty` is definitely
/// non-copy and *might* have a destructor attached; if it returns
/// false, then `ty` definitely has no destructor (i.e. no drop glue).
/// (Note that this implies that if `ty` has a destructor attached,
/// then `type_needs_drop` will definitely return `true` for `ty`.)
pub fn type_needs_drop<'tcx>(cx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
- type_needs_drop_given_env(cx, ty, &ty::empty_parameter_environment(cx))
+ type_needs_drop_given_env(cx, ty, &cx.empty_parameter_environment())
}
/// Core implementation of type_needs_drop, potentially making use of
// normalized version of the type, and therefore will definitely
// know whether the type implements Copy (and thus needs no
// cleanup/drop/zeroing) ...
- let implements_copy = !ty::type_moves_by_default(¶m_env, DUMMY_SP, ty);
+ let implements_copy = !ty.moves_by_default(param_env, DUMMY_SP);
if implements_copy { return false; }
// bound attached (see above), it is sound to treat it as having a
// destructor (e.g. zero its memory on move).
- let contents = ty::type_contents(cx, ty);
+ let contents = ty.type_contents(cx);
debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
contents.needs_drop(cx)
}
fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyStruct(def_id, substs) => {
- let fields = ty::lookup_struct_fields(ccx.tcx(), def_id);
+ let fields = ccx.tcx().lookup_struct_fields(def_id);
fields.len() == 1 && {
- let ty = ty::lookup_field_type(ccx.tcx(), def_id, fields[0].id, substs);
+ let ty = ccx.tcx().lookup_field_type(def_id, fields[0].id, substs);
let ty = monomorphize::normalize_associated_type(ccx.tcx(), &ty);
type_is_immediate(ccx, ty)
}
use trans::type_of::sizing_type_of;
let tcx = ccx.tcx();
- let simple = ty::type_is_scalar(ty) ||
- ty::type_is_unique(ty) || ty::type_is_region_ptr(ty) ||
+ let simple = ty.is_scalar() ||
+ ty.is_unique() || ty.is_region_ptr() ||
type_is_newtype_immediate(ccx, ty) ||
- ty::type_is_simd(tcx, ty);
+ ty.is_simd(tcx);
if simple && !type_is_fat_ptr(tcx, ty) {
return true;
}
/// zero-size, but not all zero-size types use a `void` return type (in order to aid with C ABI
/// compatibility).
pub fn return_type_is_void(ccx: &CrateContext, ty: Ty) -> bool {
- ty::type_is_nil(ty) || ty::type_is_empty(ccx.tcx(), ty)
+ ty.is_nil() || ty.is_empty(ccx.tcx())
}
/// Generates a unique symbol based off the name given. This is used to create
pub type ExternMap = FnvHashMap<String, ValueRef>;
pub fn validate_substs(substs: &Substs) {
- assert!(substs.types.all(|t| !ty::type_needs_infer(*t)));
+ assert!(!substs.types.needs_infer());
}
// work around bizarre resolve errors
type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
+#[derive(Clone, Debug)]
+struct HintEntry<'tcx> {
+ // The datum for the dropflag-hint itself; note that many
+ // source-level Lvalues will be associated with the same
+ // dropflag-hint datum.
+ datum: cleanup::DropHintDatum<'tcx>,
+}
+
+pub struct DropFlagHintsMap<'tcx> {
+ // Maps NodeId for expressions that read/write unfragmented state
+ // to that state's drop-flag "hint." (A stack-local hint
+ // indicates either that (1.) it is certain that no-drop is
+ // needed, or (2.) inline drop-flag must be consulted.)
+ node_map: NodeMap<HintEntry<'tcx>>,
+}
+
+impl<'tcx> DropFlagHintsMap<'tcx> {
+ pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
+ pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
+ pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
+ self.node_map.insert(id, HintEntry { datum: datum });
+ }
+ pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
+ self.node_map.get(&id).map(|t|t.datum)
+ }
+}
+
// Function context. Every LLVM function we create will have one of
// these.
pub struct FunctionContext<'a, 'tcx: 'a> {
// section of the executable we're generating.
pub llfn: ValueRef,
- // always an empty parameter-environment
+ // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
// The environment argument in a closure.
// Same as above, but for closure upvars
pub llupvars: RefCell<NodeMap<ValueRef>>,
+ // Carries info about drop-flags for local bindings (longer term,
+ // paths) for the code being compiled.
+ pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
+
// The NodeId of the function, or -1 if it doesn't correspond to
// a user-defined function.
pub id: ast::NodeId,
}
pub fn monomorphize<T>(&self, value: &T) -> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
monomorphize::apply_param_substs(self.ccx.tcx(),
self.param_substs,
pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
type_needs_drop_given_env(self.ccx.tcx(), ty, &self.param_env)
}
+
+ pub fn eh_personality(&self) -> ValueRef {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to translate that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function, but rather we wired it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
+ let target = &self.ccx.sess().target.target;
+ match self.ccx.tcx().lang_items.eh_personality() {
+ Some(def_id) if !target.options.is_like_msvc => {
+ callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
+ self.param_substs).val
+ }
+ _ => {
+ let mut personality = self.ccx.eh_personality().borrow_mut();
+ match *personality {
+ Some(llpersonality) => llpersonality,
+ None => {
+ let name = if !target.options.is_like_msvc {
+ "rust_eh_personality"
+ } else if target.arch == "x86" {
+ "_except_handler3"
+ } else {
+ "__C_specific_handler"
+ };
+ let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
+ let f = declare::declare_cfn(self.ccx, name, fty,
+ self.ccx.tcx().types.i32);
+ *personality = Some(f);
+ f
+ }
+ }
+ }
+ }
+ }
+
+ /// By default, LLVM lowers `resume` instructions into calls to `_Unwind_Resume`
+ /// defined in libgcc, however, unlike personality routines, there is no easy way to
+ /// override that symbol. This method injects a local-scoped `_Unwind_Resume` function
+ /// which immediately defers to the user-defined `eh_unwind_resume` lang item.
+ pub fn inject_unwind_resume_hook(&self) {
+ let ccx = self.ccx;
+ if !ccx.sess().target.target.options.custom_unwind_resume ||
+ ccx.unwind_resume_hooked().get() {
+ return;
+ }
+
+ let new_resume = match ccx.tcx().lang_items.eh_unwind_resume() {
+ Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), &self.param_substs).val,
+ None => {
+ let fty = Type::variadic_func(&[], &Type::void(self.ccx));
+ declare::declare_cfn(self.ccx, "rust_eh_unwind_resume", fty,
+ self.ccx.tcx().mk_nil())
+ }
+ };
+
+ unsafe {
+ let resume_type = Type::func(&[Type::i8(ccx).ptr_to()], &Type::void(ccx));
+ let old_resume = llvm::LLVMAddFunction(ccx.llmod(),
+ "_Unwind_Resume\0".as_ptr() as *const _,
+ resume_type.to_ref());
+ llvm::SetLinkage(old_resume, llvm::InternalLinkage);
+ let llbb = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(),
+ old_resume,
+ "\0".as_ptr() as *const _);
+ let builder = ccx.builder();
+ builder.position_at_end(llbb);
+ builder.call(new_resume, &[llvm::LLVMGetFirstParam(old_resume)], None);
+ builder.unreachable(); // it should never return
+
+ // Until DwarfEHPrepare pass has run, _Unwind_Resume is not referenced by any live code
+ // and is subject to dead code elimination. Here we add _Unwind_Resume to @llvm.globals
+ // to prevent that.
+ let i8p_ty = Type::i8p(ccx);
+ let used_ty = Type::array(&i8p_ty, 1);
+ let used = llvm::LLVMAddGlobal(ccx.llmod(), used_ty.to_ref(),
+ "llvm.used\0".as_ptr() as *const _);
+ let old_resume = llvm::LLVMConstBitCast(old_resume, i8p_ty.to_ref());
+ llvm::LLVMSetInitializer(used, C_array(i8p_ty, &[old_resume]));
+ llvm::SetLinkage(used, llvm::AppendingLinkage);
+ llvm::LLVMSetSection(used, "llvm.metadata\0".as_ptr() as *const _)
+ }
+ ccx.unwind_resume_hooked().set(true);
+ }
}
// Basic block context. We create a block context for each basic block
pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
pub fn name(&self, name: ast::Name) -> String {
- token::get_name(name).to_string()
+ name.to_string()
}
pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
}
pub fn monomorphize<T>(&self, value: &T) -> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
monomorphize::apply_param_substs(self.tcx(),
self.fcx.param_substs,
}
}
-impl<'blk, 'tcx> mc::Typer<'tcx> for BlockS<'blk, 'tcx> {
- fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
- Ok(node_id_type(self, id))
- }
-
- fn expr_ty_adjusted(&self, expr: &ast::Expr) -> mc::McResult<Ty<'tcx>> {
- Ok(expr_ty_adjusted(self, expr))
- }
-
- fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>> {
- self.tcx()
- .method_map
- .borrow()
- .get(&method_call)
- .map(|method| monomorphize_type(self, method.ty))
- }
-
- fn node_method_origin(&self, method_call: ty::MethodCall)
- -> Option<ty::MethodOrigin<'tcx>>
- {
- self.tcx()
- .method_map
- .borrow()
- .get(&method_call)
- .map(|method| method.origin.clone())
- }
-
- fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
- &self.tcx().adjustments
- }
-
- fn is_method_call(&self, id: ast::NodeId) -> bool {
- self.tcx().method_map.borrow().contains_key(&ty::MethodCall::expr(id))
- }
-
- fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<region::CodeExtent> {
- self.tcx().region_maps.temporary_scope(rvalue_id)
- }
-
- fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
- Some(self.tcx().upvar_capture_map.borrow().get(&upvar_id).unwrap().clone())
- }
-
- fn type_moves_by_default(&self, span: Span, ty: Ty<'tcx>) -> bool {
- self.fcx.param_env.type_moves_by_default(span, ty)
- }
-}
-
-impl<'blk, 'tcx> ty::ClosureTyper<'tcx> for BlockS<'blk, 'tcx> {
- fn param_env<'a>(&'a self) -> &'a ty::ParameterEnvironment<'a, 'tcx> {
- &self.fcx.param_env
- }
-
- fn closure_kind(&self,
- def_id: ast::DefId)
- -> Option<ty::ClosureKind>
- {
- let typer = NormalizingClosureTyper::new(self.tcx());
- typer.closure_kind(def_id)
- }
-
- fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- let typer = NormalizingClosureTyper::new(self.tcx());
- typer.closure_type(def_id, substs)
- }
-
- fn closure_upvars(&self,
- def_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ty::ClosureUpvar<'tcx>>>
- {
- let typer = NormalizingClosureTyper::new(self.tcx());
- typer.closure_upvars(def_id, substs)
- }
-}
-
pub struct Result<'blk, 'tcx: 'blk> {
pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef
pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
let tcx = bcx.tcx();
- let t = ty::node_id_to_type(tcx, id);
+ let t = tcx.node_id_to_type(id);
monomorphize_type(bcx, t)
}
}
pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> {
- monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
+ monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
}
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
trait_ref, trait_ref.def_id());
- ty::populate_implementations_for_trait_if_necessary(tcx, trait_ref.def_id());
- let infcx = infer::new_infer_ctxt(tcx);
// Do the initial selection for the obligation. This yields the
// shallow result we are looking for -- that is, what specific impl.
- let typer = NormalizingClosureTyper::new(tcx);
- let mut selcx = traits::SelectionContext::new(&infcx, &typer);
+ let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
+ let mut selcx = traits::SelectionContext::new(&infcx);
+
let obligation =
traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID),
trait_ref.to_poly_trait_predicate());
// Currently, we use a fulfillment context to completely resolve
// all nested obligations. This is because they can inform the
// inference of the impl's type parameters.
- let mut fulfill_cx = traits::FulfillmentContext::new(true);
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
let vtable = selection.map(|predicate| {
fulfill_cx.register_predicate_obligation(&infcx, predicate);
});
- let vtable = drain_fulfillment_cx_or_panic(span, &infcx, &mut fulfill_cx, &vtable);
+ let vtable = erase_regions(tcx,
+ &drain_fulfillment_cx_or_panic(span, &infcx, &mut fulfill_cx, &vtable)
+ );
- info!("Cache miss: {:?}", trait_ref);
- ccx.trait_cache().borrow_mut().insert(trait_ref,
- vtable.clone());
+ info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+
+ ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone());
vtable
}
predicates);
let tcx = ccx.tcx();
- let infcx = infer::new_infer_ctxt(tcx);
- let typer = NormalizingClosureTyper::new(tcx);
- let mut selcx = traits::SelectionContext::new(&infcx, &typer);
- let mut fulfill_cx = traits::FulfillmentContext::new(false);
+ let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: predicates, obligations } =
traits::normalize(&mut selcx, cause.clone(), &predicates);
drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
}
-pub struct NormalizingClosureTyper<'a,'tcx:'a> {
- param_env: ty::ParameterEnvironment<'a, 'tcx>
-}
-
-impl<'a,'tcx> NormalizingClosureTyper<'a,'tcx> {
- pub fn new(tcx: &'a ty::ctxt<'tcx>) -> NormalizingClosureTyper<'a,'tcx> {
- // Parameter environment is used to give details about type parameters,
- // but since we are in trans, everything is fully monomorphized.
- NormalizingClosureTyper { param_env: ty::empty_parameter_environment(tcx) }
- }
-}
-
-impl<'a,'tcx> ty::ClosureTyper<'tcx> for NormalizingClosureTyper<'a,'tcx> {
- fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
- &self.param_env
- }
-
- fn closure_kind(&self,
- def_id: ast::DefId)
- -> Option<ty::ClosureKind>
- {
- self.param_env.closure_kind(def_id)
- }
-
- fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- // the substitutions in `substs` are already monomorphized,
- // but we still must normalize associated types
- let closure_ty = self.param_env.tcx.closure_type(def_id, substs);
- monomorphize::normalize_associated_type(self.param_env.tcx, &closure_ty)
- }
-
- fn closure_upvars(&self,
- def_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ty::ClosureUpvar<'tcx>>>
- {
- // the substitutions in `substs` are already monomorphized,
- // but we still must normalize associated types
- let result = ty::closure_upvars(&self.param_env, def_id, substs);
- monomorphize::normalize_associated_type(self.param_env.tcx, &result)
- }
-}
-
pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span,
infcx: &infer::InferCtxt<'a,'tcx>,
fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
// In principle, we only need to do this so long as `result`
// contains unbound type parameters. It could be a slight
// optimization to stop iterating early.
- let typer = NormalizingClosureTyper::new(infcx.tcx);
- match fulfill_cx.select_all_or_error(infcx, &typer) {
+ match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => { }
Err(errors) => {
return Err(errors);
let substs = match node {
ExprId(id) => {
- ty::node_id_item_substs(tcx, id).substs
+ tcx.node_id_item_substs(id).substs
}
MethodCallKey(method_call) => {
- tcx.method_map.borrow().get(&method_call).unwrap().substs.clone()
+ tcx.tables.borrow().method_map[&method_call].substs.clone()
}
};
- if substs.types.any(|t| ty::type_needs_infer(*t)) {
+ if substs.types.needs_infer() {
tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}",
node, substs));
}
use middle::const_eval::{const_int_checked_rem, const_uint_checked_rem};
use middle::const_eval::{const_int_checked_shl, const_uint_checked_shl};
use middle::const_eval::{const_int_checked_shr, const_uint_checked_shr};
+use middle::const_eval::EvalHint::ExprTypeChecked;
+use middle::const_eval::eval_const_expr_partial;
use trans::{adt, closure, debuginfo, expr, inline, machine};
use trans::base::{self, push_ctxt};
use trans::common::*;
use middle::ty::{self, Ty};
use util::nodemap::NodeMap;
-use std::iter::repeat;
+use std::ffi::{CStr, CString};
use libc::c_uint;
-use syntax::{ast, ast_util};
+use syntax::{ast, ast_util, attr};
use syntax::parse::token;
use syntax::ptr::P;
C_integral(Type::uint_from_ty(cx, t), u, false)
}
ast::LitInt(i, ast::UnsuffixedIntLit(_)) => {
- let lit_int_ty = ty::node_id_to_type(cx.tcx(), e.id);
+ let lit_int_ty = cx.tcx().node_id_to_type(e.id);
match lit_int_ty.sty {
ty::TyInt(t) => {
C_integral(Type::int_from_ty(cx, t), i as u64, true)
C_floating(&fs, Type::float_from_ty(cx, t))
}
ast::LitFloatUnsuffixed(ref fs) => {
- let lit_float_ty = ty::node_id_to_type(cx.tcx(), e.id);
+ let lit_float_ty = cx.tcx().node_id_to_type(e.id);
match lit_float_ty.sty {
ty::TyFloat(t) => {
C_floating(&fs, Type::float_from_ty(cx, t))
v: ValueRef,
ty: Ty<'tcx>)
-> (ValueRef, Ty<'tcx>) {
- match ty::deref(ty, true) {
+ match ty.builtin_deref(true) {
Some(mt) => {
if type_is_sized(cx.tcx(), mt.ty) {
(const_deref_ptr(cx, v), mt.ty)
ast::ExprPath(..) => {
let def = ccx.tcx().def_map.borrow().get(&expr.id).unwrap().full_def();
match def {
- def::DefConst(def_id) | def::DefAssociatedConst(def_id, _) => {
- if !ccx.tcx().adjustments.borrow().contains_key(&expr.id) {
+ def::DefConst(def_id) | def::DefAssociatedConst(def_id) => {
+ if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
debug!("get_const_expr_as_global ({:?}): found const {:?}",
expr.id, def_id);
return get_const_val(ccx, def_id, expr);
// Avoid autorefs as they would create global instead of stack
// references, even when only the latter are correct.
let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
- &ty::expr_ty(ccx.tcx(), expr));
+ &ccx.tcx().expr_ty(expr));
const_expr_unadjusted(ccx, expr, ty, param_substs, None)
} else {
const_expr(ccx, expr, param_substs, None).0
fn_args: FnArgMap)
-> (ValueRef, Ty<'tcx>) {
let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &ty::expr_ty(cx.tcx(), e));
+ &cx.tcx().expr_ty(e));
let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args);
let mut llconst = llconst;
let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &ty::expr_ty_adjusted(cx.tcx(), e));
- let opt_adj = cx.tcx().adjustments.borrow().get(&e.id).cloned();
+ &cx.tcx().expr_ty_adjusted(e));
+ let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
match opt_adj {
Some(ty::AdjustReifyFnPointer) => {
// FIXME(#19925) once fn item types are
// Don't copy data to do a deref+ref
// (i.e., skip the last auto-deref).
llconst = addr_of(cx, llconst, "autoref");
- ty = ty::mk_imm_rptr(cx.tcx(), cx.tcx().mk_region(ty::ReStatic), ty);
+ ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReStatic), ty);
}
} else {
let (dv, dt) = const_deref(cx, llconst, ty);
param_substs,
&target);
- let pointee_ty = ty::deref(ty, true)
+ let pointee_ty = ty.builtin_deref(true)
.expect("consts: unsizing got non-pointer type").ty;
let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
// Normally, the source is a thin pointer and we are
(llconst, None)
};
- let unsized_ty = ty::deref(target, true)
+ let unsized_ty = target.builtin_deref(true)
.expect("consts: unsizing got non-pointer target type").ty;
let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
let base = ptrcast(base, ptr_ty);
.map(|e| const_expr(cx, &**e, param_substs, fn_args).0)
.collect()
};
- unsafe {
- let _icx = push_ctxt("const_expr");
- match e.node {
- ast::ExprLit(ref lit) => {
- const_lit(cx, e, &**lit)
- }
- ast::ExprBinary(b, ref e1, ref e2) => {
+ let _icx = push_ctxt("const_expr");
+ match e.node {
+ ast::ExprLit(ref lit) => {
+ const_lit(cx, e, &**lit)
+ },
+ ast::ExprBinary(b, ref e1, ref e2) => {
/* Neither type is bottom, and we expect them to be unified
* already, so the following is safe. */
let (te1, ty) = const_expr(cx, &**e1, param_substs, fn_args);
debug!("const_expr_unadjusted: te1={}, ty={:?}",
cx.tn().val_to_string(te1),
ty);
- let is_simd = ty::type_is_simd(cx.tcx(), ty);
+ let is_simd = ty.is_simd(cx.tcx());
let intype = if is_simd {
- ty::simd_type(cx.tcx(), ty)
+ ty.simd_type(cx.tcx())
} else {
ty
};
- let is_float = ty::type_is_fp(intype);
- let signed = ty::type_is_signed(intype);
+ let is_float = intype.is_fp();
+ let signed = intype.is_signed();
let (te2, _) = const_expr(cx, &**e2, param_substs, fn_args);
check_binary_expr_validity(cx, e, ty, te1, te2);
- match b.node {
- ast::BiAdd => {
- if is_float { llvm::LLVMConstFAdd(te1, te2) }
- else { llvm::LLVMConstAdd(te1, te2) }
- }
- ast::BiSub => {
- if is_float { llvm::LLVMConstFSub(te1, te2) }
- else { llvm::LLVMConstSub(te1, te2) }
- }
- ast::BiMul => {
- if is_float { llvm::LLVMConstFMul(te1, te2) }
- else { llvm::LLVMConstMul(te1, te2) }
- }
- ast::BiDiv => {
- if is_float { llvm::LLVMConstFDiv(te1, te2) }
- else if signed { llvm::LLVMConstSDiv(te1, te2) }
- else { llvm::LLVMConstUDiv(te1, te2) }
- }
- ast::BiRem => {
- if is_float { llvm::LLVMConstFRem(te1, te2) }
- else if signed { llvm::LLVMConstSRem(te1, te2) }
- else { llvm::LLVMConstURem(te1, te2) }
- }
- ast::BiAnd => llvm::LLVMConstAnd(te1, te2),
- ast::BiOr => llvm::LLVMConstOr(te1, te2),
- ast::BiBitXor => llvm::LLVMConstXor(te1, te2),
- ast::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
- ast::BiBitOr => llvm::LLVMConstOr(te1, te2),
- ast::BiShl => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- llvm::LLVMConstShl(te1, te2)
- }
- ast::BiShr => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- if signed { llvm::LLVMConstAShr(te1, te2) }
- else { llvm::LLVMConstLShr(te1, te2) }
- }
- ast::BiEq | ast::BiNe | ast::BiLt | ast::BiLe | ast::BiGt | ast::BiGe => {
- if is_float {
- let cmp = base::bin_op_to_fcmp_predicate(cx, b.node);
- ConstFCmp(cmp, te1, te2)
- } else {
- let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed);
- let bool_val = ConstICmp(cmp, te1, te2);
- if is_simd {
- // LLVM outputs an `< size x i1 >`, so we need to perform
- // a sign extension to get the correctly sized type.
- llvm::LLVMConstIntCast(bool_val, val_ty(te1).to_ref(), True)
- } else {
- bool_val
- }
- }
- }
- }
- },
- ast::ExprUnary(u, ref inner_e) => {
+ unsafe { match b.node {
+ ast::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
+ ast::BiAdd => llvm::LLVMConstAdd(te1, te2),
+
+ ast::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
+ ast::BiSub => llvm::LLVMConstSub(te1, te2),
+
+ ast::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
+ ast::BiMul => llvm::LLVMConstMul(te1, te2),
+
+ ast::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
+ ast::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2),
+ ast::BiDiv => llvm::LLVMConstUDiv(te1, te2),
+
+ ast::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
+ ast::BiRem if signed => llvm::LLVMConstSRem(te1, te2),
+ ast::BiRem => llvm::LLVMConstURem(te1, te2),
+
+ ast::BiAnd => llvm::LLVMConstAnd(te1, te2),
+ ast::BiOr => llvm::LLVMConstOr(te1, te2),
+ ast::BiBitXor => llvm::LLVMConstXor(te1, te2),
+ ast::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
+ ast::BiBitOr => llvm::LLVMConstOr(te1, te2),
+ ast::BiShl => {
+ let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
+ llvm::LLVMConstShl(te1, te2)
+ },
+ ast::BiShr => {
+ let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
+ if signed { llvm::LLVMConstAShr(te1, te2) }
+ else { llvm::LLVMConstLShr(te1, te2) }
+ },
+ ast::BiEq | ast::BiNe | ast::BiLt | ast::BiLe | ast::BiGt | ast::BiGe => {
+ if is_float {
+ let cmp = base::bin_op_to_fcmp_predicate(cx, b.node);
+ ConstFCmp(cmp, te1, te2)
+ } else {
+ let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed);
+ let bool_val = ConstICmp(cmp, te1, te2);
+ if is_simd {
+ // LLVM outputs an `< size x i1 >`, so we need to perform
+ // a sign extension to get the correctly sized type.
+ llvm::LLVMConstIntCast(bool_val, val_ty(te1).to_ref(), True)
+ } else {
+ bool_val
+ }
+ }
+ },
+ } } // unsafe { match b.node {
+ },
+ ast::ExprUnary(u, ref inner_e) => {
let (te, ty) = const_expr(cx, &**inner_e, param_substs, fn_args);
check_unary_expr_validity(cx, e, ty, te);
- let is_float = ty::type_is_fp(ty);
- match u {
- ast::UnUniq | ast::UnDeref => {
- const_deref(cx, te, ty).0
- }
- ast::UnNot => llvm::LLVMConstNot(te),
- ast::UnNeg => {
- if is_float { llvm::LLVMConstFNeg(te) }
- else { llvm::LLVMConstNeg(te) }
- }
+ let is_float = ty.is_fp();
+ unsafe { match u {
+ ast::UnUniq | ast::UnDeref => const_deref(cx, te, ty).0,
+ ast::UnNot => llvm::LLVMConstNot(te),
+ ast::UnNeg if is_float => llvm::LLVMConstFNeg(te),
+ ast::UnNeg => llvm::LLVMConstNeg(te),
+ } }
+ },
+ ast::ExprField(ref base, field) => {
+ let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
+ let brepr = adt::represent_type(cx, bt);
+ expr::with_field_tys(cx.tcx(), bt, None, |discr, field_tys| {
+ let ix = cx.tcx().field_idx_strict(field.node.name, field_tys);
+ adt::const_get_field(cx, &*brepr, bv, discr, ix)
+ })
+ },
+ ast::ExprTupField(ref base, idx) => {
+ let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
+ let brepr = adt::represent_type(cx, bt);
+ expr::with_field_tys(cx.tcx(), bt, None, |discr, _| {
+ adt::const_get_field(cx, &*brepr, bv, discr, idx.node)
+ })
+ },
+
+ ast::ExprIndex(ref base, ref index) => {
+ let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
+ let iv = match eval_const_expr_partial(cx.tcx(), &index, ExprTypeChecked) {
+ Ok(ConstVal::Int(i)) => i as u64,
+ Ok(ConstVal::Uint(u)) => u,
+ _ => cx.sess().span_bug(index.span,
+ "index is not an integer-constant expression")
+ };
+ let (arr, len) = match bt.sty {
+ ty::TyArray(_, u) => (bv, C_uint(cx, u)),
+ ty::TySlice(_) | ty::TyStr => {
+ let e1 = const_get_elt(cx, bv, &[0]);
+ (const_deref_ptr(cx, e1), const_get_elt(cx, bv, &[1]))
+ },
+ ty::TyRef(_, mt) => match mt.ty.sty {
+ ty::TyArray(_, u) => {
+ (const_deref_ptr(cx, bv), C_uint(cx, u))
+ },
+ _ => cx.sess().span_bug(base.span,
+ &format!("index-expr base must be a vector \
+ or string type, found {:?}",
+ bt)),
+ },
+ _ => cx.sess().span_bug(base.span,
+ &format!("index-expr base must be a vector \
+ or string type, found {:?}",
+ bt)),
+ };
+
+ let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
+ let len = match bt.sty {
+ ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
+ ty::TyStr => {
+ assert!(len > 0);
+ len - 1
+ },
+ _ => len,
+ },
+ _ => len,
+ };
+ if iv >= len {
+ // FIXME #3170: report this earlier on in the const-eval
+ // pass. Reporting here is a bit late.
+ cx.sess().span_err(e.span,
+ "const index-expr is out of bounds");
+ C_undef(type_of::type_of(cx, bt).element_type())
+ } else {
+ const_get_elt(cx, arr, &[iv as c_uint])
}
- }
- ast::ExprField(ref base, field) => {
- let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
- let brepr = adt::represent_type(cx, bt);
- expr::with_field_tys(cx.tcx(), bt, None, |discr, field_tys| {
- let ix = ty::field_idx_strict(cx.tcx(), field.node.name, field_tys);
- adt::const_get_field(cx, &*brepr, bv, discr, ix)
- })
- }
- ast::ExprTupField(ref base, idx) => {
- let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
- let brepr = adt::represent_type(cx, bt);
- expr::with_field_tys(cx.tcx(), bt, None, |discr, _| {
- adt::const_get_field(cx, &*brepr, bv, discr, idx.node)
- })
- }
-
- ast::ExprIndex(ref base, ref index) => {
- let (bv, bt) = const_expr(cx, &**base, param_substs, fn_args);
- let iv = match const_eval::eval_const_expr_partial(cx.tcx(), &**index, None) {
- Ok(ConstVal::Int(i)) => i as u64,
- Ok(ConstVal::Uint(u)) => u,
- _ => cx.sess().span_bug(index.span,
- "index is not an integer-constant expression")
- };
- let (arr, len) = match bt.sty {
- ty::TyArray(_, u) => (bv, C_uint(cx, u)),
- ty::TySlice(_) | ty::TyStr => {
- let e1 = const_get_elt(cx, bv, &[0]);
- (const_deref_ptr(cx, e1), const_get_elt(cx, bv, &[1]))
- }
- ty::TyRef(_, mt) => match mt.ty.sty {
- ty::TyArray(_, u) => {
- (const_deref_ptr(cx, bv), C_uint(cx, u))
- },
- _ => cx.sess().span_bug(base.span,
- &format!("index-expr base must be a vector \
- or string type, found {:?}",
- bt))
- },
- _ => cx.sess().span_bug(base.span,
- &format!("index-expr base must be a vector \
- or string type, found {:?}",
- bt))
- };
-
- let len = llvm::LLVMConstIntGetZExtValue(len) as u64;
- let len = match bt.sty {
- ty::TyBox(ty) | ty::TyRef(_, ty::mt{ty, ..}) => match ty.sty {
- ty::TyStr => {
- assert!(len > 0);
- len - 1
- }
- _ => len
- },
- _ => len
- };
- if iv >= len {
- // FIXME #3170: report this earlier on in the const-eval
- // pass. Reporting here is a bit late.
- cx.sess().span_err(e.span,
- "const index-expr is out of bounds");
- C_undef(type_of::type_of(cx, bt).element_type())
- } else {
- const_get_elt(cx, arr, &[iv as c_uint])
- }
- }
- ast::ExprCast(ref base, _) => {
+ },
+ ast::ExprCast(ref base, _) => {
let t_cast = ety;
let llty = type_of::type_of(cx, t_cast);
let (v, t_expr) = const_expr(cx, &**base, param_substs, fn_args);
}
if type_is_fat_ptr(cx.tcx(), t_expr) {
// Fat pointer casts.
- let t_cast_inner = ty::deref(t_cast, true).expect("cast to non-pointer").ty;
+ let t_cast_inner = t_cast.builtin_deref(true).expect("cast to non-pointer").ty;
let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
let addr = ptrcast(const_get_elt(cx, v, &[abi::FAT_PTR_ADDR as u32]),
ptr_ty);
return addr;
}
}
- match (CastTy::from_ty(cx.tcx(), t_expr).expect("bad input type for cast"),
- CastTy::from_ty(cx.tcx(), t_cast).expect("bad output type for cast")) {
- (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
- let repr = adt::represent_type(cx, t_expr);
- let discr = adt::const_get_discrim(cx, &*repr, v);
- let iv = C_integral(cx.int_type(), discr, false);
- let s = adt::is_discr_signed(&*repr) as Bool;
- llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
- }
- (CastTy::Int(_), CastTy::Int(_)) => {
- let s = ty::type_is_signed(t_expr) as Bool;
- llvm::LLVMConstIntCast(v, llty.to_ref(), s)
- }
- (CastTy::Int(_), CastTy::Float) => {
- if ty::type_is_signed(t_expr) {
- llvm::LLVMConstSIToFP(v, llty.to_ref())
- } else {
- llvm::LLVMConstUIToFP(v, llty.to_ref())
- }
- }
- (CastTy::Float, CastTy::Float) => {
- llvm::LLVMConstFPCast(v, llty.to_ref())
- }
- (CastTy::Float, CastTy::Int(IntTy::I)) => {
- llvm::LLVMConstFPToSI(v, llty.to_ref())
- }
- (CastTy::Float, CastTy::Int(_)) => {
- llvm::LLVMConstFPToUI(v, llty.to_ref())
- }
- (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
- | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
- ptrcast(v, llty)
- }
- (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
- (CastTy::Int(_), CastTy::Ptr(_)) => {
- llvm::LLVMConstIntToPtr(v, llty.to_ref())
- }
- (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
- llvm::LLVMConstPtrToInt(v, llty.to_ref())
- }
- _ => {
- cx.sess().impossible_case(e.span,
- "bad combination of types for cast")
- }
- }
- }
- ast::ExprAddrOf(ast::MutImmutable, ref sub) => {
- // If this is the address of some static, then we need to return
- // the actual address of the static itself (short circuit the rest
- // of const eval).
- let mut cur = sub;
- loop {
- match cur.node {
- ast::ExprParen(ref sub) => cur = sub,
- ast::ExprBlock(ref blk) => {
+ unsafe { match (
+ CastTy::from_ty(cx.tcx(), t_expr).expect("bad input type for cast"),
+ CastTy::from_ty(cx.tcx(), t_cast).expect("bad output type for cast"),
+ ) {
+ (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
+ let repr = adt::represent_type(cx, t_expr);
+ let discr = adt::const_get_discrim(cx, &*repr, v);
+ let iv = C_integral(cx.int_type(), discr, false);
+ let s = adt::is_discr_signed(&*repr) as Bool;
+ llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
+ },
+ (CastTy::Int(_), CastTy::Int(_)) => {
+ let s = t_expr.is_signed() as Bool;
+ llvm::LLVMConstIntCast(v, llty.to_ref(), s)
+ },
+ (CastTy::Int(_), CastTy::Float) => {
+ if t_expr.is_signed() {
+ llvm::LLVMConstSIToFP(v, llty.to_ref())
+ } else {
+ llvm::LLVMConstUIToFP(v, llty.to_ref())
+ }
+ },
+ (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
+ (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
+ (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
+ (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
+ | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
+ ptrcast(v, llty)
+ },
+ (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
+ (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
+ (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
+ llvm::LLVMConstPtrToInt(v, llty.to_ref())
+ },
+ _ => {
+ cx.sess().impossible_case(e.span,
+ "bad combination of types for cast")
+ },
+ } } // unsafe { match ( ... ) {
+ },
+ ast::ExprAddrOf(ast::MutImmutable, ref sub) => {
+ // If this is the address of some static, then we need to return
+ // the actual address of the static itself (short circuit the rest
+ // of const eval).
+ let mut cur = sub;
+ loop {
+ match cur.node {
+ ast::ExprParen(ref sub) => cur = sub,
+ ast::ExprBlock(ref blk) => {
if let Some(ref sub) = blk.expr {
cur = sub;
} else {
break;
}
- }
- _ => break,
- }
- }
- let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def());
- if let Some(def::DefStatic(def_id, _)) = opt_def {
- get_static_val(cx, def_id, ety)
- } else {
- // If this isn't the address of a static, then keep going through
- // normal constant evaluation.
- let (v, _) = const_expr(cx, &**sub, param_substs, fn_args);
- addr_of(cx, v, "ref")
- }
- }
- ast::ExprAddrOf(ast::MutMutable, ref sub) => {
- let (v, _) = const_expr(cx, &**sub, param_substs, fn_args);
- addr_of_mut(cx, v, "ref_mut_slice")
- }
- ast::ExprTup(ref es) => {
- let repr = adt::represent_type(cx, ety);
- let vals = map_list(&es[..]);
- adt::trans_const(cx, &*repr, 0, &vals[..])
- }
- ast::ExprStruct(_, ref fs, ref base_opt) => {
- let repr = adt::represent_type(cx, ety);
-
- let base_val = match *base_opt {
+ },
+ _ => break,
+ }
+ }
+ let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def());
+ if let Some(def::DefStatic(def_id, _)) = opt_def {
+ get_static_val(cx, def_id, ety)
+ } else {
+ // If this isn't the address of a static, then keep going through
+ // normal constant evaluation.
+ let (v, _) = const_expr(cx, &**sub, param_substs, fn_args);
+ addr_of(cx, v, "ref")
+ }
+ },
+ ast::ExprAddrOf(ast::MutMutable, ref sub) => {
+ let (v, _) = const_expr(cx, &**sub, param_substs, fn_args);
+ addr_of_mut(cx, v, "ref_mut_slice")
+ },
+ ast::ExprTup(ref es) => {
+ let repr = adt::represent_type(cx, ety);
+ let vals = map_list(&es[..]);
+ adt::trans_const(cx, &*repr, 0, &vals[..])
+ },
+ ast::ExprStruct(_, ref fs, ref base_opt) => {
+ let repr = adt::represent_type(cx, ety);
+
+ let base_val = match *base_opt {
Some(ref base) => Some(const_expr(cx, &**base, param_substs, fn_args)),
None => None
- };
-
- expr::with_field_tys(cx.tcx(), ety, Some(e.id), |discr, field_tys| {
- let cs = field_tys.iter().enumerate()
- .map(|(ix, &field_ty)| {
- match fs.iter().find(|f| field_ty.name == f.ident.node.name) {
- Some(ref f) => const_expr(cx, &*f.expr, param_substs, fn_args).0,
- None => {
- match base_val {
- Some((bv, _)) => {
- adt::const_get_field(cx, &*repr, bv,
- discr, ix)
- }
- None => {
- cx.sess().span_bug(e.span,
- "missing struct field")
- }
- }
- }
- }
- }).collect::<Vec<_>>();
- if ty::type_is_simd(cx.tcx(), ety) {
- C_vector(&cs[..])
- } else {
- adt::trans_const(cx, &*repr, discr, &cs[..])
- }
- })
- }
- ast::ExprVec(ref es) => {
- let unit_ty = ty::sequence_element_type(cx.tcx(), ety);
+ };
+
+ expr::with_field_tys(cx.tcx(), ety, Some(e.id), |discr, field_tys| {
+ let cs = field_tys.iter().enumerate()
+ .map(|(ix, &field_ty)| {
+ match (fs.iter().find(|f| field_ty.name == f.ident.node.name), base_val) {
+ (Some(ref f), _) => const_expr(cx, &*f.expr, param_substs, fn_args).0,
+ (_, Some((bv, _))) => adt::const_get_field(cx, &*repr, bv, discr, ix),
+ (_, None) => cx.sess().span_bug(e.span, "missing struct field"),
+ }
+ }).collect::<Vec<_>>();
+ if ety.is_simd(cx.tcx()) {
+ C_vector(&cs[..])
+ } else {
+ adt::trans_const(cx, &*repr, discr, &cs[..])
+ }
+ })
+ },
+ ast::ExprVec(ref es) => {
+ let unit_ty = ety.sequence_element_type(cx.tcx());
let llunitty = type_of::type_of(cx, unit_ty);
- let vs = es.iter().map(|e| const_expr(cx, &**e, param_substs, fn_args).0)
- .collect::<Vec<_>>();
+ let vs = es.iter()
+ .map(|e| const_expr(cx, &**e, param_substs, fn_args).0)
+ .collect::<Vec<_>>();
// If the vector contains enums, an LLVM array won't work.
if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
C_struct(cx, &vs[..], false)
} else {
C_array(llunitty, &vs[..])
}
- }
- ast::ExprRepeat(ref elem, ref count) => {
- let unit_ty = ty::sequence_element_type(cx.tcx(), ety);
+ },
+ ast::ExprRepeat(ref elem, ref count) => {
+ let unit_ty = ety.sequence_element_type(cx.tcx());
let llunitty = type_of::type_of(cx, unit_ty);
- let n = ty::eval_repeat_count(cx.tcx(), count);
+ let n = cx.tcx().eval_repeat_count(count);
let unit_val = const_expr(cx, &**elem, param_substs, fn_args).0;
- let vs: Vec<_> = repeat(unit_val).take(n).collect();
+ let vs = vec![unit_val; n];
if val_ty(unit_val) != llunitty {
C_struct(cx, &vs[..], false)
} else {
C_array(llunitty, &vs[..])
}
- }
- ast::ExprPath(..) => {
+ },
+ ast::ExprPath(..) => {
let def = cx.tcx().def_map.borrow().get(&e.id).unwrap().full_def();
match def {
def::DefLocal(id) => {
def::DefFn(..) | def::DefMethod(..) => {
expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
}
- def::DefConst(def_id) | def::DefAssociatedConst(def_id, _) => {
+ def::DefConst(def_id) | def::DefAssociatedConst(def_id) => {
const_deref_ptr(cx, get_const_val(cx, def_id, e))
}
def::DefVariant(enum_did, variant_did, _) => {
- let vinfo = ty::enum_variant_with_id(cx.tcx(),
- enum_did,
- variant_did);
+ let vinfo = cx.tcx().enum_variant_with_id(enum_did, variant_did);
if !vinfo.args.is_empty() {
// N-ary variant.
expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val
or variant def")
}
}
- }
- ast::ExprCall(ref callee, ref args) => {
- let mut callee = &**callee;
- loop {
- callee = match callee.node {
- ast::ExprParen(ref inner) => &**inner,
- ast::ExprBlock(ref block) => match block.expr {
- Some(ref tail) => &**tail,
- None => break
- },
- _ => break
- };
- }
- let def = cx.tcx().def_map.borrow()[&callee.id].full_def();
- let arg_vals = map_list(args);
- match def {
- def::DefFn(did, _) | def::DefMethod(did, _) => {
- const_fn_call(cx, ExprId(callee.id), did, &arg_vals, param_substs)
- }
- def::DefStruct(_) => {
- if ty::type_is_simd(cx.tcx(), ety) {
- C_vector(&arg_vals[..])
- } else {
- let repr = adt::represent_type(cx, ety);
- adt::trans_const(cx, &*repr, 0, &arg_vals[..])
- }
- }
- def::DefVariant(enum_did, variant_did, _) => {
- let repr = adt::represent_type(cx, ety);
- let vinfo = ty::enum_variant_with_id(cx.tcx(),
- enum_did,
- variant_did);
- adt::trans_const(cx,
- &*repr,
- vinfo.disr_val,
- &arg_vals[..])
- }
- _ => cx.sess().span_bug(e.span, "expected a struct, variant, or const fn def")
- }
- }
- ast::ExprMethodCall(_, _, ref args) => {
- let arg_vals = map_list(args);
- let method_call = ty::MethodCall::expr(e.id);
- let method_did = match cx.tcx().method_map.borrow()[&method_call].origin {
- ty::MethodStatic(did) => did,
- _ => cx.sess().span_bug(e.span, "expected a const method def")
- };
- const_fn_call(cx, MethodCallKey(method_call),
- method_did, &arg_vals, param_substs)
- }
- ast::ExprParen(ref e) => const_expr(cx, &**e, param_substs, fn_args).0,
- ast::ExprBlock(ref block) => {
+ },
+ ast::ExprCall(ref callee, ref args) => {
+ let mut callee = &**callee;
+ loop {
+ callee = match callee.node {
+ ast::ExprParen(ref inner) => &**inner,
+ ast::ExprBlock(ref block) => match block.expr {
+ Some(ref tail) => &**tail,
+ None => break,
+ },
+ _ => break,
+ };
+ }
+ let def = cx.tcx().def_map.borrow()[&callee.id].full_def();
+ let arg_vals = map_list(args);
+ match def {
+ def::DefFn(did, _) | def::DefMethod(did) => {
+ const_fn_call(cx, ExprId(callee.id), did, &arg_vals, param_substs)
+ }
+ def::DefStruct(_) => {
+ if ety.is_simd(cx.tcx()) {
+ C_vector(&arg_vals[..])
+ } else {
+ let repr = adt::represent_type(cx, ety);
+ adt::trans_const(cx, &*repr, 0, &arg_vals[..])
+ }
+ }
+ def::DefVariant(enum_did, variant_did, _) => {
+ let repr = adt::represent_type(cx, ety);
+ let vinfo = cx.tcx().enum_variant_with_id(enum_did, variant_did);
+ adt::trans_const(cx,
+ &*repr,
+ vinfo.disr_val,
+ &arg_vals[..])
+ }
+ _ => cx.sess().span_bug(e.span, "expected a struct, variant, or const fn def"),
+ }
+ },
+ ast::ExprMethodCall(_, _, ref args) => {
+ let arg_vals = map_list(args);
+ let method_call = ty::MethodCall::expr(e.id);
+ let method_did = cx.tcx().tables.borrow().method_map[&method_call].def_id;
+ const_fn_call(cx, MethodCallKey(method_call),
+ method_did, &arg_vals, param_substs)
+ },
+ ast::ExprParen(ref e) => const_expr(cx, &**e, param_substs, fn_args).0,
+ ast::ExprBlock(ref block) => {
match block.expr {
Some(ref expr) => const_expr(cx, &**expr, param_substs, fn_args).0,
- None => C_nil(cx)
+ None => C_nil(cx),
+ }
+ },
+ ast::ExprClosure(_, ref decl, ref body) => {
+ match ety.sty {
+ ty::TyClosure(_, ref substs) => {
+ closure::trans_closure_expr(closure::Dest::Ignore(cx), decl,
+ body, e.id, substs);
+ }
+ _ =>
+ cx.sess().span_bug(
+ e.span,
+ &format!("bad type for closure expr: {:?}", ety))
}
- }
- ast::ExprClosure(_, ref decl, ref body) => {
- closure::trans_closure_expr(closure::Dest::Ignore(cx),
- decl,
- body,
- e.id,
- param_substs);
C_null(type_of::type_of(cx, ety))
- }
- _ => cx.sess().span_bug(e.span,
- "bad constant expression type in consts::const_expr")
- }
+ },
+ _ => cx.sess().span_bug(e.span,
+ "bad constant expression type in consts::const_expr"),
}
}
-
-pub fn trans_static(ccx: &CrateContext, m: ast::Mutability, id: ast::NodeId) -> ValueRef {
+pub fn trans_static(ccx: &CrateContext,
+ m: ast::Mutability,
+ expr: &ast::Expr,
+ id: ast::NodeId,
+ attrs: &Vec<ast::Attribute>)
+ -> ValueRef {
unsafe {
let _icx = push_ctxt("trans_static");
let g = base::get_item_val(ccx, id);
- // At this point, get_item_val has already translated the
- // constant's initializer to determine its LLVM type.
- let v = ccx.static_values().borrow().get(&id).unwrap().clone();
+
+ let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
+ let (v, _) = const_expr(ccx, expr, empty_substs, None);
+
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
- let v = if llvm::LLVMTypeOf(v) == Type::i1(ccx).to_ref() {
- llvm::LLVMConstZExt(v, Type::i8(ccx).to_ref())
+ let mut val_llty = llvm::LLVMTypeOf(v);
+ let v = if val_llty == Type::i1(ccx).to_ref() {
+ val_llty = Type::i8(ccx).to_ref();
+ llvm::LLVMConstZExt(v, val_llty)
} else {
v
};
+
+ let ty = ccx.tcx().node_id_to_type(id);
+ let llty = type_of::type_of(ccx, ty);
+ let g = if val_llty == llty.to_ref() {
+ g
+ } else {
+ // If we created the global with the wrong type,
+ // correct the type.
+ let empty_string = CString::new("").unwrap();
+ let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
+ let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
+ llvm::LLVMSetValueName(g, empty_string.as_ptr());
+ let new_g = llvm::LLVMGetOrInsertGlobal(
+ ccx.llmod(), name_string.as_ptr(), val_llty);
+ // To avoid breaking any invariants, we leave around the old
+ // global for the moment; we'll replace all references to it
+ // with the new global later. (See base::trans_crate.)
+ ccx.statics_to_rauw().borrow_mut().push((g, new_g));
+ new_g
+ };
llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if m != ast::MutMutable {
- let node_ty = ty::node_id_to_type(ccx.tcx(), id);
- let tcontents = ty::type_contents(ccx.tcx(), node_ty);
+ let tcontents = ty.type_contents(ccx.tcx());
if !tcontents.interior_unsafe() {
- llvm::LLVMSetGlobalConstant(g, True);
+ llvm::LLVMSetGlobalConstant(g, llvm::True);
}
}
+
debuginfo::create_global_var_metadata(ccx, id, g);
+
+ if attr::contains_name(attrs,
+ "thread_local") {
+ llvm::set_thread_local(g, true);
+ }
g
}
}
+
fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
ty: Ty<'tcx>) -> ValueRef {
if ast_util::is_local(did) { return base::get_item_val(ccx, did.node) }
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
-use llvm::TargetData;
-use llvm::mk_target_data;
use metadata::common::LinkMeta;
use middle::def::ExportMap;
use middle::traits;
pub struct LocalCrateContext<'tcx> {
llmod: ModuleRef,
llcx: ContextRef,
- td: TargetData,
tn: TypeNames,
externs: RefCell<ExternMap>,
item_vals: RefCell<NodeMap<ValueRef>>,
/// Cache of emitted const values
const_values: RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>), ValueRef>>,
- /// Cache of emitted static values
- static_values: RefCell<NodeMap<ValueRef>>,
-
/// Cache of external const values
extern_const_values: RefCell<DefIdMap<ValueRef>>,
/// Cache of closure wrappers for bare fn's.
closure_bare_wrapper_cache: RefCell<FnvHashMap<ValueRef, ValueRef>>,
+ /// List of globals for static variables which need to be passed to the
+ /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
+ /// (We have to make sure we don't invalidate any ValueRefs referring
+ /// to constants.)
+ statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
+
lltypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
llsizingtypes: RefCell<FnvHashMap<Ty<'tcx>, Type>>,
adt_reprs: RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>>,
dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
eh_personality: RefCell<Option<ValueRef>>,
+ rust_try_fn: RefCell<Option<ValueRef>>,
+ unwind_resume_hooked: Cell<bool>,
intrinsics: RefCell<FnvHashMap<&'static str, ValueRef>>,
let mod_name = CString::new(mod_name).unwrap();
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
- let data_layout = sess.target.target.data_layout.as_bytes();
- let data_layout = CString::new(data_layout).unwrap();
- llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+ let custom_data_layout = &sess.target.target.options.data_layout[..];
+ if custom_data_layout.len() > 0 {
+ let data_layout = CString::new(custom_data_layout).unwrap();
+ llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+ } else {
+ let tm = ::back::write::create_target_machine(sess);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+ }
let llvm_target = sess.target.target.llvm_target.as_bytes();
let llvm_target = CString::new(llvm_target).unwrap();
unsafe {
let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
- let td = mk_target_data(&shared.tcx
- .sess
- .target
- .target
- .data_layout
- );
-
let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
Some(debuginfo::CrateDebugContext::new(llmod))
} else {
let mut local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
- td: td,
tn: TypeNames::new(),
externs: RefCell::new(FnvHashMap()),
item_vals: RefCell::new(NodeMap()),
const_unsized: RefCell::new(FnvHashMap()),
const_globals: RefCell::new(FnvHashMap()),
const_values: RefCell::new(FnvHashMap()),
- static_values: RefCell::new(NodeMap()),
extern_const_values: RefCell::new(DefIdMap()),
impl_method_cache: RefCell::new(FnvHashMap()),
closure_bare_wrapper_cache: RefCell::new(FnvHashMap()),
+ statics_to_rauw: RefCell::new(Vec::new()),
lltypes: RefCell::new(FnvHashMap()),
llsizingtypes: RefCell::new(FnvHashMap()),
adt_reprs: RefCell::new(FnvHashMap()),
closure_vals: RefCell::new(FnvHashMap()),
dbg_cx: dbg_cx,
eh_personality: RefCell::new(None),
+ rust_try_fn: RefCell::new(None),
+ unwind_resume_hooked: Cell::new(false),
intrinsics: RefCell::new(FnvHashMap()),
n_llvm_insns: Cell::new(0),
trait_cache: RefCell::new(FnvHashMap()),
self.local.llcx
}
- pub fn td<'a>(&'a self) -> &'a TargetData {
- &self.local.td
+ pub fn td(&self) -> llvm::TargetDataRef {
+ unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) }
}
pub fn tn<'a>(&'a self) -> &'a TypeNames {
&self.local.const_values
}
- pub fn static_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
- &self.local.static_values
- }
-
pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
&self.local.extern_const_values
}
&self.local.closure_bare_wrapper_cache
}
+ pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell<Vec<(ValueRef, ValueRef)>> {
+ &self.local.statics_to_rauw
+ }
+
pub fn lltypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
&self.local.lltypes
}
&self.local.eh_personality
}
+ pub fn rust_try_fn<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
+ &self.local.rust_try_fn
+ }
+
+ pub fn unwind_resume_hooked<'a>(&'a self) -> &'a Cell<bool> {
+ &self.local.unwind_resume_hooked
+ }
+
fn intrinsics<'a>(&'a self) -> &'a RefCell<FnvHashMap<&'static str, ValueRef>> {
&self.local.intrinsics
}
($name:expr, fn() -> $ret:expr) => (
if *key == $name {
let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret),
- ty::mk_nil(ccx.tcx()));
+ ccx.tcx().mk_nil());
ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if *key == $name {
let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret),
- ty::mk_nil(ccx.tcx()));
+ ccx.tcx().mk_nil());
ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+ ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
// Some intrinsics were introduced in later versions of LLVM, but they have
// fallbacks in libc or libm and such.
} else if *key == $name {
let f = declare::declare_cfn(ccx, stringify!($cname),
Type::func(&[$($arg),*], &void),
- ty::mk_nil(ccx.tcx()));
+ ccx.tcx().mk_nil());
llvm::SetLinkage(f, llvm::InternalLinkage);
let bld = ccx.builder();
} else if *key == $name {
let f = declare::declare_cfn(ccx, stringify!($cname),
Type::func(&[$($arg),*], &$ret),
- ty::mk_nil(ccx.tcx()));
+ ccx.tcx().mk_nil());
ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
pub use self::RvalueMode::*;
use llvm::ValueRef;
+use trans::adt;
use trans::base::*;
-use trans::build::Load;
+use trans::build::{Load, Store};
use trans::common::*;
use trans::cleanup;
-use trans::cleanup::CleanupMethods;
+use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
use trans::expr;
use trans::tvec;
use trans::type_of;
-use middle::ty::{self, Ty};
+use middle::ty::Ty;
use std::fmt;
use syntax::ast;
/// describes where the value is stored, what Rust type the value has,
/// whether it is addressed by reference, and so forth. Please refer
/// the section on datums in `README.md` for more details.
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
pub struct Datum<'tcx, K> {
/// The llvm value. This is either a pointer to the Rust value or
/// the value itself, depending on `kind` below.
/// `val` is a pointer into memory for which a cleanup is scheduled
/// (and thus has type *T). If you move out of an Lvalue, you must
/// zero out the memory (FIXME #5016).
- LvalueExpr,
+ LvalueExpr(Lvalue),
}
-#[derive(Clone, Copy, Debug)]
-pub struct Lvalue;
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum DropFlagInfo {
+ DontZeroJustUse(ast::NodeId),
+ ZeroAndMaintain(ast::NodeId),
+ None,
+}
+
+impl DropFlagInfo {
+ pub fn must_zero(&self) -> bool {
+ match *self {
+ DropFlagInfo::DontZeroJustUse(..) => false,
+ DropFlagInfo::ZeroAndMaintain(..) => true,
+ DropFlagInfo::None => true,
+ }
+ }
+
+ pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
+ -> Option<DropHintDatum<'tcx>> {
+ let id = match *self {
+ DropFlagInfo::None => return None,
+ DropFlagInfo::DontZeroJustUse(id) |
+ DropFlagInfo::ZeroAndMaintain(id) => id,
+ };
+
+ let hints = bcx.fcx.lldropflag_hints.borrow();
+ let retval = hints.hint_datum(id);
+ assert!(retval.is_some(), "An id (={}) means must have a hint", id);
+ retval
+ }
+}
+
+// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
+// may not realize that subparts of an Lvalue can have a subset of
+// drop-flags associated with them, while this as written will just
+// memcpy the drop_flag_info. But, it is an easier way to get `_match`
+// off the ground to just let this be `Copy` for now.
+#[derive(Copy, Clone, Debug)]
+pub struct Lvalue {
+ pub source: &'static str,
+ pub drop_flag_info: DropFlagInfo
+}
#[derive(Debug)]
pub struct Rvalue {
pub mode: RvalueMode
}
+/// Classifies what action we should take when a value is moved away
+/// with respect to its drop-flag.
+///
+/// Long term there will be no need for this classification: all flags
+/// (which will be stored on the stack frame) will have the same
+/// interpretation and maintenance code associated with them.
+#[derive(Copy, Clone, Debug)]
+pub enum HintKind {
+ /// When the value is moved, set the drop-flag to "dropped"
+ /// (i.e. "zero the flag", even when the specific representation
+ /// is not literally 0) and when it is reinitialized, set the
+ /// drop-flag back to "initialized".
+ ZeroAndMaintain,
+
+ /// When the value is moved, do not set the drop-flag to "dropped"
+ /// However, continue to read the drop-flag in deciding whether to
+ /// drop. (In essence, the path/fragment in question will never
+ /// need to be dropped at the points where it is moved away by
+ /// this code, but we are defending against the scenario where
+ /// some *other* code could move away (or drop) the value and thus
+ /// zero-the-flag, which is why we will still read from it.
+ DontZeroJustUse,
+}
+
+impl Lvalue { // Constructors for various Lvalues.
+ pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
+ debug!("Lvalue at {} no drop flag info", source);
+ Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
+ }
+
+ pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
+ debug!("Lvalue at {} is drop flag hint", source);
+ Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
+ }
+
+ pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
+ bcx: Block<'blk, 'tcx>,
+ id: ast::NodeId,
+ k: HintKind) -> Lvalue {
+ let (opt_id, info) = {
+ let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
+ bcx.tcx().sess.nonzeroing_move_hints();
+ let info = match k {
+ HintKind::ZeroAndMaintain if hint_available =>
+ DropFlagInfo::ZeroAndMaintain(id),
+ HintKind::DontZeroJustUse if hint_available =>
+ DropFlagInfo::DontZeroJustUse(id),
+ _ =>
+ DropFlagInfo::None,
+ };
+ (Some(id), info)
+ };
+ debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
+ Lvalue { source: source, drop_flag_info: info }
+ }
+} // end Lvalue constructor methods.
+
+impl Lvalue {
+ fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ id: ast::NodeId) -> bool {
+ let hints = bcx.fcx.lldropflag_hints.borrow();
+ hints.has_hint(id)
+ }
+ pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
+ -> Option<DropHintDatum<'tcx>> {
+ self.drop_flag_info.hint_datum(bcx)
+ }
+}
+
impl Rvalue {
pub fn new(m: RvalueMode) -> Rvalue {
Rvalue { mode: m }
// Subtle. Populate the scratch memory *before* scheduling cleanup.
let bcx = populate(arg, bcx, scratch);
bcx.fcx.schedule_lifetime_end(scope, scratch);
- bcx.fcx.schedule_drop_mem(scope, scratch, ty);
+ bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
- DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue))
+ DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
}
/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => {
fcx.schedule_lifetime_end(scope, val);
- fcx.schedule_drop_mem(scope, val, ty);
+ fcx.schedule_drop_mem(scope, val, ty, None);
}
}
}
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
if bcx.fcx.type_needs_drop(ty) {
- // cancel cleanup of affine values by drop-filling the memory
- let () = drop_done_fill_mem(bcx, val, ty);
+ // cancel cleanup of affine values:
+ // 1. if it has drop-hint, mark as moved; then code
+ // aware of drop-hint won't bother calling the
+ // drop-glue itself.
+ if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
+ let moved_hint_byte = adt::DTOR_MOVED_HINT as usize;
+ let hint_llval = hint_datum.to_value().value();
+ Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
+ }
+ // 2. if the drop info says its necessary, drop-fill the memory.
+ if self.drop_flag_info.must_zero() {
+ let () = drop_done_fill_mem(bcx, val, ty);
+ }
bcx
} else {
+ // FIXME (#5016) would be nice to assert this, but we have
+ // to allow for e.g. DontZeroJustUse flags, for now.
+ //
+ // (The dropflag hint construction should be taking
+ // !type_needs_drop into account; earlier analysis phases
+ // may not have all the info they need to include such
+ // information properly, I think; in particular the
+ // fragments analysis works on a non-monomorphized view of
+ // the code.)
+ //
+ // assert_eq!(self.drop_flag_info, DropFlagInfo::None);
bcx
}
}
}
fn to_expr_kind(self) -> Expr {
- LvalueExpr
+ LvalueExpr(self)
}
}
ty: Ty<'tcx>)
-> Block<'blk, 'tcx> {
match *self {
- LvalueExpr => Lvalue.post_store(bcx, val, ty),
+ LvalueExpr(ref l) => l.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
}
}
fn is_by_ref(&self) -> bool {
match *self {
- LvalueExpr => Lvalue.is_by_ref(),
+ LvalueExpr(ref l) => l.is_by_ref(),
RvalueExpr(ref r) => r.is_by_ref()
}
}
match self.kind.mode {
ByRef => {
add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
- DatumBlock::new(bcx, Datum::new(self.val, self.ty, Lvalue))
+ DatumBlock::new(bcx, Datum::new(
+ self.val,
+ self.ty,
+ Lvalue::new("datum::to_lvalue_datum_in_scope")))
}
ByValue => {
{
let Datum { val, ty, kind } = self;
match kind {
- LvalueExpr => if_lvalue(Datum::new(val, ty, Lvalue)),
+ LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
}
}
};
Datum {
val: val,
- kind: Lvalue,
+ kind: Lvalue::new("Datum::get_element"),
ty: ty,
}
}
* affine values (since they must never be duplicated).
*/
- assert!(!ty::type_moves_by_default(&ty::empty_parameter_environment(bcx.tcx()),
- DUMMY_SP,
- self.ty));
+ assert!(!self.ty
+ .moves_by_default(&bcx.tcx().empty_parameter_environment(), DUMMY_SP));
self.shallow_copy_raw(bcx, dst)
}
}
pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
- assert!(ty::type_is_bool(self.ty));
+ assert!(self.ty.is_bool());
self.to_llscalarish(bcx)
}
}
use trans::common::{C_bytes, CrateContext};
use trans::declare;
use trans::type_::Type;
-use middle::ty::ClosureTyper;
use session::config::NoDebugInfo;
use std::ffi::CString;
use middle::subst::{self, Substs};
use rustc::ast_map;
use trans::{type_of, adt, machine, monomorphize};
-use trans::common::{self, CrateContext, FunctionContext, NormalizingClosureTyper, Block};
-use trans::_match::{BindingInfo, TrByCopy, TrByMove, TrByRef};
+use trans::common::{self, CrateContext, FunctionContext, Block};
+use trans::_match::{BindingInfo, TransBindingMode};
use trans::type_::Type;
-use middle::ty::{self, Ty, ClosureTyper};
+use middle::ty::{self, Ty};
use session::config::{self, FullDebugInfo};
use util::nodemap::FnvHashMap;
use util::common::path2cstr;
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[..]);
},
- ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
unique_type_id.push('*');
if mutbl == ast::MutMutable {
unique_type_id.push_str("mut");
let inner_type_id = self.get_unique_type_id_as_string(inner_type_id);
unique_type_id.push_str(&inner_type_id[..]);
},
- ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
+ ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
unique_type_id.push('&');
if mutbl == ast::MutMutable {
unique_type_id.push_str("mut");
ty::TyTrait(ref trait_data) => {
unique_type_id.push_str("trait ");
- let principal =
- ty::erase_late_bound_regions(cx.tcx(),
- &trait_data.principal);
+ let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
from_def_id_and_substs(self,
cx,
unique_type_id.push_str(" fn(");
- let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
+ let sig = cx.tcx().erase_late_bound_regions(sig);
for ¶meter_type in &sig.inputs {
let parameter_type_id =
}
}
},
- ty::TyClosure(def_id, substs) => {
- let typer = NormalizingClosureTyper::new(cx.tcx());
- let closure_ty = typer.closure_type(def_id, substs);
- self.get_unique_type_id_of_closure_type(cx,
- closure_ty,
- &mut unique_type_id);
+ ty::TyClosure(_, ref substs) if substs.upvar_tys.is_empty() => {
+ push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
+ },
+ ty::TyClosure(_, ref substs) => {
+ unique_type_id.push_str("closure ");
+ for upvar_type in &substs.upvar_tys {
+ let upvar_type_id =
+ self.get_unique_type_id_of_type(cx, upvar_type);
+ let upvar_type_id =
+ self.get_unique_type_id_as_string(upvar_type_id);
+ unique_type_id.push_str(&upvar_type_id[..]);
+ }
},
_ => {
cx.sess().bug(&format!("get_unique_type_id_of_type() - unexpected type: {:?}",
}
}
- fn get_unique_type_id_of_closure_type<'a>(&mut self,
- cx: &CrateContext<'a, 'tcx>,
- closure_ty: ty::ClosureTy<'tcx>,
- unique_type_id: &mut String) {
- let ty::ClosureTy { unsafety,
- ref sig,
- abi: _ } = closure_ty;
-
- if unsafety == ast::Unsafety::Unsafe {
- unique_type_id.push_str("unsafe ");
- }
-
- unique_type_id.push_str("|");
-
- let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
-
- for ¶meter_type in &sig.inputs {
- let parameter_type_id =
- self.get_unique_type_id_of_type(cx, parameter_type);
- let parameter_type_id =
- self.get_unique_type_id_as_string(parameter_type_id);
- unique_type_id.push_str(¶meter_type_id[..]);
- unique_type_id.push(',');
- }
-
- if sig.variadic {
- unique_type_id.push_str("...");
- }
-
- unique_type_id.push_str("|->");
-
- match sig.output {
- ty::FnConverging(ret_ty) => {
- let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty);
- let return_type_id = self.get_unique_type_id_as_string(return_type_id);
- unique_type_id.push_str(&return_type_id[..]);
- }
- ty::FnDiverging => {
- unique_type_id.push_str("!");
- }
- }
- }
-
// Get the UniqueTypeId for an enum variant. Enum variants are not really
// types of their own, so they need special handling. We still need a
// UniqueTypeId for them, since to debuginfo they *are* real types.
unique_type_id: UniqueTypeId,
span: Span)
-> MetadataCreationResult {
- let data_ptr_type = ty::mk_ptr(cx.tcx(), ty::mt {
+ let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut {
ty: element_type,
mutbl: ast::MutImmutable
});
span: Span)
-> MetadataCreationResult
{
- let signature = ty::erase_late_bound_regions(cx.tcx(), signature);
+ let signature = cx.tcx().erase_late_bound_regions(signature);
let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs.len() + 1);
trait_pointer_metadata(cx, t, None, unique_type_id),
false)
}
- ty::TyBox(ty) | ty::TyRawPtr(ty::mt{ty, ..}) | ty::TyRef(_, ty::mt{ty, ..}) => {
+ ty::TyBox(ty) |
+ ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
+ ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
match ty.sty {
ty::TySlice(typ) => {
vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)
}
}
ty::TyBareFn(_, ref barefnty) => {
- subroutine_type_metadata(cx, unique_type_id, &barefnty.sig, usage_site_span)
+ let fn_metadata = subroutine_type_metadata(cx,
+ unique_type_id,
+ &barefnty.sig,
+ usage_site_span).metadata;
+ match debug_context(cx).type_map
+ .borrow()
+ .find_metadata_for_unique_id(unique_type_id) {
+ Some(metadata) => return metadata,
+ None => { /* proceed normally */ }
+ };
+
+ // This is actually a function pointer, so wrap it in pointer DI
+ MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
+
}
- ty::TyClosure(def_id, substs) => {
- let typer = NormalizingClosureTyper::new(cx.tcx());
- let sig = typer.closure_type(def_id, substs).sig;
- subroutine_type_metadata(cx, unique_type_id, &sig, usage_site_span)
+ ty::TyClosure(_, ref substs) => {
+ prepare_tuple_metadata(cx,
+ t,
+ &substs.upvar_tys,
+ unique_type_id,
+ usage_site_span).finalize(cx)
}
ty::TyStruct(def_id, substs) => {
prepare_struct_metadata(cx,
}
}
-fn diverging_type_metadata(cx: &CrateContext) -> DIType {
+pub fn diverging_type_metadata(cx: &CrateContext) -> DIType {
unsafe {
llvm::LLVMDIBuilderCreateBasicType(
DIB(cx),
// Creates MemberDescriptions for the fields of a struct
struct StructMemberDescriptionFactory<'tcx> {
- fields: Vec<ty::field<'tcx>>,
+ fields: Vec<ty::Field<'tcx>>,
is_simd: bool,
span: Span,
}
let name = if field.name == special_idents::unnamed_field.name {
format!("__{}", i)
} else {
- token::get_name(field.name).to_string()
+ field.name.to_string()
};
let offset = if self.is_simd {
unique_type_id,
containing_scope);
- let mut fields = ty::struct_fields(cx.tcx(), def_id, substs);
+ let mut fields = cx.tcx().struct_fields(def_id, substs);
// The `Ty` values returned by `ty::struct_fields` can still contain
// `TyProjection` variants, so normalize those away.
struct_llvm_type,
StructMDF(StructMemberDescriptionFactory {
fields: fields,
- is_simd: ty::type_is_simd(cx.tcx(), struct_type),
+ is_simd: struct_type.is_simd(cx.tcx()),
span: span,
})
)
describe_enum_variant(cx,
self.enum_type,
struct_def,
- &*(*self.variants)[i],
+ &*self.variants[i],
discriminant_info,
self.containing_scope,
self.span);
describe_enum_variant(cx,
self.enum_type,
struct_def,
- &*(*self.variants)[0],
+ &*self.variants[0],
NoDiscriminant,
self.containing_scope,
self.span);
// DWARF representation of enums uniform.
// First create a description of the artificial wrapper struct:
- let non_null_variant = &(*self.variants)[non_null_variant_index as usize];
- let non_null_variant_name = token::get_name(non_null_variant.name);
+ let non_null_variant = &self.variants[non_null_variant_index as usize];
+ let non_null_variant_name = non_null_variant.name.as_str();
// The llvm type and metadata of the pointer
let non_null_llvm_type = type_of::type_of(cx, nnty);
// MemberDescription of the struct's single field.
let sole_struct_member_description = MemberDescription {
name: match non_null_variant.arg_names {
- Some(ref names) => token::get_name(names[0]).to_string(),
+ Some(ref names) => names[0].to_string(),
None => "__0".to_string()
},
llvm_type: non_null_llvm_type,
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - non_null_variant_index) as usize;
- let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
+ let null_variant_name = self.variants[null_variant_index].name;
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
0,
null_variant_name);
describe_enum_variant(cx,
self.enum_type,
struct_def,
- &*(*self.variants)[nndiscr as usize],
+ &*self.variants[nndiscr as usize],
OptimizedDiscriminant,
self.containing_scope,
self.span);
// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - nndiscr) as usize;
- let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
+ let null_variant_name = self.variants[null_variant_index].name;
let discrfield = discrfield.iter()
.skip(1)
.map(|x| x.to_string())
- .collect::<Vec<_>>().connect("$");
+ .collect::<Vec<_>>().join("$");
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
discrfield,
null_variant_name);
struct_def.packed);
// Could do some consistency checks here: size, align, field count, discr type
- let variant_name = token::get_name(variant_info.name);
- let variant_name = &variant_name;
+ let variant_name = variant_info.name.as_str();
let unique_type_id = debug_context(cx).type_map
.borrow_mut()
.get_unique_type_id_of_enum_variant(
cx,
enum_type,
- variant_name);
+ &variant_name);
let metadata_stub = create_struct_stub(cx,
variant_llvm_type,
- variant_name,
+ &variant_name,
unique_type_id,
containing_scope);
let mut arg_names: Vec<_> = match variant_info.arg_names {
Some(ref names) => {
names.iter()
- .map(|&name| token::get_name(name).to_string())
+ .map(|name| name.to_string())
.collect()
}
None => {
let loc = span_start(cx, definition_span);
let file_metadata = file_metadata(cx, &loc.file.name);
- let variants = ty::enum_variants(cx.tcx(), enum_def_id);
+ let variants = cx.tcx().enum_variants(enum_def_id);
let enumerators_metadata: Vec<DIDescriptor> = variants
.iter()
.map(|v| {
- let token = token::get_name(v.name);
+ let token = v.name.as_str();
let name = CString::new(token.as_bytes()).unwrap();
unsafe {
llvm::LLVMDIBuilderCreateEnumerator(
.collect();
let discriminant_type_metadata = |inttype| {
- // We can reuse the type of the discriminant for all monomorphized
- // instances of an enum because it doesn't depend on any type
- // parameters. The def_id, uniquely identifying the enum's polytype acts
- // as key in this cache.
+ let disr_type_key = (enum_def_id, inttype);
let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
.borrow()
- .get(&enum_def_id).cloned();
+ .get(&disr_type_key).cloned();
match cached_discriminant_type_metadata {
Some(discriminant_type_metadata) => discriminant_type_metadata,
None => {
debug_context(cx).created_enum_disr_types
.borrow_mut()
- .insert(enum_def_id, discriminant_type_metadata);
+ .insert(disr_type_key, discriminant_type_metadata);
discriminant_type_metadata
}
csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
- token::get_name(name)
+ name.as_str()
}
}
};
let is_local_to_unit = is_node_local_to_unit(cx, node_id);
- let variable_type = ty::node_id_to_type(cx.tcx(), node_id);
+ let variable_type = cx.tcx().node_id_to_type(node_id);
let type_metadata = type_metadata(cx, variable_type, span);
let namespace_node = namespace_for_item(cx, ast_util::local_def(node_id));
- let var_name = token::get_name(name).to_string();
+ let var_name = name.to_string();
let linkage_name =
namespace_node.mangled_name_of_contained_item(&var_name[..]);
let var_scope = namespace_node.scope;
// dereference once more. For ByCopy we just use the stack slot we created
// for the binding.
let var_access = match binding.trmode {
- TrByCopy(llbinding) => VariableAccess::DirectVariable {
+ TransBindingMode::TrByCopy(llbinding) |
+ TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
alloca: llbinding
},
- TrByMove => VariableAccess::IndirectVariable {
+ TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
alloca: binding.llmatch,
address_operations: &aops
},
- TrByRef => VariableAccess::DirectVariable {
+ TransBindingMode::TrByRef => VariableAccess::DirectVariable {
alloca: binding.llmatch
}
};
create_DIArray, is_node_local_to_unit};
use self::namespace::{namespace_for_item, NamespaceTreeNode};
use self::type_names::compute_debuginfo_type_name;
-use self::metadata::{type_metadata, file_metadata, scope_metadata, TypeMap, compile_unit_metadata};
+use self::metadata::{type_metadata, diverging_type_metadata};
+use self::metadata::{file_metadata, scope_metadata, TypeMap, compile_unit_metadata};
use self::source_loc::InternalDebugLocation;
use llvm;
use rustc::ast_map;
use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block};
use trans;
-use trans::monomorphize;
-use middle::ty::{self, Ty, ClosureTyper};
+use trans::{monomorphize, type_of};
+use middle::ty::{self, Ty};
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
-use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet};
+use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet};
use libc::c_uint;
use std::cell::{Cell, RefCell};
use std::ptr;
use std::rc::Rc;
use syntax::codemap::{Span, Pos};
-use syntax::{ast, codemap, ast_util};
+use syntax::{abi, ast, codemap, ast_util};
+use syntax::attr::IntType;
use syntax::parse::token::{self, special_idents};
pub mod gdb;
builder: DIBuilderRef,
current_debug_location: Cell<InternalDebugLocation>,
created_files: RefCell<FnvHashMap<String, DIFile>>,
- created_enum_disr_types: RefCell<DefIdMap<DIType>>,
+ created_enum_disr_types: RefCell<FnvHashMap<(ast::DefId, IntType), DIType>>,
type_map: RefCell<TypeMap<'tcx>>,
namespace_map: RefCell<FnvHashMap<Vec<ast::Name>, Rc<NamespaceTreeNode>>>,
builder: builder,
current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation),
created_files: RefCell::new(FnvHashMap()),
- created_enum_disr_types: RefCell::new(DefIdMap()),
+ created_enum_disr_types: RefCell::new(FnvHashMap()),
type_map: RefCell::new(TypeMap::new()),
namespace_map: RefCell::new(FnvHashMap()),
composite_types_completed: RefCell::new(FnvHashSet()),
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(cx,
fn_ast_id,
- &*fn_decl,
param_substs,
span);
llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
// Get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
- let mut function_name = String::from(&*token::get_name(name));
+ let mut function_name = name.to_string();
let template_parameters = get_template_parameters(cx,
generics,
param_substs,
fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
fn_ast_id: ast::NodeId,
- fn_decl: &ast::FnDecl,
param_substs: &Substs<'tcx>,
error_reporting_span: Span) -> DIArray {
if cx.sess().opts.debuginfo == LimitedDebugInfo {
return create_DIArray(DIB(cx), &[]);
}
- let mut signature = Vec::with_capacity(fn_decl.inputs.len() + 1);
-
// Return type -- llvm::DIBuilder wants this at index 0
assert_type_for_node_id(cx, fn_ast_id, error_reporting_span);
- let return_type = ty::node_id_to_type(cx.tcx(), fn_ast_id);
- let return_type = monomorphize::apply_param_substs(cx.tcx(),
- param_substs,
- &return_type);
- if ty::type_is_nil(return_type) {
- signature.push(ptr::null_mut())
+ let fn_type = cx.tcx().node_id_to_type(fn_ast_id);
+
+ let (sig, abi) = match fn_type.sty {
+ ty::TyBareFn(_, ref barefnty) => {
+ (cx.tcx().erase_late_bound_regions(&barefnty.sig), barefnty.abi)
+ }
+ ty::TyClosure(def_id, ref substs) => {
+ let closure_type = cx.tcx().closure_type(def_id, substs);
+ (cx.tcx().erase_late_bound_regions(&closure_type.sig), closure_type.abi)
+ }
+
+ _ => cx.sess().bug("get_function_metdata: Expected a function type!")
+ };
+ let sig = monomorphize::apply_param_substs(cx.tcx(), param_substs, &sig);
+
+ let mut signature = Vec::with_capacity(sig.inputs.len() + 1);
+
+ // Return type -- llvm::DIBuilder wants this at index 0
+ signature.push(match sig.output {
+ ty::FnConverging(ret_ty) => match ret_ty.sty {
+ ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
+ _ => type_metadata(cx, ret_ty, codemap::DUMMY_SP)
+ },
+ ty::FnDiverging => diverging_type_metadata(cx)
+ });
+
+ let inputs = &if abi == abi::RustCall {
+ type_of::untuple_arguments(cx, &sig.inputs)
} else {
- signature.push(type_metadata(cx, return_type, codemap::DUMMY_SP));
- }
+ sig.inputs
+ };
// Arguments types
- for arg in &fn_decl.inputs {
- assert_type_for_node_id(cx, arg.pat.id, arg.pat.span);
- let arg_type = ty::node_id_to_type(cx.tcx(), arg.pat.id);
- let arg_type = monomorphize::apply_param_substs(cx.tcx(),
- param_substs,
- &arg_type);
- signature.push(type_metadata(cx, arg_type, codemap::DUMMY_SP));
+ for &argument_type in inputs {
+ signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
}
return create_DIArray(DIB(cx), &signature[..]);
actual_self_type,
codemap::DUMMY_SP);
- let name = token::get_name(special_idents::type_self.name);
+ let name = special_idents::type_self.name.as_str();
let name = CString::new(name.as_bytes()).unwrap();
let param_metadata = unsafe {
// Again, only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == FullDebugInfo {
let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP);
- let ident = token::get_ident(ident);
- let name = CString::new(ident.as_bytes()).unwrap();
+ let name = CString::new(ident.name.as_str().as_bytes()).unwrap();
let param_metadata = unsafe {
llvm::LLVMDIBuilderCreateTemplateTypeParameter(
DIB(cx),
let filename = span_start(cx, span).file.name.clone();
let file_metadata = file_metadata(cx, &filename[..]);
- let name = token::get_name(variable_name);
let loc = span_start(cx, span);
let type_metadata = type_metadata(cx, variable_type, span);
CapturedVariable => (0, DW_TAG_auto_variable)
};
- let name = CString::new(name.as_bytes()).unwrap();
+ let name = CString::new(variable_name.as_str().as_bytes()).unwrap();
match (variable_access, &[][..]) {
(DirectVariable { alloca }, address_operations) |
(IndirectVariable {alloca, address_operations}, _) => {
use llvm::debuginfo::DIScope;
use rustc::ast_map;
use trans::common::CrateContext;
-use middle::ty::{self, ClosureTyper};
use std::ffi::CString;
use std::ptr;
Some(ref parent) => fill_nested(&*parent.upgrade().unwrap(), output),
None => {}
}
- let string = token::get_name(node.name);
- output.push_str(&format!("{}", string.len()));
+ let string = node.name.as_str();
+ output.push_str(&string.len().to_string());
output.push_str(&string);
}
let mut name = String::from("_ZN");
fill_nested(self, &mut name);
- name.push_str(&format!("{}", item_name.len()));
+ name.push_str(&item_name.len().to_string());
name.push_str(item_name);
name.push('E');
name
}
pub fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
- ty::with_path(cx.tcx(), def_id, |path| {
+ cx.tcx().with_path(def_id, |path| {
// prepend crate name if not already present
let krate = if def_id.krate == ast::LOCAL_CRATE {
let crate_namespace_name = token::intern(crate_root_namespace(cx));
Some(ref node) => node.scope,
None => ptr::null_mut()
};
- let namespace_name = token::get_name(name);
+ let namespace_name = name.as_str();
let namespace_name = CString::new(namespace_name.as_bytes()).unwrap();
let scope = unsafe {
llvm::LLVMDIBuilderCreateNameSpace(
use trans::common::CrateContext;
use middle::subst::{self, Substs};
-use middle::ty::{self, Ty, ClosureTyper};
+use middle::ty::{self, Ty};
use syntax::ast;
-use syntax::parse::token;
// Compute the name of the type as it should be stored in debuginfo. Does not do
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
- ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
push_debuginfo_type_name(cx, inner_type, true, output);
},
- ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
+ ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
output.push(']');
},
ty::TyTrait(ref trait_data) => {
- let principal = ty::erase_late_bound_regions(cx.tcx(), &trait_data.principal);
+ let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
output.push_str("fn(");
- let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
+ let sig = cx.tcx().erase_late_bound_regions(sig);
if !sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push(')');
match sig.output {
- ty::FnConverging(result_type) if ty::type_is_nil(result_type) => {}
+ ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
- ty::with_path(cx.tcx(), def_id, |path| {
+ cx.tcx().with_path(def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE {
output.push_str(crate_root_namespace(cx));
let mut path_element_count = 0;
for path_element in path {
- let name = token::get_name(path_element.name());
- output.push_str(&name);
+ output.push_str(&path_element.name().as_str());
output.push_str("::");
path_element_count += 1;
}
output.pop();
output.pop();
} else {
- let name = token::get_name(path.last()
- .expect("debuginfo: Empty item path?")
- .name());
- output.push_str(&name);
+ let name = path.last().expect("debuginfo: Empty item path?").name();
+ output.push_str(&name.as_str());
}
});
}
output.push('>');
}
}
-
//! * Use define_* family of methods when you might be defining the ValueRef.
//! * When in doubt, define.
use llvm::{self, ValueRef};
-use middle::ty::{self, ClosureTyper};
+use middle::ty;
+use middle::infer;
use syntax::abi;
use trans::attributes;
use trans::base;
-use trans::common;
use trans::context::CrateContext;
use trans::monomorphize;
use trans::type_::Type;
ty::TyBareFn(_, ref f) => {
(&f.sig, f.abi, None)
}
- ty::TyClosure(closure_did, substs) => {
- let typer = common::NormalizingClosureTyper::new(ccx.tcx());
- function_type = typer.closure_type(closure_did, substs);
+ ty::TyClosure(closure_did, ref substs) => {
+ let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
+ function_type = infcx.closure_type(closure_did, substs);
let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
let llenvironment_type = type_of::type_of_explicit_arg(ccx, self_type);
debug!("declare_rust_fn function_type={:?} self_type={:?}",
_ => ccx.sess().bug("expected closure or fn")
};
- let sig = ty::Binder(ty::erase_late_bound_regions(ccx.tcx(), sig));
+ let sig = ty::Binder(ccx.tcx().erase_late_bound_regions(sig));
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let llfty = type_of::type_of_rust_fn(ccx, env, &sig, abi);
debug!("declare_rust_fn llfty={}", ccx.tn().type_to_string(llfty));
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, fn_type: Type,
- output: ty::FnOutput) -> Option<ValueRef> {
+pub fn define_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
+ fn_type: Type, output: ty::FnOutput) -> Option<ValueRef> {
if get_defined_value(ccx, name).is_some() {
None
} else {
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
-/// return None if the name already has a definition associated with it. In that
-/// case an error should be reported to the user, because it usually happens due
-/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
- fn_type: ty::Ty<'tcx>) -> Option<ValueRef> {
+/// panic if the name already has a definition associated with it. This
+/// can happen with #[no_mangle] or #[export_name], for example.
+pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ name: &str,
+ fn_type: ty::Ty<'tcx>) -> ValueRef {
if get_defined_value(ccx, name).is_some() {
- None
+ ccx.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
- Some(declare_internal_rust_fn(ccx, name, fn_type))
+ declare_internal_rust_fn(ccx, name, fn_type)
}
}
-/// Get defined or externally defined (AvailableExternally linkage) value by name.
+/// Get defined or externally defined (AvailableExternally linkage) value by
+/// name.
fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
debug!("get_defined_value(name={:?})", name);
let namebuf = CString::new(name).unwrap_or_else(|_|{
use middle::check_const;
use middle::def;
use middle::lang_items::CoerceUnsizedTraitLangItem;
-use middle::mem_categorization::Typer;
use middle::subst::{Substs, VecPerParamSpace};
use middle::traits;
use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
-use trans::cleanup::{self, CleanupMethods};
+use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::tvec;
use trans::type_of;
use middle::cast::{CastKind, CastTy};
-use middle::ty::{struct_fields, tup_fields};
use middle::ty::{AdjustDerefRef, AdjustReifyFnPointer, AdjustUnsafeFnPointer};
use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
-use std::iter::repeat;
use std::mem;
// Destinations
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
- if bcx.tcx().adjustments.borrow().contains_key(&expr.id) {
+ if bcx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
// use trans, which may be less efficient but
// which will perform the adjustments:
let datum = unpack_datum!(bcx, trans(bcx, expr));
let const_ty = expr_ty_adjusted(bcx, expr);
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
- let datum = Datum::new(global, const_ty, Lvalue);
+ let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
return DatumBlock::new(bcx, datum.to_expr_datum());
}
let llty = type_of::type_of(bcx.ccx(), const_ty);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "const");
- let lldest = if !ty::type_is_structural(const_ty) {
+ let lldest = if !const_ty.is_structural() {
// Cast pointer to slot, because constants have different types.
PointerCast(bcx, scratch, val_ty(global))
} else {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
}
-pub fn make_fat_ptr(bcx: Block, ty: Type, data: ValueRef, extra: ValueRef) -> ValueRef {
- InsertValue(bcx, InsertValue(bcx, C_undef(ty), data, 0), extra, 1)
-}
pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr));
old_info: Option<ValueRef>,
param_substs: &'tcx Substs<'tcx>)
-> ValueRef {
- let (source, target) = ty::struct_lockstep_tails(ccx.tcx(), source, target);
+ let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
(&ty::TyTrait(_), &ty::TyTrait(_)) => {
{
let mut bcx = bcx;
let mut datum = datum;
- let adjustment = match bcx.tcx().adjustments.borrow().get(&expr.id).cloned() {
+ let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
None => {
return DatumBlock::new(bcx, datum);
}
match datum.ty.sty {
// Don't skip a conversion from Box<T> to &T, etc.
ty::TyRef(..) => {
- let method_call = MethodCall::autoderef(expr.id, 0);
- if bcx.tcx().method_map.borrow().contains_key(&method_call) {
+ if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
// Don't skip an overloaded deref.
0
} else {
match (&source.ty.sty, &target.ty.sty) {
(&ty::TyBox(a), &ty::TyBox(b)) |
- (&ty::TyRef(_, ty::mt { ty: a, .. }), &ty::TyRef(_, ty::mt { ty: b, .. })) |
- (&ty::TyRef(_, ty::mt { ty: a, .. }), &ty::TyRawPtr(ty::mt { ty: b, .. })) |
- (&ty::TyRawPtr(ty::mt { ty: a, .. }), &ty::TyRawPtr(ty::mt { ty: b, .. })) => {
+ (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+ &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
+ (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
+ &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
+ (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
+ &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);
let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
- ty::custom_coerce_unsized_kind(bcx.tcx(), impl_def_id)
+ bcx.tcx().custom_coerce_unsized_kind(impl_def_id)
}
vtable => {
bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}",
base: &ast::Expr,
get_idx: F)
-> DatumBlock<'blk, 'tcx, Expr> where
- F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::field<'tcx>]) -> usize,
+ F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::Field<'tcx>]) -> usize,
{
let mut bcx = bcx;
let _icx = push_ctxt("trans_rec_field");
// Always generate an lvalue datum, because this pointer doesn't own
// the data and cleanup is scheduled elsewhere.
- DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr))
+ DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
}
})
base: &ast::Expr,
field: ast::Name)
-> DatumBlock<'blk, 'tcx, Expr> {
- trans_field(bcx, base, |tcx, field_tys| ty::field_idx_strict(tcx, field, field_tys))
+ trans_field(bcx, base, |tcx, field_tys| tcx.field_idx_strict(field, field_tys))
}
/// Translates `base.<idx>`.
// Check for overloaded index.
let method_ty = ccx.tcx()
- .method_map
+ .tables
.borrow()
+ .method_map
.get(&method_call)
.map(|method| method.ty);
let elt_datum = match method_ty {
let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
let ref_ty = // invoked methods have LB regions instantiated:
- ty::no_late_bound_regions(
- bcx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
- let elt_ty = match ty::deref(ref_ty, true) {
+ bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
+ let elt_ty = match ref_ty.builtin_deref(true) {
None => {
bcx.tcx().sess.span_bug(index_expr.span,
"index method didn't return a \
index_expr,
method_call,
base_datum,
- vec![(ix_datum, idx.id)],
+ Some((ix_datum, idx.id)),
Some(SaveIn(scratch.val)),
false));
let datum = scratch.to_expr_datum();
+ let lval = Lvalue::new("expr::trans_index overload");
if type_is_sized(bcx.tcx(), elt_ty) {
- Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr)
+ Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
} else {
- Datum::new(datum.val, elt_ty, LvalueExpr)
+ Datum::new(datum.val, elt_ty, LvalueExpr(lval))
}
}
None => {
ccx.int_type());
let ix_val = {
if ix_size < int_size {
- if ty::type_is_signed(expr_ty(bcx, idx)) {
+ if expr_ty(bcx, idx).is_signed() {
SExt(bcx, ix_val, ccx.int_type())
} else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
}
};
- let unit_ty = ty::sequence_element_type(bcx.tcx(), base_datum.ty);
+ let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
let (base, len) = base_datum.get_vec_base_and_len(bcx);
});
let elt = InBoundsGEP(bcx, base, &[ix_val]);
let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
- Datum::new(elt, unit_ty, LvalueExpr)
+ let lval = Lvalue::new("expr::trans_index fallback");
+ Datum::new(elt, unit_ty, LvalueExpr(lval))
}
};
// Case 2.
base::get_extern_const(bcx.ccx(), did, const_ty)
};
- DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
+ let lval = Lvalue::new("expr::trans_def");
+ DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
}
def::DefConst(_) => {
bcx.sess().span_bug(ref_expr.span,
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
let src_datum = unpack_datum!(
bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
- bcx = glue::drop_ty(bcx,
- dst_datum.val,
- dst_datum.ty,
- expr.debug_loc());
+ let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
+ let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
+
+ // 1. Drop the data at the destination, passing the
+ // drop-hint in case the lvalue has already been
+ // dropped or moved.
+ bcx = glue::drop_ty_core(bcx,
+ dst_datum.val,
+ dst_datum.ty,
+ expr.debug_loc(),
+ false,
+ opt_hint_val);
+
+ // 2. We are overwriting the destination; ensure that
+ // its drop-hint (if any) says "initialized."
+ if let Some(hint_val) = opt_hint_val {
+ let hint_llval = hint_val.value();
+ let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT as usize);
+ Store(bcx, drop_needed, hint_llval);
+ }
src_datum.store_to(bcx, dst_datum.val)
} else {
src_datum.store_to(bcx, dst_datum.val)
None,
expr.span,
expr.id,
- ty::mk_struct(tcx, did, tcx.mk_substs(substs)),
+ tcx.mk_struct(did, tcx.mk_substs(substs)),
dest)
} else {
tcx.sess.span_bug(expr.span,
SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
Ignore => closure::Dest::Ignore(bcx.ccx())
};
- closure::trans_closure_expr(dest, decl, body, expr.id, bcx.fcx.param_substs)
- .unwrap_or(bcx)
+ let substs = match expr_ty(bcx, expr).sty {
+ ty::TyClosure(_, ref substs) => substs,
+ ref t =>
+ bcx.tcx().sess.span_bug(
+ expr.span,
+ &format!("closure expr without closure type: {:?}", t)),
+ };
+ closure::trans_closure_expr(dest, decl, body, expr.id, substs).unwrap_or(bcx)
}
ast::ExprCall(ref f, ref args) => {
if bcx.tcx().is_method_call(expr.id) {
let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
- vec![(rhs_datum, rhs.id)], Some(dest),
+ Some((rhs_datum, rhs.id)), Some(dest),
!ast_util::is_by_value_binop(op.node)).bcx
}
ast::ExprUnary(op, ref subexpr) => {
// if not overloaded, would be RvalueDatumExpr
let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
- arg, Vec::new(), Some(dest), !ast_util::is_by_value_unop(op)).bcx
+ arg, None, Some(dest), !ast_util::is_by_value_unop(op)).bcx
}
ast::ExprIndex(ref base, ref idx) => {
// if not overloaded, would be RvalueDatumExpr
let base = unpack_datum!(bcx, trans(bcx, &**base));
let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
- vec![(idx_datum, idx.id)], Some(dest), true).bcx
+ Some((idx_datum, idx.id)), Some(dest), true).bcx
}
ast::ExprCast(..) => {
// Trait casts used to come this way, now they should be coercions.
match def {
def::DefVariant(tid, vid, _) => {
- let variant_info = ty::enum_variant_with_id(bcx.tcx(), tid, vid);
+ let variant_info = bcx.tcx().enum_variant_with_id(tid, vid);
if !variant_info.args.is_empty() {
// N-ary variant.
let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
def::DefStruct(_) => {
let ty = expr_ty(bcx, ref_expr);
match ty.sty {
- ty::TyStruct(did, _) if ty::has_dtor(bcx.tcx(), did) => {
+ ty::TyStruct(did, _) if bcx.tcx().has_dtor(did) => {
let repr = adt::represent_type(bcx.ccx(), ty);
adt::trans_set_discr(bcx, &*repr, lldest, 0);
}
match def {
def::DefFn(did, _) |
- def::DefStruct(did) | def::DefVariant(_, did, _) |
- def::DefMethod(did, def::FromImpl(_)) => {
+ def::DefStruct(did) | def::DefVariant(_, did, _) => {
callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
}
- def::DefMethod(impl_did, def::FromTrait(trait_did)) => {
- meth::trans_static_method_callee(ccx, impl_did,
- trait_did, ref_expr.id,
- param_substs)
+ def::DefMethod(method_did) => {
+ match ccx.tcx().impl_or_trait_item(method_did).container() {
+ ty::ImplContainer(_) => {
+ callee::trans_fn_ref(ccx, method_did,
+ ExprId(ref_expr.id),
+ param_substs)
+ }
+ ty::TraitContainer(trait_did) => {
+ meth::trans_static_method_callee(ccx, method_did,
+ trait_did, ref_expr.id,
+ param_substs)
+ }
+ }
}
_ => {
ccx.tcx().sess.span_bug(ref_expr.span, &format!(
def::DefUpvar(nid, _) => {
// Can't move upvars, so this is never a ZeroMemLastUse.
let local_ty = node_id_type(bcx, nid);
+ let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
+ bcx, nid, HintKind::ZeroAndMaintain);
match bcx.fcx.llupvars.borrow().get(&nid) {
- Some(&val) => Datum::new(val, local_ty, Lvalue),
+ Some(&val) => Datum::new(val, local_ty, lval),
None => {
bcx.sess().bug(&format!(
"trans_local_var: no llval for upvar {} found",
node_id_opt: Option<ast::NodeId>,
op: F)
-> R where
- F: FnOnce(ty::Disr, &[ty::field<'tcx>]) -> R,
+ F: FnOnce(ty::Disr, &[ty::Field<'tcx>]) -> R,
{
match ty.sty {
ty::TyStruct(did, substs) => {
- let fields = struct_fields(tcx, did, substs);
+ let fields = tcx.struct_fields(did, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(0, &fields[..])
}
ty::TyTuple(ref v) => {
- op(0, &tup_fields(&v[..]))
+ let fields: Vec<_> = v.iter().enumerate().map(|(i, &f)| {
+ ty::Field {
+ name: token::intern(&i.to_string()),
+ mt: ty::TypeAndMut {
+ ty: f,
+ mutbl: ast::MutImmutable
+ }
+ }
+ }).collect();
+ op(0, &fields)
}
ty::TyEnum(_, substs) => {
let def = tcx.def_map.borrow().get(&node_id).unwrap().full_def();
match def {
def::DefVariant(enum_id, variant_id, _) => {
- let variant_info = ty::enum_variant_with_id(tcx, enum_id, variant_id);
- let fields = struct_fields(tcx, variant_id, substs);
+ let variant_info = tcx.enum_variant_with_id(enum_id, variant_id);
+ let fields = tcx.struct_fields(variant_id, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(variant_info.disr_val, &fields[..])
}
let tcx = bcx.tcx();
with_field_tys(tcx, ty, Some(expr_id), |discr, field_tys| {
- let mut need_base: Vec<bool> = repeat(true).take(field_tys.len()).collect();
+ let mut need_base = vec![true; field_tys.len()];
let numbered_fields = fields.iter().map(|field| {
let opt_pos =
// panic occur before the ADT as a whole is ready.
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
- if ty::type_is_simd(bcx.tcx(), ty) {
+ if ty.is_simd(bcx.tcx()) {
// Issue 23112: The original logic appeared vulnerable to same
// order-of-eval bug. But, SIMD values are tuple-structs;
// i.e. functional record update (FRU) syntax is unavailable.
bcx = trans_into(bcx, &**e, SaveIn(dest));
let scope = cleanup::CustomScope(custom_cleanup_scope);
fcx.schedule_lifetime_end(scope, dest);
- fcx.schedule_drop_mem(scope, dest, e_ty);
+ // FIXME: nonzeroing move should generalize to fields
+ fcx.schedule_drop_mem(scope, dest, e_ty, None);
}
}
// The only overloaded operator that is translated to a datum
// is an overloaded deref, since it always yields a `&T`.
// Otherwise, we should be in the RvalueDpsExpr path.
- assert!(
- op == ast::UnDeref ||
- !ccx.tcx().method_map.borrow().contains_key(&method_call));
+ assert!(op == ast::UnDeref || !ccx.tcx().is_method_call(expr.id));
let un_ty = expr_ty(bcx, expr);
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
let val = datum.to_llscalarish(bcx);
let (bcx, llneg) = {
- if ty::type_is_fp(un_ty) {
+ if un_ty.is_fp() {
let result = FNeg(bcx, val, debug_loc);
(bcx, result)
} else {
- let is_signed = ty::type_is_signed(un_ty);
+ let is_signed = un_ty.is_signed();
let result = Neg(bcx, val, debug_loc);
let bcx = if bcx.ccx().check_overflow() && is_signed {
let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lval: Datum<'tcx, Lvalue>)
-> DatumBlock<'blk, 'tcx, Expr> {
- let dest_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), lval.ty);
+ let dest_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), lval.ty);
let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
let _icx = push_ctxt("trans_eager_binop");
let tcx = bcx.tcx();
- let is_simd = ty::type_is_simd(tcx, lhs_t);
+ let is_simd = lhs_t.is_simd(tcx);
let intype = if is_simd {
- ty::simd_type(tcx, lhs_t)
+ lhs_t.simd_type(tcx)
} else {
lhs_t
};
- let is_float = ty::type_is_fp(intype);
- let is_signed = ty::type_is_signed(intype);
+ let is_float = intype.is_fp();
+ let is_signed = intype.is_signed();
let info = expr_info(binop_expr);
let binop_debug_loc = binop_expr.debug_loc();
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!ccx.tcx().is_method_call(expr.id));
match op.node {
ast::BiAnd => {
expr: &ast::Expr,
method_call: MethodCall,
lhs: Datum<'tcx, Expr>,
- rhs: Vec<(Datum<'tcx, Expr>, ast::NodeId)>,
+ rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
dest: Option<Dest>,
autoref: bool)
-> Result<'blk, 'tcx> {
- let method_ty = bcx.tcx().method_map.borrow().get(&method_call).unwrap().ty;
callee::trans_call_inner(bcx,
expr.debug_loc(),
- monomorphize_type(bcx, method_ty),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(bcx,
method_call,
-> Block<'blk, 'tcx> {
debug!("trans_overloaded_call {}", expr.id);
let method_call = MethodCall::expr(expr.id);
- let method_type = bcx.tcx()
- .method_map
- .borrow()
- .get(&method_call)
- .unwrap()
- .ty;
let mut all_args = vec!(callee);
all_args.extend(args.iter().map(|e| &**e));
unpack_result!(bcx,
callee::trans_call_inner(bcx,
expr.debug_loc(),
- monomorphize_type(bcx,
- method_type),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(
bcx,
return true;
}
- match (ty::deref(t_in, true), ty::deref(t_out, true)) {
- (Some(ty::mt{ ty: t_in, .. }), Some(ty::mt{ ty: t_out, .. })) => {
+ match (t_in.builtin_deref(true), t_out.builtin_deref(true)) {
+ (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
t_in == t_out
}
_ => {
ll_t_in = val_ty(discr);
(discr, adt::is_discr_signed(&*repr))
} else {
- (datum.to_llscalarish(bcx), ty::type_is_signed(t_in))
+ (datum.to_llscalarish(bcx), t_in.is_signed())
};
let newval = match (r_t_in, r_t_out) {
debug!("trans_assign_op(expr={:?})", expr);
// User-defined operator methods cannot be used with `+=` etc right now
- assert!(!bcx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!bcx.tcx().is_method_call(expr.id));
// Evaluate LHS (destination), which should be an lvalue
let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
// Compute final type. Note that we are loose with the region and
// mutability, since those things don't matter in trans.
let referent_ty = lv_datum.ty;
- let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), referent_ty);
+ let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
// Get the pointer.
let llref = lv_datum.to_llref();
let mut bcx = bcx;
// Check for overloaded deref.
- let method_ty = ccx.tcx().method_map.borrow()
+ let method_ty = ccx.tcx()
+ .tables
+ .borrow()
+ .method_map
.get(&method_call).map(|method| method.ty);
+
let datum = match method_ty {
Some(method_ty) => {
let method_ty = monomorphize_type(bcx, method_ty);
};
let ref_ty = // invoked methods have their LB regions instantiated
- ty::no_late_bound_regions(
- ccx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
+ ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
- datum, Vec::new(), Some(SaveIn(scratch.val)),
+ datum, None, Some(SaveIn(scratch.val)),
false));
scratch.to_expr_datum()
}
if type_is_sized(bcx.tcx(), content_ty) {
let ptr = load_ty(bcx, datum.val, datum.ty);
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
+ DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
} else {
// A fat pointer and a DST lvalue have the same representation
// just different types. Since there is no temporary for `*e`
// object code path for running drop glue and free. Instead,
// we schedule cleanup for `e`, turning it into an lvalue.
- let datum = Datum::new(datum.val, content_ty, LvalueExpr);
+ let lval = Lvalue::new("expr::deref_once ty_uniq");
+ let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
DatumBlock::new(bcx, datum)
}
}
- ty::TyRawPtr(ty::mt { ty: content_ty, .. }) |
- ty::TyRef(_, ty::mt { ty: content_ty, .. }) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
+ ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
+ let lval = Lvalue::new("expr::deref_once ptr");
if type_is_sized(bcx.tcx(), content_ty) {
let ptr = datum.to_llscalarish(bcx);
// rvalue for non-owning pointers like &T or *T, in which
// case cleanup *is* scheduled elsewhere, by the true
// owner (or, in the case of *T, by the user).
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
+ DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
} else {
// A fat pointer and a DST lvalue have the same representation
// just different types.
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr))
+ DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
}
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
- &format!("deref invoked on expr of illegal type {:?}",
+ &format!("deref invoked on expr of invalid type {:?}",
datum.ty));
}
};
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
let tcx = bcx.tcx();
- let is_simd = ty::type_is_simd(tcx, lhs_t);
+ let is_simd = lhs_t.is_simd(tcx);
let intype = if is_simd {
- ty::simd_type(tcx, lhs_t)
+ lhs_t.simd_type(tcx)
} else {
lhs_t
};
- let is_signed = ty::type_is_signed(intype);
+ let is_signed = intype.is_signed();
if is_signed {
AShr(bcx, lhs, rhs, binop_debug_loc)
} else {
}
fn expr_kind(tcx: &ty::ctxt, expr: &ast::Expr) -> ExprKind {
- if tcx.method_map.borrow().contains_key(&MethodCall::expr(expr.id)) {
+ if tcx.is_method_call(expr.id) {
// Overloaded operations are generally calls, and hence they are
// generated via DPS, but there are a few exceptions:
return match expr.node {
match expr.node {
ast::ExprPath(..) => {
- match ty::resolve_expr(tcx, expr) {
+ match tcx.resolve_expr(expr) {
def::DefStruct(_) | def::DefVariant(..) => {
- if let ty::TyBareFn(..) = ty::node_id_to_type(tcx, expr.id).sty {
+ if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
// ctor function
ExprKind::RvalueDatum
} else {
use syntax::abi::{RustIntrinsic, Rust, RustCall, Stdcall, Fastcall, System};
use syntax::codemap::Span;
use syntax::parse::token::{InternedString, special_idents};
-use syntax::parse::token;
use syntax::ast;
use syntax::attr;
use syntax::print::pprust;
pub fn register_static(ccx: &CrateContext,
foreign_item: &ast::ForeignItem) -> ValueRef {
- let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
+ let ty = ccx.tcx().node_id_to_type(foreign_item.id);
let llty = type_of::type_of(ccx, ty);
let ident = link_name(foreign_item);
ty::TyBareFn(_, ref fn_ty) => (fn_ty.abi, &fn_ty.sig),
_ => ccx.sess().bug("trans_native_call called on non-function type")
};
- let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);
+ let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
let llsig = foreign_signature(ccx, &fn_sig, &passed_arg_tys[..]);
let fn_type = cabi::compute_abi_info(ccx,
&llsig.llarg_tys,
let llarg_foreign = if foreign_indirect {
llarg_rust
} else {
- if ty::type_is_bool(passed_arg_tys[i]) {
+ if passed_arg_tys[i].is_bool() {
let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
Trunc(bcx, val, Type::i1(bcx.ccx()))
} else {
fn gate_simd_ffi(tcx: &ty::ctxt, decl: &ast::FnDecl, ty: &ty::BareFnTy) {
if !tcx.sess.features.borrow().simd_ffi {
let check = |ast_ty: &ast::Ty, ty: ty::Ty| {
- if ty::type_is_simd(tcx, ty) {
+ if ty.is_simd(tcx) {
tcx.sess.span_err(ast_ty.span,
&format!("use of SIMD type `{}` in FFI is highly experimental and \
may result in invalid code",
match foreign_mod.abi {
Rust | RustIntrinsic => {}
abi => {
- let ty = ty::node_id_to_type(ccx.tcx(), foreign_item.id);
+ let ty = ccx.tcx().node_id_to_type(foreign_item.id);
match ty.sty {
ty::TyBareFn(_, bft) => gate_simd_ffi(ccx.tcx(), &**decl, bft),
_ => ccx.tcx().sess.span_bug(foreign_item.span,
_ => panic!("expected bare fn in decl_rust_fn_with_foreign_abi")
};
let llfn = declare::declare_fn(ccx, name, cconv, llfn_ty,
- ty::FnConverging(ty::mk_nil(ccx.tcx())));
+ ty::FnConverging(ccx.tcx().mk_nil()));
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
let tys = foreign_types_for_id(ccx, node_id);
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
- let t = ty::node_id_to_type(ccx.tcx(), node_id);
+ let t = ccx.tcx().node_id_to_type(node_id);
let cconv = match t.sty {
ty::TyBareFn(_, ref fn_ty) => {
llvm_calling_convention(ccx, fn_ty.abi)
hash: Option<&str>) {
let _icx = push_ctxt("foreign::build_foreign_fn");
- let fnty = ty::node_id_to_type(ccx.tcx(), id);
+ let fnty = ccx.tcx().node_id_to_type(id);
let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty);
let tys = foreign_types_for_fn_ty(ccx, mty);
{
let _icx = push_ctxt("foreign::foreign::build_rust_fn");
let tcx = ccx.tcx();
- let t = ty::node_id_to_type(tcx, id);
+ let t = tcx.node_id_to_type(id);
let t = monomorphize::apply_param_substs(tcx, param_substs, &t);
let ps = ccx.tcx().map.with_path(id, |path| {
ccx.tcx().map.path_to_string(id),
id, t);
- let llfn = declare::define_internal_rust_fn(ccx, &ps[..], t).unwrap_or_else(||{
- ccx.sess().bug(&format!("symbol `{}` already defined", ps));
- });
+ let llfn = declare::define_internal_rust_fn(ccx, &ps, t);
attributes::from_fn_attrs(ccx, attrs, llfn);
base::trans_fn(ccx, decl, body, llfn, param_substs, id, &[]);
llfn
// pointer). It makes adapting types easier, since we can
// always just bitcast pointers.
if !foreign_indirect {
- llforeign_arg = if ty::type_is_bool(rust_ty) {
+ llforeign_arg = if rust_ty.is_bool() {
let lltemp = builder.alloca(Type::bool(ccx), "");
builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp);
lltemp
let llrust_arg = if rust_indirect || type_is_fat_ptr(ccx.tcx(), rust_ty) {
llforeign_arg
} else {
- if ty::type_is_bool(rust_ty) {
+ if rust_ty.is_bool() {
let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False);
builder.trunc(tmp, Type::i1(ccx))
} else if type_of::type_of(ccx, rust_ty).is_aggregate() {
Some(ln) => ln.clone(),
None => match weak_lang_items::link_name(&i.attrs) {
Some(name) => name,
- None => token::get_ident(i.ident),
+ None => i.ident.name.as_str(),
}
}
}
fn foreign_types_for_id<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
id: ast::NodeId) -> ForeignTypes<'tcx> {
- foreign_types_for_fn_ty(ccx, ty::node_id_to_type(ccx.tcx(), id))
+ foreign_types_for_fn_ty(ccx, ccx.tcx().node_id_to_type(id))
}
fn foreign_types_for_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty::TyBareFn(_, ref fn_ty) => &fn_ty.sig,
_ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type")
};
- let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);
+ let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
let llsig = foreign_signature(ccx, &fn_sig, &fn_sig.inputs);
let fn_ty = cabi::compute_abi_info(ccx,
&llsig.llarg_tys,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
- drop_ty_core(bcx, v, t, debug_loc, false)
+ drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
- skip_dtor: bool) -> Block<'blk, 'tcx> {
+ skip_dtor: bool,
+ drop_hint: Option<cleanup::DropHintValue>)
+ -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
- debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
+ debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
+ let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
v
};
- Call(bcx, glue, &[ptr], None, debug_loc);
+ match drop_hint {
+ Some(drop_hint) => {
+ let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
+ let moved_val =
+ C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
+ let may_need_drop =
+ ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
+ bcx = with_cond(bcx, may_need_drop, |cx| {
+ Call(cx, glue, &[ptr], None, debug_loc);
+ cx
+ })
+ }
+ None => {
+ // No drop-hint ==> call standard drop glue
+ Call(bcx, glue, &[ptr], None, debug_loc);
+ }
+ }
}
bcx
}
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
- drop_ty_core(bcx, vp, t, debug_loc, skip_dtor)
+ drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
- type_of(ccx, ty::mk_uniq(ccx.tcx(), t)).ptr_to()
+ type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
- let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ty::mk_nil(ccx.tcx()));
+ let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
- let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ty::mk_nil(ccx.tcx())).unwrap_or_else(||{
+ let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
- ty::FnConverging(ty::mk_nil(ccx.tcx())),
+ ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
- let bcx = init_function(&fcx, false, ty::FnConverging(ty::mk_nil(ccx.tcx())));
+ let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
- finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None);
+ finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
- let class_ty = ty::lookup_item_type(tcx, parent_id).ty.subst(tcx, substs);
+ let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
- let dtor_ty = ty::mk_ctor_fn(ccx.tcx(),
- did,
- &[get_drop_glue_type(ccx, t)],
- ty::mk_nil(ccx.tcx()));
+ let dtor_ty = ccx.tcx().mk_ctor_fn(did,
+ &[get_drop_glue_type(ccx, t)],
+ ccx.tcx().mk_nil());
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, dtor_ty)
}
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
- assert_eq!(params.len(), 1);
+ assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
- let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), class_did, &[glue_type], ty::mk_nil(bcx.tcx()));
- let (_, bcx) = invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None);
+ let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
+ let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
+ invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
+ } else {
+ let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
+ invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
+ };
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
- assert!(!ty::type_is_simd(bcx.tcx(), t));
+ assert!(!t.is_simd(bcx.tcx()));
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_of(ccx, &*repr, true);
let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type));
// Recurse to get the size of the dynamically sized field (must be
// the last field).
- let fields = ty::struct_fields(bcx.tcx(), id, substs);
+ let fields = bcx.tcx().struct_fields(id, substs);
let last_field = fields[fields.len()-1];
let field_ty = last_field.mt.ty;
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
(Load(bcx, size_ptr), Load(bcx, align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
- let unit_ty = ty::sequence_element_type(bcx.tcx(), t);
+ let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
}
ty::TyStruct(did, substs) | ty::TyEnum(did, substs) => {
let tcx = bcx.tcx();
- match (ty::ty_dtor(tcx, did), skip_dtor) {
+ match (tcx.ty_dtor(did), skip_dtor) {
(ty::TraitDtor(dtor, true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
use middle::subst::Substs;
use trans::base::{push_ctxt, trans_item, get_item_val, trans_fn};
use trans::common::*;
-use middle::ty;
use syntax::ast;
use syntax::ast_util::local_def;
Some(&Some(node_id)) => {
// Already inline
debug!("instantiate_inline({}): already inline as node id {}",
- ty::item_path_str(ccx.tcx(), fn_id), node_id);
+ ccx.tcx().item_path_str(fn_id), node_id);
return Some(local_def(node_id));
}
Some(&None) => {
let mut my_id = 0;
match item.node {
ast::ItemEnum(_, _) => {
- let vs_here = ty::enum_variants(ccx.tcx(), local_def(item.id));
- let vs_there = ty::enum_variants(ccx.tcx(), parent_id);
+ let vs_here = ccx.tcx().enum_variants(local_def(item.id));
+ let vs_there = ccx.tcx().enum_variants(parent_id);
for (here, there) in vs_here.iter().zip(vs_there.iter()) {
if there.id == fn_id { my_id = here.id.node; }
ccx.external().borrow_mut().insert(there.id, Some(here.id.node));
// the logic to do that already exists in `middle`. In order to
// reuse that code, it needs to be able to look up the traits for
// inlined items.
- let ty_trait_item = ty::impl_or_trait_item(ccx.tcx(), fn_id).clone();
+ let ty_trait_item = ccx.tcx().impl_or_trait_item(fn_id).clone();
ccx.tcx().impl_or_trait_items.borrow_mut()
.insert(local_def(trait_item.id), ty_trait_item);
// Translate monomorphic impl methods immediately.
if let ast::MethodImplItem(ref sig, ref body) = impl_item.node {
- let impl_tpt = ty::lookup_item_type(ccx.tcx(), impl_did);
+ let impl_tpt = ccx.tcx().lookup_item_type(impl_did);
if impl_tpt.generics.types.is_empty() &&
sig.generics.ty_params.is_empty() {
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
#![allow(non_upper_case_globals)]
+use arena::TypedArena;
use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
+use trans::attributes;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
+use trans::declare;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
-use middle::ty::{self, Ty};
-use syntax::abi::RustIntrinsic;
+use middle::ty::{self, Ty, HasTypeFlags};
+use middle::subst::Substs;
+use syntax::abi::{self, RustIntrinsic};
use syntax::ast;
use syntax::parse::token;
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
- let name = match &token::get_ident(item.ident)[..] {
+ let name = match &*item.ident.name.as_str() {
"sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64",
"powif32" => "llvm.powi.f32",
debug!("transmute_restriction: {:?}", transmute_restriction);
- assert!(!ty::type_has_params(transmute_restriction.substituted_from));
- assert!(!ty::type_has_params(transmute_restriction.substituted_to));
+ assert!(!transmute_restriction.substituted_from.has_param_types());
+ assert!(!transmute_restriction.substituted_to.has_param_types());
let llfromtype = type_of::sizing_type_of(ccx,
transmute_restriction.substituted_from);
let ret_ty = match callee_ty.sty {
ty::TyBareFn(_, ref f) => {
- ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
+ bcx.tcx().erase_late_bound_regions(&f.sig.output())
}
_ => panic!("expected bare_fn in trans_intrinsic_call")
};
let foreign_item = tcx.map.expect_foreign_item(node);
- let name = token::get_ident(foreign_item.ident);
+ let name = foreign_item.ident.name.as_str();
// For `transmute` we can just trans the input expr directly into dest
- if &name[..] == "transmute" {
+ if name == "transmute" {
let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
match args {
callee::ArgExprs(arg_exprs) => {
// (the first argument) and then trans the source value (the
// second argument) directly into the resulting destination
// address.
- if &name[..] == "move_val_init" {
+ if name == "move_val_init" {
if let callee::ArgExprs(ref exprs) = args {
let (dest_expr, source_expr) = if exprs.len() != 2 {
ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
}
}
+ let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
+
+ // For `try` we need some custom control flow
+ if &name[..] == "try" {
+ if let callee::ArgExprs(ref exprs) = args {
+ let (func, data) = if exprs.len() != 2 {
+ ccx.sess().bug("expected two exprs as arguments for \
+ `try` intrinsic");
+ } else {
+ (&exprs[0], &exprs[1])
+ };
+
+ // translate arguments
+ let func = unpack_datum!(bcx, expr::trans(bcx, func));
+ let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
+ let data = unpack_datum!(bcx, expr::trans(bcx, data));
+ let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
+
+ let dest = match dest {
+ expr::SaveIn(d) => d,
+ expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
+ "try_result"),
+ };
+
+ // do the invoke
+ bcx = try_intrinsic(bcx, func.val, data.val, dest,
+ call_debug_location);
+
+ fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
+ return Result::new(bcx, dest);
+ } else {
+ ccx.sess().bug("expected two exprs as arguments for \
+ `try` intrinsic");
+ }
+ }
+
// Push the arguments.
let mut llargs = Vec::new();
bcx = callee::trans_args(bcx,
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
- let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
-
// These are the only intrinsic functions that diverge.
- if &name[..] == "abort" {
+ if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], None, call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
};
let simple = get_simple_intrinsic(ccx, &*foreign_item);
- let llval = match (simple, &name[..]) {
+ let llval = match (simple, &*name) {
(Some(llfn), _) => {
Call(bcx, llfn, &llargs, None, call_debug_location)
}
C_str_slice(ccx, ty_name)
}
(_, "type_id") => {
- let hash = ty::hash_crate_independent(
- ccx.tcx(),
- *substs.types.get(FnSpace, 0),
- &ccx.link_meta().crate_hash);
+ let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
+ &ccx.link_meta().crate_hash);
C_u64(ccx, hash)
}
(_, "init_dropped") => {
ret
}
}
+
+fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ func: ValueRef,
+ data: ValueRef,
+ dest: ValueRef,
+ dloc: DebugLoc) -> Block<'blk, 'tcx> {
+ if bcx.sess().no_landing_pads() {
+ Call(bcx, func, &[data], None, dloc);
+ Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
+ bcx
+ } else if bcx.sess().target.target.options.is_like_msvc {
+ trans_msvc_try(bcx, func, data, dest, dloc)
+ } else {
+ trans_gnu_try(bcx, func, data, dest, dloc)
+ }
+}
+
+// MSVC's definition of the `rust_try` function. The exact implementation here
+// is a little different than the GNU (standard) version below, not only because
+// of the personality function but also because of the other fiddly bits about
+// SEH. LLVM also currently requires us to structure this a very particular way
+// as explained below.
+//
+// Like with the GNU version we generate a shim wrapper
+fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ func: ValueRef,
+ data: ValueRef,
+ dest: ValueRef,
+ dloc: DebugLoc) -> Block<'blk, 'tcx> {
+ let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
+ let ccx = bcx.ccx();
+ let dloc = DebugLoc::None;
+ let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try",
+ try_fn_ty);
+ let (fcx, block_arena);
+ block_arena = TypedArena::new();
+ fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
+ output, ccx.tcx().mk_substs(Substs::trans_empty()),
+ None, &block_arena);
+ let bcx = init_function(&fcx, true, output);
+ let then = fcx.new_temp_block("then");
+ let catch = fcx.new_temp_block("catch");
+ let catch_return = fcx.new_temp_block("catch-return");
+ let catch_resume = fcx.new_temp_block("catch-resume");
+ let personality = fcx.eh_personality();
+
+ let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for");
+ let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() {
+ Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
+ bcx.fcx.param_substs).val,
+ None => bcx.sess().bug("msvc_try_filter not defined"),
+ };
+
+ // Type indicator for the exception being thrown, not entirely sure
+ // what's going on here but it's what all the examples in LLVM use.
+ let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
+ false);
+
+ llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline);
+ llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone);
+ let func = llvm::get_param(rust_try, 0);
+ let data = llvm::get_param(rust_try, 1);
+
+ // Invoke the function, specifying our two temporary landing pads as the
+ // ext point. After the invoke we've terminated our basic block.
+ Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
+
+ // All the magic happens in this landing pad, and this is basically the
+ // only landing pad in rust tagged with "catch" to indicate that we're
+ // catching an exception. The other catch handlers in the GNU version
+ // below just catch *all* exceptions, but that's because most exceptions
+ // are already filtered out by the gnu personality function.
+ //
+ // For MSVC we're just using a standard personality function that we
+ // can't customize (e.g. _except_handler3 or __C_specific_handler), so
+ // we need to do the exception filtering ourselves. This is currently
+ // performed by the `__rust_try_filter` function. This function,
+ // specified in the landingpad instruction, will be invoked by Windows
+ // SEH routines and will return whether the exception in question can be
+ // caught (aka the Rust runtime is the one that threw the exception).
+ //
+ // To get this to compile (currently LLVM segfaults if it's not in this
+ // particular structure), when the landingpad is executing we test to
+ // make sure that the ID of the exception being thrown is indeed the one
+ // that we were expecting. If it's not, we resume the exception, and
+ // otherwise we return the pointer that we got Full disclosure: It's not
+ // clear to me what this `llvm.eh.typeid` stuff is doing *other* then
+ // just allowing LLVM to compile this file without segfaulting. I would
+ // expect the entire landing pad to just be:
+ //
+ // %vals = landingpad ...
+ // %ehptr = extractvalue { i8*, i32 } %vals, 0
+ // ret i8* %ehptr
+ //
+ // but apparently LLVM chokes on this, so we do the more complicated
+ // thing to placate it.
+ let vals = LandingPad(catch, lpad_ty, personality, 1);
+ let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx));
+ AddClause(catch, vals, rust_try_filter);
+ let ehptr = ExtractValue(catch, vals, 0);
+ let sel = ExtractValue(catch, vals, 1);
+ let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None,
+ dloc);
+ let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc);
+ CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc);
+
+ // Our "catch-return" basic block is where we've determined that we
+ // actually need to catch this exception, in which case we just return
+ // the exception pointer.
+ Ret(catch_return, ehptr, dloc);
+
+ // The "catch-resume" block is where we're running this landing pad but
+ // we actually need to not catch the exception, so just resume the
+ // exception to return.
+ Resume(catch_resume, vals);
+
+ // On the successful branch we just return null.
+ Ret(then, C_null(Type::i8p(ccx)), dloc);
+
+ return rust_try
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = Call(bcx, llfn, &[func, data], None, dloc);
+ Store(bcx, ret, dest);
+ return bcx;
+}
+
+// Definition of the standard "try" function for Rust using the GNU-like model
+// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
+// instructions).
+//
+// This translation is a little surprising because
+// we always call a shim function instead of inlining the call to `invoke`
+// manually here. This is done because in LLVM we're only allowed to have one
+// personality per function definition. The call to the `try` intrinsic is
+// being inlined into the function calling it, and that function may already
+// have other personality functions in play. By calling a shim we're
+// guaranteed that our shim will have the right personality function.
+//
+fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ func: ValueRef,
+ data: ValueRef,
+ dest: ValueRef,
+ dloc: DebugLoc) -> Block<'blk, 'tcx> {
+ let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| {
+ let ccx = bcx.ccx();
+ let dloc = DebugLoc::None;
+
+ // Translates the shims described above:
+ //
+ // bcx:
+ // invoke %func(%args...) normal %normal unwind %catch
+ //
+ // normal:
+ // ret null
+ //
+ // catch:
+ // (ptr, _) = landingpad
+ // ret ptr
+
+ let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty);
+ attributes::emit_uwtable(rust_try, true);
+ let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
+ Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
+ bcx.fcx.param_substs).val,
+ None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
+ };
+
+ let (fcx, block_arena);
+ block_arena = TypedArena::new();
+ fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false,
+ output, ccx.tcx().mk_substs(Substs::trans_empty()),
+ None, &block_arena);
+ let bcx = init_function(&fcx, true, output);
+ let then = bcx.fcx.new_temp_block("then");
+ let catch = bcx.fcx.new_temp_block("catch");
+
+ let func = llvm::get_param(rust_try, 0);
+ let data = llvm::get_param(rust_try, 1);
+ Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
+ Ret(then, C_null(Type::i8p(ccx)), dloc);
+
+ // Type indicator for the exception being thrown.
+ // The first value in this tuple is a pointer to the exception object being thrown.
+ // The second value is a "selector" indicating which of the landing pad clauses
+ // the exception's type had been matched to. rust_try ignores the selector.
+ let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
+ false);
+ let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
+ AddClause(catch, vals, C_null(Type::i8p(ccx)));
+ let ptr = ExtractValue(catch, vals, 0);
+ Ret(catch, ptr, dloc);
+ fcx.cleanup();
+
+ return rust_try
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = Call(bcx, llfn, &[func, data], None, dloc);
+ Store(bcx, ret, dest);
+ return bcx;
+}
+
+// Helper to generate the `Ty` associated with `rust_try`
+fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
+ f: &mut FnMut(Ty<'tcx>,
+ ty::FnOutput<'tcx>) -> ValueRef)
+ -> ValueRef {
+ let ccx = fcx.ccx;
+ if let Some(llfn) = *ccx.rust_try_fn().borrow() {
+ return llfn
+ }
+
+ // Define the type up front for the signature of the rust_try function.
+ let tcx = ccx.tcx();
+ let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+ let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Unsafe,
+ abi: abi::Rust,
+ sig: ty::Binder(ty::FnSig {
+ inputs: vec![i8p],
+ output: ty::FnOutput::FnConverging(tcx.mk_nil()),
+ variadic: false,
+ }),
+ });
+ let fn_ty = tcx.mk_fn(None, fn_ty);
+ let output = ty::FnOutput::FnConverging(i8p);
+ let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Unsafe,
+ abi: abi::Rust,
+ sig: ty::Binder(ty::FnSig {
+ inputs: vec![fn_ty, i8p],
+ output: output,
+ variadic: false,
+ }),
+ });
+ let rust_try = f(tcx.mk_fn(None, try_fn_ty), output);
+ *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
+ return rust_try
+}
impl<T:LlvmRepr> LlvmRepr for [T] {
fn llrepr(&self, ccx: &CrateContext) -> String {
let reprs: Vec<String> = self.iter().map(|t| t.llrepr(ccx)).collect();
- format!("[{}]", reprs.connect(","))
+ format!("[{}]", reprs.join(","))
}
}
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
- return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref());
+ return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref());
}
}
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
- return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref());
+ return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref());
}
}
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
- let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref());
+ let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref());
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
/// Returns the "real" size of the type in bits.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
unsafe {
- llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref())
+ llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref())
}
}
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
- return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref());
+ return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref());
}
}
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
- return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref());
+ return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref());
}
}
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 {
unsafe {
- return llvm::LLVMOffsetOfElement(cx.td().lltd,
+ return llvm::LLVMOffsetOfElement(cx.td(),
struct_ty.to_ref(),
element as u32);
}
use middle::subst::VecPerParamSpace;
use middle::subst;
use middle::traits;
-use middle::ty::ClosureTyper;
use rustc::ast_map;
use trans::base::*;
use trans::build::*;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of::*;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use middle::ty::MethodCall;
-use syntax::abi::{Rust, RustCall};
-use syntax::parse::token;
use syntax::{ast, attr, visit};
use syntax::codemap::DUMMY_SP;
use syntax::ptr::P;
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_method_callee");
- let (origin, method_ty) =
- bcx.tcx().method_map
- .borrow()
- .get(&method_call)
- .map(|method| (method.origin.clone(), method.ty))
- .unwrap();
-
- match origin {
- ty::MethodStatic(did) |
- ty::MethodStaticClosure(did) => {
- debug!("trans_method_callee: static, {:?}", did);
+ let method = bcx.tcx().tables.borrow().method_map[&method_call];
+
+ match bcx.tcx().impl_or_trait_item(method.def_id).container() {
+ ty::ImplContainer(_) => {
+ debug!("trans_method_callee: static, {:?}", method.def_id);
+ let datum = callee::trans_fn_ref(bcx.ccx(),
+ method.def_id,
+ MethodCallKey(method_call),
+ bcx.fcx.param_substs);
Callee {
bcx: bcx,
- data: Fn(callee::trans_fn_ref(bcx.ccx(),
- did,
- MethodCallKey(method_call),
- bcx.fcx.param_substs).val),
+ data: Fn(datum.val),
+ ty: datum.ty
}
}
- ty::MethodTypeParam(ty::MethodParam {
- ref trait_ref,
- method_num,
- impl_def_id: _
- }) => {
- let trait_ref = ty::Binder(bcx.monomorphize(trait_ref));
+ ty::TraitContainer(trait_def_id) => {
+ let trait_substs = method.substs.clone().method_to_trait();
+ let trait_substs = bcx.tcx().mk_substs(trait_substs);
+ let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs);
+
+ let trait_ref = ty::Binder(bcx.monomorphize(&trait_ref));
let span = bcx.tcx().map.span(method_call.expr_id);
debug!("method_call={:?} trait_ref={:?} trait_ref id={:?} substs={:?}",
method_call,
debug!("origin = {:?}", origin);
trans_monomorphized_callee(bcx,
method_call,
- trait_ref.def_id(),
- method_num,
- origin)
- }
-
- ty::MethodTraitObject(ref mt) => {
- let self_expr = match self_expr {
- Some(self_expr) => self_expr,
- None => {
- bcx.sess().span_bug(bcx.tcx().map.span(method_call.expr_id),
- "self expr wasn't provided for trait object \
- callee (trying to call overloaded op?)")
- }
- };
- trans_trait_callee(bcx,
- monomorphize_type(bcx, method_ty),
- mt.vtable_index,
- self_expr,
- arg_cleanup_scope)
+ self_expr,
+ trait_def_id,
+ method.def_id,
+ method.ty,
+ origin,
+ arg_cleanup_scope)
}
}
}
debug!("trans_static_method_callee(method_id={:?}, trait_id={}, \
expr_id={})",
method_id,
- ty::item_path_str(tcx, trait_id),
+ tcx.item_path_str(trait_id),
expr_id);
let mname = if method_id.krate == ast::LOCAL_CRATE {
csearch::get_item_path(tcx, method_id).last().unwrap().name()
};
debug!("trans_static_method_callee: method_id={:?}, expr_id={}, \
- name={}", method_id, expr_id, token::get_name(mname));
+ name={}", method_id, expr_id, mname);
// Find the substitutions for the fn itself. This includes
// type parameters that belong to the trait but also some that
Vec::new()));
let trait_substs = tcx.mk_substs(trait_substs);
debug!("trait_substs={:?}", trait_substs);
- let trait_ref = ty::Binder(ty::TraitRef { def_id: trait_id,
- substs: trait_substs });
+ let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, trait_substs));
let vtbl = fulfill_obligation(ccx,
DUMMY_SP,
trait_ref);
substs: impl_substs,
nested: _ }) =>
{
- assert!(impl_substs.types.all(|t| !ty::type_needs_infer(*t)));
+ assert!(!impl_substs.types.needs_infer());
// Create the substitutions that are in scope. This combines
// the type parameters from the impl with those declared earlier.
callee_substs)
}
traits::VtableObject(ref data) => {
- let trait_item_def_ids =
- ty::trait_item_def_ids(ccx.tcx(), trait_id);
- let method_offset_in_trait =
- trait_item_def_ids.iter()
- .position(|item| item.def_id() == method_id)
- .unwrap();
- let (llfn, ty) =
- trans_object_shim(ccx,
- data.object_ty,
- data.upcast_trait_ref.clone(),
- method_offset_in_trait);
- immediate_rvalue(llfn, ty)
+ let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id);
+ trans_object_shim(ccx,
+ data.upcast_trait_ref.clone(),
+ method_id,
+ idx)
}
_ => {
tcx.sess.bug(&format!("static call to invalid vtable: {:?}",
.expect("could not find impl while translating");
let meth_did = impl_items.iter()
.find(|&did| {
- ty::impl_or_trait_item(ccx.tcx(), did.def_id()).name() == name
+ ccx.tcx().impl_or_trait_item(did.def_id()).name() == name
}).expect("could not find method while \
translating");
fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
method_call: MethodCall,
+ self_expr: Option<&ast::Expr>,
trait_id: ast::DefId,
- n_method: usize,
- vtable: traits::Vtable<'tcx, ()>)
+ method_id: ast::DefId,
+ method_ty: Ty<'tcx>,
+ vtable: traits::Vtable<'tcx, ()>,
+ arg_cleanup_scope: cleanup::ScopeId)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_monomorphized_callee");
match vtable {
traits::VtableImpl(vtable_impl) => {
let ccx = bcx.ccx();
let impl_did = vtable_impl.impl_def_id;
- let mname = match ty::trait_item(ccx.tcx(), trait_id, n_method) {
+ let mname = match ccx.tcx().impl_or_trait_item(method_id) {
ty::MethodTraitItem(method) => method.name,
_ => {
bcx.tcx().sess.bug("can't monomorphize a non-method trait \
bcx, MethodCallKey(method_call), vtable_impl.substs);
// translate the function
- let llfn = trans_fn_ref_with_substs(bcx.ccx(),
- mth_id,
- MethodCallKey(method_call),
- bcx.fcx.param_substs,
- callee_substs).val;
+ let datum = trans_fn_ref_with_substs(bcx.ccx(),
+ mth_id,
+ MethodCallKey(method_call),
+ bcx.fcx.param_substs,
+ callee_substs);
- Callee { bcx: bcx, data: Fn(llfn) }
+ Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
let llfn = closure::trans_closure_method(bcx.ccx(),
vtable_closure.closure_def_id,
vtable_closure.substs,
- MethodCallKey(method_call),
- bcx.fcx.param_substs,
trait_closure_kind);
Callee {
bcx: bcx,
data: Fn(llfn),
+ ty: monomorphize_type(bcx, method_ty)
}
}
traits::VtableFnPointer(fn_ty) => {
let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = trans_fn_pointer_shim(bcx.ccx(), trait_closure_kind, fn_ty);
- Callee { bcx: bcx, data: Fn(llfn) }
+ Callee {
+ bcx: bcx,
+ data: Fn(llfn),
+ ty: monomorphize_type(bcx, method_ty)
+ }
}
traits::VtableObject(ref data) => {
- let (llfn, _) = trans_object_shim(bcx.ccx(),
- data.object_ty,
- data.upcast_trait_ref.clone(),
- n_method);
- Callee { bcx: bcx, data: Fn(llfn) }
+ let idx = traits::get_vtable_index_of_object_method(bcx.tcx(), data, method_id);
+ if let Some(self_expr) = self_expr {
+ if let ty::TyBareFn(_, ref fty) = monomorphize_type(bcx, method_ty).sty {
+ let ty = bcx.tcx().mk_fn(None, opaque_method_ty(bcx.tcx(), fty));
+ return trans_trait_callee(bcx, ty, idx, self_expr, arg_cleanup_scope);
+ }
+ }
+ let datum = trans_object_shim(bcx.ccx(),
+ data.upcast_trait_ref.clone(),
+ method_id,
+ idx);
+ Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty }
}
traits::VtableBuiltin(..) |
traits::VtableDefaultImpl(..) |
/// object. Objects are represented as a pair, so we first evaluate the self expression and then
/// extract the self data and vtable out of the pair.
fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- method_ty: Ty<'tcx>,
+ opaque_fn_ty: Ty<'tcx>,
vtable_index: usize,
self_expr: &ast::Expr,
arg_cleanup_scope: cleanup::ScopeId)
let llself = Load(bcx, GEPi(bcx, llval, &[0, abi::FAT_PTR_ADDR]));
let llvtable = Load(bcx, GEPi(bcx, llval, &[0, abi::FAT_PTR_EXTRA]));
- trans_trait_callee_from_llval(bcx, method_ty, vtable_index, llself, llvtable)
+ trans_trait_callee_from_llval(bcx, opaque_fn_ty, vtable_index, llself, llvtable)
}
/// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object
/// pair.
-pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- callee_ty: Ty<'tcx>,
- vtable_index: usize,
- llself: ValueRef,
- llvtable: ValueRef)
- -> Callee<'blk, 'tcx> {
+fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ opaque_fn_ty: Ty<'tcx>,
+ vtable_index: usize,
+ llself: ValueRef,
+ llvtable: ValueRef)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_trait_callee");
let ccx = bcx.ccx();
// Load the data pointer from the object.
debug!("trans_trait_callee_from_llval(callee_ty={}, vtable_index={}, llself={}, llvtable={})",
- callee_ty,
+ opaque_fn_ty,
vtable_index,
bcx.val_to_string(llself),
bcx.val_to_string(llvtable));
// Replace the self type (&Self or Box<Self>) with an opaque pointer.
- let llcallee_ty = match callee_ty.sty {
- ty::TyBareFn(_, ref f) if f.abi == Rust || f.abi == RustCall => {
- let fake_sig =
- ty::Binder(ty::FnSig {
- inputs: f.sig.0.inputs[1..].to_vec(),
- output: f.sig.0.output,
- variadic: f.sig.0.variadic,
- });
- type_of_rust_fn(ccx, Some(Type::i8p(ccx)), &fake_sig, f.abi)
- }
- _ => {
- ccx.sess().bug("meth::trans_trait_callee given non-bare-rust-fn");
- }
- };
let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]));
+ let llcallee_ty = type_of_fn_from_ty(ccx, opaque_fn_ty);
- return Callee {
+ Callee {
bcx: bcx,
data: TraitItem(MethodData {
llfn: PointerCast(bcx, mptr, llcallee_ty.ptr_to()),
llself: PointerCast(bcx, llself, Type::i8p(ccx)),
- })
- };
+ }),
+ ty: opaque_fn_ty
+ }
}
/// Generate a shim function that allows an object type like `SomeTrait` to
///
/// In fact, all virtual calls can be thought of as normal trait calls
/// that go through this shim function.
-pub fn trans_object_shim<'a, 'tcx>(
+fn trans_object_shim<'a, 'tcx>(
ccx: &'a CrateContext<'a, 'tcx>,
- object_ty: Ty<'tcx>,
upcast_trait_ref: ty::PolyTraitRef<'tcx>,
- method_offset_in_trait: usize)
- -> (ValueRef, Ty<'tcx>)
+ method_id: ast::DefId,
+ vtable_index: usize)
+ -> Datum<'tcx, Rvalue>
{
let _icx = push_ctxt("trans_object_shim");
let tcx = ccx.tcx();
- let trait_id = upcast_trait_ref.def_id();
- debug!("trans_object_shim(object_ty={:?}, upcast_trait_ref={:?}, method_offset_in_trait={})",
- object_ty,
+ debug!("trans_object_shim(upcast_trait_ref={:?}, method_id={:?})",
upcast_trait_ref,
- method_offset_in_trait);
-
- let object_trait_ref =
- match object_ty.sty {
- ty::TyTrait(ref data) => {
- data.principal_trait_ref_with_self_ty(tcx, object_ty)
- }
- _ => {
- tcx.sess.bug(&format!("trans_object_shim() called on non-object: {:?}",
- object_ty));
- }
- };
+ method_id);
// Upcast to the trait in question and extract out the substitutions.
- let upcast_trait_ref = ty::erase_late_bound_regions(tcx, &upcast_trait_ref);
+ let upcast_trait_ref = tcx.erase_late_bound_regions(&upcast_trait_ref);
let object_substs = upcast_trait_ref.substs.clone().erase_regions();
debug!("trans_object_shim: object_substs={:?}", object_substs);
// Lookup the type of this method as declared in the trait and apply substitutions.
- let method_ty = match ty::trait_item(tcx, trait_id, method_offset_in_trait) {
+ let method_ty = match tcx.impl_or_trait_item(method_id) {
ty::MethodTraitItem(method) => method,
_ => {
tcx.sess.bug("can't create a method shim for a non-method item")
debug!("trans_object_shim: fty={:?} method_ty={:?}", fty, method_ty);
//
- let shim_fn_ty = ty::mk_bare_fn(tcx, None, fty);
- let method_bare_fn_ty = ty::mk_bare_fn(tcx, None, method_ty);
+ let shim_fn_ty = tcx.mk_fn(None, fty);
+ let method_bare_fn_ty = tcx.mk_fn(None, method_ty);
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim");
- let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty).unwrap_or_else(||{
- ccx.sess().bug(&format!("symbol `{}` already defined", function_name));
- });
+ let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty);
- let sig = ty::erase_late_bound_regions(ccx.tcx(), &fty.sig);
+ let sig = ccx.tcx().erase_late_bound_regions(&fty.sig);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")));
- let method_offset_in_vtable =
- traits::get_vtable_index_of_object_method(bcx.tcx(),
- object_trait_ref.clone(),
- trait_id,
- method_offset_in_trait);
debug!("trans_object_shim: method_offset_in_vtable={}",
- method_offset_in_vtable);
+ vtable_index);
bcx = trans_call_inner(bcx,
DebugLoc::None,
- method_bare_fn_ty,
|bcx, _| trans_trait_callee_from_llval(bcx,
method_bare_fn_ty,
- method_offset_in_vtable,
+ vtable_index,
llself, llvtable),
ArgVals(&llargs[(self_idx + 2)..]),
dest).bcx;
finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
- (llfn, method_bare_fn_ty)
+ immediate_rvalue(llfn, shim_fn_ty)
}
/// Creates and returns a dynamic vtable for the given type and vtable origin.
let llfn = closure::trans_closure_method(ccx,
closure_def_id,
substs,
- ExprId(0),
- param_substs,
trait_closure_kind);
vec![llfn].into_iter()
}
substs,
param_substs);
- let trt_id = match ty::impl_trait_ref(tcx, impl_id) {
+ let trt_id = match tcx.impl_trait_ref(impl_id) {
Some(t_id) => t_id.def_id,
None => ccx.sess().bug("make_impl_vtable: don't know how to \
make a vtable for a type impl!")
};
- ty::populate_implementations_for_trait_if_necessary(tcx, trt_id);
+ tcx.populate_implementations_for_trait_if_necessary(trt_id);
let nullptr = C_null(Type::nil(ccx).ptr_to());
- let trait_item_def_ids = ty::trait_item_def_ids(tcx, trt_id);
+ let trait_item_def_ids = tcx.trait_item_def_ids(trt_id);
trait_item_def_ids
.iter()
debug!("emit_vtable_methods: trait_method_def_id={:?}",
trait_method_def_id);
- let trait_method_type = match ty::impl_or_trait_item(tcx, trait_method_def_id) {
+ let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) {
ty::MethodTraitItem(m) => m,
_ => ccx.sess().bug("should be a method, not other assoc item"),
};
// The substitutions we have are on the impl, so we grab
// the method type from the impl to substitute into.
let impl_method_def_id = method_with_name(ccx, impl_id, name);
- let impl_method_type = match ty::impl_or_trait_item(tcx, impl_method_def_id) {
+ let impl_method_type = match tcx.impl_or_trait_item(impl_method_def_id) {
ty::MethodTraitItem(m) => m,
_ => ccx.sess().bug("should be a method, not other assoc item"),
};
// particular set of type parameters. Note that this
// method could then never be called, so we do not want to
// try and trans it, in that case. Issue #23435.
- if ty::provided_source(tcx, impl_method_def_id).is_some() {
+ if tcx.provided_source(impl_method_def_id).is_some() {
let predicates = impl_method_type.predicates.predicates.subst(tcx, &substs);
if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
debug!("emit_vtable_methods: predicates do not hold");
}
/// Replace the self type (&Self or Box<Self>) with an opaque pointer.
-pub fn opaque_method_ty<'tcx>(tcx: &ty::ctxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
- -> &'tcx ty::BareFnTy<'tcx> {
+fn opaque_method_ty<'tcx>(tcx: &ty::ctxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
+ -> &'tcx ty::BareFnTy<'tcx> {
let mut inputs = method_ty.sig.0.inputs.clone();
- inputs[0] = ty::mk_mut_ptr(tcx, ty::mk_mach_int(tcx, ast::TyI8));
+ inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::TyI8));
tcx.mk_bare_fn(ty::BareFnTy {
unsafety: method_ty.unsafety,
use trans::common::*;
use trans::declare;
use trans::foreign;
-use middle::ty::{self, HasProjectionTypes, Ty};
+use middle::ty::{self, HasTypeFlags, Ty};
use syntax::abi;
use syntax::ast;
psubsts,
ref_id);
- assert!(psubsts.types.all(|t| {
- !ty::type_needs_infer(*t) && !ty::type_has_params(*t)
- }));
+ assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types());
let _icx = push_ctxt("monomorphic_fn");
params: &psubsts.types
};
- let item_ty = ty::lookup_item_type(ccx.tcx(), fn_id).ty;
+ let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
debug!("monomorphic_fn about to subst into {:?}", item_ty);
- let mono_ty = item_ty.subst(ccx.tcx(), psubsts);
+ let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty);
+ debug!("mono_ty = {:?} (post-substitution)", mono_ty);
match ccx.monomorphized().borrow().get(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
- ty::item_path_str(ccx.tcx(), fn_id));
+ ccx.tcx().item_path_str(fn_id));
return (val, mono_ty, false);
}
None => ()
}
}
- debug!("mono_ty = {:?} (post-substitution)", mono_ty);
-
- let mono_ty = normalize_associated_type(ccx.tcx(), &mono_ty);
- debug!("mono_ty = {:?} (post-normalization)", mono_ty);
-
ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
let lldecl = if abi != abi::Rust {
foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s[..])
} else {
- // FIXME(nagisa): perhaps needs a more fine grained selection? See setup_lldecl below.
- declare::define_internal_rust_fn(ccx, &s[..], mono_ty).unwrap_or_else(||{
- ccx.sess().bug(&format!("symbol `{}` already defined", s));
- })
+ // FIXME(nagisa): perhaps needs a more fine grained selection? See
+ // setup_lldecl below.
+ declare::define_internal_rust_fn(ccx, &s, mono_ty)
};
ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
}
ast_map::NodeVariant(v) => {
let parent = ccx.tcx().map.get_parent(fn_id.node);
- let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
+ let tvs = ccx.tcx().enum_variants(local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::NodeForeignItem(..) |
ast_map::NodeLifetime(..) |
+ ast_map::NodeTyParam(..) |
ast_map::NodeExpr(..) |
ast_map::NodeStmt(..) |
ast_map::NodeArg(..) |
ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
- debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id));
+ debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
(lldecl, mono_ty, true)
}
param_substs: &Substs<'tcx>,
value: &T)
-> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
let substituted = value.subst(tcx, param_substs);
normalize_associated_type(tcx, &substituted)
/// and hence we can be sure that all associated types will be
/// completely normalized away.
pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_associated_type(t={:?})", value);
}
// FIXME(#20304) -- cache
-
- let infcx = infer::new_infer_ctxt(tcx);
- let typer = NormalizingClosureTyper::new(tcx);
- let mut selcx = traits::SelectionContext::new(&infcx, &typer);
+ let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
+ let mut selcx = traits::SelectionContext::new(&infcx);
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: result, obligations } =
traits::normalize(&mut selcx, cause, &value);
result,
obligations);
- let mut fulfill_cx = traits::FulfillmentContext::new(true);
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
+
for obligation in obligations {
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
let count = elements_required(bcx, content_expr);
debug!(" vt={}, count={}", vt.to_string(ccx), count);
- let fixed_ty = ty::mk_vec(bcx.tcx(),
- vt.unit_ty,
- Some(count));
+ let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty);
// Always create an alloca even if zero-sized, to preserve
// Arrange for the backing array to be cleaned up.
let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
fcx.schedule_lifetime_end(cleanup_scope, llfixed);
- fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);
+ fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
// Generate the content into the backing array.
// llfixed has type *[T x N], but we want the type *T,
SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope);
fcx.schedule_lifetime_end(scope, lleltptr);
- fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
+ fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty, None);
}
fcx.pop_custom_cleanup_scope(temp_scope);
}
return expr::trans_into(bcx, &**element, Ignore);
}
SaveIn(lldest) => {
- match ty::eval_repeat_count(bcx.tcx(), &**count_expr) {
+ match bcx.tcx().eval_repeat_count(&**count_expr) {
0 => expr::trans_into(bcx, &**element, Ignore),
1 => expr::trans_into(bcx, &**element, SaveIn(lldest)),
count => {
fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &ast::Expr)
-> VecTypes<'tcx> {
let vec_ty = node_id_type(bcx, vec_expr.id);
- vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
+ vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
}
fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
},
ast::ExprVec(ref es) => es.len(),
ast::ExprRepeat(_, ref count_expr) => {
- ty::eval_repeat_count(bcx.tcx(), &**count_expr)
+ bcx.tcx().eval_repeat_count(&**count_expr)
}
_ => bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content")
}
// Only used for pattern matching.
- ty::TyBox(ty) | ty::TyRef(_, ty::mt{ty, ..}) => {
+ ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
let inner = if type_is_sized(bcx.tcx(), ty) {
Load(bcx, llval)
} else {
use std::mem;
use std::ptr;
use std::cell::RefCell;
-use std::iter::repeat;
use libc::c_uint;
if n_elts == 0 {
return Vec::new();
}
- let mut elts: Vec<_> = repeat(Type { rf: ptr::null_mut() }).take(n_elts).collect();
+ let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts];
llvm::LLVMGetStructElementTypes(self.to_ref(),
elts.as_mut_ptr() as *mut TypeRef);
elts
pub fn func_params(&self) -> Vec<Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize;
- let mut args: Vec<_> = repeat(Type { rf: ptr::null_mut() }).take(n_args).collect();
+ let mut args = vec![Type { rf: ptr::null_mut() }; n_args];
llvm::LLVMGetParamTypes(self.to_ref(),
args.as_mut_ptr() as *mut TypeRef);
args
pub fn types_to_str(&self, tys: &[Type]) -> String {
let strs: Vec<String> = tys.iter().map(|t| self.type_to_string(*t)).collect();
- format!("[{}]", strs.connect(","))
+ format!("[{}]", strs.join(","))
}
pub fn val_to_string(&self, val: ValueRef) -> String {
}
}
-/// Yields the types of the "real" arguments for this function. For most
-/// functions, these are simply the types of the arguments. For functions with
-/// the `RustCall` ABI, however, this untuples the arguments of the function.
-pub fn untuple_arguments_if_necessary<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- inputs: &[Ty<'tcx>],
- abi: abi::Abi)
- -> Vec<Ty<'tcx>> {
- if abi != abi::RustCall {
- return inputs.iter().cloned().collect()
- }
-
+/// Yields the types of the "real" arguments for a function using the `RustCall`
+/// ABI by untupling the arguments of the function.
+pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ inputs: &[Ty<'tcx>])
+ -> Vec<Ty<'tcx>> {
if inputs.is_empty() {
return Vec::new()
}
match inputs[inputs.len() - 1].sty {
ty::TyTuple(ref tupled_arguments) => {
- debug!("untuple_arguments_if_necessary(): untupling arguments");
+ debug!("untuple_arguments(): untupling arguments");
for &tupled_argument in tupled_arguments {
result.push(tupled_argument);
}
sig,
abi);
- let sig = ty::erase_late_bound_regions(cx.tcx(), sig);
+ let sig = cx.tcx().erase_late_bound_regions(sig);
assert!(!sig.variadic); // rust fns are never variadic
let mut atys: Vec<Type> = Vec::new();
// First, munge the inputs, if this has the `rust-call` ABI.
- let inputs = untuple_arguments_if_necessary(cx, &sig.inputs, abi);
+ let inputs = &if abi == abi::RustCall {
+ untuple_arguments(cx, &sig.inputs)
+ } else {
+ sig.inputs
+ };
// Arg 0: Output pointer.
// (if the output type is non-immediate)
}
// ... then explicit args.
- for input in &inputs {
+ for input in inputs {
let arg_ty = type_of_explicit_arg(cx, input);
if type_is_fat_ptr(cx.tcx(), input) {
ty::TyUint(t) => Type::uint_from_ty(cx, t),
ty::TyFloat(t) => Type::float_from_ty(cx, t),
- ty::TyBox(ty) | ty::TyRef(_, ty::mt{ty, ..}) | ty::TyRawPtr(ty::mt{ty, ..}) => {
+ ty::TyBox(ty) |
+ ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+ ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if type_is_sized(cx.tcx(), ty) {
Type::i8p(cx)
} else {
}
ty::TyStruct(..) => {
- if ty::type_is_simd(cx.tcx(), t) {
- let llet = type_of(cx, ty::simd_type(cx.tcx(), t));
- let n = ty::simd_size(cx.tcx(), t) as u64;
+ if t.is_simd(cx.tcx()) {
+ let llet = type_of(cx, t.simd_type(cx.tcx()));
+ let n = t.simd_size(cx.tcx()) as u64;
ensure_array_fits_in_address_space(cx, llet, n, t);
Type::vector(&llet, n)
} else {
}
pub fn foreign_arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
- if ty::type_is_bool(t) {
+ if t.is_bool() {
Type::i1(cx)
} else {
type_of(cx, t)
}
pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
- if ty::type_is_bool(t) {
+ if t.is_bool() {
Type::i1(cx)
} else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate LLVM type
/// For the raw type without far pointer indirection, see `in_memory_type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
let ty = if !type_is_sized(cx.tcx(), ty) {
- ty::mk_imm_ptr(cx.tcx(), ty)
+ cx.tcx().mk_imm_ptr(ty)
} else {
ty
};
adt::incomplete_type_of(cx, &*repr, "closure")
}
- ty::TyBox(ty) | ty::TyRef(_, ty::mt{ty, ..}) | ty::TyRawPtr(ty::mt{ty, ..}) => {
+ ty::TyBox(ty) |
+ ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+ ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if !type_is_sized(cx.tcx(), ty) {
if let ty::TyStr = ty.sty {
// This means we get a nicer name in the output (str is always
cx.tn().find_type("str_slice").unwrap()
} else {
let ptr_ty = in_memory_type_of(cx, ty).ptr_to();
- let unsized_part = ty::struct_tail(cx.tcx(), ty);
+ let unsized_part = cx.tcx().struct_tail(ty);
let info_ty = match unsized_part.sty {
ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
Type::uint_from_ty(cx, ast::TyUs)
adt::type_of(cx, &*repr)
}
ty::TyStruct(did, ref substs) => {
- if ty::type_is_simd(cx.tcx(), t) {
- let llet = in_memory_type_of(cx, ty::simd_type(cx.tcx(), t));
- let n = ty::simd_size(cx.tcx(), t) as u64;
+ if t.is_simd(cx.tcx()) {
+ let llet = in_memory_type_of(cx, t.simd_type(cx.tcx()));
+ let n = t.simd_size(cx.tcx()) as u64;
ensure_array_fits_in_address_space(cx, llet, n, t);
Type::vector(&llet, n)
} else {
// If this was an enum or struct, fill in the type now.
match t.sty {
ty::TyEnum(..) | ty::TyStruct(..) | ty::TyClosure(..)
- if !ty::type_is_simd(cx.tcx(), t) => {
+ if !t.is_simd(cx.tcx()) => {
let repr = adt::represent_type(cx, t);
adt::finish_type_of(cx, &*repr, &mut llty);
}
did: ast::DefId,
tps: &[Ty<'tcx>])
-> String {
- let base = ty::item_path_str(cx.tcx(), did);
+ let base = cx.tcx().item_path_str(did);
let strings: Vec<String> = tps.iter().map(|t| t.to_string()).collect();
let tstr = if strings.is_empty() {
base
} else {
- format!("{}<{}>", base, strings.connect(", "))
+ format!("{}<{}>", base, strings.join(", "))
};
if did.krate == 0 {
use middle::astconv_util::{prim_ty_to_ty, check_path_args, NO_TPS, NO_REGIONS};
use middle::const_eval::{self, ConstVal};
+use middle::const_eval::EvalHint::UncheckedExprHint;
use middle::def;
use middle::implicator::object_region_bounds;
use middle::resolve_lifetime as rl;
use middle::privacy::{AllPublic, LastMod};
-use middle::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs};
+use middle::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs, ParamSpace};
use middle::traits;
-use middle::ty::{self, RegionEscape, Ty, AsPredicate};
+use middle::ty::{self, RegionEscape, Ty, ToPredicate, HasTypeFlags};
use middle::ty_fold;
-use rscope::{self, UnelidableRscope, RegionScope, ElidableRscope, ExplicitRscope,
- ObjectLifetimeDefaultRscope, ShiftedRscope, BindingRscope};
+use require_c_abi_if_variadic;
+use rscope::{self, UnelidableRscope, RegionScope, ElidableRscope,
+ ObjectLifetimeDefaultRscope, ShiftedRscope, BindingRscope,
+ ElisionFailureInfo, ElidedLifetime};
use util::common::{ErrorReported, FN_OUTPUT_NAME};
use util::nodemap::FnvHashSet;
-use std::iter::repeat;
use std::slice;
use syntax::{abi, ast, ast_util};
use syntax::codemap::{Span, Pos};
+use syntax::feature_gate::emit_feature_err;
use syntax::parse::token;
use syntax::print::pprust;
}
/// What type should we use when a type is omitted?
- fn ty_infer(&self, span: Span) -> Ty<'tcx>;
+ fn ty_infer(&self,
+ param_and_substs: Option<ty::TypeParameterDef<'tcx>>,
+ substs: Option<&mut Substs<'tcx>>,
+ space: Option<ParamSpace>,
+ span: Span) -> Ty<'tcx>;
/// Projecting an associated type from a (potentially)
/// higher-ranked trait reference is more complicated, because of
item_name: ast::Name)
-> Ty<'tcx>
{
- if ty::binds_late_bound_regions(self.tcx(), &poly_trait_ref) {
+ if let Some(trait_ref) = self.tcx().no_late_bound_regions(&poly_trait_ref) {
+ self.projected_ty(span, trait_ref, item_name)
+ } else {
+ // binder has escaping late-bound regions, so we cannot project out of it
span_err!(self.tcx().sess, span, E0212,
"cannot extract an associated type from a higher-ranked trait bound \
in this context");
self.tcx().types.err
- } else {
- // no late-bound regions, we can just ignore the binder
- self.projected_ty(span, poly_trait_ref.0.clone(), item_name)
}
}
r
}
+fn report_elision_failure(
+ tcx: &ty::ctxt,
+ default_span: Span,
+ params: Vec<ElisionFailureInfo>)
+{
+ let mut m = String::new();
+ let len = params.len();
+ for (i, info) in params.into_iter().enumerate() {
+ let ElisionFailureInfo {
+ name, lifetime_count: n, have_bound_regions
+ } = info;
+
+ let help_name = if name.is_empty() {
+ format!("argument {}", i + 1)
+ } else {
+ format!("`{}`", name)
+ };
+
+ m.push_str(&(if n == 1 {
+ help_name
+ } else {
+ format!("one of {}'s {} elided {}lifetimes", help_name, n,
+ if have_bound_regions { "free " } else { "" } )
+ })[..]);
+
+ if len == 2 && i == 0 {
+ m.push_str(" or ");
+ } else if i + 2 == len {
+ m.push_str(", or ");
+ } else if i + 1 != len {
+ m.push_str(", ");
+ }
+ }
+ if len == 1 {
+ fileline_help!(tcx.sess, default_span,
+ "this function's return type contains a borrowed value, but \
+ the signature does not say which {} it is borrowed from",
+ m);
+ } else if len == 0 {
+ fileline_help!(tcx.sess, default_span,
+ "this function's return type contains a borrowed value, but \
+ there is no value for it to be borrowed from");
+ fileline_help!(tcx.sess, default_span,
+ "consider giving it a 'static lifetime");
+ } else {
+ fileline_help!(tcx.sess, default_span,
+ "this function's return type contains a borrowed value, but \
+ the signature does not say whether it is borrowed from {}",
+ m);
+ }
+}
+
pub fn opt_ast_region_to_region<'tcx>(
this: &AstConv<'tcx>,
rscope: &RegionScope,
ast_region_to_region(this.tcx(), lifetime)
}
- None => {
- match rscope.anon_regions(default_span, 1) {
- Err(v) => {
- debug!("optional region in illegal location");
- span_err!(this.tcx().sess, default_span, E0106,
- "missing lifetime specifier");
- match v {
- Some(v) => {
- let mut m = String::new();
- let len = v.len();
- for (i, (name, n)) in v.into_iter().enumerate() {
- let help_name = if name.is_empty() {
- format!("argument {}", i + 1)
- } else {
- format!("`{}`", name)
- };
-
- m.push_str(&(if n == 1 {
- help_name
- } else {
- format!("one of {}'s {} elided lifetimes", help_name, n)
- })[..]);
-
- if len == 2 && i == 0 {
- m.push_str(" or ");
- } else if i + 2 == len {
- m.push_str(", or ");
- } else if i + 1 != len {
- m.push_str(", ");
- }
- }
- if len == 1 {
- fileline_help!(this.tcx().sess, default_span,
- "this function's return type contains a borrowed value, but \
- the signature does not say which {} it is borrowed from",
- m);
- } else if len == 0 {
- fileline_help!(this.tcx().sess, default_span,
- "this function's return type contains a borrowed value, but \
- there is no value for it to be borrowed from");
- fileline_help!(this.tcx().sess, default_span,
- "consider giving it a 'static lifetime");
- } else {
- fileline_help!(this.tcx().sess, default_span,
- "this function's return type contains a borrowed value, but \
- the signature does not say whether it is borrowed from {}",
- m);
- }
- }
- None => {},
- }
- ty::ReStatic
+ None => match rscope.anon_regions(default_span, 1) {
+ Ok(rs) => rs[0],
+ Err(params) => {
+ span_err!(this.tcx().sess, default_span, E0106,
+ "missing lifetime specifier");
+ if let Some(params) = params {
+ report_elision_failure(this.tcx(), default_span, params);
}
-
- Ok(rs) => rs[0],
+ ty::ReStatic
}
}
};
// they were optional (e.g. paths inside expressions).
let mut type_substs = if param_mode == PathParamMode::Optional &&
types_provided.is_empty() {
- (0..formal_ty_param_count).map(|_| this.ty_infer(span)).collect()
+ let mut substs = region_substs.clone();
+ ty_param_defs
+ .iter()
+ .map(|p| this.ty_infer(Some(p.clone()), Some(&mut substs), Some(TypeSpace), span))
+ .collect()
} else {
types_provided
};
// other type parameters may reference `Self` in their
// defaults. This will lead to an ICE if we are not
// careful!
- if self_ty.is_none() && ty::type_has_self(default) {
+ if self_ty.is_none() && default.has_self_ty() {
span_err!(tcx.sess, span, E0393,
"the type parameter `{}` must be explicitly specified \
in an object type because its default value `{}` references \
/// Returns the appropriate lifetime to use for any output lifetimes
/// (if one exists) and a vector of the (pattern, number of lifetimes)
/// corresponding to each input type/pattern.
-fn find_implied_output_region(input_tys: &[Ty], input_pats: Vec<String>)
- -> (Option<ty::Region>, Vec<(String, usize)>)
+fn find_implied_output_region<'tcx>(tcx: &ty::ctxt<'tcx>,
+ input_tys: &[Ty<'tcx>],
+ input_pats: Vec<String>) -> ElidedLifetime
{
- let mut lifetimes_for_params: Vec<(String, usize)> = Vec::new();
+ let mut lifetimes_for_params = Vec::new();
let mut possible_implied_output_region = None;
for (input_type, input_pat) in input_tys.iter().zip(input_pats) {
- let mut accumulator = Vec::new();
- ty::accumulate_lifetimes_in_type(&mut accumulator, *input_type);
+ let mut regions = FnvHashSet();
+ let have_bound_regions = ty_fold::collect_regions(tcx,
+ input_type,
+ &mut regions);
+
+ debug!("find_implied_output_regions: collected {:?} from {:?} \
+ have_bound_regions={:?}", ®ions, input_type, have_bound_regions);
- if accumulator.len() == 1 {
+ if regions.len() == 1 {
// there's a chance that the unique lifetime of this
// iteration will be the appropriate lifetime for output
// parameters, so let's store it.
- possible_implied_output_region = Some(accumulator[0])
+ possible_implied_output_region = regions.iter().cloned().next();
}
- lifetimes_for_params.push((input_pat, accumulator.len()));
+ lifetimes_for_params.push(ElisionFailureInfo {
+ name: input_pat,
+ lifetime_count: regions.len(),
+ have_bound_regions: have_bound_regions
+ });
}
- let implied_output_region =
- if lifetimes_for_params.iter().map(|&(_, n)| n).sum::<usize>() == 1 {
- assert!(possible_implied_output_region.is_some());
- possible_implied_output_region
- } else {
- None
- };
- (implied_output_region, lifetimes_for_params)
+ if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::<usize>() == 1 {
+ Ok(possible_implied_output_region.unwrap())
+ } else {
+ Err(Some(lifetimes_for_params))
+ }
}
fn convert_ty_with_lifetime_elision<'tcx>(this: &AstConv<'tcx>,
- implied_output_region: Option<ty::Region>,
- param_lifetimes: Vec<(String, usize)>,
+ elided_lifetime: ElidedLifetime,
ty: &ast::Ty)
-> Ty<'tcx>
{
- match implied_output_region {
- Some(implied_output_region) => {
+ match elided_lifetime {
+ Ok(implied_output_region) => {
let rb = ElidableRscope::new(implied_output_region);
ast_ty_to_ty(this, &rb, ty)
}
- None => {
+ Err(param_lifetimes) => {
// All regions must be explicitly specified in the output
// if the lifetime elision rules do not apply. This saves
// the user from potentially-confusing errors.
0, ®ion_substs, a_t))
.collect::<Vec<Ty<'tcx>>>();
- let input_params: Vec<_> = repeat(String::new()).take(inputs.len()).collect();
- let (implied_output_region,
- params_lifetimes) = find_implied_output_region(&*inputs, input_params);
+ let input_params = vec![String::new(); inputs.len()];
+ let implied_output_region = find_implied_output_region(this.tcx(), &inputs, input_params);
- let input_ty = ty::mk_tup(this.tcx(), inputs);
+ let input_ty = this.tcx().mk_tup(inputs);
let (output, output_span) = match data.output {
Some(ref output_ty) => {
(convert_ty_with_lifetime_elision(this,
implied_output_region,
- params_lifetimes,
- &**output_ty),
+ &output_ty),
output_ty.span)
}
None => {
- (ty::mk_nil(this.tcx()), data.span)
+ (this.tcx().mk_nil(), data.span)
}
};
// For now, require that parenthetical notation be used
// only with `Fn()` etc.
if !this.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar {
- span_err!(this.tcx().sess, span, E0215,
- "angle-bracket notation is not stable when \
- used with the `Fn` family of traits, use parentheses");
- fileline_help!(this.tcx().sess, span,
- "add `#![feature(unboxed_closures)]` to \
- the crate attributes to enable");
+ emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic,
+ "unboxed_closures", span,
+ "\
+ the precise format of `Fn`-family traits' type parameters is \
+ subject to change. Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead");
}
convert_angle_bracketed_parameters(this, rscope, span, &trait_def.generics, data)
// For now, require that parenthetical notation be used
// only with `Fn()` etc.
if !this.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar {
- span_err!(this.tcx().sess, span, E0216,
- "parenthetical notation is only stable when \
- used with the `Fn` family of traits");
- fileline_help!(this.tcx().sess, span,
- "add `#![feature(unboxed_closures)]` to \
- the crate attributes to enable");
+ emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic,
+ "unboxed_closures", span,
+ "\
+ parenthetical notation is only stable when used with `Fn`-family traits");
}
convert_parenthesized_parameters(this, rscope, span, &trait_def.generics, data)
// this, we currently insert a dummy type and then remove it
// later. Yuck.
- let dummy_self_ty = ty::mk_infer(tcx, ty::FreshTy(0));
+ let dummy_self_ty = tcx.mk_infer(ty::FreshTy(0));
if self_ty.is_none() { // if converting for an object type
let mut dummy_substs = trait_ref.skip_binder().substs.clone(); // binder moved here -+
assert!(dummy_substs.self_ty().is_none()); // |
let candidate = try!(one_bound_for_assoc_type(tcx,
candidates,
&trait_ref.to_string(),
- &token::get_name(binding.item_name),
+ &binding.item_name.as_str(),
binding.span));
Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------------+
// FIXME(#12938): This is a hack until we have full support for DST.
if Some(did) == this.tcx().lang_items.owned_box() {
assert_eq!(substs.types.len(TypeSpace), 1);
- return ty::mk_uniq(this.tcx(), *substs.types.get(TypeSpace, 0));
+ return this.tcx().mk_box(*substs.types.get(TypeSpace, 0));
}
decl_ty.subst(this.tcx(), &substs)
let mut associated_types: FnvHashSet<(ast::DefId, ast::Name)> =
traits::supertraits(tcx, object_trait_ref)
.flat_map(|tr| {
- let trait_def = ty::lookup_trait_def(tcx, tr.def_id());
+ let trait_def = tcx.lookup_trait_def(tr.def_id());
trait_def.associated_type_names
.clone()
.into_iter()
span_err!(tcx.sess, span, E0191,
"the value of the associated type `{}` (from the trait `{}`) must be specified",
name,
- ty::item_path_str(tcx, trait_def_id));
+ tcx.item_path_str(trait_def_id));
}
- ty::mk_trait(tcx, object.principal, object.bounds)
+ tcx.mk_trait(object.principal, object.bounds)
}
fn report_ambiguous_associated_type(tcx: &ty::ctxt,
// any ambiguity.
fn find_bound_for_assoc_item<'tcx>(this: &AstConv<'tcx>,
ty_param_node_id: ast::NodeId,
+ ty_param_name: ast::Name,
assoc_name: ast::Name,
span: Span)
-> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
.filter(|b| this.trait_defines_associated_type_named(b.def_id(), assoc_name))
.collect();
- let ty_param_name = tcx.type_parameter_def(ty_param_node_id).name;
one_bound_for_assoc_type(tcx,
suitable_bounds,
- &token::get_name(ty_param_name),
- &token::get_name(assoc_name),
+ &ty_param_name.as_str(),
+ &assoc_name.as_str(),
span)
}
(_, def::DefSelfTy(Some(trait_did), Some((impl_id, _)))) => {
// `Self` in an impl of a trait - we have a concrete self type and a
// trait reference.
- match tcx.map.expect_item(impl_id).node {
- ast::ItemImpl(_, _, _, Some(ref trait_ref), _, _) => {
- if this.ensure_super_predicates(span, trait_did).is_err() {
- return (tcx.types.err, ty_path_def);
- }
+ let trait_ref = tcx.impl_trait_ref(ast_util::local_def(impl_id)).unwrap();
+ let trait_ref = if let Some(free_substs) = this.get_free_substs() {
+ trait_ref.subst(tcx, free_substs)
+ } else {
+ trait_ref
+ };
- let trait_segment = &trait_ref.path.segments.last().unwrap();
- let trait_ref = ast_path_to_mono_trait_ref(this,
- &ExplicitRscope,
- span,
- PathParamMode::Explicit,
- trait_did,
- Some(ty),
- trait_segment);
-
- let candidates: Vec<ty::PolyTraitRef> =
- traits::supertraits(tcx, ty::Binder(trait_ref.clone()))
- .filter(|r| this.trait_defines_associated_type_named(r.def_id(),
- assoc_name))
- .collect();
-
- match one_bound_for_assoc_type(tcx,
- candidates,
- "Self",
- &token::get_name(assoc_name),
- span) {
- Ok(bound) => bound,
- Err(ErrorReported) => return (tcx.types.err, ty_path_def),
- }
- }
- _ => unreachable!()
+ if this.ensure_super_predicates(span, trait_did).is_err() {
+ return (tcx.types.err, ty_path_def);
+ }
+
+ let candidates: Vec<ty::PolyTraitRef> =
+ traits::supertraits(tcx, ty::Binder(trait_ref))
+ .filter(|r| this.trait_defines_associated_type_named(r.def_id(),
+ assoc_name))
+ .collect();
+
+ match one_bound_for_assoc_type(tcx,
+ candidates,
+ "Self",
+ &assoc_name.as_str(),
+ span) {
+ Ok(bound) => bound,
+ Err(ErrorReported) => return (tcx.types.err, ty_path_def),
}
}
- (&ty::TyParam(_), def::DefTyParam(..)) |
- (&ty::TyParam(_), def::DefSelfTy(Some(_), None)) => {
- // A type parameter or Self, we need to find the associated item from
- // a bound.
- let ty_param_node_id = ty_path_def.local_node_id();
- match find_bound_for_assoc_item(this, ty_param_node_id, assoc_name, span) {
+ (&ty::TyParam(_), def::DefSelfTy(Some(trait_did), None)) => {
+ assert_eq!(trait_did.krate, ast::LOCAL_CRATE);
+ match find_bound_for_assoc_item(this,
+ trait_did.node,
+ token::special_idents::type_self.name,
+ assoc_name,
+ span) {
+ Ok(bound) => bound,
+ Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ }
+ }
+ (&ty::TyParam(_), def::DefTyParam(_, _, param_did, param_name)) => {
+ assert_eq!(param_did.krate, ast::LOCAL_CRATE);
+ match find_bound_for_assoc_item(this,
+ param_did.node,
+ param_name,
+ assoc_name,
+ span) {
Ok(bound) => bound,
Err(ErrorReported) => return (tcx.types.err, ty_path_def),
}
span,
&ty.to_string(),
"Trait",
- &token::get_name(assoc_name));
+ &assoc_name.as_str());
return (tcx.types.err, ty_path_def);
}
};
_ => unreachable!()
}
} else {
- let trait_items = ty::trait_items(tcx, trait_did);
+ let trait_items = tcx.trait_items(trait_did);
let item = trait_items.iter().find(|i| i.name() == assoc_name);
item.expect("missing associated type").def_id()
};
let self_ty = if let Some(ty) = opt_self_ty {
ty
} else {
- let path_str = ty::item_path_str(tcx, trait_def_id);
+ let path_str = tcx.item_path_str(trait_def_id);
report_ambiguous_associated_type(tcx,
span,
"Type",
&path_str,
- &token::get_ident(item_segment.identifier));
+ &item_segment.identifier.name.as_str());
return tcx.types.err;
};
base_segments.last().unwrap(),
&mut projection_bounds);
- check_path_args(tcx, base_segments.init(), NO_TPS | NO_REGIONS);
+ check_path_args(tcx, base_segments.split_last().unwrap().1, NO_TPS | NO_REGIONS);
trait_ref_to_object_type(this,
rscope,
span,
&[])
}
def::DefTy(did, _) | def::DefStruct(did) => {
- check_path_args(tcx, base_segments.init(), NO_TPS | NO_REGIONS);
+ check_path_args(tcx, base_segments.split_last().unwrap().1, NO_TPS | NO_REGIONS);
ast_path_to_ty(this,
rscope,
span,
}
def::DefTyParam(space, index, _, name) => {
check_path_args(tcx, base_segments, NO_TPS | NO_REGIONS);
- ty::mk_param(tcx, space, index, name)
+ tcx.mk_param(space, index, name)
}
def::DefSelfTy(_, Some((_, self_ty_id))) => {
// Self in impl (we know the concrete type).
def::DefSelfTy(Some(_), None) => {
// Self in trait.
check_path_args(tcx, base_segments, NO_TPS | NO_REGIONS);
- ty::mk_self_type(tcx)
+ tcx.mk_self_type()
}
def::DefAssociatedTy(trait_did, _) => {
check_path_args(tcx, &base_segments[..base_segments.len()-2], NO_TPS | NO_REGIONS);
prim_ty_to_ty(tcx, base_segments, prim_ty)
}
_ => {
+ let node = def.def_id().node;
span_err!(tcx.sess, span, E0248,
- "found value name used as a type: {:?}", *def);
+ "found value `{}` used as a type",
+ tcx.map.path_to_string(node));
return this.tcx().types.err;
}
}
let typ = match ast_ty.node {
ast::TyVec(ref ty) => {
- ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty), None)
+ tcx.mk_slice(ast_ty_to_ty(this, rscope, &**ty))
}
ast::TyObjectSum(ref ty, ref bounds) => {
match ast_ty_to_trait_ref(this, rscope, &**ty, bounds) {
}
}
ast::TyPtr(ref mt) => {
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: ast_ty_to_ty(this, rscope, &*mt.ty),
mutbl: mt.mutbl
})
rscope,
ty::ObjectLifetimeDefault::Specific(r));
let t = ast_ty_to_ty(this, rscope1, &*mt.ty);
- ty::mk_rptr(tcx, tcx.mk_region(r), ty::mt {ty: t, mutbl: mt.mutbl})
+ tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
}
ast::TyTup(ref fields) => {
let flds = fields.iter()
.map(|t| ast_ty_to_ty(this, rscope, &**t))
.collect();
- ty::mk_tup(tcx, flds)
+ tcx.mk_tup(flds)
}
ast::TyParen(ref typ) => ast_ty_to_ty(this, rscope, &**typ),
ast::TyBareFn(ref bf) => {
- if bf.decl.variadic && bf.abi != abi::C {
- span_err!(tcx.sess, ast_ty.span, E0045,
- "variadic function must have C calling convention");
- }
+ require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl);
- ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(bare_fn))
+ tcx.mk_fn(None, tcx.mk_bare_fn(bare_fn))
}
ast::TyPolyTraitRef(ref bounds) => {
conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds)
ty
}
ast::TyFixedLengthVec(ref ty, ref e) => {
- match const_eval::eval_const_expr_partial(tcx, &**e, Some(tcx.types.usize)) {
+ let hint = UncheckedExprHint(tcx.types.usize);
+ match const_eval::eval_const_expr_partial(tcx, &e, hint) {
Ok(r) => {
match r {
ConstVal::Int(i) =>
- ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty),
- Some(i as usize)),
+ tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty),
+ i as usize),
ConstVal::Uint(i) =>
- ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty),
- Some(i as usize)),
+ tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty),
+ i as usize),
_ => {
span_err!(tcx.sess, ast_ty.span, E0249,
"expected constant integer expression \
// values in a ExprClosure, or as
// the type of local variables. Both of these cases are
// handled specially and will not descend into this routine.
- this.ty_infer(ast_ty.span)
+ this.ty_infer(None, None, None, ast_ty.span)
}
};
{
match a.ty.node {
ast::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
- ast::TyInfer => this.ty_infer(a.ty.span),
+ ast::TyInfer => this.ty_infer(None, None, None, a.ty.span),
_ => ast_ty_to_ty(this, rscope, &*a.ty),
}
}
// here), if self is by-reference, then the implied output region is the
// region of the self parameter.
let mut explicit_self_category_result = None;
- let (self_ty, mut implied_output_region) = match opt_self_info {
+ let (self_ty, implied_output_region) = match opt_self_info {
None => (None, None),
Some(self_info) => {
// This type comes from an impl or trait; no late-bound
(Some(self_info.untransformed_self_ty), None)
}
ty::ByReferenceExplicitSelfCategory(region, mutability) => {
- (Some(ty::mk_rptr(this.tcx(),
+ (Some(this.tcx().mk_ref(
this.tcx().mk_region(region),
- ty::mt {
+ ty::TypeAndMut {
ty: self_info.untransformed_self_ty,
mutbl: mutability
})),
Some(region))
}
ty::ByBoxExplicitSelfCategory => {
- (Some(ty::mk_uniq(this.tcx(), self_info.untransformed_self_ty)), None)
+ (Some(this.tcx().mk_box(self_info.untransformed_self_ty)), None)
}
}
}
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
// have that lifetime.
- let lifetimes_for_params = if implied_output_region.is_none() {
- let input_tys = if self_ty.is_some() {
- // Skip the first argument if `self` is present.
- &self_and_input_tys[1..]
- } else {
- &self_and_input_tys[..]
- };
+ let implied_output_region = match implied_output_region {
+ Some(r) => Ok(r),
+ None => {
+ let input_tys = if self_ty.is_some() {
+ // Skip the first argument if `self` is present.
+ &self_and_input_tys[1..]
+ } else {
+ &self_and_input_tys[..]
+ };
- let (ior, lfp) = find_implied_output_region(input_tys, input_pats);
- implied_output_region = ior;
- lfp
- } else {
- vec![]
+ find_implied_output_region(this.tcx(), input_tys, input_pats)
+ }
};
let output_ty = match decl.output {
ast::Return(ref output) if output.node == ast::TyInfer =>
- ty::FnConverging(this.ty_infer(output.span)),
+ ty::FnConverging(this.ty_infer(None, None, None, output.span)),
ast::Return(ref output) =>
ty::FnConverging(convert_ty_with_lifetime_elision(this,
implied_output_region,
- lifetimes_for_params,
- &**output)),
- ast::DefaultReturn(..) => ty::FnConverging(ty::mk_nil(this.tcx())),
+ &output)),
+ ast::DefaultReturn(..) => ty::FnConverging(this.tcx().mk_nil()),
ast::NoReturn(..) => ty::FnDiverging
};
_ if is_infer && expected_ret_ty.is_some() =>
expected_ret_ty.unwrap(),
_ if is_infer =>
- ty::FnConverging(this.ty_infer(decl.output.span())),
+ ty::FnConverging(this.ty_infer(None, None, None, decl.output.span())),
ast::Return(ref output) =>
ty::FnConverging(ast_ty_to_ty(this, &rb, &**output)),
ast::DefaultReturn(..) => unreachable!(),
principal_trait_ref,
builtin_bounds);
- let (region_bound, will_change) = match region_bound {
- Some(r) => (r, false),
+ let region_bound = match region_bound {
+ Some(r) => r,
None => {
match rscope.object_lifetime_default(span) {
- Some(r) => (r, rscope.object_lifetime_default_will_change_in_1_3()),
+ Some(r) => r,
None => {
span_err!(this.tcx().sess, span, E0228,
"the lifetime bound for this object type cannot be deduced \
from context; please supply an explicit bound");
- (ty::ReStatic, false)
+ ty::ReStatic
}
}
}
};
- debug!("region_bound: {:?} will_change: {:?}",
- region_bound, will_change);
+ debug!("region_bound: {:?}", region_bound);
ty::sort_bounds_list(&mut projection_bounds);
region_bound: region_bound,
builtin_bounds: builtin_bounds,
projection_bounds: projection_bounds,
- region_bound_will_change: will_change,
}
}
ast::TraitTyParamBound(ref b, ast::TraitBoundModifier::None) => {
match ::lookup_full_def(tcx, b.trait_ref.path.span, b.trait_ref.ref_id) {
def::DefTrait(trait_did) => {
- if ty::try_add_builtin_trait(tcx,
- trait_did,
+ if tcx.try_add_builtin_trait(trait_did,
&mut builtin_bounds) {
let segments = &b.trait_ref.path.segments;
let parameters = &segments[segments.len() - 1].parameters;
for builtin_bound in &self.builtin_bounds {
match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty) {
- Ok(trait_ref) => { vec.push(trait_ref.as_predicate()); }
+ Ok(trait_ref) => { vec.push(trait_ref.to_predicate()); }
Err(ErrorReported) => { }
}
}
// account for the binder being introduced below; no need to shift `param_ty`
// because, at present at least, it can only refer to early-bound regions
let region_bound = ty_fold::shift_region(region_bound, 1);
- vec.push(ty::Binder(ty::OutlivesPredicate(param_ty, region_bound)).as_predicate());
+ vec.push(ty::Binder(ty::OutlivesPredicate(param_ty, region_bound)).to_predicate());
}
for bound_trait_ref in &self.trait_bounds {
- vec.push(bound_trait_ref.as_predicate());
+ vec.push(bound_trait_ref.to_predicate());
}
for projection in &self.projection_bounds {
- vec.push(projection.as_predicate());
+ vec.push(projection.to_predicate());
}
vec
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use middle::const_eval;
use middle::def;
use middle::infer;
use middle::pat_util::{PatIdMap, pat_id_map, pat_is_binding};
use middle::pat_util::pat_is_resolved_const;
use middle::privacy::{AllPublic, LastMod};
use middle::subst::Substs;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use check::{check_expr, check_expr_has_type, check_expr_with_expectation};
use check::{check_expr_coercable_to_type, demand, FnCtxt, Expectation};
use check::{check_expr_with_lvalue_pref, LvaluePreference};
use require_same_types;
use util::nodemap::FnvHashMap;
-use std::cmp::{self, Ordering};
+use std::cmp;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::{Span, Spanned};
-use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
if let ty::TyRef(_, mt) = expected_ty.sty {
if let ty::TySlice(_) = mt.ty.sty {
- pat_ty = ty::mk_slice(tcx, tcx.mk_region(ty::ReStatic),
- ty::mt{ ty: tcx.types.u8, mutbl: ast::MutImmutable })
+ pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_slice(tcx.types.u8))
}
}
}
let rhs_ty = fcx.expr_ty(end);
// Check that both end-points are of numeric or char type.
- let numeric_or_char = |t| ty::type_is_numeric(t) || ty::type_is_char(t);
+ let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char();
let lhs_compat = numeric_or_char(lhs_ty);
let rhs_compat = numeric_or_char(rhs_ty);
fcx.write_ty(pat.id, common_type);
- // Finally we evaluate the constants and check that the range is non-empty.
- let get_substs = |id| fcx.item_substs()[&id].substs.clone();
- match const_eval::compare_lit_exprs(tcx, begin, end, Some(&common_type), get_substs) {
- Some(Ordering::Less) |
- Some(Ordering::Equal) => {}
- Some(Ordering::Greater) => {
- span_err!(tcx.sess, begin.span, E0030,
- "lower range bound must be less than or equal to upper");
- }
- None => tcx.sess.span_bug(begin.span, "literals of different types in range pat")
- }
-
// subtyping doesn't matter here, as the value is some kind of scalar
demand::eqtype(fcx, pat.span, expected, lhs_ty);
}
ast::PatEnum(..) | ast::PatIdent(..) if pat_is_resolved_const(&tcx.def_map, pat) => {
let const_did = tcx.def_map.borrow().get(&pat.id).unwrap().def_id();
- let const_scheme = ty::lookup_item_type(tcx, const_did);
+ let const_scheme = tcx.lookup_item_type(const_did);
assert!(const_scheme.generics.is_empty());
let const_ty = pcx.fcx.instantiate_type_scheme(pat.span,
&Substs::empty(),
// then `x` is assigned a value of type `&M T` where M is the mutability
// and T is the expected type.
let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let mt = ty::mt { ty: expected, mutbl: mutbl };
- let region_ty = ty::mk_rptr(tcx, tcx.mk_region(region_var), mt);
+ let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
+ let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
// `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
// required. However, we use equality, which is stronger. See (*) for
resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty),
path, pat.span, pat.id) {
if check_assoc_item_is_const(pcx, def, pat.span) {
- let scheme = ty::lookup_item_type(tcx, def.def_id());
- let predicates = ty::lookup_predicates(tcx, def.def_id());
+ let scheme = tcx.lookup_item_type(def.def_id());
+ let predicates = tcx.lookup_predicates(def.def_id());
instantiate_path(fcx, segments,
scheme, &predicates,
opt_ty, def, pat.span, pat.id);
let element_tys: Vec<_> =
(0..elements.len()).map(|_| fcx.infcx().next_ty_var())
.collect();
- let pat_ty = ty::mk_tup(tcx, element_tys.clone());
+ let pat_ty = tcx.mk_tup(element_tys.clone());
fcx.write_ty(pat.id, pat_ty);
demand::eqtype(fcx, pat.span, expected, pat_ty);
for (element_pat, element_ty) in elements.iter().zip(element_tys) {
}
ast::PatBox(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
- let uniq_ty = ty::mk_uniq(tcx, inner_ty);
+ let uniq_ty = tcx.mk_box(inner_ty);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// Here, `demand::subtype` is good enough, but I don't
ast::PatRegion(ref inner, mutbl) => {
let inner_ty = fcx.infcx().next_ty_var();
- let mt = ty::mt { ty: inner_ty, mutbl: mutbl };
+ let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let rptr_ty = ty::mk_rptr(tcx, tcx.mk_region(region), mt);
+ let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// `demand::subtype` would be good enough, but using
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
let inner_ty = fcx.infcx().next_ty_var();
let pat_ty = match expected_ty.sty {
- ty::TyArray(_, size) => ty::mk_vec(tcx, inner_ty, Some({
+ ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
let min_len = before.len() + after.len();
match *slice {
Some(_) => cmp::max(min_len, size),
None => min_len
}
- })),
+ }),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
- ty: inner_ty,
- mutbl: ty::deref(expected_ty, true).map(|mt| mt.mutbl)
- .unwrap_or(ast::MutImmutable)
+ tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
+ ty: tcx.mk_slice(inner_ty),
+ mutbl: expected_ty.builtin_deref(true).map(|mt| mt.mutbl)
+ .unwrap_or(ast::MutImmutable)
})
}
};
}
if let Some(ref slice) = *slice {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let mutbl = ty::deref(expected_ty, true)
+ let mutbl = expected_ty.builtin_deref(true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
- let slice_ty = ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
- ty: inner_ty,
+ let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
+ ty: tcx.mk_slice(inner_ty),
mutbl: mutbl
});
check_pat(pcx, &**slice, slice_ty);
let tcx = pcx.fcx.ccx.tcx;
if pat_is_binding(&tcx.def_map, inner) {
let expected = fcx.infcx().shallow_resolve(expected);
- ty::deref(expected, true).map_or(true, |mt| match mt.ty.sty {
+ expected.builtin_deref(true).map_or(true, |mt| match mt.ty.sty {
ty::TyTrait(_) => {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
// us to give better error messages (pointing to a usually better
// arm for inconsistent arms or to the whole match when a `()` type
// is required).
- Expectation::ExpectHasType(ety) if ety != ty::mk_nil(fcx.tcx()) => {
+ Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
check_expr_coercable_to_type(fcx, &*arm.body, ety);
ety
}
check_expr_has_type(fcx, &**e, tcx.types.bool);
}
- if ty::type_is_error(result_ty) || ty::type_is_error(bty) {
+ if result_ty.references_error() || bty.references_error() {
tcx.types.err
} else {
let (origin, expected, found) = match match_src {
return;
},
_ => {
- let def_type = ty::lookup_item_type(tcx, def.def_id());
+ let def_type = tcx.lookup_item_type(def.def_id());
match def_type.ty.sty {
ty::TyStruct(struct_def_id, _) =>
(struct_def_id, struct_def_id),
instantiate_path(pcx.fcx,
&path.segments,
- ty::lookup_item_type(tcx, enum_def_id),
- &ty::lookup_predicates(tcx, enum_def_id),
+ tcx.lookup_item_type(enum_def_id),
+ &tcx.lookup_predicates(enum_def_id),
None,
def,
pat.span,
.map(|substs| substs.substs.clone())
.unwrap_or_else(|| Substs::empty());
- let struct_fields = ty::struct_fields(tcx, variant_def_id, &item_substs);
+ let struct_fields = tcx.struct_fields(variant_def_id, &item_substs);
check_struct_pat_fields(pcx, pat.span, fields, &struct_fields,
variant_def_id, etc);
}
let enum_def = def.variant_def_ids()
.map_or_else(|| def.def_id(), |(enum_def, _)| enum_def);
- let ctor_scheme = ty::lookup_item_type(tcx, enum_def);
- let ctor_predicates = ty::lookup_predicates(tcx, enum_def);
- let path_scheme = if ty::is_fn_ty(ctor_scheme.ty) {
- let fn_ret = ty::no_late_bound_regions(tcx, &ty::ty_fn_ret(ctor_scheme.ty)).unwrap();
+ let ctor_scheme = tcx.lookup_item_type(enum_def);
+ let ctor_predicates = tcx.lookup_predicates(enum_def);
+ let path_scheme = if ctor_scheme.ty.is_fn() {
+ let fn_ret = tcx.no_late_bound_regions(&ctor_scheme.ty.fn_ret()).unwrap();
ty::TypeScheme {
ty: fn_ret.unwrap(),
generics: ctor_scheme.generics,
ty::TyEnum(enum_def_id, expected_substs)
if def == def::DefVariant(enum_def_id, def.def_id(), false) =>
{
- let variant = ty::enum_variant_with_id(tcx, enum_def_id, def.def_id());
+ let variant = tcx.enum_variant_with_id(enum_def_id, def.def_id());
(variant.args.iter()
.map(|t| fcx.instantiate_type_scheme(pat.span, expected_substs, t))
.collect(),
"variant")
}
ty::TyStruct(struct_def_id, expected_substs) => {
- let struct_fields = ty::struct_fields(tcx, struct_def_id, expected_substs);
+ let struct_fields = tcx.struct_fields(struct_def_id, expected_substs);
(struct_fields.iter()
.map(|field| fcx.instantiate_type_scheme(pat.span,
expected_substs,
pub fn check_struct_pat_fields<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
span: Span,
fields: &'tcx [Spanned<ast::FieldPat>],
- struct_fields: &[ty::field<'tcx>],
+ struct_fields: &[ty::Field<'tcx>],
struct_id: ast::DefId,
etc: bool) {
let tcx = pcx.fcx.ccx.tcx;
Occupied(occupied) => {
span_err!(tcx.sess, span, E0025,
"field `{}` bound multiple times in the pattern",
- token::get_ident(field.ident));
+ field.ident);
span_note!(tcx.sess, *occupied.get(),
"field `{}` previously bound here",
- token::get_ident(field.ident));
+ field.ident);
tcx.types.err
}
Vacant(vacant) => {
.unwrap_or_else(|| {
span_err!(tcx.sess, span, E0026,
"struct `{}` does not have a field named `{}`",
- ty::item_path_str(tcx, struct_id),
- token::get_ident(field.ident));
+ tcx.item_path_str(struct_id),
+ field.ident);
tcx.types.err
})
}
.filter(|field| !used_fields.contains_key(&field.name)) {
span_err!(tcx.sess, span, E0027,
"pattern does not mention field `{}`",
- token::get_name(field.name));
+ field.name);
}
}
}
use middle::infer::InferCtxt;
use middle::traits::{self, FulfillmentContext, Normalized, MiscObligation,
SelectionContext, ObligationCause};
-use middle::ty::{self, HasProjectionTypes};
+use middle::ty::HasTypeFlags;
use middle::ty_fold::TypeFoldable;
use syntax::ast;
use syntax::codemap::Span;
+//FIXME(@jroesch): Ideally we should be able to drop the fulfillment_cx argument.
pub fn normalize_associated_types_in<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- typer: &(ty::ClosureTyper<'tcx>+'a),
fulfillment_cx: &mut FulfillmentContext<'tcx>,
span: Span,
body_id: ast::NodeId,
value: &T)
-> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_associated_types_in(value={:?})", value);
- let mut selcx = SelectionContext::new(infcx, typer);
+ let mut selcx = SelectionContext::new(infcx);
let cause = ObligationCause::new(span, body_id, MiscObligation);
let Normalized { value: result, obligations } = traits::normalize(&mut selcx, cause, value);
debug!("normalize_associated_types_in: result={:?} predicates={:?}",
use CrateCtxt;
use middle::infer;
-use middle::ty::{self, Ty, ClosureTyper};
+use middle::ty::{self, Ty};
use syntax::ast;
use syntax::codemap::Span;
use syntax::parse::token;
return Some(CallStep::Builtin);
}
- ty::TyClosure(def_id, substs) => {
+ ty::TyClosure(def_id, ref substs) => {
assert_eq!(def_id.krate, ast::LOCAL_CRATE);
// Check whether this is a call to a closure where we
// haven't yet decided on whether the closure is fn vs
// fnmut vs fnonce. If so, we have to defer further processing.
- if fcx.closure_kind(def_id).is_none() {
+ if fcx.infcx().closure_kind(def_id).is_none() {
let closure_ty =
- fcx.closure_type(def_id, substs);
+ fcx.infcx().closure_type(def_id, substs);
let fn_sig =
fcx.infcx().replace_late_bound_regions_with_fresh_var(call_expr.span,
infer::FnCall,
call_expr: &ast::Expr,
method_callee: ty::MethodCallee<'tcx>) {
let method_call = ty::MethodCall::expr(call_expr.id);
- fcx.inh.method_map.borrow_mut().insert(method_call, method_callee);
+ fcx.inh.tables.borrow_mut().method_map.insert(method_call, method_callee);
}
#[derive(Debug)]
// we should not be invoked until the closure kind has been
// determined by upvar inference
- assert!(fcx.closure_kind(self.closure_def_id).is_some());
+ assert!(fcx.infcx().closure_kind(self.closure_def_id).is_some());
// We may now know enough to figure out fn vs fnmut etc.
match try_overloaded_call_traits(fcx, self.call_expr, self.callee_expr,
// can't because of the annoying need for a TypeTrace.
// (This always bites me, should find a way to
// refactor it.)
- let method_sig =
- ty::no_late_bound_regions(fcx.tcx(),
- ty::ty_fn_sig(method_callee.ty)).unwrap();
+ let method_sig = fcx.tcx().no_late_bound_regions(method_callee.ty.fn_sig())
+ .unwrap();
debug!("attempt_resolution: method_callee={:?}",
method_callee);
demand::eqtype(fcx, self.call_expr.span, self_arg_ty, method_arg_ty);
}
- let nilty = ty::mk_nil(fcx.tcx());
+ let nilty = fcx.tcx().mk_nil();
demand::eqtype(fcx,
self.call_expr.span,
method_sig.output.unwrap_or(nilty),
use lint;
use middle::cast::{CastKind, CastTy};
-use middle::ty;
-use middle::ty::Ty;
+use middle::ty::{self, Ty, HasTypeFlags};
use syntax::ast;
use syntax::ast::UintTy::{TyU8};
use syntax::codemap::Span;
ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length),
ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal_def_id())),
ty::TyStruct(did, substs) => {
- match ty::struct_fields(fcx.tcx(), did, substs).pop() {
+ match fcx.tcx().struct_fields(did, substs).pop() {
None => None,
Some(f) => unsize_kind(fcx, f.mt.ty)
}
CastError::NeedViaInt |
CastError::NeedViaUsize => {
fcx.type_error_message(self.span, |actual| {
- format!("illegal cast; cast through {} first: `{}` as `{}`",
- match e {
- CastError::NeedViaPtr => "a raw pointer",
- CastError::NeedViaInt => "an integer",
- CastError::NeedViaUsize => "a usize",
- _ => unreachable!()
- },
+ format!("casting `{}` as `{}` is invalid",
actual,
fcx.infcx().ty_to_string(self.cast_ty))
- }, self.expr_ty, None)
+ }, self.expr_ty, None);
+ fcx.ccx.tcx.sess.fileline_help(self.span,
+ &format!("cast through {} first", match e {
+ CastError::NeedViaPtr => "a raw pointer",
+ CastError::NeedViaInt => "an integer",
+ CastError::NeedViaUsize => "a usize",
+ _ => unreachable!()
+ }));
}
CastError::CastToBool => {
- span_err!(fcx.tcx().sess, self.span, E0054,
- "cannot cast as `bool`, compare with zero instead");
+ span_err!(fcx.tcx().sess, self.span, E0054, "cannot cast as `bool`");
+ fcx.ccx.tcx.sess.fileline_help(self.span, "compare with zero instead");
}
CastError::CastToChar => {
fcx.type_error_message(self.span, |actual| {
}
CastError::IllegalCast => {
fcx.type_error_message(self.span, |actual| {
- format!("illegal cast: `{}` as `{}`",
+ format!("casting `{}` as `{}` is invalid",
actual,
fcx.infcx().ty_to_string(self.cast_ty))
}, self.expr_ty, None);
}
CastError::DifferingKinds => {
fcx.type_error_message(self.span, |actual| {
- format!("illegal cast: `{}` as `{}`; vtable kinds may not match",
+ format!("casting `{}` as `{}` is invalid",
actual,
fcx.infcx().ty_to_string(self.cast_ty))
}, self.expr_ty, None);
+ fcx.ccx.tcx.sess.fileline_note(self.span, "vtable kinds may not match");
}
}
}
fn trivial_cast_lint<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) {
let t_cast = self.cast_ty;
let t_expr = self.expr_ty;
- if ty::type_is_numeric(t_cast) && ty::type_is_numeric(t_expr) {
+ if t_cast.is_numeric() && t_expr.is_numeric() {
fcx.tcx().sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS,
self.expr.id,
self.span,
debug!("check_cast({}, {:?} as {:?})", self.expr.id, self.expr_ty,
self.cast_ty);
- if ty::type_is_error(self.expr_ty) || ty::type_is_error(self.cast_ty) {
+ if self.expr_ty.references_error() || self.cast_ty.references_error() {
// No sense in giving duplicate error messages
} else if self.try_coercion_cast(fcx) {
self.trivial_cast_lint(fcx);
fn check_ptr_ptr_cast<'a>(&self,
fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::mt<'tcx>,
- m_cast: &'tcx ty::mt<'tcx>)
+ m_expr: &'tcx ty::TypeAndMut<'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
-> Result<CastKind, CastError>
{
debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}",
return Ok(CastKind::PtrPtrCast);
}
- // sized -> unsized? report illegal cast (don't complain about vtable kinds)
+ // sized -> unsized? report invalid cast (don't complain about vtable kinds)
if fcx.type_is_known_to_be_sized(m_expr.ty, self.span) {
return Err(CastError::IllegalCast);
}
fn check_fptr_ptr_cast<'a>(&self,
fcx: &FnCtxt<'a, 'tcx>,
- m_cast: &'tcx ty::mt<'tcx>)
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
-> Result<CastKind, CastError>
{
// fptr-ptr cast. must be to sized ptr
fn check_ptr_addr_cast<'a>(&self,
fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::mt<'tcx>)
+ m_expr: &'tcx ty::TypeAndMut<'tcx>)
-> Result<CastKind, CastError>
{
// ptr-addr cast. must be from sized ptr
fn check_ref_cast<'a>(&self,
fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::mt<'tcx>,
- m_cast: &'tcx ty::mt<'tcx>)
+ m_expr: &'tcx ty::TypeAndMut<'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
-> Result<CastKind, CastError>
{
// array-ptr-cast.
fn check_addr_ptr_cast<'a>(&self,
fcx: &FnCtxt<'a, 'tcx>,
- m_cast: &'tcx ty::mt<'tcx>)
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
-> Result<CastKind, CastError>
{
// ptr-addr cast. pointer must be thin.
opt_kind,
expected_sig);
- let mut fn_ty = astconv::ty_of_closure(
- fcx,
- ast::Unsafety::Normal,
- decl,
- abi::RustCall,
- expected_sig);
-
- let closure_type = ty::mk_closure(fcx.ccx.tcx,
- expr_def_id,
- fcx.ccx.tcx.mk_substs(
- fcx.inh.param_env.free_substs.clone()));
+ let mut fn_ty = astconv::ty_of_closure(fcx,
+ ast::Unsafety::Normal,
+ decl,
+ abi::RustCall,
+ expected_sig);
+
+ // Create type variables (for now) to represent the transformed
+ // types of upvars. These will be unified during the upvar
+ // inference phase (`upvar.rs`).
+ let num_upvars = fcx.tcx().with_freevars(expr.id, |fv| fv.len());
+ let upvar_tys = fcx.infcx().next_ty_vars(num_upvars);
+
+ debug!("check_closure: expr.id={:?} upvar_tys={:?}",
+ expr.id, upvar_tys);
+
+ let closure_type =
+ fcx.ccx.tcx.mk_closure(
+ expr_def_id,
+ fcx.ccx.tcx.mk_substs(fcx.inh.infcx.parameter_environment.free_substs.clone()),
+ upvar_tys);
fcx.write_ty(expr.id, closure_type);
- let fn_sig =
- ty::liberate_late_bound_regions(fcx.tcx(),
- region::DestructionScopeData::new(body.id),
- &fn_ty.sig);
+ let fn_sig = fcx.tcx().liberate_late_bound_regions(
+ region::DestructionScopeData::new(body.id), &fn_ty.sig);
check_fn(fcx.ccx,
ast::Unsafety::Normal,
// Tuple up the arguments and insert the resulting function type into
// the `closures` table.
- fn_ty.sig.0.inputs = vec![ty::mk_tup(fcx.tcx(), fn_ty.sig.0.inputs)];
+ fn_ty.sig.0.inputs = vec![fcx.tcx().mk_tup(fn_ty.sig.0.inputs)];
debug!("closure for {:?} --> sig={:?} opt_kind={:?}",
expr_def_id,
fn_ty.sig,
opt_kind);
- fcx.inh.closure_tys.borrow_mut().insert(expr_def_id, fn_ty);
+ fcx.inh.tables.borrow_mut().closure_tys.insert(expr_def_id, fn_ty);
match opt_kind {
- Some(kind) => { fcx.inh.closure_kinds.borrow_mut().insert(expr_def_id, kind); }
+ Some(kind) => { fcx.inh.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); }
None => { }
}
}
expected_vid: ty::TyVid)
-> (Option<ty::FnSig<'tcx>>, Option<ty::ClosureKind>)
{
- let fulfillment_cx = fcx.inh.fulfillment_cx.borrow();
+ let fulfillment_cx = fcx.inh.infcx.fulfillment_cx.borrow();
// Here `expected_ty` is known to be a type inference variable.
let expected_sig =
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
-use middle::ty::{self, mt, Ty};
+use middle::ty::{self, TypeAndMut, Ty, TypeError};
use middle::ty_relate::RelateResult;
use util::common::indent;
// &T to autoref to &&T.
return None;
}
- let ty = ty::mk_rptr(self.tcx(), r_borrow,
- mt {ty: inner_ty, mutbl: mutbl_b});
+ let ty = self.tcx().mk_ref(r_borrow,
+ TypeAndMut {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
- return Err(ty::terr_mismatch);
+ return Err(TypeError::Mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
}
_ => (source, None)
};
- let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
+ let source = source.adjust_for_autoref(self.tcx(), reborrow);
- let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
+ let mut selcx = traits::SelectionContext::new(self.fcx.infcx());
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
- return Err(ty::terr_mismatch);
+ return Err(TypeError::Mismatch);
}
// Object safety violations or miscellaneous.
match b.sty {
ty::TyBareFn(None, _) => {
- let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
+ let a_fn_pointer = self.tcx().mk_fn(None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer))
}
};
// Check that the types which they point at are compatible.
- let a_unsafe = ty::mk_ptr(self.tcx(), ty::mt{ mutbl: mutbl_b, ty: mt_a.ty });
+ let a_unsafe = self.tcx().mk_ptr(ty::TypeAndMut{ mutbl: mutbl_b, ty: mt_a.ty });
try!(self.subtype(a_unsafe, b));
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
(ast::MutMutable, ast::MutMutable) |
(ast::MutImmutable, ast::MutImmutable) |
(ast::MutMutable, ast::MutImmutable) => Ok(None),
- (ast::MutImmutable, ast::MutMutable) => Err(ty::terr_mutability)
+ (ast::MutImmutable, ast::MutMutable) => Err(TypeError::Mutability)
}
}
use syntax::ast;
use syntax::codemap::Span;
-use syntax::parse::token;
use super::assoc;
debug!("compare_impl_method: impl_trait_ref (liberated) = {:?}",
impl_trait_ref);
- let infcx = infer::new_infer_ctxt(tcx);
- let mut fulfillment_cx = traits::FulfillmentContext::new(true);
+ let mut infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, true);
+ let mut fulfillment_cx = infcx.fulfillment_cx.borrow_mut();
let trait_to_impl_substs = &impl_trait_ref.substs;
span_err!(tcx.sess, impl_m_span, E0049,
"method `{}` has {} type parameter{} \
but its trait declaration has {} type parameter{}",
- token::get_name(trait_m.name),
+ trait_m.name,
num_impl_m_type_params,
if num_impl_m_type_params == 1 {""} else {"s"},
num_trait_m_type_params,
span_err!(tcx.sess, impl_m_span, E0050,
"method `{}` has {} parameter{} \
but the declaration in trait `{}` has {}",
- token::get_name(trait_m.name),
+ trait_m.name,
impl_m.fty.sig.0.inputs.len(),
if impl_m.fty.sig.0.inputs.len() == 1 {""} else {"s"},
- ty::item_path_str(tcx, trait_m.def_id),
+ tcx.item_path_str(trait_m.def_id),
trait_m.fty.sig.0.inputs.len());
return;
}
let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.into_vec());
let trait_param_env = traits::normalize_param_env_or_error(trait_param_env,
normalize_cause.clone());
+ // FIXME(@jroesch) this seems ugly, but is a temporary change
+ infcx.parameter_environment = trait_param_env;
debug!("compare_impl_method: trait_bounds={:?}",
- trait_param_env.caller_bounds);
+ infcx.parameter_environment.caller_bounds);
- let mut selcx = traits::SelectionContext::new(&infcx, &trait_param_env);
+ let mut selcx = traits::SelectionContext::new(&infcx);
for predicate in impl_pred.fns {
let traits::Normalized { value: predicate, .. } =
// type.
// Compute skolemized form of impl and trait method tys.
- let impl_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(impl_m.fty.clone()));
+ let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(impl_m.fty.clone()));
let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
- let trait_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(trait_m.fty.clone()));
+ let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(trait_m.fty.clone()));
let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
let err = infcx.commit_if_ok(|snapshot| {
impl_sig.subst(tcx, impl_to_skol_substs);
let impl_sig =
assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
&mut fulfillment_cx,
impl_m_span,
impl_m_body_id,
&impl_sig);
- let impl_fty =
- ty::mk_bare_fn(tcx,
- None,
- tcx.mk_bare_fn(ty::BareFnTy { unsafety: impl_m.fty.unsafety,
- abi: impl_m.fty.abi,
- sig: ty::Binder(impl_sig) }));
+ let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: impl_m.fty.unsafety,
+ abi: impl_m.fty.abi,
+ sig: ty::Binder(impl_sig)
+ }));
debug!("compare_impl_method: impl_fty={:?}",
impl_fty);
trait_sig.subst(tcx, &trait_to_skol_substs);
let trait_sig =
assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
&mut fulfillment_cx,
impl_m_span,
impl_m_body_id,
&trait_sig);
- let trait_fty =
- ty::mk_bare_fn(tcx,
- None,
- tcx.mk_bare_fn(ty::BareFnTy { unsafety: trait_m.fty.unsafety,
- abi: trait_m.fty.abi,
- sig: ty::Binder(trait_sig) }));
+ let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: trait_m.fty.unsafety,
+ abi: trait_m.fty.abi,
+ sig: ty::Binder(trait_sig)
+ }));
debug!("compare_impl_method: trait_fty={:?}",
trait_fty);
trait_fty);
span_err!(tcx.sess, impl_m_span, E0053,
"method `{}` has an incompatible type for trait: {}",
- token::get_name(trait_m.name),
+ trait_m.name,
terr);
return;
}
// Check that all obligations are satisfied by the implementation's
// version.
- match fulfillment_cx.select_all_or_error(&infcx, &trait_param_env) {
+ match fulfillment_cx.select_all_or_error(&infcx) {
Err(ref errors) => { traits::report_fulfillment_errors(&infcx, errors) }
Ok(_) => {}
}
// anyway, so it shouldn't be needed there either. Anyway, we can
// always add more relations later (it's backwards compat).
let mut free_regions = FreeRegionMap::new();
- free_regions.relate_free_regions_from_predicates(tcx, &trait_param_env.caller_bounds);
+ free_regions.relate_free_regions_from_predicates(tcx,
+ &infcx.parameter_environment.caller_bounds);
infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id);
span_err!(tcx.sess, span, E0195,
"lifetime parameters or bounds on method `{}` do \
not match the trait declaration",
- token::get_name(impl_m.name));
+ impl_m.name);
return false;
}
debug!("compare_const_impl(impl_trait_ref={:?})",
impl_trait_ref);
- let infcx = infer::new_infer_ctxt(tcx);
- let mut fulfillment_cx = traits::FulfillmentContext::new(true);
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, true);
+ let mut fulfillment_cx = infcx.fulfillment_cx.borrow_mut();
// The below is for the most part highly similar to the procedure
// for methods above. It is simpler in many respects, especially
// There is no "body" here, so just pass dummy id.
let impl_ty =
assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
&mut fulfillment_cx,
impl_c_span,
0,
&impl_ty);
+
debug!("compare_const_impl: impl_ty={:?}",
impl_ty);
let trait_ty =
assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
&mut fulfillment_cx,
impl_c_span,
0,
&trait_ty);
+
debug!("compare_const_impl: trait_ty={:?}",
trait_ty);
span_err!(tcx.sess, impl_c_span, E0326,
"implemented const `{}` has an incompatible type for \
trait: {}",
- token::get_name(trait_c.name),
+ trait_c.name,
terr);
return;
}
ty_a: Ty<'tcx>,
ty_b: Ty<'tcx>,
handle_err: F) where
- F: FnOnce(Span, Ty<'tcx>, Ty<'tcx>, &ty::type_err<'tcx>),
+ F: FnOnce(Span, Ty<'tcx>, Ty<'tcx>, &ty::TypeError<'tcx>),
{
// n.b.: order of actual, expected is reversed
match infer::mk_subty(fcx.infcx(), b_is_expected, infer::Misc(sp),
use middle::region;
use middle::subst::{self, Subst};
use middle::ty::{self, Ty};
+use util::nodemap::FnvHashSet;
use syntax::ast;
use syntax::codemap::{self, Span};
///
pub fn check_drop_impl(tcx: &ty::ctxt, drop_impl_did: ast::DefId) -> Result<(), ()> {
let ty::TypeScheme { generics: ref dtor_generics,
- ty: dtor_self_type } = ty::lookup_item_type(tcx, drop_impl_did);
- let dtor_predicates = ty::lookup_predicates(tcx, drop_impl_did);
+ ty: dtor_self_type } = tcx.lookup_item_type(drop_impl_did);
+ let dtor_predicates = tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyEnum(self_type_did, self_to_impl_substs) |
- ty::TyStruct(self_type_did, self_to_impl_substs) |
- ty::TyClosure(self_type_did, self_to_impl_substs) => {
+ ty::TyStruct(self_type_did, self_to_impl_substs) => {
try!(ensure_drop_params_and_item_params_correspond(tcx,
drop_impl_did,
dtor_generics,
let ty::TypeScheme { generics: ref named_type_generics,
ty: named_type } =
- ty::lookup_item_type(tcx, self_type_did);
+ tcx.lookup_item_type(self_type_did);
+
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false);
- let infcx = infer::new_infer_ctxt(tcx);
infcx.commit_if_ok(|snapshot| {
let (named_type_to_skolem, skol_map) =
infcx.construct_skolemized_subst(named_type_generics, snapshot);
// We can assume the predicates attached to struct/enum definition
// hold.
- let generic_assumptions = ty::lookup_predicates(tcx, self_type_did);
+ let generic_assumptions = tcx.lookup_predicates(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
assert!(assumptions_in_impl_context.predicates.is_empty_in(subst::SelfSpace));
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
- // types that have been traversed so far by `traverse_type_if_unseen`
- let mut breadcrumbs: Vec<Ty<'tcx>> = Vec::new();
+ let parent_scope = rcx.tcx().region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
+ rcx.tcx().sess.span_bug(
+ span, &format!("no enclosing scope found for scope: {:?}", scope))
+ });
let result = iterate_over_potentially_unsafe_regions_in_type(
- rcx,
- &mut breadcrumbs,
+ &mut DropckContext {
+ rcx: rcx,
+ span: span,
+ parent_scope: parent_scope,
+ breadcrumbs: FnvHashSet()
+ },
TypeContext::Root,
typ,
- span,
- scope,
- 0,
0);
match result {
Ok(()) => {}
rcx.tcx().sess,
span,
"overflowed on enum {} variant {} argument {} type: {}",
- ty::item_path_str(tcx, def_id),
+ tcx.item_path_str(def_id),
variant,
arg_index,
detected_on_typ);
rcx.tcx().sess,
span,
"overflowed on struct {} field {} type: {}",
- ty::item_path_str(tcx, def_id),
+ tcx.item_path_str(def_id),
field,
detected_on_typ);
}
Overflow(TypeContext, ty::Ty<'tcx>),
}
+#[derive(Copy, Clone)]
enum TypeContext {
Root,
EnumVariant {
}
}
-// The `depth` counts the number of calls to this function;
-// the `xref_depth` counts the subset of such calls that go
-// across a `Box<T>` or `PhantomData<T>`.
-fn iterate_over_potentially_unsafe_regions_in_type<'a, 'tcx>(
- rcx: &mut Rcx<'a, 'tcx>,
- breadcrumbs: &mut Vec<Ty<'tcx>>,
- context: TypeContext,
- ty_root: ty::Ty<'tcx>,
+struct DropckContext<'a, 'b: 'a, 'tcx: 'b> {
+ rcx: &'a mut Rcx<'b, 'tcx>,
+ /// types that have already been traversed
+ breadcrumbs: FnvHashSet<Ty<'tcx>>,
+ /// span for error reporting
span: Span,
- scope: region::CodeExtent,
- depth: usize,
- xref_depth: usize) -> Result<(), Error<'tcx>>
+ /// the scope reachable dtorck types must outlive
+ parent_scope: region::CodeExtent
+}
+
+// `context` is used for reporting overflow errors
+fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
+ cx: &mut DropckContext<'a, 'b, 'tcx>,
+ context: TypeContext,
+ ty: Ty<'tcx>,
+ depth: usize) -> Result<(), Error<'tcx>>
{
+ let tcx = cx.rcx.tcx();
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
- let recursion_limit = rcx.tcx().sess.recursion_limit.get();
- if xref_depth >= recursion_limit {
- return Err(Error::Overflow(context, ty_root))
+ let recursion_limit = tcx.sess.recursion_limit.get();
+ if depth / 4 >= recursion_limit {
+ // This can get into rather deep recursion, especially in the
+ // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
+ // use a higher recursion limit to avoid errors.
+ return Err(Error::Overflow(context, ty))
}
- let origin = || infer::SubregionOrigin::SafeDestructor(span);
- let mut walker = ty_root.walk();
- let opt_phantom_data_def_id = rcx.tcx().lang_items.phantom_data();
+ let opt_phantom_data_def_id = tcx.lang_items.phantom_data();
- let destructor_for_type = rcx.tcx().destructor_for_type.borrow();
+ if !cx.breadcrumbs.insert(ty) {
+ debug!("iterate_over_potentially_unsafe_regions_in_type \
+ {}ty: {} scope: {:?} - cached",
+ (0..depth).map(|_| ' ').collect::<String>(),
+ ty, cx.parent_scope);
+ return Ok(()); // we already visited this type
+ }
+ debug!("iterate_over_potentially_unsafe_regions_in_type \
+ {}ty: {} scope: {:?}",
+ (0..depth).map(|_| ' ').collect::<String>(),
+ ty, cx.parent_scope);
+
+ // If `typ` has a destructor, then we must ensure that all
+ // borrowed data reachable via `typ` must outlive the parent
+ // of `scope`. This is handled below.
+ //
+ // However, there is an important special case: by
+ // parametricity, any generic type parameters have *no* trait
+ // bounds in the Drop impl can not be used in any way (apart
+ // from being dropped), and thus we can treat data borrowed
+ // via such type parameters remains unreachable.
+ //
+ // For example, consider `impl<T> Drop for Vec<T> { ... }`,
+ // which does have to be able to drop instances of `T`, but
+ // otherwise cannot read data from `T`.
+ //
+ // Of course, for the type expression passed in for any such
+ // unbounded type parameter `T`, we must resume the recursive
+ // analysis on `T` (since it would be ignored by
+ // type_must_outlive).
+ //
+ // FIXME (pnkfelix): Long term, we could be smart and actually
+ // feed which generic parameters can be ignored *into* `fn
+ // type_must_outlive` (or some generalization thereof). But
+ // for the short term, it probably covers most cases of
+ // interest to just special case Drop impls where: (1.) there
+ // are no generic lifetime parameters and (2.) *all* generic
+ // type parameters are unbounded. If both conditions hold, we
+ // simply skip the `type_must_outlive` call entirely (but
+ // resume the recursive checking of the type-substructure).
+ if has_dtor_of_interest(tcx, ty, cx.span) {
+ debug!("iterate_over_potentially_unsafe_regions_in_type \
+ {}ty: {} - is a dtorck type!",
+ (0..depth).map(|_| ' ').collect::<String>(),
+ ty);
- let xref_depth_orig = xref_depth;
+ regionck::type_must_outlive(cx.rcx,
+ infer::SubregionOrigin::SafeDestructor(cx.span),
+ ty,
+ ty::ReScope(cx.parent_scope));
- while let Some(typ) = walker.next() {
- // Avoid recursing forever.
- if breadcrumbs.contains(&typ) {
- continue;
- }
- breadcrumbs.push(typ);
-
- // If we encounter `PhantomData<T>`, then we should replace it
- // with `T`, the type it represents as owned by the
- // surrounding context, before doing further analysis.
- let (typ, xref_depth) = match typ.sty {
- ty::TyStruct(struct_did, substs) => {
- if opt_phantom_data_def_id == Some(struct_did) {
- let item_type = ty::lookup_item_type(rcx.tcx(), struct_did);
- let tp_def = item_type.generics.types
- .opt_get(subst::TypeSpace, 0).unwrap();
- let new_typ = substs.type_for_def(tp_def);
- debug!("replacing phantom {:?} with {:?}",
- typ, new_typ);
- (new_typ, xref_depth_orig + 1)
- } else {
- (typ, xref_depth_orig)
- }
- }
-
- // Note: When TyBox is removed from compiler, the
- // definition of `Box<T>` must carry a PhantomData that
- // puts us into the previous case.
- ty::TyBox(new_typ) => {
- debug!("replacing TyBox {:?} with {:?}",
- typ, new_typ);
- (new_typ, xref_depth_orig + 1)
- }
+ return Ok(());
+ }
- _ => {
- (typ, xref_depth_orig)
- }
- };
-
- let dtor_kind = match typ.sty {
- ty::TyEnum(def_id, _) |
- ty::TyStruct(def_id, _) => {
- match destructor_for_type.get(&def_id) {
- Some(def_id) => DtorKind::KnownDropMethod(*def_id),
- None => DtorKind::PureRecur,
- }
- }
- ty::TyTrait(ref ty_trait) => {
- DtorKind::Unknown(ty_trait.bounds.clone())
- }
- _ => DtorKind::PureRecur,
- };
+ debug!("iterate_over_potentially_unsafe_regions_in_type \
+ {}ty: {} scope: {:?} - checking interior",
+ (0..depth).map(|_| ' ').collect::<String>(),
+ ty, cx.parent_scope);
+
+ // We still need to ensure all referenced data is safe.
+ match ty.sty {
+ ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
+ ty::TyFloat(_) | ty::TyStr => {
+ // primitive - definitely safe
+ Ok(())
+ }
- debug!("iterate_over_potentially_unsafe_regions_in_type \
- {}typ: {} scope: {:?} xref: {}",
- (0..depth).map(|_| ' ').collect::<String>(),
- typ, scope, xref_depth);
+ ty::TyBox(ity) | ty::TyArray(ity, _) | ty::TySlice(ity) => {
+ // single-element containers, behave like their element
+ iterate_over_potentially_unsafe_regions_in_type(
+ cx, context, ity, depth+1)
+ }
- // If `typ` has a destructor, then we must ensure that all
- // borrowed data reachable via `typ` must outlive the parent
- // of `scope`. This is handled below.
- //
- // However, there is an important special case: by
- // parametricity, any generic type parameters have *no* trait
- // bounds in the Drop impl can not be used in any way (apart
- // from being dropped), and thus we can treat data borrowed
- // via such type parameters remains unreachable.
- //
- // For example, consider `impl<T> Drop for Vec<T> { ... }`,
- // which does have to be able to drop instances of `T`, but
- // otherwise cannot read data from `T`.
- //
- // Of course, for the type expression passed in for any such
- // unbounded type parameter `T`, we must resume the recursive
- // analysis on `T` (since it would be ignored by
- // type_must_outlive).
- //
- // FIXME (pnkfelix): Long term, we could be smart and actually
- // feed which generic parameters can be ignored *into* `fn
- // type_must_outlive` (or some generalization thereof). But
- // for the short term, it probably covers most cases of
- // interest to just special case Drop impls where: (1.) there
- // are no generic lifetime parameters and (2.) *all* generic
- // type parameters are unbounded. If both conditions hold, we
- // simply skip the `type_must_outlive` call entirely (but
- // resume the recursive checking of the type-substructure).
-
- if has_dtor_of_interest(rcx.tcx(), dtor_kind, typ, span) {
- // If `typ` has a destructor, then we must ensure that all
- // borrowed data reachable via `typ` must outlive the
- // parent of `scope`. (It does not suffice for it to
- // outlive `scope` because that could imply that the
- // borrowed data is torn down in between the end of
- // `scope` and when the destructor itself actually runs.)
-
- let parent_region =
- match rcx.tcx().region_maps.opt_encl_scope(scope) {
- Some(parent_scope) => ty::ReScope(parent_scope),
- None => rcx.tcx().sess.span_bug(
- span, &format!("no enclosing scope found for scope: {:?}",
- scope)),
- };
-
- regionck::type_must_outlive(rcx, origin(), typ, parent_region);
-
- } else {
- // Okay, `typ` itself is itself not reachable by a
- // destructor; but it may contain substructure that has a
- // destructor.
-
- match typ.sty {
- ty::TyStruct(struct_did, substs) => {
- debug!("typ: {:?} is struct; traverse structure and not type-expression",
- typ);
- // Don't recurse; we extract type's substructure,
- // so do not process subparts of type expression.
- walker.skip_current_subtree();
-
- let fields =
- ty::lookup_struct_fields(rcx.tcx(), struct_did);
- for field in &fields {
- let field_type =
- ty::lookup_field_type(rcx.tcx(),
- struct_did,
- field.id,
- substs);
- try!(iterate_over_potentially_unsafe_regions_in_type(
- rcx,
- breadcrumbs,
- TypeContext::Struct {
- def_id: struct_did,
- field: field.name,
- },
- field_type,
- span,
- scope,
- depth+1,
- xref_depth))
- }
- }
+ ty::TyStruct(did, substs) if Some(did) == opt_phantom_data_def_id => {
+ // PhantomData<T> - behaves identically to T
+ let ity = *substs.types.get(subst::TypeSpace, 0);
+ iterate_over_potentially_unsafe_regions_in_type(
+ cx, context, ity, depth+1)
+ }
- ty::TyEnum(enum_did, substs) => {
- debug!("typ: {:?} is enum; traverse structure and not type-expression",
- typ);
- // Don't recurse; we extract type's substructure,
- // so do not process subparts of type expression.
- walker.skip_current_subtree();
-
- let all_variant_info =
- ty::substd_enum_variants(rcx.tcx(),
- enum_did,
- substs);
- for variant_info in &all_variant_info {
- for (i, arg_type) in variant_info.args.iter().enumerate() {
- try!(iterate_over_potentially_unsafe_regions_in_type(
- rcx,
- breadcrumbs,
- TypeContext::EnumVariant {
- def_id: enum_did,
- variant: variant_info.name,
- arg_index: i,
- },
- *arg_type,
- span,
- scope,
- depth+1,
- xref_depth));
- }
- }
- }
+ ty::TyStruct(did, substs) => {
+ let fields = tcx.lookup_struct_fields(did);
+ for field in &fields {
+ let fty = tcx.lookup_field_type(did, field.id, substs);
+ let fty = cx.rcx.fcx.resolve_type_vars_if_possible(
+ cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
+ try!(iterate_over_potentially_unsafe_regions_in_type(
+ cx,
+ TypeContext::Struct {
+ def_id: did,
+ field: field.name,
+ },
+ fty,
+ depth+1))
+ }
+ Ok(())
+ }
- ty::TyRef(..) | ty::TyRawPtr(_) | ty::TyBareFn(..) => {
- // Don't recurse, since references, pointers,
- // and bare functions don't own instances
- // of the types appearing within them.
- walker.skip_current_subtree();
+ ty::TyEnum(did, substs) => {
+ let all_variant_info = tcx.substd_enum_variants(did, substs);
+ for variant_info in &all_variant_info {
+ for (i, fty) in variant_info.args.iter().enumerate() {
+ let fty = cx.rcx.fcx.resolve_type_vars_if_possible(
+ cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
+ try!(iterate_over_potentially_unsafe_regions_in_type(
+ cx,
+ TypeContext::EnumVariant {
+ def_id: did,
+ variant: variant_info.name,
+ arg_index: i,
+ },
+ fty,
+ depth+1));
}
- _ => {}
- };
+ }
+ Ok(())
+ }
- // You might be tempted to pop breadcrumbs here after
- // processing type's internals above, but then you hit
- // exponential time blowup e.g. on
- // compile-fail/huge-struct.rs. Instead, we do not remove
- // anything from the breadcrumbs vector during any particular
- // traversal, and instead clear it after the whole traversal
- // is done.
+ ty::TyTuple(ref tys) |
+ ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) => {
+ for ty in tys {
+ try!(iterate_over_potentially_unsafe_regions_in_type(
+ cx, context, ty, depth+1))
+ }
+ Ok(())
}
- }
- return Ok(());
-}
+ ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
+ // these always come with a witness of liveness (references
+ // explicitly, pointers implicitly, parameters by the
+ // caller).
+ Ok(())
+ }
-enum DtorKind<'tcx> {
- // Type has an associated drop method with this def id
- KnownDropMethod(ast::DefId),
+ ty::TyBareFn(..) => {
+ // FIXME(#26656): this type is always destruction-safe, but
+ // it implicitly witnesses Self: Fn, which can be false.
+ Ok(())
+ }
- // Type has no destructor (or its dtor is known to be pure
- // with respect to lifetimes), though its *substructure*
- // may carry a destructor.
- PureRecur,
+ ty::TyInfer(..) | ty::TyError => {
+ tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck");
+ Ok(())
+ }
- // Type may have impure destructor that is unknown;
- // e.g. `Box<Trait+'a>`
- Unknown(ty::ExistentialBounds<'tcx>),
+ // these are always dtorck
+ ty::TyTrait(..) | ty::TyProjection(_) => unreachable!(),
+ }
}
fn has_dtor_of_interest<'tcx>(tcx: &ty::ctxt<'tcx>,
- dtor_kind: DtorKind,
- typ: ty::Ty<'tcx>,
+ ty: ty::Ty<'tcx>,
span: Span) -> bool {
- let has_dtor_of_interest: bool;
-
- match dtor_kind {
- DtorKind::PureRecur => {
- has_dtor_of_interest = false;
- debug!("typ: {:?} has no dtor, and thus is uninteresting",
- typ);
- }
- DtorKind::Unknown(bounds) => {
- match bounds.region_bound {
- ty::ReStatic => {
- debug!("trait: {:?} has 'static bound, and thus is uninteresting",
- typ);
- has_dtor_of_interest = false;
- }
- ty::ReEmpty => {
- debug!("trait: {:?} has empty region bound, and thus is uninteresting",
- typ);
- has_dtor_of_interest = false;
+ match ty.sty {
+ ty::TyEnum(def_id, _) | ty::TyStruct(def_id, _) => {
+ let dtor_method_did = match tcx.destructor_for_type.borrow().get(&def_id) {
+ Some(def_id) => *def_id,
+ None => {
+ debug!("ty: {:?} has no dtor, and thus isn't a dropck type", ty);
+ return false;
}
- r => {
- debug!("trait: {:?} has non-static bound: {:?}; assumed interesting",
- typ, r);
- has_dtor_of_interest = true;
- }
- }
- }
- DtorKind::KnownDropMethod(dtor_method_did) => {
- let impl_did = ty::impl_of_method(tcx, dtor_method_did)
+ };
+ let impl_did = tcx.impl_of_method(dtor_method_did)
.unwrap_or_else(|| {
tcx.sess.span_bug(
span, "no Drop impl found for drop method")
});
- let dtor_typescheme = ty::lookup_item_type(tcx, impl_did);
+ let dtor_typescheme = tcx.lookup_item_type(impl_did);
let dtor_generics = dtor_typescheme.generics;
let mut has_pred_of_interest = false;
continue;
}
- for pred in ty::lookup_predicates(tcx, item_def_id).predicates {
+ for pred in tcx.lookup_predicates(item_def_id).predicates {
let result = match pred {
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Trait(ty::Binder(ref t_pred)) => {
let def_id = t_pred.trait_ref.def_id;
- if ty::trait_items(tcx, def_id).len() != 0 {
+ if tcx.trait_items(def_id).len() != 0 {
// If trait has items, assume it adds
// capability to access borrowed data.
true
if result {
has_pred_of_interest = true;
- debug!("typ: {:?} has interesting dtor due to generic preds, e.g. {:?}",
- typ, pred);
+ debug!("ty: {:?} has interesting dtor due to generic preds, e.g. {:?}",
+ ty, pred);
break 'items;
}
}
let has_region_param_of_interest =
dtor_generics.has_region_params(subst::TypeSpace);
- has_dtor_of_interest =
+ let has_dtor_of_interest =
has_region_param_of_interest ||
has_pred_of_interest;
if has_dtor_of_interest {
- debug!("typ: {:?} has interesting dtor, due to \
+ debug!("ty: {:?} has interesting dtor, due to \
region params: {} or pred: {}",
- typ,
+ ty,
has_region_param_of_interest,
has_pred_of_interest);
} else {
- debug!("typ: {:?} has dtor, but it is uninteresting",
- typ);
+ debug!("ty: {:?} has dtor, but it is uninteresting", ty);
}
+ has_dtor_of_interest
}
+ ty::TyTrait(..) | ty::TyProjection(..) => {
+ debug!("ty: {:?} isn't known, and therefore is a dropck type", ty);
+ true
+ },
+ _ => false
}
-
- return has_dtor_of_interest;
}
use check::{self, FnCtxt, NoPreference, PreferMutLvalue, callee, demand};
use check::UnresolvedTypeAction;
-use middle::mem_categorization::Typer;
use middle::subst::{self};
use middle::traits;
use middle::ty::{self, Ty};
-use middle::ty::{MethodCall, MethodCallee, MethodObject, MethodOrigin,
- MethodParam, MethodStatic, MethodTraitObject, MethodTypeParam};
use middle::ty_fold::TypeFoldable;
use middle::infer;
use middle::infer::InferCtxt;
use syntax::ast;
use syntax::codemap::Span;
-use std::iter::repeat;
struct ConfirmContext<'a, 'tcx:'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
unadjusted_self_ty: Ty<'tcx>,
pick: probe::Pick<'tcx>,
supplied_method_types: Vec<Ty<'tcx>>)
- -> MethodCallee<'tcx>
+ -> ty::MethodCallee<'tcx>
{
debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})",
unadjusted_self_ty,
unadjusted_self_ty: Ty<'tcx>,
pick: probe::Pick<'tcx>,
supplied_method_types: Vec<Ty<'tcx>>)
- -> MethodCallee<'tcx>
+ -> ty::MethodCallee<'tcx>
{
// Adjust the self expression the user provided and obtain the adjusted type.
let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick);
self.enforce_illegal_method_limitations(&pick);
// Create substitutions for the method's type parameters.
- let (rcvr_substs, method_origin) =
- self.fresh_receiver_substs(self_ty, &pick);
- let (method_types, method_regions) =
- self.instantiate_method_substs(&pick, supplied_method_types);
- let all_substs = rcvr_substs.with_method(method_types, method_regions);
+ let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick);
+ let all_substs =
+ self.instantiate_method_substs(
+ &pick,
+ supplied_method_types,
+ rcvr_substs);
+
debug!("all_substs={:?}", all_substs);
// Create the final signature for the method, replacing late-bound regions.
// Create the final `MethodCallee`.
let method_ty = pick.item.as_opt_method().unwrap();
- let fty = ty::mk_bare_fn(self.tcx(), None, self.tcx().mk_bare_fn(ty::BareFnTy {
+ let fty = self.tcx().mk_fn(None, self.tcx().mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(method_sig),
unsafety: method_ty.fty.unsafety,
abi: method_ty.fty.abi.clone(),
}));
- let callee = MethodCallee {
- origin: method_origin,
+ let callee = ty::MethodCallee {
+ def_id: pick.item.def_id(),
ty: fty,
- substs: all_substs
+ substs: self.tcx().mk_substs(all_substs)
};
// If this is an `&mut self` method, bias the receiver
let region = self.infcx().next_region_var(infer::Autoref(self.span));
let autoref = ty::AutoPtr(self.tcx().mk_region(region), mutbl);
(Some(autoref), pick.unsize.map(|target| {
- ty::adjust_ty_for_autoref(self.tcx(), target, Some(autoref))
+ target.adjust_for_autoref(self.tcx(), Some(autoref))
}))
} else {
// No unsizing should be performed without autoref (at
if let Some(target) = unsize {
target
} else {
- ty::adjust_ty_for_autoref(self.tcx(), autoderefd_ty, autoref)
+ autoderefd_ty.adjust_for_autoref(self.tcx(), autoref)
}
}
fn fresh_receiver_substs(&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>)
- -> (subst::Substs<'tcx>, MethodOrigin<'tcx>)
+ -> subst::Substs<'tcx>
{
match pick.kind {
- probe::InherentImplPick(impl_def_id) => {
- assert!(ty::impl_trait_ref(self.tcx(), impl_def_id).is_none(),
+ probe::InherentImplPick => {
+ let impl_def_id = pick.item.container().id();
+ assert!(self.tcx().impl_trait_ref(impl_def_id).is_none(),
"impl {:?} is not an inherent impl", impl_def_id);
- let impl_polytype = check::impl_self_ty(self.fcx, self.span, impl_def_id);
-
- (impl_polytype.substs, MethodStatic(pick.item.def_id()))
+ check::impl_self_ty(self.fcx, self.span, impl_def_id).substs
}
- probe::ObjectPick(trait_def_id, method_num, vtable_index) => {
+ probe::ObjectPick => {
+ let trait_def_id = pick.item.container().id();
self.extract_trait_ref(self_ty, |this, object_ty, data| {
// The object data has no entry for the Self
// Type. For the purposes of this method call, we
original_poly_trait_ref,
upcast_trait_ref,
trait_def_id);
- let substs = upcast_trait_ref.substs.clone();
- let origin = MethodTraitObject(MethodObject {
- trait_ref: upcast_trait_ref,
- object_trait_id: trait_def_id,
- method_num: method_num,
- vtable_index: vtable_index,
- });
- (substs, origin)
+ upcast_trait_ref.substs.clone()
})
}
- probe::ExtensionImplPick(impl_def_id, method_num) => {
+ probe::ExtensionImplPick(impl_def_id) => {
// The method being invoked is the method as defined on the trait,
// so return the substitutions from the trait. Consider:
//
self.fcx.instantiate_type_scheme(
self.span,
&impl_polytype.substs,
- &ty::impl_trait_ref(self.tcx(), impl_def_id).unwrap());
- let origin = MethodTypeParam(MethodParam { trait_ref: impl_trait_ref.clone(),
- method_num: method_num,
- impl_def_id: Some(impl_def_id) });
- (impl_trait_ref.substs.clone(), origin)
+ &self.tcx().impl_trait_ref(impl_def_id).unwrap());
+ impl_trait_ref.substs.clone()
}
- probe::TraitPick(trait_def_id, method_num) => {
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_def_id);
+ probe::TraitPick => {
+ let trait_def_id = pick.item.container().id();
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
// Make a trait reference `$0 : Trait<$1...$n>`
// consisting entirely of type variables. Later on in
// the process we will unify the transformed-self-type
// of the method with the actual type in order to
// unify some of these variables.
- let substs = self.infcx().fresh_substs_for_trait(self.span,
- &trait_def.generics,
- self.infcx().next_ty_var());
-
- let trait_ref =
- ty::TraitRef::new(trait_def_id, self.tcx().mk_substs(substs.clone()));
- let origin = MethodTypeParam(MethodParam { trait_ref: trait_ref,
- method_num: method_num,
- impl_def_id: None });
- (substs, origin)
+ self.infcx().fresh_substs_for_trait(self.span,
+ &trait_def.generics,
+ self.infcx().next_ty_var())
}
- probe::WhereClausePick(ref poly_trait_ref, method_num) => {
+ probe::WhereClausePick(ref poly_trait_ref) => {
// Where clauses can have bound regions in them. We need to instantiate
// those to convert from a poly-trait-ref to a trait-ref.
- let trait_ref = self.replace_late_bound_regions_with_fresh_var(&*poly_trait_ref);
- let substs = trait_ref.substs.clone();
- let origin = MethodTypeParam(MethodParam { trait_ref: trait_ref,
- method_num: method_num,
- impl_def_id: None });
- (substs, origin)
+ self.replace_late_bound_regions_with_fresh_var(&*poly_trait_ref).substs.clone()
}
}
}
fn instantiate_method_substs(&mut self,
pick: &probe::Pick<'tcx>,
- supplied_method_types: Vec<Ty<'tcx>>)
- -> (Vec<Ty<'tcx>>, Vec<ty::Region>)
+ supplied_method_types: Vec<Ty<'tcx>>,
+ substs: subst::Substs<'tcx>)
+ -> subst::Substs<'tcx>
{
// Determine the values for the generic parameters of the method.
// If they were not explicitly supplied, just construct fresh
// variables.
let num_supplied_types = supplied_method_types.len();
- let num_method_types = pick.item.as_opt_method().unwrap()
- .generics.types.len(subst::FnSpace);
- let method_types = {
- if num_supplied_types == 0 {
- self.fcx.infcx().next_ty_vars(num_method_types)
- } else if num_method_types == 0 {
- span_err!(self.tcx().sess, self.span, E0035,
- "does not take type parameters");
- self.fcx.infcx().next_ty_vars(num_method_types)
- } else if num_supplied_types != num_method_types {
- span_err!(self.tcx().sess, self.span, E0036,
- "incorrect number of type parameters given for this method");
- repeat(self.tcx().types.err).take(num_method_types).collect()
- } else {
- supplied_method_types
- }
- };
+ let method = pick.item.as_opt_method().unwrap();
+ let method_types = method.generics.types.get_slice(subst::FnSpace);
+ let num_method_types = method_types.len();
+
// Create subst for early-bound lifetime parameters, combining
// parameters from the type and those from the method.
pick.item.as_opt_method().unwrap()
.generics.regions.get_slice(subst::FnSpace));
- (method_types, method_regions)
+ let subst::Substs { types, regions } = substs;
+ let regions = regions.map(|r| r.with_vec(subst::FnSpace, method_regions));
+ let mut final_substs = subst::Substs { types: types, regions: regions };
+
+ if num_supplied_types == 0 {
+ self.fcx.infcx().type_vars_for_defs(
+ self.span,
+ subst::FnSpace,
+ &mut final_substs,
+ method_types);
+ } else if num_method_types == 0 {
+ span_err!(self.tcx().sess, self.span, E0035,
+ "does not take type parameters");
+ self.fcx.infcx().type_vars_for_defs(
+ self.span,
+ subst::FnSpace,
+ &mut final_substs,
+ method_types);
+ } else if num_supplied_types != num_method_types {
+ span_err!(self.tcx().sess, self.span, E0036,
+ "incorrect number of type parameters given for this method");
+ final_substs.types.replace(
+ subst::FnSpace,
+ vec![self.tcx().types.err; num_method_types]);
+ } else {
+ final_substs.types.replace(subst::FnSpace, supplied_method_types);
+ }
+
+ return final_substs;
}
fn unify_receivers(&mut self,
/// auto-derefs, indices, etc from `Deref` and `Index` into `DerefMut` and `IndexMut`
/// respectively.
fn fixup_derefs_on_method_receiver_if_necessary(&self,
- method_callee: &MethodCallee) {
+ method_callee: &ty::MethodCallee) {
let sig = match method_callee.ty.sty {
ty::TyBareFn(_, ref f) => f.sig.clone(),
_ => return,
};
match sig.0.inputs[0].sty {
- ty::TyRef(_, ty::mt {
+ ty::TyRef(_, ty::TypeAndMut {
ty: _,
mutbl: ast::MutMutable,
}) => {}
// Count autoderefs.
let autoderef_count = match self.fcx
.inh
- .adjustments
+ .tables
.borrow()
+ .adjustments
.get(&expr.id) {
Some(&ty::AdjustDerefRef(ref adj)) => adj.autoderefs,
Some(_) | None => 0,
// expects. This is annoying and horrible. We
// ought to recode this routine so it doesn't
// (ab)use the normal type checking paths.
- let adj = self.fcx.inh.adjustments.borrow().get(&base_expr.id).cloned();
+ let adj = self.fcx.inh.tables.borrow().adjustments.get(&base_expr.id)
+ .cloned();
let (autoderefs, unsize) = match adj {
Some(ty::AdjustDerefRef(adr)) => match adr.autoref {
None => {
}
Some(ty::AutoPtr(_, _)) => {
(adr.autoderefs, adr.unsize.map(|target| {
- ty::deref(target, false)
- .expect("fixup: AutoPtr is not &T").ty
+ target.builtin_deref(false)
+ .expect("fixup: AutoPtr is not &T").ty
}))
}
Some(_) => {
let result = check::try_index_step(
self.fcx,
- MethodCall::expr(expr.id),
+ ty::MethodCall::expr(expr.id),
expr,
&**base_expr,
adjusted_base_ty,
ast::ExprUnary(ast::UnDeref, ref base_expr) => {
// if this is an overloaded deref, then re-evaluate with
// a preference for mut
- let method_call = MethodCall::expr(expr.id);
- if self.fcx.inh.method_map.borrow().contains_key(&method_call) {
+ let method_call = ty::MethodCall::expr(expr.id);
+ if self.fcx.inh.tables.borrow().method_map.contains_key(&method_call) {
check::try_overloaded_deref(
self.fcx,
expr.span,
use middle::privacy::{AllPublic, DependsOn, LastPrivate, LastMod};
use middle::subst;
use middle::traits;
-use middle::ty::{self, AsPredicate, ToPolyTraitRef, TraitRef};
+use middle::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef};
use middle::infer;
use syntax::ast::DefId;
TraitSource(/* trait id */ ast::DefId),
}
-type ItemIndex = usize; // just for doc purposes
-
/// Determines whether the type `self_ty` supports a method name `method_name` or not.
pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
m_name,
trait_def_id);
- let trait_def = ty::lookup_trait_def(fcx.tcx(), trait_def_id);
+ let trait_def = fcx.tcx().lookup_trait_def(trait_def_id);
+
+ let type_parameter_defs = trait_def.generics.types.get_slice(subst::TypeSpace);
+ let expected_number_of_input_types = type_parameter_defs.len();
+
+ assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0);
+ assert!(trait_def.generics.regions.is_empty());
- let expected_number_of_input_types = trait_def.generics.types.len(subst::TypeSpace);
- let input_types = match opt_input_types {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let mut substs = subst::Substs::new_trait(Vec::new(), Vec::new(), self_ty);
+
+ match opt_input_types {
Some(input_types) => {
assert_eq!(expected_number_of_input_types, input_types.len());
- input_types
+ substs.types.replace(subst::ParamSpace::TypeSpace, input_types);
}
None => {
- fcx.inh.infcx.next_ty_vars(expected_number_of_input_types)
+ fcx.inh.infcx.type_vars_for_defs(
+ span,
+ subst::ParamSpace::TypeSpace,
+ &mut substs,
+ type_parameter_defs);
}
- };
-
- assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0);
- assert!(trait_def.generics.regions.is_empty());
+ }
- // Construct a trait-reference `self_ty : Trait<input_tys>`
- let substs = subst::Substs::new_trait(input_types, Vec::new(), self_ty);
let trait_ref = ty::TraitRef::new(trait_def_id, fcx.tcx().mk_substs(substs));
// Construct an obligation
let poly_trait_ref = trait_ref.to_poly_trait_ref();
let obligation = traits::Obligation::misc(span,
fcx.body_id,
- poly_trait_ref.as_predicate());
+ poly_trait_ref.to_predicate());
// Now we want to know if this can be matched
- let mut selcx = traits::SelectionContext::new(fcx.infcx(), fcx);
+ let mut selcx = traits::SelectionContext::new(fcx.infcx());
if !selcx.evaluate_obligation(&obligation) {
debug!("--> Cannot match obligation");
return None; // Cannot be matched, no such method resolution is possible.
// Trait must have a method named `m_name` and it should not have
// type parameters or early-bound regions.
let tcx = fcx.tcx();
- let (method_num, method_ty) = trait_item(tcx, trait_def_id, m_name)
- .and_then(|(idx, item)| item.as_opt_method().map(|m| (idx, m)))
- .unwrap();
+ let method_item = trait_item(tcx, trait_def_id, m_name).unwrap();
+ let method_ty = method_item.as_opt_method().unwrap();
assert_eq!(method_ty.generics.types.len(subst::FnSpace), 0);
assert_eq!(method_ty.generics.regions.len(subst::FnSpace), 0);
- debug!("lookup_in_trait_adjusted: method_num={} method_ty={:?}",
- method_num, method_ty);
+ debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}",
+ method_item, method_ty);
// Instantiate late-bound regions and substitute the trait
// parameters into the method type to get the actual method type.
&method_ty.fty.sig).0;
let fn_sig = fcx.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
let transformed_self_ty = fn_sig.inputs[0];
- let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
+ let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(fn_sig),
unsafety: method_ty.fty.unsafety,
abi: method_ty.fty.abi.clone(),
// Trait method is fn(&self) or fn(&mut self), need an
// autoref. Pull the region etc out of the type of first argument.
match transformed_self_ty.sty {
- ty::TyRef(region, ty::mt { mutbl, ty: _ }) => {
+ ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ }) => {
fcx.write_adjustment(self_expr.id,
ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: autoderefs,
}
let callee = ty::MethodCallee {
- origin: ty::MethodTypeParam(ty::MethodParam{trait_ref: trait_ref.clone(),
- method_num: method_num,
- impl_def_id: None}),
+ def_id: method_item.def_id(),
ty: fty,
- substs: trait_ref.substs.clone()
+ substs: trait_ref.substs
};
debug!("callee = {:?}", callee);
let pick = try!(probe::probe(fcx, span, mode, method_name, self_ty, expr_id));
let def_id = pick.item.def_id();
let mut lp = LastMod(AllPublic);
- let provenance = match pick.kind {
- probe::InherentImplPick(impl_def_id) => {
- if pick.item.vis() != ast::Public {
- lp = LastMod(DependsOn(def_id));
- }
- def::FromImpl(impl_def_id)
+ if let probe::InherentImplPick = pick.kind {
+ if pick.item.vis() != ast::Public {
+ lp = LastMod(DependsOn(def_id));
}
- _ => def::FromTrait(pick.item.container().id())
- };
+ }
let def_result = match pick.item {
- ty::ImplOrTraitItem::MethodTraitItem(..) => def::DefMethod(def_id, provenance),
- ty::ImplOrTraitItem::ConstTraitItem(..) => def::DefAssociatedConst(def_id, provenance),
+ ty::ImplOrTraitItem::MethodTraitItem(..) => def::DefMethod(def_id),
+ ty::ImplOrTraitItem::ConstTraitItem(..) => def::DefAssociatedConst(def_id),
ty::ImplOrTraitItem::TypeTraitItem(..) => {
fcx.tcx().sess.span_bug(span, "resolve_ufcs: probe picked associated type");
}
}
-/// Find item with name `item_name` defined in `trait_def_id` and return it, along with its
-/// index (or `None`, if no such item).
+/// Find item with name `item_name` defined in `trait_def_id`
+/// and return it, or `None`, if no such item.
fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
item_name: ast::Name)
- -> Option<(usize, ty::ImplOrTraitItem<'tcx>)>
+ -> Option<ty::ImplOrTraitItem<'tcx>>
{
- let trait_items = ty::trait_items(tcx, trait_def_id);
- trait_items
- .iter()
- .enumerate()
- .find(|&(_, ref item)| item.name() == item_name)
- .map(|(num, item)| (num, (*item).clone()))
+ let trait_items = tcx.trait_items(trait_def_id);
+ trait_items.iter()
+ .find(|item| item.name() == item_name)
+ .cloned()
}
fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>,
let impl_items = impl_items.get(&impl_def_id).unwrap();
impl_items
.iter()
- .map(|&did| ty::impl_or_trait_item(tcx, did.def_id()))
+ .map(|&did| tcx.impl_or_trait_item(did.def_id()))
.find(|m| m.name() == item_name)
}
use super::MethodError;
use super::NoMatchData;
-use super::ItemIndex;
use super::{CandidateSource, ImplSource, TraitSource};
use super::suggest;
use middle::subst::Subst;
use middle::traits;
use middle::ty::{self, RegionEscape, Ty, ToPolyTraitRef, TraitRef};
+use middle::ty::HasTypeFlags;
use middle::ty_fold::TypeFoldable;
use middle::infer;
use middle::infer::InferCtxt;
#[derive(Debug)]
enum CandidateKind<'tcx> {
- InherentImplCandidate(/* Impl */ ast::DefId, subst::Substs<'tcx>,
+ InherentImplCandidate(subst::Substs<'tcx>,
/* Normalize obligations */ Vec<traits::PredicateObligation<'tcx>>),
- ObjectCandidate(/* Trait */ ast::DefId, /* method_num */ usize, /* vtable index */ usize),
- ExtensionImplCandidate(/* Impl */ ast::DefId, ty::TraitRef<'tcx>,
- subst::Substs<'tcx>, ItemIndex,
+ ExtensionImplCandidate(/* Impl */ ast::DefId, subst::Substs<'tcx>,
/* Normalize obligations */ Vec<traits::PredicateObligation<'tcx>>),
- ClosureCandidate(/* Trait */ ast::DefId, ItemIndex),
- WhereClauseCandidate(ty::PolyTraitRef<'tcx>, ItemIndex),
- ProjectionCandidate(ast::DefId, ItemIndex),
+ ObjectCandidate,
+ TraitCandidate,
+ WhereClauseCandidate(/* Trait */ ty::PolyTraitRef<'tcx>),
}
#[derive(Debug)]
#[derive(Clone,Debug)]
pub enum PickKind<'tcx> {
- InherentImplPick(/* Impl */ ast::DefId),
- ObjectPick(/* Trait */ ast::DefId, /* method_num */ usize, /* real_index */ usize),
- ExtensionImplPick(/* Impl */ ast::DefId, ItemIndex),
- TraitPick(/* Trait */ ast::DefId, ItemIndex),
- WhereClausePick(/* Trait */ ty::PolyTraitRef<'tcx>, ItemIndex),
+ InherentImplPick,
+ ExtensionImplPick(/* Impl */ ast::DefId),
+ ObjectPick,
+ TraitPick,
+ WhereClausePick(/* Trait */ ty::PolyTraitRef<'tcx>),
}
pub type PickResult<'tcx> = Result<Pick<'tcx>, MethodError<'tcx>>;
match final_ty.sty {
ty::TyArray(elem_ty, _) => {
- let slice_ty = ty::mk_vec(fcx.tcx(), elem_ty, None);
steps.push(CandidateStep {
- self_ty: slice_ty,
+ self_ty: fcx.tcx().mk_slice(elem_ty),
autoderefs: dereferences,
unsize: true
});
let lang_def_id = self.tcx().lang_items.slice_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::mt { ty: _, mutbl: ast::MutImmutable }) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: ast::MutImmutable }) => {
let lang_def_id = self.tcx().lang_items.const_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::mt { ty: _, mutbl: ast::MutMutable }) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: ast::MutMutable }) => {
let lang_def_id = self.tcx().lang_items.mut_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
fn assemble_inherent_impl_for_primitive(&mut self, lang_def_id: Option<ast::DefId>) {
if let Some(impl_def_id) = lang_def_id {
- ty::populate_implementations_for_primitive_if_necessary(self.tcx(), impl_def_id);
+ self.tcx().populate_implementations_for_primitive_if_necessary(impl_def_id);
self.assemble_inherent_impl_probe(impl_def_id);
}
fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: ast::DefId) {
// Read the inherent implementation candidates for this type from the
// metadata if necessary.
- ty::populate_inherent_implementations_for_type_if_necessary(self.tcx(), def_id);
+ self.tcx().populate_inherent_implementations_for_type_if_necessary(def_id);
if let Some(impl_infos) = self.tcx().inherent_impls.borrow().get(&def_id) {
for &impl_def_id in impl_infos.iter() {
// We can't use normalize_associated_types_in as it will pollute the
// fcx's fulfillment context after this probe is over.
let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
- let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
+ let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx());
let traits::Normalized { value: xform_self_ty, obligations } =
traits::normalize(selcx, cause, &xform_self_ty);
debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}",
self.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item,
- kind: InherentImplCandidate(impl_def_id, impl_substs, obligations)
+ kind: InherentImplCandidate(impl_substs, obligations)
});
}
debug!("assemble_inherent_candidates_from_object(self_ty={:?})",
self_ty);
- let tcx = self.tcx();
-
// It is illegal to invoke a method on a trait instance that
// refers to the `Self` type. An error will be reported by
// `enforce_object_limitations()` if the method refers to the
// itself. Hence, a `&self` method will wind up with an
// argument type like `&Trait`.
let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx(), self_ty);
- self.elaborate_bounds(&[trait_ref.clone()], |this, new_trait_ref, item, item_num| {
+ self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| {
let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref);
- let vtable_index =
- traits::get_vtable_index_of_object_method(tcx,
- trait_ref.clone(),
- new_trait_ref.def_id,
- item_num);
-
let xform_self_ty = this.xform_self_ty(&item,
new_trait_ref.self_ty(),
new_trait_ref.substs);
this.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item,
- kind: ObjectCandidate(new_trait_ref.def_id, item_num, vtable_index)
+ kind: ObjectCandidate
});
});
}
// FIXME -- Do we want to commit to this behavior for param bounds?
let bounds: Vec<_> =
- self.fcx.inh.param_env.caller_bounds
+ self.fcx.inh.infcx.parameter_environment.caller_bounds
.iter()
.filter_map(|predicate| {
match *predicate {
})
.collect();
- self.elaborate_bounds(&bounds, |this, poly_trait_ref, item, item_num| {
+ self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| {
let trait_ref =
this.erase_late_bound_regions(&poly_trait_ref);
// artifacts. This means it is safe to put into the
// `WhereClauseCandidate` and (eventually) into the
// `WhereClausePick`.
- assert!(trait_ref.substs.types.iter().all(|&t| !ty::type_needs_infer(t)));
+ assert!(!trait_ref.substs.types.needs_infer());
this.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item,
- kind: WhereClauseCandidate(poly_trait_ref, item_num)
+ kind: WhereClauseCandidate(poly_trait_ref)
});
});
}
&mut ProbeContext<'b, 'tcx>,
ty::PolyTraitRef<'tcx>,
ty::ImplOrTraitItem<'tcx>,
- usize,
),
{
debug!("elaborate_bounds(bounds={:?})", bounds);
let tcx = self.tcx();
for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
- let (pos, item) = match trait_item(tcx,
- bound_trait_ref.def_id(),
- self.item_name) {
+ let item = match trait_item(tcx,
+ bound_trait_ref.def_id(),
+ self.item_name) {
Some(v) => v,
None => { continue; }
};
if !self.has_applicable_self(&item) {
self.record_static_candidate(TraitSource(bound_trait_ref.def_id()));
} else {
- mk_cand(self, bound_trait_ref, item, pos);
+ mk_cand(self, bound_trait_ref, item);
}
}
}
// Check whether `trait_def_id` defines a method with suitable name:
let trait_items =
- ty::trait_items(self.tcx(), trait_def_id);
- let matching_index =
+ self.tcx().trait_items(trait_def_id);
+ let maybe_item =
trait_items.iter()
- .position(|item| item.name() == self.item_name);
- let matching_index = match matching_index {
+ .find(|item| item.name() == self.item_name);
+ let item = match maybe_item {
Some(i) => i,
None => { return Ok(()); }
};
- let ref item = (&*trait_items)[matching_index];
// Check whether `trait_def_id` defines a method with suitable name:
if !self.has_applicable_self(item) {
return Ok(());
}
- self.assemble_extension_candidates_for_trait_impls(trait_def_id,
- item.clone(),
- matching_index);
+ self.assemble_extension_candidates_for_trait_impls(trait_def_id, item.clone());
- try!(self.assemble_closure_candidates(trait_def_id,
- item.clone(),
- matching_index));
+ try!(self.assemble_closure_candidates(trait_def_id, item.clone()));
- self.assemble_projection_candidates(trait_def_id,
- item.clone(),
- matching_index);
+ self.assemble_projection_candidates(trait_def_id, item.clone());
- self.assemble_where_clause_candidates(trait_def_id,
- item.clone(),
- matching_index);
+ self.assemble_where_clause_candidates(trait_def_id, item.clone());
Ok(())
}
fn assemble_extension_candidates_for_trait_impls(&mut self,
trait_def_id: ast::DefId,
- item: ty::ImplOrTraitItem<'tcx>,
- item_index: usize)
+ item: ty::ImplOrTraitItem<'tcx>)
{
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
// FIXME(arielb1): can we use for_each_relevant_impl here?
trait_def.for_each_impl(self.tcx(), |impl_def_id| {
debug!("impl_substs={:?}", impl_substs);
let impl_trait_ref =
- ty::impl_trait_ref(self.tcx(), impl_def_id)
+ self.tcx().impl_trait_ref(impl_def_id)
.unwrap() // we know this is a trait impl
.subst(self.tcx(), &impl_substs);
// as it will pollute the fcx's fulfillment context after this probe
// is over.
let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
- let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
+ let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx());
let traits::Normalized { value: xform_self_ty, obligations } =
traits::normalize(selcx, cause, &xform_self_ty);
self.extension_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item.clone(),
- kind: ExtensionImplCandidate(impl_def_id,
- impl_trait_ref,
- impl_substs,
- item_index,
- obligations)
+ kind: ExtensionImplCandidate(impl_def_id, impl_substs, obligations)
});
});
}
None => { return true; }
};
- let impl_type = ty::lookup_item_type(self.tcx(), impl_def_id);
+ let impl_type = self.tcx().lookup_item_type(impl_def_id);
let impl_simplified_type =
match fast_reject::simplify_type(self.tcx(), impl_type.ty, false) {
Some(simplified_type) => simplified_type,
fn assemble_closure_candidates(&mut self,
trait_def_id: ast::DefId,
- item: ty::ImplOrTraitItem<'tcx>,
- item_index: usize)
+ item: ty::ImplOrTraitItem<'tcx>)
-> Result<(), MethodError<'tcx>>
{
// Check if this is one of the Fn,FnMut,FnOnce traits.
_ => continue,
};
- let closure_kinds = self.fcx.inh.closure_kinds.borrow();
+ let closure_kinds = &self.fcx.inh.tables.borrow().closure_kinds;
let closure_kind = match closure_kinds.get(&closure_def_id) {
Some(&k) => k,
None => {
// for the purposes of our method lookup, we only take
// receiver type into account, so we can just substitute
// fresh types here to use during substitution and subtyping.
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
let substs = self.infcx().fresh_substs_for_trait(self.span,
&trait_def.generics,
step.self_ty);
self.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item.clone(),
- kind: ClosureCandidate(trait_def_id, item_index)
+ kind: TraitCandidate
});
}
fn assemble_projection_candidates(&mut self,
trait_def_id: ast::DefId,
- item: ty::ImplOrTraitItem<'tcx>,
- item_index: usize)
+ item: ty::ImplOrTraitItem<'tcx>)
{
debug!("assemble_projection_candidates(\
trait_def_id={:?}, \
- item={:?}, \
- item_index={})",
+ item={:?})",
trait_def_id,
- item,
- item_index);
+ item);
for step in self.steps.iter() {
debug!("assemble_projection_candidates: step={:?}",
debug!("assemble_projection_candidates: projection_trait_ref={:?}",
projection_trait_ref);
- let trait_predicates = ty::lookup_predicates(self.tcx(),
- projection_trait_ref.def_id);
+ let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id);
let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs);
let predicates = bounds.predicates.into_vec();
debug!("assemble_projection_candidates: predicates={:?}",
self.extension_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item.clone(),
- kind: ProjectionCandidate(trait_def_id, item_index)
+ kind: TraitCandidate
});
}
}
fn assemble_where_clause_candidates(&mut self,
trait_def_id: ast::DefId,
- item: ty::ImplOrTraitItem<'tcx>,
- item_index: usize)
+ item: ty::ImplOrTraitItem<'tcx>)
{
debug!("assemble_where_clause_candidates(trait_def_id={:?})",
trait_def_id);
- let caller_predicates = self.fcx.inh.param_env.caller_bounds.clone();
+ let caller_predicates = self.fcx.inh.infcx.parameter_environment.caller_bounds.clone();
for poly_bound in traits::elaborate_predicates(self.tcx(), caller_predicates)
.filter_map(|p| p.to_opt_poly_trait_ref())
.filter(|b| b.def_id() == trait_def_id)
self.extension_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item.clone(),
- kind: WhereClauseCandidate(poly_bound, item_index)
+ kind: WhereClauseCandidate(poly_bound)
});
}
}
match source {
TraitSource(id) => id,
ImplSource(impl_id) => {
- match ty::trait_id_of_impl(tcx, impl_id) {
+ match tcx.trait_id_of_impl(impl_id) {
Some(id) => id,
None =>
tcx.sess.span_bug(span,
fn pick_step(&mut self, step: &CandidateStep<'tcx>) -> Option<PickResult<'tcx>> {
debug!("pick_step: step={:?}", step);
- if ty::type_is_error(step.self_ty) {
+ if step.self_ty.references_error() {
return None;
}
// Search through mutabilities in order to find one where pick works:
[ast::MutImmutable, ast::MutMutable].iter().filter_map(|&m| {
- let autoref_ty = ty::mk_rptr(tcx, region, ty::mt {
+ let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut {
ty: step.self_ty,
mutbl: m
});
}
applicable_candidates.pop().map(|probe| {
- let pick = probe.to_unadjusted_pick();
- Ok(pick)
+ Ok(probe.to_unadjusted_pick())
})
}
// clauses) that must be considered. Make sure that those
// match as well (or at least may match, sometimes we
// don't have enough information to fully evaluate).
- match probe.kind {
- InherentImplCandidate(impl_def_id, ref substs, ref ref_obligations) |
- ExtensionImplCandidate(impl_def_id, _, ref substs, _, ref ref_obligations) => {
- let selcx = &mut traits::SelectionContext::new(self.infcx(), self.fcx);
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
-
- // Check whether the impl imposes obligations we have to worry about.
- let impl_bounds = ty::lookup_predicates(self.tcx(), impl_def_id);
- let impl_bounds = impl_bounds.instantiate(self.tcx(), substs);
- let traits::Normalized { value: impl_bounds,
- obligations: norm_obligations } =
- traits::normalize(selcx, cause.clone(), &impl_bounds);
-
- // Convert the bounds into obligations.
- let obligations =
- traits::predicates_for_generics(cause.clone(),
- &impl_bounds);
- debug!("impl_obligations={:?}", obligations);
-
- // Evaluate those obligations to see if they might possibly hold.
- let mut all_true = true;
- for o in obligations.iter()
- .chain(norm_obligations.iter())
- .chain(ref_obligations.iter()) {
- if !selcx.evaluate_obligation(o) {
- all_true = false;
- if let &ty::Predicate::Trait(ref pred) = &o.predicate {
- possibly_unsatisfied_predicates.push(pred.0.trait_ref);
- }
- }
- }
- all_true
+ let (impl_def_id, substs, ref_obligations) = match probe.kind {
+ InherentImplCandidate(ref substs, ref ref_obligations) => {
+ (probe.item.container().id(), substs, ref_obligations)
+ }
+
+ ExtensionImplCandidate(impl_def_id, ref substs, ref ref_obligations) => {
+ (impl_def_id, substs, ref_obligations)
}
- ProjectionCandidate(..) |
ObjectCandidate(..) |
- ClosureCandidate(..) |
+ TraitCandidate |
WhereClauseCandidate(..) => {
// These have no additional conditions to check.
- true
+ return true;
+ }
+ };
+
+ let selcx = &mut traits::SelectionContext::new(self.infcx());
+ let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
+
+ // Check whether the impl imposes obligations we have to worry about.
+ let impl_bounds = self.tcx().lookup_predicates(impl_def_id);
+ let impl_bounds = impl_bounds.instantiate(self.tcx(), substs);
+ let traits::Normalized { value: impl_bounds,
+ obligations: norm_obligations } =
+ traits::normalize(selcx, cause.clone(), &impl_bounds);
+
+ // Convert the bounds into obligations.
+ let obligations =
+ traits::predicates_for_generics(cause.clone(),
+ &impl_bounds);
+ debug!("impl_obligations={:?}", obligations);
+
+ // Evaluate those obligations to see if they might possibly hold.
+ let mut all_true = true;
+ for o in obligations.iter()
+ .chain(norm_obligations.iter())
+ .chain(ref_obligations.iter()) {
+ if !selcx.evaluate_obligation(o) {
+ all_true = false;
+ if let &ty::Predicate::Trait(ref pred) = &o.predicate {
+ possibly_unsatisfied_predicates.push(pred.0.trait_ref);
+ }
}
}
+ all_true
})
}
probes: &[&Candidate<'tcx>])
-> Option<Pick<'tcx>> {
// Do all probes correspond to the same trait?
- let trait_data = match probes[0].to_trait_data() {
- Some(data) => data,
- None => return None,
- };
- if probes[1..].iter().any(|p| p.to_trait_data() != Some(trait_data)) {
+ let container = probes[0].item.container();
+ match container {
+ ty::TraitContainer(_) => {}
+ ty::ImplContainer(_) => return None
+ }
+ if probes[1..].iter().any(|p| p.item.container() != container) {
return None;
}
// If so, just use this trait and call it a day.
- let (trait_def_id, item_num) = trait_data;
- let item = probes[0].item.clone();
Some(Pick {
- item: item,
- kind: TraitPick(trait_def_id, item_num),
+ item: probes[0].item.clone(),
+ kind: TraitPick,
autoderefs: 0,
autoref: None,
unsize: None
return impl_ty;
}
- let placeholder;
+ let mut placeholder;
let mut substs = substs;
if
!method.generics.types.is_empty_in(subst::FnSpace) ||
!method.generics.regions.is_empty_in(subst::FnSpace)
{
- let method_types =
- self.infcx().next_ty_vars(
- method.generics.types.len(subst::FnSpace));
-
// In general, during probe we erase regions. See
// `impl_self_ty()` for an explanation.
let method_regions =
.map(|_| ty::ReStatic)
.collect();
- placeholder = (*substs).clone().with_method(method_types, method_regions);
+ placeholder = (*substs).clone().with_method(Vec::new(), method_regions);
+
+ self.infcx().type_vars_for_defs(
+ self.span,
+ subst::FnSpace,
+ &mut placeholder,
+ method.generics.types.get_slice(subst::FnSpace));
+
substs = &placeholder;
}
impl_def_id: ast::DefId)
-> (Ty<'tcx>, subst::Substs<'tcx>)
{
- let impl_pty = ty::lookup_item_type(self.tcx(), impl_def_id);
+ let impl_pty = self.tcx().lookup_item_type(impl_def_id);
let type_vars =
impl_pty.generics.types.map(
fn erase_late_bound_regions<T>(&self, value: &ty::Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- ty::erase_late_bound_regions(self.tcx(), value)
+ self.tcx().erase_late_bound_regions(value)
}
}
let impl_items = impl_items.get(&impl_def_id).unwrap();
impl_items
.iter()
- .map(|&did| ty::impl_or_trait_item(tcx, did.def_id()))
+ .map(|&did| tcx.impl_or_trait_item(did.def_id()))
.find(|item| item.name() == item_name)
}
-/// Find item with name `item_name` defined in `trait_def_id` and return it,
-/// along with its index (or `None`, if no such item).
+/// Find item with name `item_name` defined in `trait_def_id`
+/// and return it, or `None`, if no such item.
fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
item_name: ast::Name)
- -> Option<(usize, ty::ImplOrTraitItem<'tcx>)>
+ -> Option<ty::ImplOrTraitItem<'tcx>>
{
- let trait_items = ty::trait_items(tcx, trait_def_id);
+ let trait_items = tcx.trait_items(trait_def_id);
debug!("trait_method; items: {:?}", trait_items);
- trait_items
- .iter()
- .enumerate()
- .find(|&(_, ref item)| item.name() == item_name)
- .map(|(num, ref item)| (num, (*item).clone()))
+ trait_items.iter()
+ .find(|item| item.name() == item_name)
+ .cloned()
}
impl<'tcx> Candidate<'tcx> {
Pick {
item: self.item.clone(),
kind: match self.kind {
- InherentImplCandidate(def_id, _, _) => {
- InherentImplPick(def_id)
- }
- ObjectCandidate(def_id, item_num, real_index) => {
- ObjectPick(def_id, item_num, real_index)
+ InherentImplCandidate(_, _) => InherentImplPick,
+ ExtensionImplCandidate(def_id, _, _) => {
+ ExtensionImplPick(def_id)
}
- ExtensionImplCandidate(def_id, _, _, index, _) => {
- ExtensionImplPick(def_id, index)
- }
- ClosureCandidate(trait_def_id, index) => {
- TraitPick(trait_def_id, index)
- }
- WhereClauseCandidate(ref trait_ref, index) => {
+ ObjectCandidate => ObjectPick,
+ TraitCandidate => TraitPick,
+ WhereClauseCandidate(ref trait_ref) => {
// Only trait derived from where-clauses should
// appear here, so they should not contain any
// inference variables or other artifacts. This
// means they are safe to put into the
// `WhereClausePick`.
- assert!(trait_ref.substs().types.iter().all(|&t| !ty::type_needs_infer(t)));
+ assert!(!trait_ref.substs().types.needs_infer());
- WhereClausePick((*trait_ref).clone(), index)
- }
- ProjectionCandidate(def_id, index) => {
- TraitPick(def_id, index)
+ WhereClausePick(trait_ref.clone())
}
},
autoderefs: 0,
fn to_source(&self) -> CandidateSource {
match self.kind {
- InherentImplCandidate(def_id, _, _) => ImplSource(def_id),
- ObjectCandidate(def_id, _, _) => TraitSource(def_id),
- ExtensionImplCandidate(def_id, _, _, _, _) => ImplSource(def_id),
- ClosureCandidate(trait_def_id, _) => TraitSource(trait_def_id),
- WhereClauseCandidate(ref trait_ref, _) => TraitSource(trait_ref.def_id()),
- ProjectionCandidate(trait_def_id, _) => TraitSource(trait_def_id),
- }
- }
-
- fn to_trait_data(&self) -> Option<(ast::DefId, ItemIndex)> {
- match self.kind {
- InherentImplCandidate(..) => {
- None
- }
- ObjectCandidate(trait_def_id, item_num, _) => {
- Some((trait_def_id, item_num))
- }
- ClosureCandidate(trait_def_id, item_num) => {
- Some((trait_def_id, item_num))
- }
- ExtensionImplCandidate(_, ref trait_ref, _, item_num, _) => {
- Some((trait_ref.def_id, item_num))
- }
- WhereClauseCandidate(ref trait_ref, item_num) => {
- Some((trait_ref.def_id(), item_num))
- }
- ProjectionCandidate(trait_def_id, item_num) => {
- Some((trait_def_id, item_num))
+ InherentImplCandidate(_, _) => {
+ ImplSource(self.item.container().id())
}
+ ExtensionImplCandidate(def_id, _, _) => ImplSource(def_id),
+ ObjectCandidate |
+ TraitCandidate |
+ WhereClauseCandidate(_) => TraitSource(self.item.container().id()),
}
}
}
use astconv::AstConv;
use check::{self, FnCtxt};
-use middle::ty::{self, Ty, ToPolyTraitRef, AsPredicate};
+use middle::ty::{self, Ty, ToPolyTraitRef, ToPredicate, HasTypeFlags};
use middle::def;
use middle::lang_items::FnOnceTraitLangItem;
use middle::subst::Substs;
error: MethodError<'tcx>)
{
// avoid suggestions when we don't know what's going on.
- if ty::type_is_error(rcvr_ty) {
+ if rcvr_ty.references_error() {
return
}
// If the item has the name of a field, give a help note
if let (&ty::TyStruct(did, substs), Some(expr)) = (&rcvr_ty.sty, rcvr_expr) {
- let fields = ty::lookup_struct_fields(cx, did);
+ let fields = cx.lookup_struct_fields(did);
if let Some(field) = fields.iter().find(|f| f.name == item_name) {
let expr_string = match cx.sess.codemap().span_to_snippet(expr.span) {
};
// Determine if the field can be used as a function in some way
- let field_ty = ty::lookup_field_type(cx, did, field.id, substs);
+ let field_ty = cx.lookup_field_type(did, field.id, substs);
if let Ok(fn_once_trait_did) = cx.lang_items.require(FnOnceTraitLangItem) {
let infcx = fcx.infcx();
infcx.probe(|_| {
let poly_trait_ref = trait_ref.to_poly_trait_ref();
let obligation = Obligation::misc(span,
fcx.body_id,
- poly_trait_ref.as_predicate());
- let mut selcx = SelectionContext::new(infcx, fcx);
+ poly_trait_ref.to_predicate());
+ let mut selcx = SelectionContext::new(infcx);
if selcx.evaluate_obligation(&obligation) {
span_stored_function();
p.self_ty(),
p))
.collect::<Vec<_>>()
- .connect(", ");
+ .join(", ");
cx.sess.fileline_note(
span,
&format!("the method `{}` exists but the \
invoked on this closure as we have not yet inferred what \
kind of closure it is",
item_name,
- ty::item_path_str(fcx.tcx(), trait_def_id));
+ fcx.tcx().item_path_str(trait_def_id));
let msg = if let Some(callee) = rcvr_expr {
format!("{}; use overloaded call notation instead (e.g., `{}()`)",
msg, pprust::expr_to_string(callee))
let impl_ty = check::impl_self_ty(fcx, span, impl_did).ty;
- let insertion = match ty::impl_trait_ref(fcx.tcx(), impl_did) {
+ let insertion = match fcx.tcx().impl_trait_ref(impl_did) {
None => format!(""),
- Some(trait_ref) => format!(" of the trait `{}`",
- ty::item_path_str(fcx.tcx(),
- trait_ref.def_id)),
+ Some(trait_ref) => {
+ format!(" of the trait `{}`",
+ fcx.tcx().item_path_str(trait_ref.def_id))
+ }
};
span_note!(fcx.sess(), item_span,
impl_ty);
}
CandidateSource::TraitSource(trait_did) => {
- let (_, item) = trait_item(fcx.tcx(), trait_did, item_name).unwrap();
+ let item = trait_item(fcx.tcx(), trait_did, item_name).unwrap();
let item_span = fcx.tcx().map.def_id_span(item.def_id(), span);
span_note!(fcx.sess(), item_span,
"candidate #{} is defined in the trait `{}`",
idx + 1,
- ty::item_path_str(fcx.tcx(), trait_did));
+ fcx.tcx().item_path_str(trait_did));
}
}
}
fcx.sess().fileline_help(span,
&*format!("candidate #{}: use `{}`",
i + 1,
- ty::item_path_str(fcx.tcx(), *trait_did)))
+ fcx.tcx().item_path_str(*trait_did)))
}
return
fcx.sess().fileline_help(span,
&*format!("candidate #{}: `{}`",
i + 1,
- ty::item_path_str(fcx.tcx(), trait_info.def_id)))
+ fcx.tcx().item_path_str(trait_info.def_id)))
}
}
}
use middle::astconv_util::{check_path_args, NO_TPS, NO_REGIONS};
use middle::def;
use middle::infer;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::McResult;
+use middle::infer::type_variable;
use middle::pat_util::{self, pat_id_map};
use middle::privacy::{AllPublic, LastMod};
use middle::region::{self, CodeExtent};
use middle::traits::{self, report_fulfillment_errors};
use middle::ty::{FnSig, GenericPredicates, TypeScheme};
use middle::ty::{Disr, ParamTy, ParameterEnvironment};
-use middle::ty::{self, HasProjectionTypes, RegionEscape, ToPolyTraitRef, Ty};
-use middle::ty::liberate_late_bound_regions;
-use middle::ty::{MethodCall, MethodCallee, MethodMap};
+use middle::ty::{self, HasTypeFlags, RegionEscape, ToPolyTraitRef, Ty};
+use middle::ty::{MethodCall, MethodCallee};
use middle::ty_fold::{TypeFolder, TypeFoldable};
-use rscope::RegionScope;
+use require_c_abi_if_variadic;
+use rscope::{ElisionFailureInfo, RegionScope};
use session::Session;
use {CrateCtxt, lookup_full_def, require_same_types};
use TypeAndSubsts;
use util::lev_distance::lev_distance;
use std::cell::{Cell, Ref, RefCell};
+use std::collections::HashSet;
use std::mem::replace;
-use std::iter::repeat;
use std::slice;
use syntax::{self, abi, attr};
use syntax::attr::AttrMetaMethods;
use syntax::ast::{self, DefId, Visibility};
use syntax::ast_util::{self, local_def};
use syntax::codemap::{self, Span};
-use syntax::feature_gate;
+use syntax::feature_gate::emit_feature_err;
use syntax::owned_slice::OwnedSlice;
-use syntax::parse::token;
+use syntax::parse::token::{self, InternedString};
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::visit::{self, Visitor};
pub struct Inherited<'a, 'tcx: 'a> {
infcx: infer::InferCtxt<'a, 'tcx>,
locals: RefCell<NodeMap<Ty<'tcx>>>,
- param_env: ty::ParameterEnvironment<'a, 'tcx>,
- // Temporary tables:
- node_types: RefCell<NodeMap<Ty<'tcx>>>,
- item_substs: RefCell<NodeMap<ty::ItemSubsts<'tcx>>>,
- adjustments: RefCell<NodeMap<ty::AutoAdjustment<'tcx>>>,
- method_map: MethodMap<'tcx>,
- upvar_capture_map: RefCell<ty::UpvarCaptureMap>,
- closure_tys: RefCell<DefIdMap<ty::ClosureTy<'tcx>>>,
- closure_kinds: RefCell<DefIdMap<ty::ClosureKind>>,
+ tables: &'a RefCell<ty::Tables<'tcx>>,
// A mapping from each fn's id to its signature, with all bound
// regions replaced with free ones. Unlike the other tables, this
// one is never copied into the tcx: it is only used by regionck.
fn_sig_map: RefCell<NodeMap<Vec<Ty<'tcx>>>>,
- // Tracks trait obligations incurred during this function body.
- fulfillment_cx: RefCell<traits::FulfillmentContext<'tcx>>,
-
// When we process a call like `c()` where `c` is a closure type,
// we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
// `FnOnce` closure. In that case, we defer full resolution of the
match *self {
ExpectHasType(ety) => {
let ety = fcx.infcx().shallow_resolve(ety);
- if !ty::type_is_ty_var(ety) {
+ if !ety.is_ty_var() {
ExpectHasType(ety)
} else {
NoExpectation
pub struct UnsafetyState {
pub def: ast::NodeId,
pub unsafety: ast::Unsafety,
+ pub unsafe_push_count: u32,
from_fn: bool
}
impl UnsafetyState {
pub fn function(unsafety: ast::Unsafety, def: ast::NodeId) -> UnsafetyState {
- UnsafetyState { def: def, unsafety: unsafety, from_fn: true }
+ UnsafetyState { def: def, unsafety: unsafety, unsafe_push_count: 0, from_fn: true }
}
pub fn recurse(&mut self, blk: &ast::Block) -> UnsafetyState {
ast::Unsafety::Unsafe if self.from_fn => *self,
unsafety => {
- let (unsafety, def) = match blk.rules {
- ast::UnsafeBlock(..) => (ast::Unsafety::Unsafe, blk.id),
- ast::DefaultBlock => (unsafety, self.def),
+ let (unsafety, def, count) = match blk.rules {
+ ast::PushUnsafeBlock(..) =>
+ (unsafety, blk.id, self.unsafe_push_count.checked_add(1).unwrap()),
+ ast::PopUnsafeBlock(..) =>
+ (unsafety, blk.id, self.unsafe_push_count.checked_sub(1).unwrap()),
+ ast::UnsafeBlock(..) =>
+ (ast::Unsafety::Unsafe, blk.id, self.unsafe_push_count),
+ ast::DefaultBlock =>
+ (unsafety, self.def, self.unsafe_push_count),
};
UnsafetyState{ def: def,
- unsafety: unsafety,
- from_fn: false }
+ unsafety: unsafety,
+ unsafe_push_count: count,
+ from_fn: false }
}
}
}
ccx: &'a CrateCtxt<'a, 'tcx>,
}
-impl<'a, 'tcx> mc::Typer<'tcx> for FnCtxt<'a, 'tcx> {
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
- let ty = self.node_ty(id);
- self.resolve_type_vars_or_error(&ty)
- }
- fn expr_ty_adjusted(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
- let ty = self.adjust_expr_ty(expr, self.inh.adjustments.borrow().get(&expr.id));
- self.resolve_type_vars_or_error(&ty)
- }
- fn type_moves_by_default(&self, span: Span, ty: Ty<'tcx>) -> bool {
- let ty = self.infcx().resolve_type_vars_if_possible(&ty);
- !traits::type_known_to_meet_builtin_bound(self.infcx(), self, ty, ty::BoundCopy, span)
- }
- fn node_method_ty(&self, method_call: ty::MethodCall)
- -> Option<Ty<'tcx>> {
- self.inh.method_map.borrow()
- .get(&method_call)
- .map(|method| method.ty)
- .map(|ty| self.infcx().resolve_type_vars_if_possible(&ty))
- }
- fn node_method_origin(&self, method_call: ty::MethodCall)
- -> Option<ty::MethodOrigin<'tcx>>
- {
- self.inh.method_map.borrow()
- .get(&method_call)
- .map(|method| method.origin.clone())
- }
- fn adjustments(&self) -> &RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
- &self.inh.adjustments
- }
- fn is_method_call(&self, id: ast::NodeId) -> bool {
- self.inh.method_map.borrow().contains_key(&ty::MethodCall::expr(id))
- }
- fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<CodeExtent> {
- self.param_env().temporary_scope(rvalue_id)
- }
- fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
- self.inh.upvar_capture_map.borrow().get(&upvar_id).cloned()
- }
-}
-
-impl<'a, 'tcx> ty::ClosureTyper<'tcx> for FnCtxt<'a, 'tcx> {
- fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
- &self.inh.param_env
- }
-
- fn closure_kind(&self,
- def_id: ast::DefId)
- -> Option<ty::ClosureKind>
- {
- self.inh.closure_kinds.borrow().get(&def_id).cloned()
- }
-
- fn closure_type(&self,
- def_id: ast::DefId,
- substs: &subst::Substs<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- self.inh.closure_tys.borrow().get(&def_id).unwrap().subst(self.tcx(), substs)
- }
-
- fn closure_upvars(&self,
- def_id: ast::DefId,
- substs: &Substs<'tcx>)
- -> Option<Vec<ty::ClosureUpvar<'tcx>>>
- {
- ty::closure_upvars(self, def_id, substs)
- }
-}
-
impl<'a, 'tcx> Inherited<'a, 'tcx> {
fn new(tcx: &'a ty::ctxt<'tcx>,
+ tables: &'a RefCell<ty::Tables<'tcx>>,
param_env: ty::ParameterEnvironment<'a, 'tcx>)
-> Inherited<'a, 'tcx> {
+
Inherited {
- infcx: infer::new_infer_ctxt(tcx),
+ infcx: infer::new_infer_ctxt(tcx, tables, Some(param_env), true),
locals: RefCell::new(NodeMap()),
- param_env: param_env,
- node_types: RefCell::new(NodeMap()),
- item_substs: RefCell::new(NodeMap()),
- adjustments: RefCell::new(NodeMap()),
- method_map: RefCell::new(FnvHashMap()),
- upvar_capture_map: RefCell::new(FnvHashMap()),
- closure_tys: RefCell::new(DefIdMap()),
- closure_kinds: RefCell::new(DefIdMap()),
+ tables: tables,
fn_sig_map: RefCell::new(NodeMap()),
- fulfillment_cx: RefCell::new(traits::FulfillmentContext::new(true)),
deferred_call_resolutions: RefCell::new(DefIdMap()),
deferred_cast_checks: RefCell::new(Vec::new()),
}
}
fn normalize_associated_types_in<T>(&self,
- typer: &ty::ClosureTyper<'tcx>,
span: Span,
body_id: ast::NodeId,
value: &T)
-> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
- let mut fulfillment_cx = self.fulfillment_cx.borrow_mut();
+ let mut fulfillment_cx = self.infcx.fulfillment_cx.borrow_mut();
assoc::normalize_associated_types_in(&self.infcx,
- typer,
- &mut *fulfillment_cx, span,
+ &mut fulfillment_cx,
+ span,
body_id,
value)
}
}
}
-fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>)
+fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
+ tables: &'a RefCell<ty::Tables<'tcx>>)
-> Inherited<'a, 'tcx> {
// It's kind of a kludge to manufacture a fake function context
// and statement context, but we might as well do write the code only once
- let param_env = ty::empty_parameter_environment(ccx.tcx);
- Inherited::new(ccx.tcx, param_env)
+ let param_env = ccx.tcx.empty_parameter_environment();
+ Inherited::new(ccx.tcx, &tables, param_env)
}
struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
{
match raw_fty.sty {
ty::TyBareFn(_, ref fn_ty) => {
- let inh = Inherited::new(ccx.tcx, param_env);
+ let tables = RefCell::new(ty::Tables::empty());
+ let inh = Inherited::new(ccx.tcx, &tables, param_env);
// Compute the fty from point of view of inside fn.
let fn_sig =
- fn_ty.sig.subst(ccx.tcx, &inh.param_env.free_substs);
+ fn_ty.sig.subst(ccx.tcx, &inh.infcx.parameter_environment.free_substs);
let fn_sig =
- liberate_late_bound_regions(ccx.tcx,
- region::DestructionScopeData::new(body.id),
- &fn_sig);
+ ccx.tcx.liberate_late_bound_regions(region::DestructionScopeData::new(body.id),
+ &fn_sig);
let fn_sig =
- inh.normalize_associated_types_in(&inh.param_env, body.span, body.id, &fn_sig);
+ inh.normalize_associated_types_in(body.span,
+ body.id,
+ &fn_sig);
let fcx = check_fn(ccx, fn_ty.unsafety, fn_id, &fn_sig,
decl, fn_id, body, &inh);
traits::VariableType(p.id));
debug!("Pattern binding {} is assigned to {} with type {:?}",
- token::get_ident(path1.node),
+ path1.node,
self.fcx.infcx().ty_to_string(
self.fcx.inh.locals.borrow().get(&p.id).unwrap().clone()),
var_ty);
check_representable(tcx, span, id, "struct");
check_instantiable(tcx, span, id);
- if ty::lookup_simd(tcx, local_def(id)) {
+ if tcx.lookup_simd(local_def(id)) {
check_simd(tcx, span, id);
}
}
pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx ast::Item) {
debug!("check_item_type(it.id={}, it.ident={})",
it.id,
- ty::item_path_str(ccx.tcx, local_def(it.id)));
+ ccx.tcx.item_path_str(local_def(it.id)));
let _indenter = indenter();
match it.node {
// Consts can play a role in type-checking, so they are included here.
}
ast::ItemFn(..) => {} // entirely within check_item_body
ast::ItemImpl(_, _, _, _, _, ref impl_items) => {
- debug!("ItemImpl {} with id {}", token::get_ident(it.ident), it.id);
- match ty::impl_trait_ref(ccx.tcx, local_def(it.id)) {
+ debug!("ItemImpl {} with id {}", it.ident, it.id);
+ match ccx.tcx.impl_trait_ref(local_def(it.id)) {
Some(impl_trait_ref) => {
check_impl_items_against_trait(ccx,
it.span,
check_struct(ccx, it.id, it.span);
}
ast::ItemTy(ref t, ref generics) => {
- let pty_ty = ty::node_id_to_type(ccx.tcx, it.id);
+ let pty_ty = ccx.tcx.node_id_to_type(it.id);
check_bounds_are_used(ccx, t.span, &generics.ty_params, pty_ty);
}
ast::ItemForeignMod(ref m) => {
}
} else {
for item in &m.items {
- let pty = ty::lookup_item_type(ccx.tcx, local_def(item.id));
+ let pty = ccx.tcx.lookup_item_type(local_def(item.id));
if !pty.generics.types.is_empty() {
span_err!(ccx.tcx.sess, item.span, E0044,
"foreign items may not have type parameters");
}
if let ast::ForeignItemFn(ref fn_decl, _) = item.node {
- if fn_decl.variadic && m.abi != abi::C {
- span_err!(ccx.tcx.sess, item.span, E0045,
- "variadic function must have C calling convention");
- }
+ require_c_abi_if_variadic(ccx.tcx, fn_decl, m.abi, item.span);
}
}
}
pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx ast::Item) {
debug!("check_item_body(it.id={}, it.ident={})",
it.id,
- ty::item_path_str(ccx.tcx, local_def(it.id)));
+ ccx.tcx.item_path_str(local_def(it.id)));
let _indenter = indenter();
match it.node {
ast::ItemFn(ref decl, _, _, _, _, ref body) => {
- let fn_pty = ty::lookup_item_type(ccx.tcx, ast_util::local_def(it.id));
+ let fn_pty = ccx.tcx.lookup_item_type(ast_util::local_def(it.id));
let param_env = ParameterEnvironment::for_item(ccx.tcx, it.id);
check_bare_fn(ccx, &**decl, &**body, it.id, it.span, fn_pty.ty, param_env);
}
ast::ItemImpl(_, _, _, _, _, ref impl_items) => {
- debug!("ItemImpl {} with id {}", token::get_ident(it.ident), it.id);
+ debug!("ItemImpl {} with id {}", it.ident, it.id);
- let impl_pty = ty::lookup_item_type(ccx.tcx, ast_util::local_def(it.id));
+ let impl_pty = ccx.tcx.lookup_item_type(ast_util::local_def(it.id));
for impl_item in impl_items {
match impl_item.node {
}
}
ast::ItemTrait(_, _, _, ref trait_items) => {
- let trait_def = ty::lookup_trait_def(ccx.tcx, local_def(it.id));
+ let trait_def = ccx.tcx.lookup_trait_def(local_def(it.id));
for trait_item in trait_items {
match trait_item.node {
ast::ConstTraitItem(_, Some(ref expr)) => {
Position::ArgumentNamed(s) if s == "Self" => (),
// So is `{A}` if A is a type parameter
Position::ArgumentNamed(s) => match types.iter().find(|t| {
- t.ident.as_str() == s
+ t.ident.name == s
}) {
Some(_) => (),
None => {
span_err!(ccx.tcx.sess, attr.span, E0230,
"there is no type parameter \
{} on trait {}",
- s, item.ident.as_str());
+ s, item.ident);
}
},
// `{:1}` and `{}` are not to be used
item_generics, id);
let param_env = ParameterEnvironment::for_item(ccx.tcx, id);
- let fty = ty::node_id_to_type(ccx.tcx, id);
+ let fty = ccx.tcx.node_id_to_type(id);
debug!("check_method_body: fty={:?}", fty);
check_bare_fn(ccx, &sig.decl, body, id, span, fty, param_env);
impl_items: &[P<ast::ImplItem>]) {
// Locate trait methods
let tcx = ccx.tcx;
- let trait_items = ty::trait_items(tcx, impl_trait_ref.def_id);
+ let trait_items = tcx.trait_items(impl_trait_ref.def_id);
+ let mut overridden_associated_type = None;
// Check existing impl methods to see if they are both present in trait
// and compatible with trait signature
for impl_item in impl_items {
+ let ty_impl_item = ccx.tcx.impl_or_trait_item(local_def(impl_item.id));
+ let ty_trait_item = trait_items.iter()
+ .find(|ac| ac.name() == ty_impl_item.name())
+ .unwrap_or_else(|| {
+ // This is checked by resolve
+ tcx.sess.span_bug(impl_item.span,
+ &format!("impl-item `{}` is not a member of `{:?}`",
+ ty_impl_item.name(),
+ impl_trait_ref));
+ });
match impl_item.node {
ast::ConstImplItem(..) => {
- let impl_const_def_id = local_def(impl_item.id);
- let impl_const_ty = ty::impl_or_trait_item(ccx.tcx,
- impl_const_def_id);
+ let impl_const = match ty_impl_item {
+ ty::ConstTraitItem(ref cti) => cti,
+ _ => tcx.sess.span_bug(impl_item.span, "non-const impl-item for const")
+ };
// Find associated const definition.
- let opt_associated_const =
- trait_items.iter()
- .find(|ac| ac.name() == impl_const_ty.name());
- match opt_associated_const {
- Some(associated_const) => {
- match (associated_const, &impl_const_ty) {
- (&ty::ConstTraitItem(ref const_trait),
- &ty::ConstTraitItem(ref const_impl)) => {
- compare_const_impl(ccx.tcx,
- &const_impl,
- impl_item.span,
- &const_trait,
- &*impl_trait_ref);
- }
- _ => {
- span_err!(tcx.sess, impl_item.span, E0323,
- "item `{}` is an associated const, \
- which doesn't match its trait `{:?}`",
- token::get_name(impl_const_ty.name()),
- impl_trait_ref)
- }
- }
- }
- None => {
- // This is `span_bug` as it should have already been
- // caught in resolve.
- tcx.sess.span_bug(
- impl_item.span,
- &format!(
- "associated const `{}` is not a member of \
- trait `{:?}`",
- token::get_name(impl_const_ty.name()),
- impl_trait_ref));
- }
+ if let &ty::ConstTraitItem(ref trait_const) = ty_trait_item {
+ compare_const_impl(ccx.tcx,
+ &impl_const,
+ impl_item.span,
+ trait_const,
+ &*impl_trait_ref);
+ } else {
+ span_err!(tcx.sess, impl_item.span, E0323,
+ "item `{}` is an associated const, \
+ which doesn't match its trait `{:?}`",
+ impl_const.name,
+ impl_trait_ref)
}
}
ast::MethodImplItem(ref sig, ref body) => {
check_trait_fn_not_const(ccx, impl_item.span, sig.constness);
- let impl_method_def_id = local_def(impl_item.id);
- let impl_item_ty = ty::impl_or_trait_item(ccx.tcx,
- impl_method_def_id);
-
- // If this is an impl of a trait method, find the
- // corresponding method definition in the trait.
- let opt_trait_method_ty =
- trait_items.iter()
- .find(|ti| ti.name() == impl_item_ty.name());
- match opt_trait_method_ty {
- Some(trait_method_ty) => {
- match (trait_method_ty, &impl_item_ty) {
- (&ty::MethodTraitItem(ref trait_method_ty),
- &ty::MethodTraitItem(ref impl_method_ty)) => {
- compare_impl_method(ccx.tcx,
- &**impl_method_ty,
- impl_item.span,
- body.id,
- &**trait_method_ty,
- &*impl_trait_ref);
- }
- _ => {
- span_err!(tcx.sess, impl_item.span, E0324,
- "item `{}` is an associated method, \
- which doesn't match its trait `{:?}`",
- token::get_name(impl_item_ty.name()),
- impl_trait_ref)
- }
- }
- }
- None => {
- // This is span_bug as it should have already been
- // caught in resolve.
- tcx.sess.span_bug(
- impl_item.span,
- &format!("method `{}` is not a member of trait `{:?}`",
- token::get_name(impl_item_ty.name()),
- impl_trait_ref));
- }
+ let impl_method = match ty_impl_item {
+ ty::MethodTraitItem(ref mti) => mti,
+ _ => tcx.sess.span_bug(impl_item.span, "non-method impl-item for method")
+ };
+
+ if let &ty::MethodTraitItem(ref trait_method) = ty_trait_item {
+ compare_impl_method(ccx.tcx,
+ &impl_method,
+ impl_item.span,
+ body.id,
+ &trait_method,
+ &impl_trait_ref);
+ } else {
+ span_err!(tcx.sess, impl_item.span, E0324,
+ "item `{}` is an associated method, \
+ which doesn't match its trait `{:?}`",
+ impl_method.name,
+ impl_trait_ref)
}
}
ast::TypeImplItem(_) => {
- let typedef_def_id = local_def(impl_item.id);
- let typedef_ty = ty::impl_or_trait_item(ccx.tcx,
- typedef_def_id);
-
- // If this is an impl of an associated type, find the
- // corresponding type definition in the trait.
- let opt_associated_type =
- trait_items.iter()
- .find(|ti| ti.name() == typedef_ty.name());
- match opt_associated_type {
- Some(associated_type) => {
- match (associated_type, &typedef_ty) {
- (&ty::TypeTraitItem(_), &ty::TypeTraitItem(_)) => {}
- _ => {
- span_err!(tcx.sess, impl_item.span, E0325,
- "item `{}` is an associated type, \
- which doesn't match its trait `{:?}`",
- token::get_name(typedef_ty.name()),
- impl_trait_ref)
- }
- }
- }
- None => {
- // This is `span_bug` as it should have already been
- // caught in resolve.
- tcx.sess.span_bug(
- impl_item.span,
- &format!(
- "associated type `{}` is not a member of \
- trait `{:?}`",
- token::get_name(typedef_ty.name()),
- impl_trait_ref));
+ let impl_type = match ty_impl_item {
+ ty::TypeTraitItem(ref tti) => tti,
+ _ => tcx.sess.span_bug(impl_item.span, "non-type impl-item for type")
+ };
+
+ if let &ty::TypeTraitItem(ref at) = ty_trait_item {
+ if let Some(_) = at.ty {
+ overridden_associated_type = Some(impl_item);
}
+ } else {
+ span_err!(tcx.sess, impl_item.span, E0325,
+ "item `{}` is an associated type, \
+ which doesn't match its trait `{:?}`",
+ impl_type.name,
+ impl_trait_ref)
}
}
ast::MacImplItem(_) => tcx.sess.span_bug(impl_item.span,
}
// Check for missing items from trait
- let provided_methods = ty::provided_trait_methods(tcx, impl_trait_ref.def_id);
- let associated_consts = ty::associated_consts(tcx, impl_trait_ref.def_id);
+ let provided_methods = tcx.provided_trait_methods(impl_trait_ref.def_id);
+ let associated_consts = tcx.associated_consts(impl_trait_ref.def_id);
let mut missing_items = Vec::new();
+ let mut invalidated_items = Vec::new();
+ let associated_type_overridden = overridden_associated_type.is_some();
for trait_item in trait_items.iter() {
match *trait_item {
ty::ConstTraitItem(ref associated_const) => {
let is_provided =
associated_consts.iter().any(|ac| ac.default.is_some() &&
ac.name == associated_const.name);
- if !is_implemented && !is_provided {
- missing_items.push(format!("`{}`",
- token::get_name(associated_const.name)));
+ if !is_implemented {
+ if !is_provided {
+ missing_items.push(associated_const.name);
+ } else if associated_type_overridden {
+ invalidated_items.push(associated_const.name);
+ }
}
}
ty::MethodTraitItem(ref trait_method) => {
});
let is_provided =
provided_methods.iter().any(|m| m.name == trait_method.name);
- if !is_implemented && !is_provided {
- missing_items.push(format!("`{}`", token::get_name(trait_method.name)));
+ if !is_implemented {
+ if !is_provided {
+ missing_items.push(trait_method.name);
+ } else if associated_type_overridden {
+ invalidated_items.push(trait_method.name);
+ }
}
}
ty::TypeTraitItem(ref associated_type) => {
}
});
let is_provided = associated_type.ty.is_some();
- if !is_implemented && !is_provided {
- missing_items.push(format!("`{}`", token::get_name(associated_type.name)));
+ if !is_implemented {
+ if !is_provided {
+ missing_items.push(associated_type.name);
+ } else if associated_type_overridden {
+ invalidated_items.push(associated_type.name);
+ }
}
}
}
if !missing_items.is_empty() {
span_err!(tcx.sess, impl_span, E0046,
- "not all trait items implemented, missing: {}",
- missing_items.connect(", "));
+ "not all trait items implemented, missing: `{}`",
+ missing_items.iter()
+ .map(|name| name.to_string())
+ .collect::<Vec<_>>().join("`, `"))
+ }
+
+ if !invalidated_items.is_empty() {
+ let invalidator = overridden_associated_type.unwrap();
+ span_err!(tcx.sess, invalidator.span, E0399,
+ "the following trait items need to be reimplemented \
+ as `{}` was overridden: `{}`",
+ invalidator.ident,
+ invalidated_items.iter()
+ .map(|name| name.to_string())
+ .collect::<Vec<_>>().join("`, `"))
}
}
format!("cast to unsized type: `{}` as `{}`", actual, tstr)
}, t_expr, None);
match t_expr.sty {
- ty::TyRef(_, ty::mt { mutbl: mt, .. }) => {
+ ty::TyRef(_, ty::TypeAndMut { mutbl: mt, .. }) => {
let mtstr = match mt {
ast::MutMutable => "mut ",
ast::MutImmutable => ""
};
- if ty::type_is_trait(t_cast) {
+ if t_cast.is_trait() {
match fcx.tcx().sess.codemap().span_to_snippet(t_span) {
Ok(s) => {
fcx.tcx().sess.span_suggestion(t_span,
fn get_item_type_scheme(&self, _: Span, id: ast::DefId)
-> Result<ty::TypeScheme<'tcx>, ErrorReported>
{
- Ok(ty::lookup_item_type(self.tcx(), id))
+ Ok(self.tcx().lookup_item_type(id))
}
fn get_trait_def(&self, _: Span, id: ast::DefId)
-> Result<&'tcx ty::TraitDef<'tcx>, ErrorReported>
{
- Ok(ty::lookup_trait_def(self.tcx(), id))
+ Ok(self.tcx().lookup_trait_def(id))
}
fn ensure_super_predicates(&self, _: Span, _: ast::DefId) -> Result<(), ErrorReported> {
}
fn get_free_substs(&self) -> Option<&Substs<'tcx>> {
- Some(&self.inh.param_env.free_substs)
+ Some(&self.inh.infcx.parameter_environment.free_substs)
}
fn get_type_parameter_bounds(&self,
-> Result<Vec<ty::PolyTraitRef<'tcx>>, ErrorReported>
{
let def = self.tcx().type_parameter_def(node_id);
- let r = self.inh.param_env.caller_bounds
+ let r = self.inh.infcx.parameter_environment
+ .caller_bounds
.iter()
.filter_map(|predicate| {
match *predicate {
assoc_name: ast::Name)
-> bool
{
- let trait_def = ty::lookup_trait_def(self.ccx.tcx, trait_def_id);
+ let trait_def = self.ccx.tcx.lookup_trait_def(trait_def_id);
trait_def.associated_type_names.contains(&assoc_name)
}
- fn ty_infer(&self, _span: Span) -> Ty<'tcx> {
- self.infcx().next_ty_var()
+ fn ty_infer(&self,
+ ty_param_def: Option<ty::TypeParameterDef<'tcx>>,
+ substs: Option<&mut subst::Substs<'tcx>>,
+ space: Option<subst::ParamSpace>,
+ span: Span) -> Ty<'tcx> {
+ // Grab the default doing subsitution
+ let default = ty_param_def.and_then(|def| {
+ def.default.map(|ty| type_variable::Default {
+ ty: ty.subst_spanned(self.tcx(), substs.as_ref().unwrap(), Some(span)),
+ origin_span: span,
+ def_id: def.default_def_id
+ })
+ });
+
+ let ty_var = self.infcx().next_ty_var_with_default(default);
+
+ // Finally we add the type variable to the substs
+ match substs {
+ None => ty_var,
+ Some(substs) => { substs.types.push(space.unwrap(), ty_var); ty_var }
+ }
}
fn projected_ty_from_poly_trait_ref(&self,
}
pub fn param_env(&self) -> &ty::ParameterEnvironment<'a,'tcx> {
- &self.inh.param_env
+ &self.inh.infcx.parameter_environment
}
pub fn sess(&self) -> &Session {
fn resolve_type_vars_if_possible(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("resolve_type_vars_if_possible(ty={:?})", ty);
- // No ty::infer()? Nothing needs doing.
- if !ty::type_has_ty_infer(ty) {
+ // No TyInfer()? Nothing needs doing.
+ if !ty.has_infer_types() {
debug!("resolve_type_vars_if_possible: ty={:?}", ty);
return ty;
}
// If `ty` is a type variable, see whether we already know what it is.
ty = self.infcx().resolve_type_vars_if_possible(&ty);
- if !ty::type_has_ty_infer(ty) {
+ if !ty.has_infer_types() {
debug!("resolve_type_vars_if_possible: ty={:?}", ty);
return ty;
}
// If not, try resolving any new fcx obligations that have cropped up.
self.select_new_obligations();
ty = self.infcx().resolve_type_vars_if_possible(&ty);
- if !ty::type_has_ty_infer(ty) {
+ if !ty.has_infer_types() {
debug!("resolve_type_vars_if_possible: ty={:?}", ty);
return ty;
}
ty
}
- /// Resolves all type variables in `t` and then, if any were left
- /// unresolved, substitutes an error type. This is used after the
- /// main checking when doing a second pass before writeback. The
- /// justification is that writeback will produce an error for
- /// these unconstrained type variables.
- fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult<Ty<'tcx>> {
- let t = self.infcx().resolve_type_vars_if_possible(t);
- if ty::type_has_ty_infer(t) || ty::type_is_error(t) { Err(()) } else { Ok(t) }
- }
-
fn record_deferred_call_resolution(&self,
closure_def_id: ast::DefId,
r: DeferredCallResolutionHandler<'tcx>) {
}
}
- /// Apply "fallbacks" to some types
- /// ! gets replaced with (), unconstrained ints with i32, and unconstrained floats with f64.
- pub fn default_type_parameters(&self) {
- use middle::ty::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat, Neither};
- for (_, &mut ref ty) in &mut *self.inh.node_types.borrow_mut() {
- let resolved = self.infcx().resolve_type_vars_if_possible(ty);
- if self.infcx().type_var_diverges(resolved) {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, ty::mk_nil(self.tcx()));
- } else {
- match self.infcx().type_is_unconstrained_numeric(resolved) {
- UnconstrainedInt => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
- },
- UnconstrainedFloat => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
- }
- Neither => { }
- }
- }
- }
- }
-
#[inline]
pub fn write_ty(&self, node_id: ast::NodeId, ty: Ty<'tcx>) {
debug!("write_ty({}, {:?}) in fcx {}",
node_id, ty, self.tag());
- self.inh.node_types.borrow_mut().insert(node_id, ty);
+ self.inh.tables.borrow_mut().node_types.insert(node_id, ty);
}
pub fn write_substs(&self, node_id: ast::NodeId, substs: ty::ItemSubsts<'tcx>) {
substs,
self.tag());
- self.inh.item_substs.borrow_mut().insert(node_id, substs);
+ self.inh.tables.borrow_mut().item_substs.insert(node_id, substs);
}
}
return;
}
- self.inh.adjustments.borrow_mut().insert(node_id, adj);
+ self.inh.tables.borrow_mut().adjustments.insert(node_id, adj);
}
/// Basically whenever we are converting from a type scheme into
substs: &Substs<'tcx>,
value: &T)
-> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
let value = value.subst(self.tcx(), substs);
let result = self.normalize_associated_types_in(span, &value);
fn normalize_associated_types_in<T>(&self, span: Span, value: &T) -> T
- where T : TypeFoldable<'tcx> + HasProjectionTypes
+ where T : TypeFoldable<'tcx> + HasTypeFlags
{
- self.inh.normalize_associated_types_in(self, span, self.body_id, value)
+ self.inh.normalize_associated_types_in(span, self.body_id, value)
}
fn normalize_associated_type(&self,
let cause = traits::ObligationCause::new(span,
self.body_id,
traits::ObligationCauseCode::MiscObligation);
- self.inh.fulfillment_cx
+ self.inh
+ .infcx
+ .fulfillment_cx
.borrow_mut()
.normalize_projection_type(self.infcx(),
- self,
ty::ProjectionTy {
trait_ref: trait_ref,
item_name: item_name,
-> TypeAndSubsts<'tcx>
{
let type_scheme =
- ty::lookup_item_type(self.tcx(), def_id);
+ self.tcx().lookup_item_type(def_id);
let type_predicates =
- ty::lookup_predicates(self.tcx(), def_id);
+ self.tcx().lookup_predicates(def_id);
let substs =
self.infcx().fresh_substs_for_generics(
span,
let tcx = self.tcx();
let ty::TypeScheme { generics, ty: decl_ty } =
- ty::lookup_item_type(tcx, did);
+ tcx.lookup_item_type(did);
let substs = astconv::ast_path_substs_for_ty(self, self,
path.span,
}
pub fn write_nil(&self, node_id: ast::NodeId) {
- self.write_ty(node_id, ty::mk_nil(self.tcx()));
+ self.write_ty(node_id, self.tcx().mk_nil());
}
pub fn write_error(&self, node_id: ast::NodeId) {
self.write_ty(node_id, self.tcx().types.err);
-> bool
{
traits::type_known_to_meet_builtin_bound(self.infcx(),
- self.param_env(),
ty,
ty::BoundSized,
span)
builtin_bound: ty::BuiltinBound,
cause: traits::ObligationCause<'tcx>)
{
- self.inh.fulfillment_cx.borrow_mut()
+ self.inh.infcx.fulfillment_cx.borrow_mut()
.register_builtin_bound(self.infcx(), ty, builtin_bound, cause);
}
{
debug!("register_predicate({:?})",
obligation);
- self.inh.fulfillment_cx
+ self.inh.infcx.fulfillment_cx
.borrow_mut()
.register_predicate_obligation(self.infcx(), obligation);
}
}
pub fn expr_ty(&self, ex: &ast::Expr) -> Ty<'tcx> {
- match self.inh.node_types.borrow().get(&ex.id) {
+ match self.inh.tables.borrow().node_types.get(&ex.id) {
Some(&t) => t,
None => {
self.tcx().sess.bug(&format!("no type for expr in fcx {}",
let raw_ty = self.expr_ty(expr);
let raw_ty = self.infcx().shallow_resolve(raw_ty);
let resolve_ty = |ty: Ty<'tcx>| self.infcx().resolve_type_vars_if_possible(&ty);
- ty::adjust_ty(self.tcx(),
- expr.span,
- expr.id,
- raw_ty,
- adjustment,
- |method_call| self.inh.method_map.borrow()
- .get(&method_call)
- .map(|method| resolve_ty(method.ty)))
+ raw_ty.adjust(self.tcx(), expr.span, expr.id, adjustment, |method_call| {
+ self.inh.tables.borrow().method_map.get(&method_call)
+ .map(|method| resolve_ty(method.ty))
+ })
}
pub fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
- match self.inh.node_types.borrow().get(&id) {
+ match self.inh.tables.borrow().node_types.get(&id) {
Some(&t) => t,
None if self.err_count_since_creation() != 0 => self.tcx().types.err,
None => {
}
pub fn item_substs(&self) -> Ref<NodeMap<ty::ItemSubsts<'tcx>>> {
- self.inh.item_substs.borrow()
+ // NOTE: @jroesch this is a hack that appears to be fixed on nightly, will monitor if
+ // it changes when we upgrade the snapshot compiler
+ fn project_item_susbts<'a, 'tcx>(tables: &'a ty::Tables<'tcx>)
+ -> &'a NodeMap<ty::ItemSubsts<'tcx>> {
+ &tables.item_substs
+ }
+
+ Ref::map(self.inh.tables.borrow(), project_item_susbts)
}
pub fn opt_node_ty_substs<F>(&self,
f: F) where
F: FnOnce(&ty::ItemSubsts<'tcx>),
{
- match self.inh.item_substs.borrow().get(&id) {
+ match self.inh.tables.borrow().item_substs.get(&id) {
Some(s) => { f(s) }
None => { }
}
origin: infer::TypeOrigin,
sub: Ty<'tcx>,
sup: Ty<'tcx>)
- -> Result<(), ty::type_err<'tcx>> {
+ -> Result<(), ty::TypeError<'tcx>> {
infer::mk_subty(self.infcx(), a_is_expected, origin, sub, sup)
}
origin: infer::TypeOrigin,
sub: Ty<'tcx>,
sup: Ty<'tcx>)
- -> Result<(), ty::type_err<'tcx>> {
+ -> Result<(), ty::TypeError<'tcx>> {
infer::mk_eqty(self.infcx(), a_is_expected, origin, sub, sup)
}
sp: Span,
mk_msg: M,
actual_ty: Ty<'tcx>,
- err: Option<&ty::type_err<'tcx>>) where
+ err: Option<&ty::TypeError<'tcx>>) where
M: FnOnce(String) -> String,
{
self.infcx().type_error_message(sp, mk_msg, actual_ty, err);
sp: Span,
e: Ty<'tcx>,
a: Ty<'tcx>,
- err: &ty::type_err<'tcx>) {
+ err: &ty::TypeError<'tcx>) {
self.infcx().report_mismatched_types(sp, e, a, err)
}
region: ty::Region,
cause: traits::ObligationCause<'tcx>)
{
- let mut fulfillment_cx = self.inh.fulfillment_cx.borrow_mut();
+ let mut fulfillment_cx = self.inh.infcx.fulfillment_cx.borrow_mut();
fulfillment_cx.register_region_obligation(ty, region, cause);
}
pub fn lookup_field_ty(&self,
span: Span,
class_id: ast::DefId,
- items: &[ty::field_ty],
+ items: &[ty::FieldTy],
fieldname: ast::Name,
substs: &subst::Substs<'tcx>)
-> Option<Ty<'tcx>>
{
let o_field = items.iter().find(|f| f.name == fieldname);
- o_field.map(|f| ty::lookup_field_type(self.tcx(), class_id, f.id, substs))
+ o_field.map(|f| self.tcx().lookup_field_type(class_id, f.id, substs))
.map(|t| self.normalize_associated_types_in(span, &t))
}
pub fn lookup_tup_field_ty(&self,
span: Span,
class_id: ast::DefId,
- items: &[ty::field_ty],
+ items: &[ty::FieldTy],
idx: usize,
substs: &subst::Substs<'tcx>)
-> Option<Ty<'tcx>>
{
let o_field = if idx < items.len() { Some(&items[idx]) } else { None };
- o_field.map(|f| ty::lookup_field_type(self.tcx(), class_id, f.id, substs))
+ o_field.map(|f| self.tcx().lookup_field_type(class_id, f.id, substs))
.map(|t| self.normalize_associated_types_in(span, &t))
}
}
}
+ /// Apply "fallbacks" to some types
+ /// ! gets replaced with (), unconstrained ints with i32, and unconstrained floats with f64.
+ fn default_type_parameters(&self) {
+ use middle::ty::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat, Neither};
+ for ty in &self.infcx().unsolved_variables() {
+ let resolved = self.infcx().resolve_type_vars_if_possible(ty);
+ if self.infcx().type_var_diverges(resolved) {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ } else {
+ match self.infcx().type_is_unconstrained_numeric(resolved) {
+ UnconstrainedInt => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ },
+ UnconstrainedFloat => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ }
+ Neither => { }
+ }
+ }
+ }
+ }
+
fn select_all_obligations_and_apply_defaults(&self) {
- debug!("select_all_obligations_and_apply_defaults");
+ if self.tcx().sess.features.borrow().default_type_parameter_fallback {
+ self.new_select_all_obligations_and_apply_defaults();
+ } else {
+ self.old_select_all_obligations_and_apply_defaults();
+ }
+ }
+ // Implements old type inference fallback algorithm
+ fn old_select_all_obligations_and_apply_defaults(&self) {
self.select_obligations_where_possible();
self.default_type_parameters();
self.select_obligations_where_possible();
}
+ fn new_select_all_obligations_and_apply_defaults(&self) {
+ use middle::ty::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat, Neither};
+
+ // For the time being this errs on the side of being memory wasteful but provides better
+ // error reporting.
+ // let type_variables = self.infcx().type_variables.clone();
+
+ // There is a possibility that this algorithm will have to run an arbitrary number of times
+ // to terminate so we bound it by the compiler's recursion limit.
+ for _ in (0..self.tcx().sess.recursion_limit.get()) {
+ // First we try to solve all obligations, it is possible that the last iteration
+ // has made it possible to make more progress.
+ self.select_obligations_where_possible();
+
+ let mut conflicts = Vec::new();
+
+ // Collect all unsolved type, integral and floating point variables.
+ let unsolved_variables = self.inh.infcx.unsolved_variables();
+
+ // We must collect the defaults *before* we do any unification. Because we have
+ // directly attached defaults to the type variables any unification that occurs
+ // will erase defaults causing conflicting defaults to be completely ignored.
+ let default_map: FnvHashMap<_, _> =
+ unsolved_variables
+ .iter()
+ .filter_map(|t| self.infcx().default(t).map(|d| (t, d)))
+ .collect();
+
+ let mut unbound_tyvars = HashSet::new();
+
+ debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map);
+
+ // We loop over the unsolved variables, resolving them and if they are
+ // an unconstrained numeric type we add them to the set of unbound
+ // variables. We do this so we only apply literal fallback to type
+ // variables without defaults.
+ for ty in &unsolved_variables {
+ let resolved = self.infcx().resolve_type_vars_if_possible(ty);
+ if self.infcx().type_var_diverges(resolved) {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ } else {
+ match self.infcx().type_is_unconstrained_numeric(resolved) {
+ UnconstrainedInt | UnconstrainedFloat => {
+ unbound_tyvars.insert(resolved);
+ },
+ Neither => {}
+ }
+ }
+ }
+
+ // We now remove any numeric types that also have defaults, and instead insert
+ // the type variable with a defined fallback.
+ for ty in &unsolved_variables {
+ if let Some(_default) = default_map.get(ty) {
+ let resolved = self.infcx().resolve_type_vars_if_possible(ty);
+
+ debug!("select_all_obligations_and_apply_defaults: ty: {:?} with default: {:?}",
+ ty, _default);
+
+ match resolved.sty {
+ ty::TyInfer(ty::TyVar(_)) => {
+ unbound_tyvars.insert(ty);
+ }
+
+ ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) => {
+ unbound_tyvars.insert(ty);
+ if unbound_tyvars.contains(resolved) {
+ unbound_tyvars.remove(resolved);
+ }
+ }
+
+ _ => {}
+ }
+ }
+ }
+
+ // If there are no more fallbacks to apply at this point we have applied all possible
+ // defaults and type inference will proceed as normal.
+ if unbound_tyvars.is_empty() {
+ break;
+ }
+
+ // Finally we go through each of the unbound type variables and unify them with
+ // the proper fallback, reporting a conflicting default error if any of the
+ // unifications fail. We know it must be a conflicting default because the
+ // variable would only be in `unbound_tyvars` and have a concrete value if
+ // it had been solved by previously applying a default.
+
+ // We wrap this in a transaction for error reporting, if we detect a conflict
+ // we will rollback the inference context to its prior state so we can probe
+ // for conflicts and correctly report them.
+
+
+ let _ = self.infcx().commit_if_ok(|_: &infer::CombinedSnapshot| {
+ for ty in &unbound_tyvars {
+ if self.infcx().type_var_diverges(ty) {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ } else {
+ match self.infcx().type_is_unconstrained_numeric(ty) {
+ UnconstrainedInt => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ },
+ UnconstrainedFloat => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ }
+ Neither => {
+ if let Some(default) = default_map.get(ty) {
+ let default = default.clone();
+ match infer::mk_eqty(self.infcx(), false,
+ infer::Misc(default.origin_span),
+ ty, default.ty) {
+ Ok(()) => {}
+ Err(_) => {
+ conflicts.push((*ty, default));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // If there are conflicts we rollback, otherwise commit
+ if conflicts.len() > 0 {
+ Err(())
+ } else {
+ Ok(())
+ }
+ });
+
+ if conflicts.len() > 0 {
+ // Loop through each conflicting default, figuring out the default that caused
+ // a unification failure and then report an error for each.
+ for (conflict, default) in conflicts {
+ let conflicting_default =
+ self.find_conflicting_default(&unbound_tyvars, &default_map, conflict)
+ .unwrap_or(type_variable::Default {
+ ty: self.infcx().next_ty_var(),
+ origin_span: codemap::DUMMY_SP,
+ def_id: local_def(0) // what do I put here?
+ });
+
+ // This is to ensure that we eliminate any non-determinism from the error
+ // reporting by fixing an order, it doesn't matter what order we choose
+ // just that it is consistent.
+ let (first_default, second_default) =
+ if default.def_id < conflicting_default.def_id {
+ (default, conflicting_default)
+ } else {
+ (conflicting_default, default)
+ };
+
+
+ self.infcx().report_conflicting_default_types(
+ first_default.origin_span,
+ first_default,
+ second_default)
+ }
+ }
+ }
+
+ self.select_obligations_where_possible();
+ }
+
+ // For use in error handling related to default type parameter fallback. We explicitly
+ // apply the default that caused the conflict first to a local version of the type variable
+ // table then apply defaults until we find a conflict. That default must be the one
+ // that caused conflict earlier.
+ fn find_conflicting_default(&self,
+ unbound_vars: &HashSet<Ty<'tcx>>,
+ default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>,
+ conflict: Ty<'tcx>)
+ -> Option<type_variable::Default<'tcx>> {
+ use middle::ty::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat, Neither};
+
+ // Ensure that we apply the conflicting default first
+ let mut unbound_tyvars = Vec::with_capacity(unbound_vars.len() + 1);
+ unbound_tyvars.push(conflict);
+ unbound_tyvars.extend(unbound_vars.iter());
+
+ let mut result = None;
+ // We run the same code as above applying defaults in order, this time when
+ // we find the conflict we just return it for error reporting above.
+
+ // We also run this inside snapshot that never commits so we can do error
+ // reporting for more than one conflict.
+ for ty in &unbound_tyvars {
+ if self.infcx().type_var_diverges(ty) {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ } else {
+ match self.infcx().type_is_unconstrained_numeric(ty) {
+ UnconstrainedInt => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ },
+ UnconstrainedFloat => {
+ demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ },
+ Neither => {
+ if let Some(default) = default_map.get(ty) {
+ let default = default.clone();
+ match infer::mk_eqty(self.infcx(), false,
+ infer::Misc(default.origin_span),
+ ty, default.ty) {
+ Ok(()) => {}
+ Err(_) => {
+ result = Some(default);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+
fn select_all_obligations_or_error(&self) {
debug!("select_all_obligations_or_error");
assert!(self.inh.deferred_call_resolutions.borrow().is_empty());
self.select_all_obligations_and_apply_defaults();
- let mut fulfillment_cx = self.inh.fulfillment_cx.borrow_mut();
- match fulfillment_cx.select_all_or_error(self.infcx(), self) {
+
+ let mut fulfillment_cx = self.inh.infcx.fulfillment_cx.borrow_mut();
+ match fulfillment_cx.select_all_or_error(self.infcx()) {
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); }
}
/// Select as many obligations as we can at present.
fn select_obligations_where_possible(&self) {
match
- self.inh.fulfillment_cx
+ self.inh.infcx.fulfillment_cx
.borrow_mut()
- .select_where_possible(self.infcx(), self)
+ .select_where_possible(self.infcx())
{
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); }
/// work.
fn select_new_obligations(&self) {
match
- self.inh.fulfillment_cx
+ self.inh.infcx.fulfillment_cx
.borrow_mut()
- .select_new_obligations(self.infcx(), self)
+ .select_new_obligations(self.infcx())
{
Ok(()) => { }
Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); }
}
fn anon_regions(&self, span: Span, count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>> {
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
Ok((0..count).map(|_| {
self.infcx().next_region_var(infer::MiscVariable(span))
}).collect())
}
UnresolvedTypeAction::Ignore => {
// We can continue even when the type cannot be resolved
- // (i.e. it is an inference variable) because `ty::deref`
+ // (i.e. it is an inference variable) because `Ty::builtin_deref`
// and `try_overloaded_deref` both simply return `None`
// in such a case without producing spurious errors.
fcx.resolve_type_vars_if_possible(t)
}
};
- if ty::type_is_error(resolved_t) {
+ if resolved_t.references_error() {
return (resolved_t, autoderefs, None);
}
}
// Otherwise, deref if type is derefable:
- let mt = match ty::deref(resolved_t, false) {
+ let mt = match resolved_t.builtin_deref(false) {
Some(mt) => Some(mt),
None => {
let method_call =
base_expr: Option<&ast::Expr>,
base_ty: Ty<'tcx>,
lvalue_pref: LvaluePreference)
- -> Option<ty::mt<'tcx>>
+ -> Option<ty::TypeAndMut<'tcx>>
{
// Try DerefMut first, if preferred.
let method = match (lvalue_pref, fcx.tcx().lang_items.deref_mut_trait()) {
fn make_overloaded_lvalue_return_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
method_call: Option<MethodCall>,
method: Option<MethodCallee<'tcx>>)
- -> Option<ty::mt<'tcx>>
+ -> Option<ty::TypeAndMut<'tcx>>
{
match method {
Some(method) => {
// extract method method return type, which will be &T;
// all LB regions should have been instantiated during method lookup
- let ret_ty = ty::ty_fn_ret(method.ty);
- let ret_ty = ty::no_late_bound_regions(fcx.tcx(), &ret_ty).unwrap().unwrap();
+ let ret_ty = method.ty.fn_ret();
+ let ret_ty = fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap();
if let Some(method_call) = method_call {
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
+ fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
}
// method returns &T, but the type as visible to user is T, so deref
- ty::deref(ret_ty, true)
+ ret_ty.builtin_deref(true)
}
None => None,
}
// After we have fully autoderef'd, if the resulting type is [T; n], then
// do a final unsized coercion to yield [T].
if let ty::TyArray(element_ty, _) = ty.sty {
- let adjusted_ty = ty::mk_vec(fcx.tcx(), element_ty, None);
+ let adjusted_ty = fcx.tcx().mk_slice(element_ty);
try_index_step(fcx, MethodCall::expr(expr.id), expr, base_expr,
adjusted_ty, autoderefs, true, lvalue_pref, idx_ty)
} else {
let input_ty = fcx.infcx().next_ty_var();
// First, try built-in indexing.
- match (ty::index(adjusted_ty), &index_ty.sty) {
+ match (adjusted_ty.builtin_index(), &index_ty.sty) {
(Some(ty), &ty::TyUint(ast::TyUs)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => {
debug!("try_index_step: success, using built-in indexing");
// If we had `[T; N]`, we should've caught it before unsizing to `[T]`.
tuple_arguments: TupleArgumentsFlag,
expected: Expectation<'tcx>)
-> ty::FnOutput<'tcx> {
- if ty::type_is_error(method_fn_ty) {
+ if method_fn_ty.references_error() {
let err_inputs = err_args(fcx.tcx(), args_no_rcvr.len());
let err_inputs = match tuple_arguments {
DontTupleArguments => err_inputs,
- TupleArguments => vec![ty::mk_tup(fcx.tcx(), err_inputs)],
+ TupleArguments => vec![fcx.tcx().mk_tup(err_inputs)],
};
check_argument_types(fcx,
// The special-cased logic below has three functions:
// 1. Provide as good of an expected type as possible.
let expected = expected_arg_tys.get(i).map(|&ty| {
- Expectation::rvalue_hint(ty)
+ Expectation::rvalue_hint(fcx.tcx(), ty)
});
check_expr_with_unifier(fcx, &**arg,
let tcx = fcx.ccx.tcx;
match lit.node {
- ast::LitStr(..) => ty::mk_str_slice(tcx, tcx.mk_region(ty::ReStatic), ast::MutImmutable),
+ ast::LitStr(..) => tcx.mk_static_str(),
ast::LitBinary(ref v) => {
- ty::mk_rptr(tcx, tcx.mk_region(ty::ReStatic), ty::mt {
- ty: ty::mk_vec(tcx, tcx.types.u8, Some(v.len())),
- mutbl: ast::MutImmutable,
- })
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_array(tcx.types.u8, v.len()))
}
ast::LitByte(_) => tcx.types.u8,
ast::LitChar(_) => tcx.types.char,
- ast::LitInt(_, ast::SignedIntLit(t, _)) => ty::mk_mach_int(tcx, t),
- ast::LitInt(_, ast::UnsignedIntLit(t)) => ty::mk_mach_uint(tcx, t),
+ ast::LitInt(_, ast::SignedIntLit(t, _)) => tcx.mk_mach_int(t),
+ ast::LitInt(_, ast::UnsignedIntLit(t)) => tcx.mk_mach_uint(t),
ast::LitInt(_, ast::UnsuffixedIntLit(_)) => {
let opt_ty = expected.to_option(fcx).and_then(|ty| {
match ty.sty {
}
});
opt_ty.unwrap_or_else(
- || ty::mk_int_var(tcx, fcx.infcx().next_int_var_id()))
+ || tcx.mk_int_var(fcx.infcx().next_int_var_id()))
}
- ast::LitFloat(_, t) => ty::mk_mach_float(tcx, t),
+ ast::LitFloat(_, t) => tcx.mk_mach_float(t),
ast::LitFloatUnsuffixed(_) => {
let opt_ty = expected.to_option(fcx).and_then(|ty| {
match ty.sty {
}
});
opt_ty.unwrap_or_else(
- || ty::mk_float_var(tcx, fcx.infcx().next_float_var_id()))
+ || tcx.mk_float_var(fcx.infcx().next_float_var_id()))
}
ast::LitBool(_) => tcx.types.bool
}
-> TypeAndSubsts<'tcx> {
let tcx = fcx.tcx();
- let ity = ty::lookup_item_type(tcx, did);
- let (n_tps, rps, raw_ty) =
- (ity.generics.types.len(subst::TypeSpace),
+ let ity = tcx.lookup_item_type(did);
+ let (tps, rps, raw_ty) =
+ (ity.generics.types.get_slice(subst::TypeSpace),
ity.generics.regions.get_slice(subst::TypeSpace),
ity.ty);
+ debug!("impl_self_ty: tps={:?} rps={:?} raw_ty={:?}", tps, rps, raw_ty);
+
let rps = fcx.inh.infcx.region_vars_for_defs(span, rps);
- let tps = fcx.inh.infcx.next_ty_vars(n_tps);
- let substs = subst::Substs::new_type(tps, rps);
+ let mut substs = subst::Substs::new(
+ VecPerParamSpace::empty(),
+ VecPerParamSpace::new(rps, Vec::new(), Vec::new()));
+ fcx.inh.infcx.type_vars_for_defs(span, ParamSpace::TypeSpace, &mut substs, tps);
let substd_ty = fcx.instantiate_type_scheme(span, &substs, &raw_ty);
TypeAndSubsts { substs: substs, ty: substd_ty }
/// Invariant:
/// If an expression has any sub-expressions that result in a type error,
-/// inspecting that expression's type with `ty::type_is_error` will return
+/// inspecting that expression's type with `ty.references_error()` will return
/// true. Likewise, if an expression is known to diverge, inspecting its
/// type with `ty::type_is_bot` will return true (n.b.: since Rust is
/// strict, _|_ can appear in the type of an expression that does not,
Ok(method) => {
let method_ty = method.ty;
let method_call = MethodCall::expr(expr.id);
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
+ fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
method_ty
}
Err(error) => {
infer::IfExpressionWithNoElse(sp),
false,
then_ty,
- ty::mk_nil(fcx.tcx()))
+ fcx.tcx().mk_nil())
}
};
let cond_ty = fcx.expr_ty(cond_expr);
- let if_ty = if ty::type_is_error(cond_ty) {
+ let if_ty = if cond_ty.references_error() {
fcx.tcx().types.err
} else {
branches_ty
match base_t.sty {
ty::TyStruct(base_id, substs) => {
debug!("struct named {:?}", base_t);
- let fields = ty::lookup_struct_fields(tcx, base_id);
+ let fields = tcx.lookup_struct_fields(base_id);
fcx.lookup_field_ty(expr.span, base_id, &fields[..],
field.node.name, &(*substs))
}
field.span,
|actual| {
format!("attempted to take value of method `{}` on type \
- `{}`", token::get_ident(field.node), actual)
+ `{}`", field.node, actual)
},
expr_t, None);
format!("attempted access of field `{}` on \
type `{}`, but no field with that \
name was found",
- token::get_ident(field.node),
+ field.node,
actual)
},
expr_t, None);
fn suggest_field_names<'tcx>(id : DefId,
field : &ast::SpannedIdent,
tcx : &ty::ctxt<'tcx>,
- skip : Vec<&str>) {
- let ident = token::get_ident(field.node);
- let name = &ident;
+ skip : Vec<InternedString>) {
+ let name = field.node.name.as_str();
// only find fits with at least one matching letter
let mut best_dist = name.len();
- let fields = ty::lookup_struct_fields(tcx, id);
+ let fields = tcx.lookup_struct_fields(id);
let mut best = None;
for elem in &fields {
let n = elem.name.as_str();
// ignore already set fields
- if skip.iter().any(|&x| x == n) {
+ if skip.iter().any(|x| *x == n) {
continue;
}
// ignore private fields from non-local crates
if id.krate != ast::LOCAL_CRATE && elem.vis != Visibility::Public {
continue;
}
- let dist = lev_distance(n, name);
+ let dist = lev_distance(&n, &name);
if dist < best_dist {
best = Some(n);
best_dist = dist;
|base_t, _| {
match base_t.sty {
ty::TyStruct(base_id, substs) => {
- tuple_like = ty::is_tuple_struct(tcx, base_id);
+ tuple_like = tcx.is_tuple_struct(base_id);
if tuple_like {
debug!("tuple struct named {:?}", base_t);
- let fields = ty::lookup_struct_fields(tcx, base_id);
+ let fields = tcx.lookup_struct_fields(base_id);
fcx.lookup_tup_field_ty(expr.span, base_id, &fields[..],
idx.node, &(*substs))
} else {
class_id: ast::DefId,
node_id: ast::NodeId,
substitutions: &'tcx subst::Substs<'tcx>,
- field_types: &[ty::field_ty],
+ field_types: &[ty::FieldTy],
ast_fields: &'tcx [ast::Field],
check_completeness: bool,
enum_id_opt: Option<ast::DefId>) {
field.ident.span,
|actual| match enum_id_opt {
Some(enum_id) => {
- let variant_type = ty::enum_variant_with_id(tcx,
- enum_id,
+ let variant_type = tcx.enum_variant_with_id(enum_id,
class_id);
format!("struct variant `{}::{}` has no field named `{}`",
actual, variant_type.name.as_str(),
- token::get_ident(field.ident.node))
+ field.ident.node)
}
None => {
format!("structure `{}` has no field named `{}`",
actual,
- token::get_ident(field.ident.node))
+ field.ident.node)
}
},
struct_ty,
let skip_fields = ast_fields.iter().map(|ref x| x.ident.node.name.as_str());
let actual_id = match enum_id_opt {
Some(_) => class_id,
- None => ty::ty_to_def_id(struct_ty).unwrap()
+ None => struct_ty.ty_to_def_id().unwrap()
};
suggest_field_names(actual_id, &field.ident, tcx, skip_fields.collect());
error_happened = true;
Some((_, true)) => {
span_err!(fcx.tcx().sess, field.ident.span, E0062,
"field `{}` specified more than once",
- token::get_ident(field.ident.node));
+ field.ident.node);
error_happened = true;
}
Some((field_id, false)) => {
expected_field_type =
- ty::lookup_field_type(
- tcx, class_id, field_id, substitutions);
+ tcx.lookup_field_type(class_id, field_id, substitutions);
expected_field_type =
fcx.normalize_associated_types_in(
field.span, &expected_field_type);
let (_, seen) = *class_field_map.get(&name).unwrap();
if !seen {
missing_fields.push(
- format!("`{}`", &token::get_name(name)))
+ format!("`{}`", name))
}
}
span_err!(tcx.sess, span, E0063,
"missing field{}: {}",
if missing_fields.len() == 1 {""} else {"s"},
- missing_fields.connect(", "));
+ missing_fields.join(", "));
}
}
if !error_happened {
- fcx.write_ty(node_id, ty::mk_struct(fcx.ccx.tcx,
- class_id, substitutions));
+ fcx.write_ty(node_id, fcx.ccx.tcx.mk_struct(class_id, substitutions));
}
}
} = fcx.instantiate_type(span, class_id);
// Look up and check the fields.
- let class_fields = ty::lookup_struct_fields(tcx, class_id);
+ let class_fields = tcx.lookup_struct_fields(class_id);
check_struct_or_variant_fields(fcx,
struct_type,
span,
fields,
base_expr.is_none(),
None);
- if ty::type_is_error(fcx.node_ty(id)) {
+ if fcx.node_ty(id).references_error() {
struct_type = tcx.types.err;
}
} = fcx.instantiate_type(span, enum_id);
// Look up and check the enum variant fields.
- let variant_fields = ty::lookup_struct_fields(tcx, variant_id);
+ let variant_fields = tcx.lookup_struct_fields(variant_id);
check_struct_or_variant_fields(fcx,
enum_type,
span,
let def_id = definition.def_id();
let referent_ty = fcx.expr_ty(&**subexpr);
if tcx.lang_items.exchange_heap() == Some(def_id) {
- fcx.write_ty(id, ty::mk_uniq(tcx, referent_ty));
+ fcx.write_ty(id, tcx.mk_box(referent_ty));
checked = true
}
}
match unop {
ast::UnUniq => match ty.sty {
ty::TyBox(ty) => {
- Expectation::rvalue_hint(ty)
+ Expectation::rvalue_hint(tcx, ty)
}
_ => {
NoExpectation
fcx, &**oprnd, expected_inner, lvalue_pref);
let mut oprnd_t = fcx.expr_ty(&**oprnd);
- if !ty::type_is_error(oprnd_t) {
+ if !oprnd_t.references_error() {
match unop {
ast::UnUniq => {
- oprnd_t = ty::mk_uniq(tcx, oprnd_t);
+ oprnd_t = tcx.mk_box(oprnd_t);
}
ast::UnDeref => {
oprnd_t = structurally_resolved_type(fcx, expr.span, oprnd_t);
- oprnd_t = match ty::deref(oprnd_t, true) {
+ oprnd_t = match oprnd_t.builtin_deref(true) {
Some(mt) => mt.ty,
None => match try_overloaded_deref(fcx, expr.span,
Some(MethodCall::expr(expr.id)),
ast::UnNot => {
oprnd_t = structurally_resolved_type(fcx, oprnd.span,
oprnd_t);
- if !(ty::type_is_integral(oprnd_t) ||
- oprnd_t.sty == ty::TyBool) {
+ if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
oprnd_t = op::check_user_unop(fcx, "!", "not",
tcx.lang_items.not_trait(),
expr, &**oprnd, oprnd_t, unop);
ast::UnNeg => {
oprnd_t = structurally_resolved_type(fcx, oprnd.span,
oprnd_t);
- if !(ty::type_is_integral(oprnd_t) ||
- ty::type_is_fp(oprnd_t)) {
+ if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
oprnd_t = op::check_user_unop(fcx, "-", "neg",
tcx.lang_items.neg_trait(),
expr, &**oprnd, oprnd_t, unop);
}
- if let ty::TyUint(_) = oprnd_t.sty {
- if !tcx.sess.features.borrow().negate_unsigned {
- feature_gate::emit_feature_err(
- &tcx.sess.parse_sess.span_diagnostic,
- "negate_unsigned",
- expr.span,
- "unary negation of unsigned integers may be removed in the future");
- }
- }
}
}
}
let hint = expected.only_has_type(fcx).map_or(NoExpectation, |ty| {
match ty.sty {
ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => {
- if ty::expr_is_lval(fcx.tcx(), &**oprnd) {
+ if fcx.tcx().expr_is_lval(&**oprnd) {
// Lvalues may legitimately have unsized types.
// For example, dereferences of a fat pointer and
// the last field of a struct can be unsized.
ExpectHasType(mt.ty)
} else {
- Expectation::rvalue_hint(mt.ty)
+ Expectation::rvalue_hint(tcx, mt.ty)
}
}
_ => NoExpectation
hint,
lvalue_pref);
- let tm = ty::mt { ty: fcx.expr_ty(&**oprnd), mutbl: mutbl };
- let oprnd_t = if ty::type_is_error(tm.ty) {
+ let tm = ty::TypeAndMut { ty: fcx.expr_ty(&**oprnd), mutbl: mutbl };
+ let oprnd_t = if tm.ty.references_error() {
tcx.types.err
} else {
// Note: at this point, we cannot say what the best lifetime
// value whose address was taken can actually be made to live
// as long as it needs to live.
let region = fcx.infcx().next_region_var(infer::AddrOfRegion(expr.span));
- ty::mk_rptr(tcx, tcx.mk_region(region), tm)
+ tcx.mk_ref(tcx.mk_region(region), tm)
};
fcx.write_ty(id, oprnd_t);
}
match *expr_opt {
None =>
if let Err(_) = fcx.mk_eqty(false, infer::Misc(expr.span),
- result_type, ty::mk_nil(fcx.tcx())) {
+ result_type, fcx.tcx().mk_nil()) {
span_err!(tcx.sess, expr.span, E0069,
"`return;` in a function whose return type is \
not `()`");
check_expr_with_lvalue_pref(fcx, &**lhs, PreferMutLvalue);
let tcx = fcx.tcx();
- if !ty::expr_is_lval(tcx, &**lhs) {
+ if !tcx.expr_is_lval(&**lhs) {
span_err!(tcx.sess, expr.span, E0070,
- "illegal left-hand side expression");
+ "invalid left-hand side expression");
}
let lhs_ty = fcx.expr_ty(&**lhs);
fcx.require_expr_have_sized_type(&**lhs, traits::AssignmentLhsSized);
- if ty::type_is_error(lhs_ty) || ty::type_is_error(rhs_ty) {
+ if lhs_ty.references_error() || rhs_ty.references_error() {
fcx.write_error(id);
} else {
fcx.write_nil(id);
check_block_no_value(fcx, &**body);
let cond_ty = fcx.expr_ty(&**cond);
let body_ty = fcx.node_ty(body.id);
- if ty::type_is_error(cond_ty) || ty::type_is_error(body_ty) {
+ if cond_ty.references_error() || body_ty.references_error() {
fcx.write_error(id);
}
else {
let arg_tys = args.iter().map(|a| fcx.expr_ty(&**a));
let args_err = arg_tys.fold(false,
|rest_err, a| {
- rest_err || ty::type_is_error(a)});
+ rest_err || a.references_error()});
if args_err {
fcx.write_error(id);
}
let t_expr = fcx.expr_ty(e);
// Eagerly check for some obvious errors.
- if ty::type_is_error(t_expr) {
+ if t_expr.references_error() {
fcx.write_error(id);
} else if !fcx.type_is_known_to_be_sized(t_cast, expr.span) {
report_cast_to_unsized_type(fcx, expr.span, t.span, e.span, t_cast, t_expr, id);
t
}
};
- let typ = ty::mk_vec(tcx, typ, Some(args.len()));
+ let typ = tcx.mk_array(typ, args.len());
fcx.write_ty(id, typ);
}
ast::ExprRepeat(ref element, ref count_expr) => {
check_expr_has_type(fcx, &**count_expr, tcx.types.usize);
- let count = ty::eval_repeat_count(fcx.tcx(), &**count_expr);
+ let count = fcx.tcx().eval_repeat_count(&**count_expr);
let uty = match expected {
ExpectHasType(uty) => {
ty::BoundCopy);
}
- if ty::type_is_error(element_ty) {
+ if element_ty.references_error() {
fcx.write_error(id);
} else {
- let t = ty::mk_vec(tcx, t, Some(count));
+ let t = tcx.mk_array(t, count);
fcx.write_ty(id, t);
}
}
fcx.expr_ty(&**e)
}
};
- err_field = err_field || ty::type_is_error(t);
+ err_field = err_field || t.references_error();
t
}).collect();
if err_field {
fcx.write_error(id);
} else {
- let typ = ty::mk_tup(tcx, elt_ts);
+ let typ = tcx.mk_tup(elt_ts);
fcx.write_ty(id, typ);
}
}
let def = lookup_full_def(tcx, path.span, id);
let struct_id = match def {
def::DefVariant(enum_id, variant_id, true) => {
+ if let &Some(ref base_expr) = base_expr {
+ span_err!(tcx.sess, base_expr.span, E0436,
+ "functional record update syntax requires a struct");
+ fcx.write_error(base_expr.id);
+ }
check_struct_enum_variant(fcx, id, expr.span, enum_id,
variant_id, &fields[..]);
enum_id
},
def => {
// Verify that this was actually a struct.
- let typ = ty::lookup_item_type(fcx.ccx.tcx, def.def_id());
+ let typ = fcx.ccx.tcx.lookup_item_type(def.def_id());
match typ.ty.sty {
ty::TyStruct(struct_did, _) => {
check_struct_constructor(fcx,
// the resulting structure type. This is needed to handle type
// parameters correctly.
let actual_structure_type = fcx.expr_ty(&*expr);
- if !ty::type_is_error(actual_structure_type) {
+ if !actual_structure_type.references_error() {
let type_and_substs = fcx.instantiate_struct_literal_ty(struct_id, path);
match fcx.mk_subty(false,
infer::Misc(path.span),
.ty_to_string(
actual_structure_type),
type_error);
- ty::note_and_explain_type_err(tcx, &type_error, path.span);
+ tcx.note_and_explain_type_err(&type_error, path.span);
}
}
}
let base_t = fcx.expr_ty(&**base);
let idx_t = fcx.expr_ty(&**idx);
- if ty::type_is_error(base_t) {
+ if base_t.references_error() {
fcx.write_ty(id, base_t);
- } else if ty::type_is_error(idx_t) {
+ } else if idx_t.references_error() {
fcx.write_ty(id, idx_t);
} else {
let base_t = structurally_resolved_type(fcx, expr.span, base_t);
(Some(ty), None) | (None, Some(ty)) => {
Some(ty)
}
- (Some(t_start), Some(t_end)) if (ty::type_is_error(t_start) ||
- ty::type_is_error(t_end)) => {
+ (Some(t_start), Some(t_end)) if (t_start.references_error() ||
+ t_end.references_error()) => {
Some(fcx.tcx().types.err)
}
(Some(t_start), Some(t_end)) => {
// some bounds, then we'll need to check `t_start` against them here.
let range_type = match idx_type {
- Some(idx_type) if ty::type_is_error(idx_type) => {
+ Some(idx_type) if idx_type.references_error() => {
fcx.tcx().types.err
}
Some(idx_type) => {
};
if let Some(did) = did {
- let predicates = ty::lookup_predicates(tcx, did);
+ let predicates = tcx.lookup_predicates(did);
let substs = Substs::new_type(vec![idx_type], vec![]);
let bounds = fcx.instantiate_bounds(expr.span, &substs, &predicates);
fcx.add_obligations_for_parameters(
traits::ItemObligation(did)),
&bounds);
- ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ tcx.mk_struct(did, tcx.mk_substs(substs))
} else {
span_err!(tcx.sess, expr.span, E0236, "no lang item for range syntax");
fcx.tcx().types.err
// Neither start nor end => RangeFull
if let Some(did) = tcx.lang_items.range_full_struct() {
let substs = Substs::new_type(vec![], vec![]);
- ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ tcx.mk_struct(did, tcx.mk_substs(substs))
} else {
span_err!(tcx.sess, expr.span, E0237, "no lang item for range syntax");
fcx.tcx().types.err
node_id: ast::NodeId) -> bool {
match def {
def::DefAssociatedConst(..) => {
- if ty::type_has_params(ty) || ty::type_has_self(ty) {
+ if ty.has_param_types() || ty.has_self_ty() {
span_err!(fcx.sess(), span, E0329,
"Associated consts cannot depend \
on type parameters or Self.");
Some((opt_self_ty, &path.segments, path_res.base_def))
} else {
let mut def = path_res.base_def;
- let ty_segments = path.segments.init();
+ let ty_segments = path.segments.split_last().unwrap().1;
let base_ty_end = path.segments.len() - path_res.depth;
let ty = astconv::finish_resolving_def_to_ty(fcx, fcx, span,
PathParamMode::Optional,
/// which still is useful, because it informs integer literals and the like.
/// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
/// for examples of where this comes up,.
- fn rvalue_hint(ty: Ty<'tcx>) -> Expectation<'tcx> {
- match ty.sty {
+ fn rvalue_hint(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
+ match tcx.struct_tail(ty).sty {
ty::TySlice(_) | ty::TyTrait(..) => {
ExpectRvalueLikeUnsized(ty)
}
if let Some(ref init) = local.init {
check_decl_initializer(fcx, local, &**init);
let init_ty = fcx.expr_ty(&**init);
- if ty::type_is_error(init_ty) {
+ if init_ty.references_error() {
fcx.write_ty(local.id, init_ty);
}
}
};
_match::check_pat(&pcx, &*local.pat, t);
let pat_ty = fcx.node_ty(local.pat.id);
- if ty::type_is_error(pat_ty) {
+ if pat_ty.references_error() {
fcx.write_ty(local.id, pat_ty);
}
}
check_decl_local(fcx, &**l);
let l_t = fcx.node_ty(l.id);
saw_bot = saw_bot || fcx.infcx().type_var_diverges(l_t);
- saw_err = saw_err || ty::type_is_error(l_t);
+ saw_err = saw_err || l_t.references_error();
}
ast::DeclItem(_) => {/* ignore for now */ }
}
ast::StmtExpr(ref expr, id) => {
node_id = id;
// Check with expected type of ()
- check_expr_has_type(fcx, &**expr, ty::mk_nil(fcx.tcx()));
+ check_expr_has_type(fcx, &**expr, fcx.tcx().mk_nil());
let expr_ty = fcx.expr_ty(&**expr);
saw_bot = saw_bot || fcx.infcx().type_var_diverges(expr_ty);
- saw_err = saw_err || ty::type_is_error(expr_ty);
+ saw_err = saw_err || expr_ty.references_error();
}
ast::StmtSemi(ref expr, id) => {
node_id = id;
check_expr(fcx, &**expr);
let expr_ty = fcx.expr_ty(&**expr);
saw_bot |= fcx.infcx().type_var_diverges(expr_ty);
- saw_err |= ty::type_is_error(expr_ty);
+ saw_err |= expr_ty.references_error();
}
ast::StmtMac(..) => fcx.ccx.tcx.sess.bug("unexpanded macro")
}
}
pub fn check_block_no_value<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, blk: &'tcx ast::Block) {
- check_block_with_expected(fcx, blk, ExpectHasType(ty::mk_nil(fcx.tcx())));
+ check_block_with_expected(fcx, blk, ExpectHasType(fcx.tcx().mk_nil()));
let blkty = fcx.node_ty(blk.id);
- if ty::type_is_error(blkty) {
+ if blkty.references_error() {
fcx.write_error(blk.id);
} else {
- let nilty = ty::mk_nil(fcx.tcx());
+ let nilty = fcx.tcx().mk_nil();
demand::suptype(fcx, blk.span, nilty, blkty);
}
}
warned = true;
}
any_diverges = any_diverges || fcx.infcx().type_var_diverges(s_ty);
- any_err = any_err || ty::type_is_error(s_ty);
+ any_err = any_err || s_ty.references_error();
}
match blk.expr {
None => if any_err {
fn check_const_in_type<'a,'tcx>(ccx: &'a CrateCtxt<'a,'tcx>,
expr: &'tcx ast::Expr,
expected_type: Ty<'tcx>) {
- let inh = static_inherited_fields(ccx);
+ let tables = RefCell::new(ty::Tables::empty());
+ let inh = static_inherited_fields(ccx, &tables);
let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(expected_type), expr.id);
check_const_with_ty(&fcx, expr.span, expr, expected_type);
}
sp: Span,
e: &'tcx ast::Expr,
id: ast::NodeId) {
- let inh = static_inherited_fields(ccx);
- let rty = ty::node_id_to_type(ccx.tcx, id);
+ let tables = RefCell::new(ty::Tables::empty());
+ let inh = static_inherited_fields(ccx, &tables);
+ let rty = ccx.tcx.node_id_to_type(id);
let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), e.id);
- let declty = fcx.ccx.tcx.tcache.borrow().get(&local_def(id)).unwrap().ty;
+ let declty = fcx.ccx.tcx.lookup_item_type(local_def(id)).ty;
check_const_with_ty(&fcx, sp, e, declty);
}
/// Checks whether a type can be represented in memory. In particular, it
/// identifies types that contain themselves without indirection through a
-/// pointer, which would mean their size is unbounded. This is different from
-/// the question of whether a type can be instantiated. See the definition of
-/// `check_instantiable`.
+/// pointer, which would mean their size is unbounded.
pub fn check_representable(tcx: &ty::ctxt,
sp: Span,
item_id: ast::NodeId,
designation: &str) -> bool {
- let rty = ty::node_id_to_type(tcx, item_id);
+ let rty = tcx.node_id_to_type(item_id);
// Check that it is possible to represent this type. This call identifies
// (1) types that contain themselves and (2) types that contain a different
// recursive type. It is only necessary to throw an error on those that
// contain themselves. For case 2, there must be an inner type that will be
// caught by case 1.
- match ty::is_type_representable(tcx, sp, rty) {
+ match rty.is_representable(tcx, sp) {
ty::SelfRecursive => {
- span_err!(tcx.sess, sp, E0072,
- "illegal recursive {} type; \
- wrap the inner value in a box to make it representable",
- designation);
+ span_err!(tcx.sess, sp, E0072, "invalid recursive {} type", designation);
+ tcx.sess.fileline_help(sp, "wrap the inner value in a box to make it representable");
return false
}
ty::Representable | ty::ContainsRecursive => (),
return true
}
-/// Checks whether a type can be created without an instance of itself.
-/// This is similar but different from the question of whether a type
-/// can be represented. For example, the following type:
-///
-/// enum foo { None, Some(foo) }
-///
-/// is instantiable but is not representable. Similarly, the type
-///
-/// enum foo { Some(@foo) }
-///
-/// is representable, but not instantiable.
+/// Checks whether a type can be constructed at runtime without
+/// an existing instance of that type.
pub fn check_instantiable(tcx: &ty::ctxt,
sp: Span,
- item_id: ast::NodeId)
- -> bool {
- let item_ty = ty::node_id_to_type(tcx, item_id);
- if !ty::is_instantiable(tcx, item_ty) {
- span_err!(tcx.sess, sp, E0073,
- "this type cannot be instantiated without an \
- instance of itself");
- fileline_help!(tcx.sess, sp, "consider using `Option<{:?}>`",
- item_ty);
- false
- } else {
- true
+ item_id: ast::NodeId) {
+ let item_ty = tcx.node_id_to_type(item_id);
+ if !item_ty.is_instantiable(tcx) &&
+ !tcx.sess.features.borrow().static_recursion {
+ emit_feature_err(&tcx.sess.parse_sess.span_diagnostic,
+ "static_recursion",
+ sp,
+ "this type cannot be instantiated at runtime \
+ without an instance of itself");
}
}
pub fn check_simd(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) {
- let t = ty::node_id_to_type(tcx, id);
- if ty::type_needs_subst(t) {
+ let t = tcx.node_id_to_type(id);
+ if t.needs_subst() {
span_err!(tcx.sess, sp, E0074, "SIMD vector cannot be generic");
return;
}
match t.sty {
ty::TyStruct(did, substs) => {
- let fields = ty::lookup_struct_fields(tcx, did);
+ let fields = tcx.lookup_struct_fields(did);
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
return;
}
- let e = ty::lookup_field_type(tcx, did, fields[0].id, substs);
+ let e = tcx.lookup_field_type(did, fields[0].id, substs);
if !fields.iter().all(
- |f| ty::lookup_field_type(tcx, did, f.id, substs) == e) {
+ |f| tcx.lookup_field_type(did, f.id, substs) == e) {
span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous");
return;
}
- if !ty::type_is_machine(e) {
+ if !e.is_machine() {
span_err!(tcx.sess, sp, E0077,
"SIMD vector element type should be machine type");
return;
hint: attr::ReprAttr) {
#![allow(trivial_numeric_casts)]
- let rty = ty::node_id_to_type(ccx.tcx, id);
+ let rty = ccx.tcx.node_id_to_type(id);
let mut disr_vals: Vec<ty::Disr> = Vec::new();
- let inh = static_inherited_fields(ccx);
+ let tables = RefCell::new(ty::Tables::empty());
+ let inh = static_inherited_fields(ccx, &tables);
let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), id);
- let (_, repr_type_ty) = ty::enum_repr_type(ccx.tcx, Some(&hint));
+ let (_, repr_type_ty) = ccx.tcx.enum_repr_type(Some(&hint));
for v in vs {
if let Some(ref e) = v.node.disr_expr {
check_const_with_ty(&fcx, e.span, e, repr_type_ty);
// ty::enum_variants guards against discriminant overflows, so
// we need not check for that.
- let variants = ty::enum_variants(ccx.tcx, def_id);
+ let variants = ccx.tcx.enum_variants(def_id);
for (v, variant) in vs.iter().zip(variants.iter()) {
let current_disr_val = variant.disr_val;
}
}
- let hint = *ty::lookup_repr_hints(ccx.tcx, ast::DefId { krate: ast::LOCAL_CRATE, node: id })
+ let hint = *ccx.tcx.lookup_repr_hints(ast::DefId { krate: ast::LOCAL_CRATE, node: id })
.get(0).unwrap_or(&attr::ReprAny);
if hint != attr::ReprAny && vs.len() <= 1 {
do_check(ccx, vs, id, hint);
check_representable(ccx.tcx, sp, id, "enum");
-
- // Check that it is possible to instantiate this enum:
- //
- // This *sounds* like the same that as representable, but it's
- // not. See def'n of `check_instantiable()` for details.
check_instantiable(ccx.tcx, sp, id);
}
(ty::TypeScheme { generics: ty::Generics::empty(), ty: typ },
ty::GenericPredicates::empty())
}
- def::DefFn(id, _) | def::DefMethod(id, _) |
+ def::DefFn(id, _) | def::DefMethod(id) |
def::DefStatic(id, _) | def::DefVariant(_, id, _) |
- def::DefStruct(id) | def::DefConst(id) | def::DefAssociatedConst(id, _) => {
- (ty::lookup_item_type(fcx.tcx(), id), ty::lookup_predicates(fcx.tcx(), id))
+ def::DefStruct(id) | def::DefConst(id) | def::DefAssociatedConst(id) => {
+ (fcx.tcx().lookup_item_type(id), fcx.tcx().lookup_predicates(id))
}
def::DefTrait(_) |
def::DefTy(..) |
assert!(!segments.is_empty());
- let mut ufcs_method = None;
+ let mut ufcs_associated = None;
let mut segment_spaces: Vec<_>;
match def {
// Case 1 and 1b. Reference to a *type* or *enum variant*.
def::DefTyParam(..) => {
// Everything but the final segment should have no
// parameters at all.
- segment_spaces = repeat(None).take(segments.len() - 1).collect();
+ segment_spaces = vec![None; segments.len() - 1];
segment_spaces.push(Some(subst::TypeSpace));
}
def::DefFn(..) |
def::DefConst(..) |
def::DefStatic(..) => {
- segment_spaces = repeat(None).take(segments.len() - 1).collect();
+ segment_spaces = vec![None; segments.len() - 1];
segment_spaces.push(Some(subst::FnSpace));
}
// Case 3. Reference to a method.
- def::DefMethod(_, provenance) => {
- match provenance {
- def::FromTrait(trait_did) => {
+ def::DefMethod(def_id) => {
+ let container = fcx.tcx().impl_or_trait_item(def_id).container();
+ match container {
+ ty::TraitContainer(trait_did) => {
callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did)
}
- def::FromImpl(_) => {}
+ ty::ImplContainer(_) => {}
}
if segments.len() >= 2 {
- segment_spaces = repeat(None).take(segments.len() - 2).collect();
+ segment_spaces = vec![None; segments.len() - 2];
segment_spaces.push(Some(subst::TypeSpace));
segment_spaces.push(Some(subst::FnSpace));
} else {
// `<T>::method` will end up here, and so can `T::method`.
let self_ty = opt_self_ty.expect("UFCS sugared method missing Self");
segment_spaces = vec![Some(subst::FnSpace)];
- ufcs_method = Some((provenance, self_ty));
+ ufcs_associated = Some((container, self_ty));
}
}
- def::DefAssociatedConst(_, provenance) => {
- match provenance {
- def::FromTrait(trait_did) => {
+ def::DefAssociatedConst(def_id) => {
+ let container = fcx.tcx().impl_or_trait_item(def_id).container();
+ match container {
+ ty::TraitContainer(trait_did) => {
callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did)
}
- def::FromImpl(_) => {}
+ ty::ImplContainer(_) => {}
}
if segments.len() >= 2 {
- segment_spaces = repeat(None).take(segments.len() - 2).collect();
+ segment_spaces = vec![None; segments.len() - 2];
segment_spaces.push(Some(subst::TypeSpace));
segment_spaces.push(None);
} else {
+ // `<T>::CONST` will end up here, and so can `T::CONST`.
+ let self_ty = opt_self_ty.expect("UFCS sugared const missing Self");
segment_spaces = vec![None];
+ ufcs_associated = Some((container, self_ty));
}
}
def::DefRegion(..) |
def::DefLabel(..) |
def::DefUpvar(..) => {
- segment_spaces = repeat(None).take(segments.len()).collect();
+ segment_spaces = vec![None; segments.len()];
}
}
assert_eq!(segment_spaces.len(), segments.len());
// In `<T as Trait<A, B>>::method`, `A` and `B` are mandatory, but
// `opt_self_ty` can also be Some for `Foo::method`, where Foo's
// type parameters are not mandatory.
- let require_type_space = opt_self_ty.is_some() && ufcs_method.is_none();
+ let require_type_space = opt_self_ty.is_some() && ufcs_associated.is_none();
debug!("segment_spaces={:?}", segment_spaces);
// variables. If the user provided some types, we may still need
// to add defaults. If the user provided *too many* types, that's
// a problem.
- for &space in &ParamSpace::all() {
+ for &space in &[subst::SelfSpace, subst::TypeSpace, subst::FnSpace] {
adjust_type_parameters(fcx, span, space, type_defs,
require_type_space, &mut substs);
assert_eq!(substs.types.len(space), type_defs.len(space));
let ty_substituted = fcx.instantiate_type_scheme(span, &substs, &type_scheme.ty);
- if let Some((def::FromImpl(impl_def_id), self_ty)) = ufcs_method {
+ if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated {
// In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
// is inherent, there is no `Self` parameter, instead, the impl needs
// type parameters, which we can infer by unifying the provided `Self`
// with the substituted impl type.
- let impl_scheme = ty::lookup_item_type(fcx.tcx(), impl_def_id);
+ let impl_scheme = fcx.tcx().lookup_item_type(impl_def_id);
assert_eq!(substs.types.len(subst::TypeSpace),
impl_scheme.generics.types.len(subst::TypeSpace));
assert_eq!(substs.regions().len(subst::TypeSpace),
let input_tys: Vec<Ty> =
data.inputs.iter().map(|ty| fcx.to_ty(&**ty)).collect();
- let tuple_ty =
- ty::mk_tup(fcx.tcx(), input_tys);
+ let tuple_ty = fcx.tcx().mk_tup(input_tys);
if type_count >= 1 {
substs.types.push(space, tuple_ty);
data.output.as_ref().map(|ty| fcx.to_ty(&**ty));
let output_ty =
- output_ty.unwrap_or(ty::mk_nil(fcx.tcx()));
+ output_ty.unwrap_or(fcx.tcx().mk_nil());
if type_count >= 2 {
substs.types.push(space, output_ty);
// Nothing specified at all: supply inference variables for
// everything.
if provided_len == 0 && !(require_type_space && space == subst::TypeSpace) {
- substs.types.replace(space, fcx.infcx().next_ty_vars(desired.len()));
+ substs.types.replace(space, Vec::new());
+ fcx.infcx().type_vars_for_defs(span, space, substs, &desired[..]);
return;
}
if required_len == 1 {""} else {"s"},
provided_len,
if provided_len == 1 {""} else {"s"});
- substs.types.replace(space, repeat(fcx.tcx().types.err).take(desired.len()).collect());
+ substs.types.replace(space, vec![fcx.tcx().types.err; desired.len()]);
return;
}
{
let mut ty = fcx.resolve_type_vars_if_possible(ty);
- if ty::type_is_ty_var(ty) {
+ if ty.is_ty_var() {
let alternative = f();
// If not, error.
- if ty::type_is_ty_var(alternative) || ty::type_is_error(alternative) {
+ if alternative.is_ty_var() || alternative.references_error() {
fcx.type_error_message(sp, |_actual| {
"the type of this value must be known in this context".to_string()
}, ty, None);
// make a vector of booleans initially false, set to true when used
if tps.is_empty() { return; }
- let mut tps_used: Vec<_> = repeat(false).take(tps.len()).collect();
+ let mut tps_used = vec![false; tps.len()];
- ty::walk_ty(ty, |t| {
- match t.sty {
- ty::TyParam(ParamTy {idx, ..}) => {
- debug!("Found use of ty param num {}", idx);
- tps_used[idx as usize] = true;
- }
- _ => ()
- }
- });
+ for leaf_ty in ty.walk() {
+ if let ty::TyParam(ParamTy {idx, ..}) = leaf_ty.sty {
+ debug!("Found use of ty param num {}", idx);
+ tps_used[idx as usize] = true;
+ }
+ }
for (i, b) in tps_used.iter().enumerate() {
if !*b {
span_err!(ccx.tcx.sess, span, E0091,
"type parameter `{}` is unused",
- token::get_ident(tps[i].ident));
+ tps[i].ident);
}
}
}
pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: u32) -> Ty<'tcx> {
let name = token::intern(&format!("P{}", n));
- ty::mk_param(ccx.tcx, subst::FnSpace, n, name)
+ ccx.tcx.mk_param(subst::FnSpace, n, name)
}
let tcx = ccx.tcx;
- let name = token::get_ident(it.ident);
+ let name = it.ident.name.as_str();
let (n_tps, inputs, output) = if name.starts_with("atomic_") {
let split : Vec<&str> = name.split('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic not correct format");
//We only care about the operation here
let (n_tps, inputs, output) = match split[1] {
- "cxchg" => (1, vec!(ty::mk_mut_ptr(tcx, param(ccx, 0)),
+ "cxchg" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
param(ccx, 0),
param(ccx, 0)),
param(ccx, 0)),
- "load" => (1, vec!(ty::mk_imm_ptr(tcx, param(ccx, 0))),
+ "load" => (1, vec!(tcx.mk_imm_ptr(param(ccx, 0))),
param(ccx, 0)),
- "store" => (1, vec!(ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0)),
- ty::mk_nil(tcx)),
+ "store" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)),
+ tcx.mk_nil()),
"xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" |
"min" | "umax" | "umin" => {
- (1, vec!(ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0)),
+ (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)),
param(ccx, 0))
}
"fence" | "singlethreadfence" => {
- (0, Vec::new(), ty::mk_nil(tcx))
+ (0, Vec::new(), tcx.mk_nil())
}
op => {
span_err!(tcx.sess, it.span, E0092,
(0, Vec::new(), ty::FnDiverging)
} else {
let (n_tps, inputs, output) = match &name[..] {
- "breakpoint" => (0, Vec::new(), ty::mk_nil(tcx)),
+ "breakpoint" => (0, Vec::new(), tcx.mk_nil()),
"size_of" |
"pref_align_of" | "min_align_of" => (1, Vec::new(), ccx.tcx.types.usize),
"size_of_val" | "min_align_of_val" => {
(1, vec![
- ty::mk_imm_rptr(tcx,
- tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
ty::BrAnon(0))),
param(ccx, 0))
], ccx.tcx.types.usize)
}
"init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)),
"uninit" => (1, Vec::new(), param(ccx, 0)),
- "forget" => (1, vec!( param(ccx, 0) ), ty::mk_nil(tcx)),
+ "forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),
"move_val_init" => {
(1,
vec!(
- ty::mk_mut_rptr(tcx,
- tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
- ty::BrAnon(0))),
- param(ccx, 0)),
+ tcx.mk_mut_ptr(param(ccx, 0)),
param(ccx, 0)
),
- ty::mk_nil(tcx))
+ tcx.mk_nil())
}
"drop_in_place" => {
- (1, vec![ty::mk_mut_ptr(tcx, param(ccx, 0))], ty::mk_nil(tcx))
+ (1, vec![tcx.mk_mut_ptr(param(ccx, 0))], tcx.mk_nil())
}
"needs_drop" => (1, Vec::new(), ccx.tcx.types.bool),
- "type_name" => (1, Vec::new(), ty::mk_str_slice(tcx, tcx.mk_region(ty::ReStatic),
- ast::MutImmutable)),
+ "type_name" => (1, Vec::new(), tcx.mk_static_str()),
"type_id" => (1, Vec::new(), ccx.tcx.types.u64),
"offset" | "arith_offset" => {
(1,
vec!(
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutImmutable
}),
ccx.tcx.types.isize
),
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutImmutable
}))
"copy" | "copy_nonoverlapping" => {
(1,
vec!(
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutImmutable
}),
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutMutable
}),
tcx.types.usize,
),
- ty::mk_nil(tcx))
+ tcx.mk_nil())
}
"volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
(1,
vec!(
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutMutable
}),
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutImmutable
}),
tcx.types.usize,
),
- ty::mk_nil(tcx))
+ tcx.mk_nil())
}
"write_bytes" | "volatile_set_memory" => {
(1,
vec!(
- ty::mk_ptr(tcx, ty::mt {
+ tcx.mk_ptr(ty::TypeAndMut {
ty: param(ccx, 0),
mutbl: ast::MutMutable
}),
tcx.types.u8,
tcx.types.usize,
),
- ty::mk_nil(tcx))
+ tcx.mk_nil())
}
"sqrtf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32),
"sqrtf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64),
"bswap64" => (0, vec!( tcx.types.u64 ), tcx.types.u64),
"volatile_load" =>
- (1, vec!( ty::mk_imm_ptr(tcx, param(ccx, 0)) ), param(ccx, 0)),
+ (1, vec!( tcx.mk_imm_ptr(param(ccx, 0)) ), param(ccx, 0)),
"volatile_store" =>
- (1, vec!( ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ), ty::mk_nil(tcx)),
+ (1, vec!( tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0) ), tcx.mk_nil()),
"i8_add_with_overflow" | "i8_sub_with_overflow" | "i8_mul_with_overflow" =>
(0, vec!(tcx.types.i8, tcx.types.i8),
- ty::mk_tup(tcx, vec!(tcx.types.i8, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.i8, tcx.types.bool))),
"i16_add_with_overflow" | "i16_sub_with_overflow" | "i16_mul_with_overflow" =>
(0, vec!(tcx.types.i16, tcx.types.i16),
- ty::mk_tup(tcx, vec!(tcx.types.i16, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.i16, tcx.types.bool))),
"i32_add_with_overflow" | "i32_sub_with_overflow" | "i32_mul_with_overflow" =>
(0, vec!(tcx.types.i32, tcx.types.i32),
- ty::mk_tup(tcx, vec!(tcx.types.i32, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.i32, tcx.types.bool))),
"i64_add_with_overflow" | "i64_sub_with_overflow" | "i64_mul_with_overflow" =>
(0, vec!(tcx.types.i64, tcx.types.i64),
- ty::mk_tup(tcx, vec!(tcx.types.i64, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.i64, tcx.types.bool))),
"u8_add_with_overflow" | "u8_sub_with_overflow" | "u8_mul_with_overflow" =>
(0, vec!(tcx.types.u8, tcx.types.u8),
- ty::mk_tup(tcx, vec!(tcx.types.u8, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.u8, tcx.types.bool))),
"u16_add_with_overflow" | "u16_sub_with_overflow" | "u16_mul_with_overflow" =>
(0, vec!(tcx.types.u16, tcx.types.u16),
- ty::mk_tup(tcx, vec!(tcx.types.u16, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.u16, tcx.types.bool))),
"u32_add_with_overflow" | "u32_sub_with_overflow" | "u32_mul_with_overflow"=>
(0, vec!(tcx.types.u32, tcx.types.u32),
- ty::mk_tup(tcx, vec!(tcx.types.u32, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.u32, tcx.types.bool))),
"u64_add_with_overflow" | "u64_sub_with_overflow" | "u64_mul_with_overflow" =>
(0, vec!(tcx.types.u64, tcx.types.u64),
- ty::mk_tup(tcx, vec!(tcx.types.u64, tcx.types.bool))),
+ tcx.mk_tup(vec!(tcx.types.u64, tcx.types.bool))),
"unchecked_udiv" | "unchecked_sdiv" | "unchecked_urem" | "unchecked_srem" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
"overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
- "return_address" => (0, vec![], ty::mk_imm_ptr(tcx, tcx.types.u8)),
+ "return_address" => (0, vec![], tcx.mk_imm_ptr(tcx.types.u8)),
- "assume" => (0, vec![tcx.types.bool], ty::mk_nil(tcx)),
+ "assume" => (0, vec![tcx.types.bool], tcx.mk_nil()),
"discriminant_value" => (1, vec![
- ty::mk_imm_rptr(tcx,
- tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
ty::BrAnon(0))),
param(ccx, 0))], tcx.types.u64),
+ "try" => {
+ let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
+ let fn_ty = ty::BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: abi::Rust,
+ sig: ty::Binder(FnSig {
+ inputs: vec![mut_u8],
+ output: ty::FnOutput::FnConverging(tcx.mk_nil()),
+ variadic: false,
+ }),
+ };
+ let fn_ty = tcx.mk_bare_fn(fn_ty);
+ (0, vec![tcx.mk_fn(None, fn_ty), mut_u8], mut_u8)
+ }
+
ref other => {
span_err!(tcx.sess, it.span, E0093,
"unrecognized intrinsic function: `{}`", *other);
};
(n_tps, inputs, ty::FnConverging(output))
};
- let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
+ let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Unsafe,
abi: abi::RustIntrinsic,
sig: ty::Binder(FnSig {
variadic: false,
}),
}));
- let i_ty = ty::lookup_item_type(ccx.tcx, local_def(it.id));
+ let i_ty = ccx.tcx.lookup_item_type(local_def(it.id));
let i_n_tps = i_ty.generics.types.len(subst::FnSpace);
if i_n_tps != n_tps {
span_err!(tcx.sess, it.span, E0094,
structurally_resolved_type,
};
use middle::traits;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use syntax::ast;
use syntax::ast_util;
use syntax::parse::token;
fcx.write_nil(expr.id);
} else {
// error types are considered "builtin"
- assert!(!ty::type_is_error(lhs_ty) || !ty::type_is_error(rhs_ty));
+ assert!(!lhs_ty.references_error() || !rhs_ty.references_error());
span_err!(tcx.sess, lhs_expr.span, E0368,
"binary assignment operation `{}=` cannot be applied to types `{}` and `{}`",
ast_util::binop_to_string(op.node),
}
let tcx = fcx.tcx();
- if !ty::expr_is_lval(tcx, lhs_expr) {
- span_err!(tcx.sess, lhs_expr.span, E0067, "illegal left-hand side expression");
+ if !tcx.expr_is_lval(lhs_expr) {
+ span_err!(tcx.sess, lhs_expr.span, E0067, "invalid left-hand side expression");
}
fcx.require_expr_have_sized_type(lhs_expr, traits::AssignmentLhsSized);
// traits, because their return type is not bool. Perhaps this
// should change, but for now if LHS is SIMD we go down a
// different path that bypassess all traits.
- if ty::type_is_simd(fcx.tcx(), lhs_ty) {
+ if lhs_ty.is_simd(fcx.tcx()) {
check_expr_coercable_to_type(fcx, rhs_expr, lhs_ty);
let rhs_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(lhs_expr));
let return_ty = enforce_builtin_binop_types(fcx, lhs_expr, lhs_ty, rhs_expr, rhs_ty, op);
match BinOpCategory::from(op) {
BinOpCategory::Shortcircuit => {
// && and || are a simple case.
- demand::suptype(fcx, lhs_expr.span, ty::mk_bool(tcx), lhs_ty);
- check_expr_coercable_to_type(fcx, rhs_expr, ty::mk_bool(tcx));
- fcx.write_ty(expr.id, ty::mk_bool(tcx));
+ demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty);
+ check_expr_coercable_to_type(fcx, rhs_expr, tcx.mk_bool());
+ fcx.write_ty(expr.id, tcx.mk_bool());
}
_ => {
// Otherwise, we always treat operators as if they are
// can't pin this down to a specific impl.
let rhs_ty = fcx.resolve_type_vars_if_possible(rhs_ty);
if
- !ty::type_is_ty_var(lhs_ty) &&
- !ty::type_is_ty_var(rhs_ty) &&
+ !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() &&
is_builtin_binop(fcx.tcx(), lhs_ty, rhs_ty, op)
{
let builtin_return_ty =
let tcx = fcx.tcx();
match BinOpCategory::from(op) {
BinOpCategory::Shortcircuit => {
- demand::suptype(fcx, lhs_expr.span, ty::mk_bool(tcx), lhs_ty);
- demand::suptype(fcx, rhs_expr.span, ty::mk_bool(tcx), rhs_ty);
- ty::mk_bool(tcx)
+ demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty);
+ demand::suptype(fcx, rhs_expr.span, tcx.mk_bool(), rhs_ty);
+ tcx.mk_bool()
}
BinOpCategory::Shift => {
// For integers, the shift amount can be of any integral
// type. For simd, the type must match exactly.
- if ty::type_is_simd(tcx, lhs_ty) {
+ if lhs_ty.is_simd(tcx) {
demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty);
}
demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty);
// if this is simd, result is same as lhs, else bool
- if ty::type_is_simd(tcx, lhs_ty) {
- let unit_ty = ty::simd_type(tcx, lhs_ty);
+ if lhs_ty.is_simd(tcx) {
+ let unit_ty = lhs_ty.simd_type(tcx);
debug!("enforce_builtin_binop_types: lhs_ty={:?} unit_ty={:?}",
lhs_ty,
unit_ty);
- if !ty::type_is_integral(unit_ty) {
+ if !unit_ty.is_integral() {
tcx.sess.span_err(
lhs_expr.span,
&format!("binary comparison operation `{}` not supported \
lhs_ty
}
} else {
- ty::mk_bool(tcx)
+ tcx.mk_bool()
}
}
}
Ok(return_ty) => return_ty,
Err(()) => {
// error types are considered "builtin"
- if !ty::type_is_error(lhs_ty) {
+ if !lhs_ty.references_error() {
span_err!(fcx.tcx().sess, lhs_expr.span, E0369,
"binary operation `{}` cannot be applied to type `{}`",
ast_util::binop_to_string(op.node),
// HACK(eddyb) Fully qualified path to work around a resolve bug.
let method_call = ::middle::ty::MethodCall::expr(expr.id);
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
+ fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
// extract return type for method; all late bound regions
// should have been instantiated by now
- let ret_ty = ty::ty_fn_ret(method_ty);
- Ok(ty::no_late_bound_regions(fcx.tcx(), &ret_ty).unwrap().unwrap())
+ let ret_ty = method_ty.fn_ret();
+ Ok(fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap())
}
None => {
Err(())
}
BinOpCategory::Shift => {
- ty::type_is_error(lhs) || ty::type_is_error(rhs) ||
- ty::type_is_integral(lhs) && ty::type_is_integral(rhs) ||
- ty::type_is_simd(cx, lhs) && ty::type_is_simd(cx, rhs)
+ lhs.references_error() || rhs.references_error() ||
+ lhs.is_integral() && rhs.is_integral() ||
+ lhs.is_simd(cx) && rhs.is_simd(cx)
}
BinOpCategory::Math => {
- ty::type_is_error(lhs) || ty::type_is_error(rhs) ||
- ty::type_is_integral(lhs) && ty::type_is_integral(rhs) ||
- ty::type_is_floating_point(lhs) && ty::type_is_floating_point(rhs) ||
- ty::type_is_simd(cx, lhs) && ty::type_is_simd(cx, rhs)
+ lhs.references_error() || rhs.references_error() ||
+ lhs.is_integral() && rhs.is_integral() ||
+ lhs.is_floating_point() && rhs.is_floating_point() ||
+ lhs.is_simd(cx) && rhs.is_simd(cx)
}
BinOpCategory::Bitwise => {
- ty::type_is_error(lhs) || ty::type_is_error(rhs) ||
- ty::type_is_integral(lhs) && ty::type_is_integral(rhs) ||
- ty::type_is_floating_point(lhs) && ty::type_is_floating_point(rhs) ||
- ty::type_is_simd(cx, lhs) && ty::type_is_simd(cx, rhs) ||
- ty::type_is_bool(lhs) && ty::type_is_bool(rhs)
+ lhs.references_error() || rhs.references_error() ||
+ lhs.is_integral() && rhs.is_integral() ||
+ lhs.is_floating_point() && rhs.is_floating_point() ||
+ lhs.is_simd(cx) && rhs.is_simd(cx) ||
+ lhs.is_bool() && rhs.is_bool()
}
BinOpCategory::Comparison => {
- ty::type_is_error(lhs) || ty::type_is_error(rhs) ||
- ty::type_is_scalar(lhs) && ty::type_is_scalar(rhs) ||
- ty::type_is_simd(cx, lhs) && ty::type_is_simd(cx, rhs)
+ lhs.references_error() || rhs.references_error() ||
+ lhs.is_scalar() && rhs.is_scalar() ||
+ lhs.is_simd(cx) && rhs.is_simd(cx)
}
}
}
-
use middle::implicator;
use middle::mem_categorization as mc;
use middle::region::CodeExtent;
-use middle::subst::Substs;
use middle::traits;
-use middle::ty::{self, ClosureTyper, ReScope, Ty, MethodCall};
+use middle::ty::{self, ReScope, Ty, MethodCall, HasTypeFlags};
use middle::infer::{self, GenericKind};
use middle::pat_util;
pub fn regionck_item(fcx: &FnCtxt, item: &ast::Item) {
let mut rcx = Rcx::new(fcx, RepeatingScope(item.id), item.id, Subject(item.id));
let tcx = fcx.tcx();
- rcx.free_region_map.relate_free_regions_from_predicates(tcx, &fcx.inh.param_env.caller_bounds);
+ rcx.free_region_map
+ .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
rcx.visit_region_obligations(item.id);
rcx.resolve_regions_and_report_errors();
}
}
let tcx = fcx.tcx();
- rcx.free_region_map.relate_free_regions_from_predicates(tcx, &fcx.inh.param_env.caller_bounds);
+ rcx.free_region_map
+ .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
rcx.resolve_regions_and_report_errors();
// INTERNALS
pub struct Rcx<'a, 'tcx: 'a> {
- fcx: &'a FnCtxt<'a, 'tcx>,
+ pub fcx: &'a FnCtxt<'a, 'tcx>,
region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>,
}
fn resolve_method_type(&self, method_call: MethodCall) -> Option<Ty<'tcx>> {
- let method_ty = self.fcx.inh.method_map.borrow()
+ let method_ty = self.fcx.inh.tables.borrow().method_map
.get(&method_call).map(|method| method.ty);
method_ty.map(|method_ty| self.resolve_type(method_ty))
}
/// Try to resolve the type for the given node.
pub fn resolve_expr_type_adjusted(&mut self, expr: &ast::Expr) -> Ty<'tcx> {
let ty_unadjusted = self.resolve_node_type(expr.id);
- if ty::type_is_error(ty_unadjusted) {
+ if ty_unadjusted.references_error() {
ty_unadjusted
} else {
- let tcx = self.fcx.tcx();
- ty::adjust_ty(tcx, expr.span, expr.id, ty_unadjusted,
- self.fcx.inh.adjustments.borrow().get(&expr.id),
- |method_call| self.resolve_method_type(method_call))
+ ty_unadjusted.adjust(
+ self.fcx.tcx(), expr.span, expr.id,
+ self.fcx.inh.tables.borrow().adjustments.get(&expr.id),
+ |method_call| self.resolve_method_type(method_call))
}
}
// Make a copy of the region obligations vec because we'll need
// to be able to borrow the fulfillment-cx below when projecting.
let region_obligations =
- self.fcx.inh.fulfillment_cx.borrow()
- .region_obligations(node_id)
- .to_vec();
+ self.fcx
+ .inh
+ .infcx
+ .fulfillment_cx
+ .borrow()
+ .region_obligations(node_id)
+ .to_vec();
for r_o in ®ion_obligations {
debug!("visit_region_obligations: r_o={:?}",
// Processing the region obligations should not cause the list to grow further:
assert_eq!(region_obligations.len(),
- self.fcx.inh.fulfillment_cx.borrow().region_obligations(node_id).len());
+ self.fcx.inh.infcx.fulfillment_cx.borrow().region_obligations(node_id).len());
}
/// This method populates the region map's `free_region_map`. It walks over the transformed
debug!("relate_free_regions(t={:?})", ty);
let body_scope = CodeExtent::from_node_id(body_id);
let body_scope = ty::ReScope(body_scope);
- let implications = implicator::implications(self.fcx.infcx(), self.fcx, body_id,
+ let implications = implicator::implications(self.fcx.infcx(), body_id,
ty, body_scope, span);
// Record any relations between free regions that we observe into the free-region-map.
self.region_bound_pairs.push((r_a, generic_b.clone()));
}
implicator::Implication::RegionSubRegion(..) |
- implicator::Implication::RegionSubClosure(..) |
implicator::Implication::Predicate(..) => {
// In principle, we could record (and take
// advantage of) every relationship here, but
type_must_outlive(rcx, infer::ExprTypeIsNotInScope(expr_ty, expr.span),
expr_ty, ty::ReScope(CodeExtent::from_node_id(expr.id)));
- let method_call = MethodCall::expr(expr.id);
- let has_method_map = rcx.fcx.inh.method_map.borrow().contains_key(&method_call);
+ let has_method_map = rcx.fcx.infcx().is_method_call(expr.id);
// Check any autoderefs or autorefs that appear.
- if let Some(adjustment) = rcx.fcx.inh.adjustments.borrow().get(&expr.id) {
+ let adjustment = rcx.fcx.inh.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone());
+ if let Some(adjustment) = adjustment {
debug!("adjustment={:?}", adjustment);
- match *adjustment {
+ match adjustment {
ty::AdjustDerefRef(ty::AutoDerefRef {autoderefs, ref autoref, ..}) => {
let expr_ty = rcx.resolve_node_type(expr.id);
constrain_autoderefs(rcx, expr, autoderefs, expr_ty);
// If necessary, constrain destructors in the unadjusted form of this
// expression.
let cmt_result = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
mc.cat_expr_unadjusted(expr)
};
match cmt_result {
// If necessary, constrain destructors in this expression. This will be
// the adjusted form if there is an adjustment.
let cmt_result = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
mc.cat_expr(expr)
};
match cmt_result {
ast::ExprUnary(ast::UnDeref, ref base) => {
// For *a, the lifetime of a must enclose the deref
let method_call = MethodCall::expr(expr.id);
- let base_ty = match rcx.fcx.inh.method_map.borrow().get(&method_call) {
+ let base_ty = match rcx.fcx.inh.tables.borrow().method_map.get(&method_call) {
Some(method) => {
constrain_call(rcx, expr, Some(&**base),
None::<ast::Expr>.iter(), true);
let fn_ret = // late-bound regions in overloaded method calls are instantiated
- ty::no_late_bound_regions(rcx.tcx(), &ty::ty_fn_ret(method.ty)).unwrap();
+ rcx.tcx().no_late_bound_regions(&method.ty.fn_ret()).unwrap();
fn_ret.unwrap()
}
None => rcx.resolve_node_type(base.id)
let method_call = MethodCall::autoderef(deref_expr.id, i as u32);
debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs);
- derefd_ty = match rcx.fcx.inh.method_map.borrow().get(&method_call) {
+ let method = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).map(|m| m.clone());
+
+ derefd_ty = match method {
Some(method) => {
debug!("constrain_autoderefs: #{} is overloaded, method={:?}",
i, method);
// Treat overloaded autoderefs as if an AutoRef adjustment
// was applied on the base type, as that is always the case.
- let fn_sig = ty::ty_fn_sig(method.ty);
+ let fn_sig = method.ty.fn_sig();
let fn_sig = // late-bound regions should have been instantiated
- ty::no_late_bound_regions(rcx.tcx(), fn_sig).unwrap();
+ rcx.tcx().no_late_bound_regions(fn_sig).unwrap();
let self_ty = fn_sig.inputs[0];
let (m, r) = match self_ty.sty {
ty::TyRef(r, ref m) => (m.mutbl, r),
r, m);
{
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i));
debug!("constrain_autoderefs: self_cmt={:?}",
self_cmt);
r_deref_expr, *r_ptr);
}
- match ty::deref(derefd_ty, true) {
+ match derefd_ty.builtin_deref(true) {
Some(mt) => derefd_ty = mt.ty,
/* if this type can't be dereferenced, then there's already an error
in the session saying so. Just bail out for now */
// is going to fail anyway, so just stop here and let typeck
// report errors later on in the writeback phase.
let ty0 = rcx.resolve_node_type(id);
- let ty = ty::adjust_ty(tcx, origin.span(), id, ty0,
- rcx.fcx.inh.adjustments.borrow().get(&id),
- |method_call| rcx.resolve_method_type(method_call));
+ let ty = ty0.adjust(tcx, origin.span(), id,
+ rcx.fcx.inh.tables.borrow().adjustments.get(&id),
+ |method_call| rcx.resolve_method_type(method_call));
debug!("constrain_regions_in_type_of_node(\
ty={}, ty0={}, id={}, minimum_lifetime={:?})",
ty, ty0,
debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
let cmt = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
ignore_err!(mc.cat_expr(base))
};
None => { return; }
Some(ref expr) => &**expr,
};
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
link_pattern(rcx, mc, discr_cmt, &*local.pat);
}
/// linked to the lifetime of its guarantor (if any).
fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
debug!("regionck::for_match()");
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let discr_cmt = ignore_err!(mc.cat_expr(discr));
debug!("discr_cmt={:?}", discr_cmt);
for arm in arms {
/// linked to the lifetime of its guarantor (if any).
fn link_fn_args(rcx: &Rcx, body_scope: CodeExtent, args: &[ast::Arg]) {
debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
for arg in args {
let arg_ty = rcx.fcx.node_ty(arg.id);
let re_scope = ty::ReScope(body_scope);
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
/// needed.
-fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- mc: mc::MemCategorizationContext<FnCtxt<'a, 'tcx>>,
+fn link_pattern<'t, 'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
+ mc: mc::MemCategorizationContext<'t, 'a, 'tcx>,
discr_cmt: mc::cmt<'tcx>,
root_pat: &ast::Pat) {
debug!("link_pattern(discr_cmt={:?}, root_pat={:?})",
autoref: &ty::AutoRef)
{
debug!("link_autoref(autoref={:?})", autoref);
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs));
debug!("expr_cmt={:?}", expr_cmt);
callee_scope: CodeExtent) {
debug!("link_by_ref(expr={:?}, callee_scope={:?})",
expr, callee_scope);
- let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let expr_cmt = ignore_err!(mc.cat_expr(expr));
let borrow_region = ty::ReScope(callee_scope);
link_region(rcx, expr.span, &borrow_region, ty::ImmBorrow, expr_cmt);
id, mutbl, cmt_borrowed);
let rptr_ty = rcx.resolve_node_type(id);
- if !ty::type_is_error(rptr_ty) {
- let tcx = rcx.fcx.ccx.tcx;
+ if let ty::TyRef(&r, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
- let r = ty::ty_region(tcx, span, rptr_ty);
link_region(rcx, span, &r, ty::BorrowKind::from_mutbl(mutbl),
cmt_borrowed);
}
// Detect by-ref upvar `x`:
let cause = match note {
mc::NoteUpvarRef(ref upvar_id) => {
- let upvar_capture_map = rcx.fcx.inh.upvar_capture_map.borrow_mut();
+ let upvar_capture_map = &rcx.fcx.inh.tables.borrow_mut().upvar_capture_map;
match upvar_capture_map.get(upvar_id) {
Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
// The mutability of the upvar may have been modified
ty,
region);
- let implications = implicator::implications(rcx.fcx.infcx(), rcx.fcx, rcx.body_id,
+ let implications = implicator::implications(rcx.fcx.infcx(), rcx.body_id,
ty, region, origin.span());
for implication in implications {
debug!("implication: {:?}", implication);
let o1 = infer::ReferenceOutlivesReferent(ty, origin.span());
generic_must_outlive(rcx, o1, r_a, generic_b);
}
- implicator::Implication::RegionSubClosure(_, r_a, def_id, substs) => {
- closure_must_outlive(rcx, origin.clone(), r_a, def_id, substs);
- }
implicator::Implication::Predicate(def_id, predicate) => {
let cause = traits::ObligationCause::new(origin.span(),
rcx.body_id,
}
}
-fn closure_must_outlive<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- region: ty::Region,
- def_id: ast::DefId,
- substs: &'tcx Substs<'tcx>) {
- debug!("closure_must_outlive(region={:?}, def_id={:?}, substs={:?})",
- region, def_id, substs);
-
- let upvars = rcx.fcx.closure_upvars(def_id, substs).unwrap();
- for upvar in upvars {
- let var_id = upvar.def.def_id().local_id();
- type_must_outlive(
- rcx, infer::FreeVariable(origin.span(), var_id),
- upvar.ty, region);
- }
-}
-
fn generic_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region,
generic: &GenericKind<'tcx>) {
- let param_env = &rcx.fcx.inh.param_env;
+ let param_env = &rcx.fcx.inh.infcx.parameter_environment;
debug!("param_must_outlive(region={:?}, generic={:?})",
region,
generic);
// To start, collect bounds from user:
- let mut param_bounds =
- ty::required_region_bounds(rcx.tcx(),
- generic.to_ty(rcx.tcx()),
- param_env.caller_bounds.clone());
+ let mut param_bounds = rcx.tcx().required_region_bounds(generic.to_ty(rcx.tcx()),
+ param_env.caller_bounds.clone());
// In the case of a projection T::Foo, we may be able to extract bounds from the trait def:
match *generic {
debug!("projection_bounds(projection_ty={:?})",
projection_ty);
- let ty = ty::mk_projection(tcx, projection_ty.trait_ref.clone(), projection_ty.item_name);
+ let ty = tcx.mk_projection(projection_ty.trait_ref.clone(), projection_ty.item_name);
// Say we have a projection `<T as SomeTrait<'a>>::SomeType`. We are interested
// in looking for a trait definition like:
// ```
//
// we can thus deduce that `<T as SomeTrait<'a>>::SomeType : 'a`.
- let trait_predicates = ty::lookup_predicates(tcx, projection_ty.trait_ref.def_id);
+ let trait_predicates = tcx.lookup_predicates(projection_ty.trait_ref.def_id);
let predicates = trait_predicates.predicates.as_slice().to_vec();
traits::elaborate_predicates(tcx, predicates)
.filter_map(|predicate| {
use super::FnCtxt;
+use check::demand;
use middle::expr_use_visitor as euv;
use middle::mem_categorization as mc;
-use middle::ty::{self};
+use middle::ty::{self, Ty};
use middle::infer::{InferCtxt, UpvarRegion};
use std::collections::HashSet;
use syntax::ast;
visit::walk_expr(self, expr);
}
- fn visit_fn(&mut self,
- fn_kind: visit::FnKind<'v>,
- decl: &'v ast::FnDecl,
- block: &'v ast::Block,
- span: Span,
- _id: ast::NodeId)
- {
- match fn_kind {
- visit::FkItemFn(..) | visit::FkMethod(..) => {
- // ignore nested fn items
- }
- visit::FkFnBlock => {
- visit::walk_fn(self, fn_kind, decl, block, span);
- }
- }
- }
+ // Skip all items; they aren't in the same context.
+ fn visit_item(&mut self, _: &'v ast::Item) { }
}
impl<'a,'tcx> SeedBorrowKind<'a,'tcx> {
_body: &ast::Block)
{
let closure_def_id = ast_util::local_def(expr.id);
- if !self.fcx.inh.closure_kinds.borrow().contains_key(&closure_def_id) {
+ if !self.fcx.inh.tables.borrow().closure_kinds.contains_key(&closure_def_id) {
self.closures_with_inferred_kinds.insert(expr.id);
- self.fcx.inh.closure_kinds.borrow_mut().insert(closure_def_id, ty::FnClosureKind);
+ self.fcx.inh.tables.borrow_mut().closure_kinds
+ .insert(closure_def_id, ty::FnClosureKind);
debug!("check_closure: adding closure_id={:?} to closures_with_inferred_kinds",
closure_def_id);
}
- ty::with_freevars(self.tcx(), expr.id, |freevars| {
+ self.tcx().with_freevars(expr.id, |freevars| {
for freevar in freevars {
let var_node_id = freevar.def.local_node_id();
let upvar_id = ty::UpvarId { var_id: var_node_id,
}
};
- self.fcx.inh.upvar_capture_map.borrow_mut().insert(upvar_id, capture_kind);
+ self.fcx.inh.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind);
}
});
}
AdjustBorrowKind { fcx: fcx, closures_with_inferred_kinds: closures_with_inferred_kinds }
}
- fn analyze_closure(&mut self, id: ast::NodeId, decl: &ast::FnDecl, body: &ast::Block) {
+ fn analyze_closure(&mut self,
+ id: ast::NodeId,
+ span: Span,
+ decl: &ast::FnDecl,
+ body: &ast::Block) {
/*!
* Analysis starting point.
*/
- self.visit_block(body);
-
- debug!("analyzing closure `{}` with fn body id `{}`", id, body.id);
+ debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id);
- let mut euv = euv::ExprUseVisitor::new(self, self.fcx);
- euv.walk_fn(decl, body);
+ {
+ let mut euv = euv::ExprUseVisitor::new(self, self.fcx.infcx());
+ euv.walk_fn(decl, body);
+ }
- // If we had not yet settled on a closure kind for this closure,
- // then we should have by now. Process and remove any deferred resolutions.
- //
- // Interesting fact: all calls to this closure must come
- // *after* its definition. Initially, I thought that some
- // kind of fixed-point iteration would be required, due to the
- // possibility of twisted examples like this one:
- //
- // ```rust
- // let mut closure0 = None;
- // let vec = vec!(1, 2, 3);
+ // Now that we've analyzed the closure, we know how each
+ // variable is borrowed, and we know what traits the closure
+ // implements (Fn vs FnMut etc). We now have some updates to do
+ // with that information.
//
- // loop {
- // {
- // let closure1 = || {
- // match closure0.take() {
- // Some(c) => {
- // return c(); // (*) call to `closure0` before it is defined
- // }
- // None => { }
- // }
- // };
- // closure1();
- // }
- //
- // closure0 = || vec;
- // }
- // ```
- //
- // However, this turns out to be wrong. Examples like this
- // fail to compile because the type of the variable `c` above
- // is an inference variable. And in fact since closure types
- // cannot be written, there is no way to make this example
- // work without a boxed closure. This implies that we can't
- // have two closures that recursively call one another without
- // some form of boxing (and hence explicit writing of a
- // closure kind) involved. Huzzah. -nmatsakis
+ // Note that no closure type C may have an upvar of type C
+ // (though it may reference itself via a trait object). This
+ // results from the desugaring of closures to a struct like
+ // `Foo<..., UV0...UVn>`. If one of those upvars referenced
+ // C, then the type would have infinite size (and the
+ // inference algorithm will reject it).
+
+ // Extract the type variables UV0...UVn.
+ let closure_substs = match self.fcx.node_ty(id).sty {
+ ty::TyClosure(_, ref substs) => substs,
+ ref t => {
+ self.fcx.tcx().sess.span_bug(
+ span,
+ &format!("type of closure expr {:?} is not a closure {:?}",
+ id, t));
+ }
+ };
+
+ // Equate the type variables with the actual types.
+ let final_upvar_tys = self.final_upvar_tys(id);
+ debug!("analyze_closure: id={:?} closure_substs={:?} final_upvar_tys={:?}",
+ id, closure_substs, final_upvar_tys);
+ for (&upvar_ty, final_upvar_ty) in closure_substs.upvar_tys.iter().zip(final_upvar_tys) {
+ demand::eqtype(self.fcx, span, final_upvar_ty, upvar_ty);
+ }
+
+ // Now we must process and remove any deferred resolutions,
+ // since we have a concrete closure kind.
let closure_def_id = ast_util::local_def(id);
if self.closures_with_inferred_kinds.contains(&id) {
let mut deferred_call_resolutions =
}
}
+ // Returns a list of `ClosureUpvar`s for each upvar.
+ fn final_upvar_tys(&mut self, closure_id: ast::NodeId) -> Vec<Ty<'tcx>> {
+ // Presently an unboxed closure type cannot "escape" out of a
+ // function, so we will only encounter ones that originated in the
+ // local crate or were inlined into it along with some function.
+ // This may change if abstract return types of some sort are
+ // implemented.
+ let tcx = self.fcx.tcx();
+ tcx.with_freevars(closure_id, |freevars| {
+ freevars.iter()
+ .map(|freevar| {
+ let freevar_def_id = freevar.def.def_id();
+ let freevar_ty = self.fcx.node_ty(freevar_def_id.node);
+ let upvar_id = ty::UpvarId {
+ var_id: freevar_def_id.node,
+ closure_expr_id: closure_id
+ };
+ let capture = self.fcx.infcx().upvar_capture(upvar_id).unwrap();
+
+ debug!("freevar_def_id={:?} freevar_ty={:?} capture={:?}",
+ freevar_def_id, freevar_ty, capture);
+
+ match capture {
+ ty::UpvarCapture::ByValue => freevar_ty,
+ ty::UpvarCapture::ByRef(borrow) =>
+ tcx.mk_ref(tcx.mk_region(borrow.region),
+ ty::TypeAndMut {
+ ty: freevar_ty,
+ mutbl: borrow.kind.to_mutbl_lossy(),
+ }),
+ }
+ })
+ .collect()
+ })
+ }
+
fn adjust_upvar_borrow_kind_for_consume(&self,
cmt: mc::cmt<'tcx>,
mode: euv::ConsumeMode)
// to move out of an upvar, this must be a FnOnce closure
self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnOnceClosureKind);
- let mut upvar_capture_map = self.fcx.inh.upvar_capture_map.borrow_mut();
+ let upvar_capture_map =
+ &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map;
upvar_capture_map.insert(upvar_id, ty::UpvarCapture::ByValue);
}
mc::NoteClosureEnv(upvar_id) => {
// upvar, then we need to modify the
// borrow_kind of the upvar to make sure it
// is inferred to mutable if necessary
- let mut upvar_capture_map = self.fcx.inh.upvar_capture_map.borrow_mut();
- let ub = upvar_capture_map.get_mut(&upvar_id).unwrap();
- self.adjust_upvar_borrow_kind(upvar_id, ub, borrow_kind);
+ {
+ let upvar_capture_map = &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map;
+ let ub = upvar_capture_map.get_mut(&upvar_id).unwrap();
+ self.adjust_upvar_borrow_kind(upvar_id, ub, borrow_kind);
+ }
// also need to be in an FnMut closure since this is not an ImmBorrow
self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnMutClosureKind);
}
let closure_def_id = ast_util::local_def(closure_id);
- let mut closure_kinds = self.fcx.inh.closure_kinds.borrow_mut();
+ let closure_kinds = &mut self.fcx.inh.tables.borrow_mut().closure_kinds;
let existing_kind = *closure_kinds.get(&closure_def_id).unwrap();
debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}",
span: Span,
id: ast::NodeId)
{
- match fn_kind {
- visit::FkItemFn(..) | visit::FkMethod(..) => {
- // ignore nested fn items
- }
- visit::FkFnBlock => {
- self.analyze_closure(id, decl, body);
- visit::walk_fn(self, fn_kind, decl, body, span);
- }
- }
+ visit::walk_fn(self, fn_kind, decl, body, span);
+ self.analyze_closure(id, span, decl, body);
}
+
+ // Skip all items; they aren't in the same context.
+ fn visit_item(&mut self, _: &'v ast::Item) { }
}
impl<'a,'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a,'tcx> {
use middle::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace};
use middle::traits;
use middle::ty::{self, Ty};
-use middle::ty::liberate_late_bound_regions;
use middle::ty_fold::{TypeFolder, TypeFoldable, super_fold_ty};
+use std::cell::RefCell;
use std::collections::HashSet;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::codemap::{DUMMY_SP, Span};
-use syntax::parse::token::{self, special_idents};
+use syntax::parse::token::special_idents;
use syntax::visit;
use syntax::visit::Visitor;
let ccx = self.ccx;
debug!("check_item_well_formed(it.id={}, it.ident={})",
item.id,
- ty::item_path_str(ccx.tcx, local_def(item.id)));
+ ccx.tcx.item_path_str(local_def(item.id)));
match item.node {
/// Right now we check that every default trait implementation
self.check_impl(item);
}
ast::ItemImpl(_, ast::ImplPolarity::Negative, _, Some(_), _, _) => {
- let trait_ref = ty::impl_trait_ref(ccx.tcx,
- local_def(item.id)).unwrap();
- ty::populate_implementations_for_trait_if_necessary(ccx.tcx, trait_ref.def_id);
+ let trait_ref = ccx.tcx.impl_trait_ref(local_def(item.id)).unwrap();
+ ccx.tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id);
match ccx.tcx.lang_items.to_builtin_kind(trait_ref.def_id) {
Some(ty::BoundSend) | Some(ty::BoundSync) => {}
Some(_) | None => {
- if !ty::trait_has_default_impl(ccx.tcx, trait_ref.def_id) {
+ if !ccx.tcx.trait_has_default_impl(trait_ref.def_id) {
span_err!(ccx.tcx.sess, item.span, E0192,
"negative impls are only allowed for traits with \
default impls (e.g., `Send` and `Sync`)")
}
ast::ItemTrait(_, _, _, ref items) => {
let trait_predicates =
- ty::lookup_predicates(ccx.tcx, local_def(item.id));
+ ccx.tcx.lookup_predicates(local_def(item.id));
reject_non_type_param_bounds(ccx.tcx, item.span, &trait_predicates);
- if ty::trait_has_default_impl(ccx.tcx, local_def(item.id)) {
+ if ccx.tcx.trait_has_default_impl(local_def(item.id)) {
if !items.is_empty() {
span_err!(ccx.tcx.sess, item.span, E0380,
"traits with default impls (`e.g. unsafe impl \
{
let ccx = self.ccx;
let item_def_id = local_def(item.id);
- let type_scheme = ty::lookup_item_type(ccx.tcx, item_def_id);
- let type_predicates = ty::lookup_predicates(ccx.tcx, item_def_id);
+ let type_scheme = ccx.tcx.lookup_item_type(item_def_id);
+ let type_predicates = ccx.tcx.lookup_predicates(item_def_id);
reject_non_type_param_bounds(ccx.tcx, item.span, &type_predicates);
- let param_env =
- ty::construct_parameter_environment(ccx.tcx,
- item.span,
- &type_scheme.generics,
- &type_predicates,
- item.id);
- let inh = Inherited::new(ccx.tcx, param_env);
+ let param_env = ccx.tcx.construct_parameter_environment(item.span,
+ &type_scheme.generics,
+ &type_predicates,
+ item.id);
+ let tables = RefCell::new(ty::Tables::empty());
+ let inh = Inherited::new(ccx.tcx, &tables, param_env);
let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(type_scheme.ty), item.id);
f(self, &fcx);
fcx.select_all_obligations_or_error();
}
// For DST, all intermediate types must be sized.
- if !variant.fields.is_empty() {
- for field in variant.fields.init() {
+ if let Some((_, fields)) = variant.fields.split_last() {
+ for field in fields {
fcx.register_builtin_bound(
field.ty,
ty::BoundSized,
Some(&mut this.cache));
debug!("check_item_type at bounds_checker.scope: {:?}", bounds_checker.scope);
- let type_scheme = ty::lookup_item_type(fcx.tcx(), local_def(item.id));
+ let type_scheme = fcx.tcx().lookup_item_type(local_def(item.id));
let item_ty = fcx.instantiate_type_scheme(item.span,
- &fcx.inh.param_env.free_substs,
+ &fcx.inh
+ .infcx
+ .parameter_environment
+ .free_substs,
&type_scheme.ty);
bounds_checker.check_traits_in_ty(item_ty, item.span);
// Find the impl self type as seen from the "inside" --
// that is, with all type parameters converted from bound
// to free.
- let self_ty = ty::node_id_to_type(fcx.tcx(), item.id);
+ let self_ty = fcx.tcx().node_id_to_type(item.id);
let self_ty = fcx.instantiate_type_scheme(item.span,
- &fcx.inh.param_env.free_substs,
+ &fcx.inh
+ .infcx
+ .parameter_environment
+ .free_substs,
&self_ty);
bounds_checker.check_traits_in_ty(self_ty, item.span);
// Similarly, obtain an "inside" reference to the trait
// that the impl implements.
- let trait_ref = match ty::impl_trait_ref(fcx.tcx(), local_def(item.id)) {
+ let trait_ref = match fcx.tcx().impl_trait_ref(local_def(item.id)) {
None => { return; }
Some(t) => { t }
};
let trait_ref = fcx.instantiate_type_scheme(item.span,
- &fcx.inh.param_env.free_substs,
+ &fcx.inh
+ .infcx
+ .parameter_environment
+ .free_substs,
&trait_ref);
// We are stricter on the trait-ref in an impl than the
// Find the supertrait bounds. This will add `int:Bar`.
let poly_trait_ref = ty::Binder(trait_ref);
- let predicates = ty::lookup_super_predicates(fcx.tcx(), poly_trait_ref.def_id());
+ let predicates = fcx.tcx().lookup_super_predicates(poly_trait_ref.def_id());
let predicates = predicates.instantiate_supertrait(fcx.tcx(), &poly_trait_ref);
let predicates = {
- let selcx = &mut traits::SelectionContext::new(fcx.infcx(), fcx);
+ let selcx = &mut traits::SelectionContext::new(fcx.infcx());
traits::normalize(selcx, cause.clone(), &predicates)
};
for predicate in predicates.value.predicates {
ast_generics: &ast::Generics)
{
let item_def_id = local_def(item.id);
- let ty_predicates = ty::lookup_predicates(self.tcx(), item_def_id);
- let variances = ty::item_variances(self.tcx(), item_def_id);
+ let ty_predicates = self.tcx().lookup_predicates(item_def_id);
+ let variances = self.tcx().item_variances(item_def_id);
let mut constrained_parameters: HashSet<_> =
variances.types
span,
&format!("consider removing `{}` or using a marker such as `{}`",
param_name,
- ty::item_path_str(self.tcx(), def_id)));
+ self.tcx().item_path_str(def_id)));
}
None => {
// no lang items, no help!
if impl_params.contains(&method_param.name) {
span_err!(tcx.sess, span, E0194,
"type parameter `{}` shadows another type parameter of the same name",
- token::get_name(method_param.name));
+ method_param.name);
}
}
}
match fk {
visit::FkFnBlock | visit::FkItemFn(..) => {}
visit::FkMethod(..) => {
- match ty::impl_or_trait_item(self.tcx(), local_def(id)) {
+ match self.tcx().impl_or_trait_item(local_def(id)) {
ty::ImplOrTraitItem::MethodTraitItem(ty_method) => {
reject_shadowing_type_parameters(self.tcx(), span, &ty_method.generics)
}
fn visit_trait_item(&mut self, trait_item: &'v ast::TraitItem) {
if let ast::MethodTraitItem(_, None) = trait_item.node {
- match ty::impl_or_trait_item(self.tcx(), local_def(trait_item.id)) {
+ match self.tcx().impl_or_trait_item(local_def(trait_item.id)) {
ty::ImplOrTraitItem::MethodTraitItem(ty_method) => {
reject_non_type_param_bounds(
self.tcx(),
/// Note that it does not (currently, at least) check that `A : Copy` (that check is delegated
/// to the point where impl `A : Trait<B>` is implemented).
pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>, span: Span) {
- let trait_predicates = ty::lookup_predicates(self.fcx.tcx(), trait_ref.def_id);
+ let trait_predicates = self.fcx.tcx().lookup_predicates(trait_ref.def_id);
let bounds = self.fcx.instantiate_bounds(span,
trait_ref.substs,
where T : TypeFoldable<'tcx>
{
self.binding_count += 1;
- let value = liberate_late_bound_regions(
- self.fcx.tcx(),
+ let value = self.fcx.tcx().liberate_late_bound_regions(
region::DestructionScopeData::new(self.scope),
binder);
debug!("BoundsChecker::fold_binder: late-bound regions replaced: {:?} at scope: {:?}",
match t.sty{
ty::TyStruct(type_id, substs) |
ty::TyEnum(type_id, substs) => {
- let type_predicates = ty::lookup_predicates(self.fcx.tcx(), type_id);
+ let type_predicates = self.fcx.tcx().lookup_predicates(type_id);
let bounds = self.fcx.instantiate_bounds(self.span, substs,
&type_predicates);
struct_def.fields
.iter()
.map(|field| {
- let field_ty = ty::node_id_to_type(fcx.tcx(), field.node.id);
+ let field_ty = fcx.tcx().node_id_to_type(field.node.id);
let field_ty = fcx.instantiate_type_scheme(field.span,
- &fcx.inh.param_env.free_substs,
+ &fcx.inh
+ .infcx
+ .parameter_environment
+ .free_substs,
&field_ty);
AdtField { ty: field_ty, span: field.span }
})
.map(|variant| {
match variant.node.kind {
ast::TupleVariantKind(ref args) if !args.is_empty() => {
- let ctor_ty = ty::node_id_to_type(fcx.tcx(), variant.node.id);
+ let ctor_ty = fcx.tcx().node_id_to_type(variant.node.id);
// the regions in the argument types come from the
// enum def'n, and hence will all be early bound
- let arg_tys =
- ty::no_late_bound_regions(
- fcx.tcx(), &ty::ty_fn_args(ctor_ty)).unwrap();
+ let arg_tys = fcx.tcx().no_late_bound_regions(&ctor_ty.fn_args()).unwrap();
AdtVariant {
fields: args.iter().enumerate().map(|(index, arg)| {
let arg_ty = arg_tys[index];
let arg_ty =
fcx.instantiate_type_scheme(variant.span,
- &fcx.inh.param_env.free_substs,
+ &fcx.inh
+ .infcx
+ .parameter_environment
+ .free_substs,
&arg_ty);
AdtField {
ty: arg_ty,
let rhs_ty = self.fcx.node_ty(rhs.id);
let rhs_ty = self.fcx.infcx().resolve_type_vars_if_possible(&rhs_ty);
- if ty::type_is_scalar(lhs_ty) && ty::type_is_scalar(rhs_ty) {
- self.fcx.inh.method_map.borrow_mut().remove(&MethodCall::expr(e.id));
+ if lhs_ty.is_scalar() && rhs_ty.is_scalar() {
+ self.fcx.inh.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id));
// weird but true: the by-ref binops put an
// adjustment on the lhs but not the rhs; the
// adjustment for rhs is kind of baked into the
// system.
if !ast_util::is_by_value_binop(op.node) {
- self.fcx.inh.adjustments.borrow_mut().remove(&lhs.id);
+ self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id);
}
}
}
return;
}
- self.visit_node_id(ResolvingExpr(s.span), ty::stmt_node_id(s));
+ self.visit_node_id(ResolvingExpr(s.span), ast_util::stmt_id(s));
visit::walk_stmt(self, s);
}
debug!("Type for pattern binding {} (id {}) resolved to {:?}",
pat_to_string(p),
p.id,
- ty::node_id_to_type(self.tcx(), p.id));
+ self.tcx().node_id_to_type(p.id));
visit::walk_pat(self, p);
}
return;
}
- for (upvar_id, upvar_capture) in self.fcx.inh.upvar_capture_map.borrow().iter() {
+ for (upvar_id, upvar_capture) in self.fcx.inh.tables.borrow().upvar_capture_map.iter() {
let new_upvar_capture = match *upvar_capture {
ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue,
ty::UpvarCapture::ByRef(ref upvar_borrow) => {
debug!("Upvar capture for {:?} resolved to {:?}",
upvar_id,
new_upvar_capture);
- self.fcx.tcx().upvar_capture_map.borrow_mut().insert(*upvar_id, new_upvar_capture);
+ self.fcx.tcx()
+ .tables
+ .borrow_mut()
+ .upvar_capture_map
+ .insert(*upvar_id, new_upvar_capture);
}
}
return
}
- for (def_id, closure_ty) in self.fcx.inh.closure_tys.borrow().iter() {
+ for (def_id, closure_ty) in self.fcx.inh.tables.borrow().closure_tys.iter() {
let closure_ty = self.resolve(closure_ty, ResolvingClosure(*def_id));
- self.fcx.tcx().closure_tys.borrow_mut().insert(*def_id, closure_ty);
+ self.fcx.tcx().tables.borrow_mut().closure_tys.insert(*def_id, closure_ty);
}
- for (def_id, &closure_kind) in self.fcx.inh.closure_kinds.borrow().iter() {
- self.fcx.tcx().closure_kinds.borrow_mut().insert(*def_id, closure_kind);
+ for (def_id, &closure_kind) in self.fcx.inh.tables.borrow().closure_kinds.iter() {
+ self.fcx.tcx().tables.borrow_mut().closure_kinds.insert(*def_id, closure_kind);
}
}
}
fn visit_adjustments(&self, reason: ResolveReason, id: ast::NodeId) {
- match self.fcx.inh.adjustments.borrow_mut().remove(&id) {
+ let adjustments = self.fcx.inh.tables.borrow_mut().adjustments.remove(&id);
+ match adjustments {
None => {
debug!("No adjustments for node {}", id);
}
}
};
debug!("Adjustments for node {}: {:?}", id, resolved_adjustment);
- self.tcx().adjustments.borrow_mut().insert(
+ self.tcx().tables.borrow_mut().adjustments.insert(
id, resolved_adjustment);
}
}
reason: ResolveReason,
method_call: MethodCall) {
// Resolve any method map entry
- match self.fcx.inh.method_map.borrow_mut().remove(&method_call) {
+ let new_method = match self.fcx.inh.tables.borrow_mut().method_map.remove(&method_call) {
Some(method) => {
debug!("writeback::resolve_method_map_entry(call={:?}, entry={:?})",
method_call,
method);
let new_method = MethodCallee {
- origin: self.resolve(&method.origin, reason),
+ def_id: method.def_id,
ty: self.resolve(&method.ty, reason),
- substs: self.resolve(&method.substs, reason),
+ substs: self.tcx().mk_substs(self.resolve(method.substs, reason)),
};
- self.tcx().method_map.borrow_mut().insert(
+ Some(new_method)
+ }
+ None => None
+ };
+
+ //NB(jroesch): We need to match twice to avoid a double borrow which would cause an ICE
+ match new_method {
+ Some(method) => {
+ self.tcx().tables.borrow_mut().method_map.insert(
method_call,
- new_method);
+ method);
}
None => {}
}
ResolvingLocal(s) => s,
ResolvingPattern(s) => s,
ResolvingUpvar(upvar_id) => {
- ty::expr_span(tcx, upvar_id.closure_expr_id)
+ tcx.expr_span(upvar_id.closure_expr_id)
}
ResolvingClosure(did) => {
if did.krate == ast::LOCAL_CRATE {
- ty::expr_span(tcx, did.node)
+ tcx.expr_span(did.node)
} else {
DUMMY_SP
}
reason: reason }
}
- fn report_error(&self, e: infer::fixup_err) {
+ fn report_error(&self, e: infer::FixupError) {
self.writeback_errors.set(true);
if !self.tcx.sess.has_errors() {
match self.reason {
let span = self.reason.span(self.tcx);
span_err!(self.tcx.sess, span, E0104,
"cannot resolve lifetime for captured variable `{}`: {}",
- ty::local_var_name_str(self.tcx, upvar_id.var_id).to_string(),
+ self.tcx.local_var_name_str(upvar_id.var_id).to_string(),
infer::fixup_err_to_string(e));
}
fn check_implementation(&self, item: &Item) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
- let self_type = ty::lookup_item_type(tcx, impl_did);
+ let self_type = tcx.lookup_item_type(impl_did);
// If there are no traits, then this implementation must have a
// base type.
let impl_items = self.create_impl_from_item(item);
- if let Some(trait_ref) = ty::impl_trait_ref(self.crate_context.tcx,
- impl_did) {
+ if let Some(trait_ref) = self.crate_context.tcx.impl_trait_ref(impl_did) {
debug!("(checking implementation) adding impl for trait '{:?}', item '{}'",
trait_ref,
item.ident);
debug!("instantiate_default_methods(impl_id={:?}, trait_ref={:?})",
impl_id, trait_ref);
- let impl_type_scheme = ty::lookup_item_type(tcx, impl_id);
+ let impl_type_scheme = tcx.lookup_item_type(impl_id);
- let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
+ let prov = tcx.provided_trait_methods(trait_ref.def_id);
for trait_method in &prov {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
// impl, plus its own.
let new_polytype = ty::TypeScheme {
generics: new_method_ty.generics.clone(),
- ty: ty::mk_bare_fn(tcx, Some(new_did),
- tcx.mk_bare_fn(new_method_ty.fty.clone()))
+ ty: tcx.mk_fn(Some(new_did),
+ tcx.mk_bare_fn(new_method_ty.fty.clone()))
};
debug!("new_polytype={:?}", new_polytype);
- tcx.tcache.borrow_mut().insert(new_did, new_polytype);
+ tcx.register_item_type(new_did, new_polytype);
tcx.predicates.borrow_mut().insert(new_did, new_method_ty.predicates.clone());
tcx.impl_or_trait_items
.borrow_mut()
fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'tcx>, impl_def_id: DefId) {
debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}",
impl_trait_ref, impl_def_id);
- let trait_def = ty::lookup_trait_def(self.crate_context.tcx,
- impl_trait_ref.def_id);
+ let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id);
trait_def.record_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref);
}
}
}).collect();
- if let Some(trait_ref) = ty::impl_trait_ref(self.crate_context.tcx,
- local_def(item.id)) {
- self.instantiate_default_methods(local_def(item.id),
- &trait_ref,
- &mut items);
+ let def_id = local_def(item.id);
+ if let Some(trait_ref) = self.crate_context.tcx.impl_trait_ref(def_id) {
+ self.instantiate_default_methods(def_id, &trait_ref, &mut items);
}
items
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
- ty::populate_implementations_for_trait_if_necessary(tcx, drop_trait);
- let drop_trait = ty::lookup_trait_def(tcx, drop_trait);
+ tcx.populate_implementations_for_trait_if_necessary(drop_trait);
+ let drop_trait = tcx.lookup_trait_def(drop_trait);
let impl_items = tcx.impl_items.borrow();
}
let method_def_id = items[0];
- let self_type = ty::lookup_item_type(tcx, impl_did);
+ let self_type = tcx.lookup_item_type(impl_did);
match self_type.ty.sty {
ty::TyEnum(type_def_id, _) |
ty::TyStruct(type_def_id, _) |
Some(id) => id,
None => return,
};
- ty::populate_implementations_for_trait_if_necessary(tcx, copy_trait);
- let copy_trait = ty::lookup_trait_def(tcx, copy_trait);
+ tcx.populate_implementations_for_trait_if_necessary(copy_trait);
+ let copy_trait = tcx.lookup_trait_def(copy_trait);
copy_trait.for_each_impl(tcx, |impl_did| {
debug!("check_implementations_of_copy: impl_did={:?}",
return
}
- let self_type = ty::lookup_item_type(tcx, impl_did);
+ let self_type = tcx.lookup_item_type(impl_did);
debug!("check_implementations_of_copy: self_type={:?} (bound)",
self_type);
debug!("check_implementations_of_copy: self_type={:?} (free)",
self_type);
- match ty::can_type_implement_copy(¶m_env, span, self_type) {
+ match param_env.can_type_implement_copy(self_type, span) {
Ok(()) => {}
Err(ty::FieldDoesNotImplementCopy(name)) => {
span_err!(tcx.sess, span, E0204,
"the trait `Copy` may not be \
implemented for this type; field \
`{}` does not implement `Copy`",
- token::get_name(name))
+ name)
}
Err(ty::VariantDoesNotImplementCopy(name)) => {
span_err!(tcx.sess, span, E0205,
"the trait `Copy` may not be \
implemented for this type; variant \
`{}` does not implement `Copy`",
- token::get_name(name))
+ name)
}
Err(ty::TypeIsStructural) => {
span_err!(tcx.sess, span, E0206,
}
};
- let trait_def = ty::lookup_trait_def(tcx, coerce_unsized_trait);
+ let trait_def = tcx.lookup_trait_def(coerce_unsized_trait);
trait_def.for_each_impl(tcx, |impl_did| {
debug!("check_implementations_of_coerce_unsized: impl_did={:?}",
return;
}
- let source = ty::lookup_item_type(tcx, impl_did).ty;
- let trait_ref = ty::impl_trait_ref(self.crate_context.tcx,
- impl_did).unwrap();
+ let source = tcx.lookup_item_type(impl_did).ty;
+ let trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap();
let target = *trait_ref.substs.types.get(subst::TypeSpace, 0);
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)",
source, target);
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)",
source, target);
- let infcx = new_infer_ctxt(tcx);
+ let infcx = new_infer_ctxt(tcx, &tcx.tables, Some(param_env), true);
- let check_mutbl = |mt_a: ty::mt<'tcx>, mt_b: ty::mt<'tcx>,
+ let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, mt_b: ty::TypeAndMut<'tcx>,
mk_ptr: &Fn(Ty<'tcx>) -> Ty<'tcx>| {
if (mt_a.mutbl, mt_b.mutbl) == (ast::MutImmutable, ast::MutMutable) {
infcx.report_mismatched_types(span, mk_ptr(mt_b.ty),
- target, &ty::terr_mutability);
+ target, &ty::TypeError::Mutability);
}
(mt_a.ty, mt_b.ty, unsize_trait, None)
};
(&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => {
infer::mk_subr(&infcx, infer::RelateObjectBound(span), *r_b, *r_a);
- check_mutbl(mt_a, mt_b, &|ty| ty::mk_imm_rptr(tcx, r_b, ty))
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) |
(&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => {
- check_mutbl(mt_a, mt_b, &|ty| ty::mk_imm_ptr(tcx, ty))
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
}
(&ty::TyStruct(def_id_a, substs_a), &ty::TyStruct(def_id_b, substs_b)) => {
if def_id_a != def_id_b {
- let source_path = ty::item_path_str(tcx, def_id_a);
- let target_path = ty::item_path_str(tcx, def_id_b);
+ let source_path = tcx.item_path_str(def_id_a);
+ let target_path = tcx.item_path_str(def_id_b);
span_err!(tcx.sess, span, E0377,
"the trait `CoerceUnsized` may only be implemented \
for a coercion between structures with the same \
}
let origin = infer::Misc(span);
- let fields = ty::lookup_struct_fields(tcx, def_id_a);
+ let fields = tcx.lookup_struct_fields(def_id_a);
let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| {
- let ty = ty::lookup_field_type_unsubstituted(tcx, def_id_a, f.id);
+ let ty = tcx.lookup_field_type_unsubstituted(def_id_a, f.id);
let (a, b) = (ty.subst(tcx, substs_a), ty.subst(tcx, substs_b));
if infcx.sub_types(false, origin, b, a).is_ok() {
None
} else {
name.to_string()
}, a, b)
- }).collect::<Vec<_>>().connect(", "));
+ }).collect::<Vec<_>>().join(", "));
return;
}
}
};
- let mut fulfill_cx = traits::FulfillmentContext::new(true);
+ let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
// Register an obligation for `A: Trait<B>`.
let cause = traits::ObligationCause::misc(span, impl_did.node);
fulfill_cx.register_predicate_obligation(&infcx, predicate);
// Check that all transitive obligations are satisfied.
- if let Err(errors) = fulfill_cx.select_all_or_error(&infcx, ¶m_env) {
+ if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) {
traits::report_fulfillment_errors(&infcx, &errors);
}
// Finally, resolve all regions.
let mut free_regions = FreeRegionMap::new();
- free_regions.relate_free_regions_from_predicates(tcx, ¶m_env.caller_bounds);
+ free_regions.relate_free_regions_from_predicates(tcx, &infcx.parameter_environment
+ .caller_bounds);
infcx.resolve_regions_and_report_errors(&free_regions, impl_did.node);
if let Some(kind) = kind {
provided_source: Option<ast::DefId>)
-> ty::Method<'tcx>
{
- let combined_substs = ty::make_substs_for_receiver_types(tcx, trait_ref, method);
+ let combined_substs = tcx.make_substs_for_receiver_types(trait_ref, method);
debug!("subst_receiver_types_in_method_ty: combined_substs={:?}",
combined_substs);
pub fn check_coherence(crate_context: &CrateCtxt) {
CoherenceChecker {
crate_context: crate_context,
- inference_context: new_infer_ctxt(crate_context.tcx),
+ inference_context: new_infer_ctxt(crate_context.tcx, &crate_context.tcx.tables, None, true),
inherent_impls: RefCell::new(FnvHashMap()),
}.check(crate_context.tcx.map.krate());
unsafety::check(crate_context.tcx);
// defined in this crate.
debug!("coherence2::orphan check: inherent impl {}",
self.tcx.map.node_to_string(item.id));
- let self_ty = ty::lookup_item_type(self.tcx, def_id).ty;
+ let self_ty = self.tcx.lookup_item_type(def_id).ty;
match self_ty.sty {
ty::TyEnum(def_id, _) |
ty::TyStruct(def_id, _) => {
self.check_def_id(item, data.principal_def_id());
}
ty::TyBox(..) => {
- self.check_def_id(item, self.tcx.lang_items.owned_box().unwrap());
+ match self.tcx.lang_items.require_owned_box() {
+ Ok(trait_id) => self.check_def_id(item, trait_id),
+ Err(msg) => self.tcx.sess.span_fatal(item.span, &msg),
+ }
}
ty::TyChar => {
self.check_primitive_impl(def_id,
"[T]",
item.span);
}
- ty::TyRawPtr(ty::mt { ty: _, mutbl: ast::MutImmutable }) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: ast::MutImmutable }) => {
self.check_primitive_impl(def_id,
self.tcx.lang_items.const_ptr_impl(),
"const_ptr",
"*const T",
item.span);
}
- ty::TyRawPtr(ty::mt { ty: _, mutbl: ast::MutMutable }) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: ast::MutMutable }) => {
self.check_primitive_impl(def_id,
self.tcx.lang_items.mut_ptr_impl(),
"mut_ptr",
// "Trait" impl
debug!("coherence2::orphan check: trait impl {}",
self.tcx.map.node_to_string(item.id));
- let trait_ref = ty::impl_trait_ref(self.tcx, def_id).unwrap();
+ let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
match traits::orphan_check(self.tcx, def_id) {
Ok(()) => { }
debug!("trait_ref={:?} trait_def_id={:?} trait_has_default_impl={}",
trait_ref,
trait_def_id,
- ty::trait_has_default_impl(self.tcx, trait_def_id));
+ self.tcx.trait_has_default_impl(trait_def_id));
if
- ty::trait_has_default_impl(self.tcx, trait_def_id) &&
+ self.tcx.trait_has_default_impl(trait_def_id) &&
trait_def_id.krate != ast::LOCAL_CRATE
{
let self_ty = trait_ref.self_ty();
"cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type \
defined in the current crate",
- ty::item_path_str(self.tcx, trait_def_id)))
+ self.tcx.item_path_str(trait_def_id)))
}
}
_ => {
"cross-crate traits with a default impl, like `{}`, \
can only be implemented for a struct/enum type, \
not `{}`",
- ty::item_path_str(self.tcx, trait_def_id),
+ self.tcx.item_path_str(trait_def_id),
self_ty))
}
};
// "Trait" impl
debug!("coherence2::orphan check: default trait impl {}",
self.tcx.map.node_to_string(item.id));
- let trait_ref = ty::impl_trait_ref(self.tcx, def_id).unwrap();
+ let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
if trait_ref.def_id.krate != ast::LOCAL_CRATE {
span_err!(self.tcx.sess, item.span, E0318,
"cannot create default implementations for traits outside the \
let trait_defs: Vec<_> = self.tcx.trait_defs.borrow().values().cloned().collect();
for trait_def in trait_defs {
- ty::populate_implementations_for_trait_if_necessary(
- self.tcx,
- trait_def.trait_ref.def_id);
+ self.tcx.populate_implementations_for_trait_if_necessary(trait_def.trait_ref.def_id);
self.check_for_overlapping_impls_of_trait(trait_def);
}
}
impl1_def_id,
impl2_def_id);
- let infcx = infer::new_infer_ctxt(self.tcx);
+ let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None, false);
if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id) {
self.report_overlap_error(trait_def_id, impl1_def_id, impl2_def_id);
}
span_err!(self.tcx.sess, self.span_of_impl(impl1), E0119,
"conflicting implementations for trait `{}`",
- ty::item_path_str(self.tcx, trait_def_id));
+ self.tcx.item_path_str(trait_def_id));
self.report_overlap_note(impl1, impl2);
}
// general orphan/coherence rules, it must always be
// in this crate.
let impl_def_id = ast_util::local_def(item.id);
- let trait_ref = ty::impl_trait_ref(self.tcx, impl_def_id).unwrap();
+ let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap();
let prev_default_impl = self.default_impls.insert(trait_ref.def_id, item.id);
match prev_default_impl {
Some(prev_id) => {
}
ast::ItemImpl(_, _, _, Some(_), ref self_ty, _) => {
let impl_def_id = ast_util::local_def(item.id);
- let trait_ref = ty::impl_trait_ref(self.tcx, impl_def_id).unwrap();
+ let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap();
let trait_def_id = trait_ref.def_id;
match trait_ref.self_ty().sty {
ty::TyTrait(ref data) => {
// giving a misleading message below.
span_err!(self.tcx.sess, self_ty.span, E0372,
"the trait `{}` cannot be made into an object",
- ty::item_path_str(self.tcx, data.principal_def_id()));
+ self.tcx.item_path_str(data.principal_def_id()));
} else {
let mut supertrait_def_ids =
traits::supertrait_def_ids(self.tcx, data.principal_def_id());
"the object type `{}` automatically \
implements the trait `{}`",
trait_ref.self_ty(),
- ty::item_path_str(self.tcx, trait_def_id));
+ self.tcx.item_path_str(trait_def_id));
}
}
}
fn check_unsafety_coherence(&mut self, item: &'v ast::Item,
unsafety: ast::Unsafety,
polarity: ast::ImplPolarity) {
- match ty::impl_trait_ref(self.tcx, ast_util::local_def(item.id)) {
+ match self.tcx.impl_trait_ref(ast_util::local_def(item.id)) {
None => {
// Inherent impl.
match unsafety {
}
Some(trait_ref) => {
- let trait_def = ty::lookup_trait_def(self.tcx, trait_ref.def_id);
+ let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id);
match (trait_def.unsafety, unsafety, polarity) {
(ast::Unsafety::Unsafe,
ast::Unsafety::Unsafe, ast::ImplPolarity::Negative) => {
use middle::region;
use middle::resolve_lifetime;
use middle::subst::{Substs, FnSpace, ParamSpace, SelfSpace, TypeSpace, VecPerParamSpace};
-use middle::ty::{AsPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer};
+use middle::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer};
use middle::ty::{self, RegionEscape, ToPolyTraitRef, Ty, TypeScheme};
use middle::ty_fold::{self, TypeFolder, TypeFoldable};
use middle::infer;
use syntax::ast_util::local_def;
use syntax::codemap::Span;
use syntax::parse::token::special_idents;
-use syntax::parse::token;
use syntax::ptr::P;
use syntax::visit;
AstConvRequest::GetTraitDef(def_id) => {
tcx.sess.note(
&format!("the cycle begins when processing `{}`...",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::EnsureSuperPredicates(def_id) => {
tcx.sess.note(
&format!("the cycle begins when computing the supertraits of `{}`...",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::GetTypeParameterBounds(id) => {
let def = tcx.type_parameter_def(id);
AstConvRequest::GetTraitDef(def_id) => {
tcx.sess.note(
&format!("...which then requires processing `{}`...",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::EnsureSuperPredicates(def_id) => {
tcx.sess.note(
&format!("...which then requires computing the supertraits of `{}`...",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::GetTypeParameterBounds(id) => {
let def = tcx.type_parameter_def(id);
AstConvRequest::GetTraitDef(def_id) => {
tcx.sess.note(
&format!("...which then again requires processing `{}`, completing the cycle.",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::EnsureSuperPredicates(def_id) => {
tcx.sess.note(
&format!("...which then again requires computing the supertraits of `{}`, \
completing the cycle.",
- ty::item_path_str(tcx, def_id)));
+ tcx.item_path_str(def_id)));
}
AstConvRequest::GetTypeParameterBounds(id) => {
let def = tcx.type_parameter_def(id);
let tcx = self.tcx;
if trait_id.krate != ast::LOCAL_CRATE {
- return ty::lookup_trait_def(tcx, trait_id)
+ return tcx.lookup_trait_def(trait_id)
}
let item = match tcx.map.get(trait_id.node) {
if trait_def_id.krate == ast::LOCAL_CRATE {
trait_defines_associated_type_named(self.ccx, trait_def_id.node, assoc_name)
} else {
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
trait_def.associated_type_names.contains(&assoc_name)
}
}
- fn ty_infer(&self, span: Span) -> Ty<'tcx> {
+ fn ty_infer(&self,
+ _ty_param_def: Option<ty::TypeParameterDef<'tcx>>,
+ _substs: Option<&mut Substs<'tcx>>,
+ _space: Option<ParamSpace>,
+ span: Span) -> Ty<'tcx> {
span_err!(self.tcx().sess, span, E0121,
"the type placeholder `_` is not allowed within types on item signatures");
self.tcx().types.err
item_name: ast::Name)
-> Ty<'tcx>
{
- ty::mk_projection(self.tcx(), trait_ref, item_name)
+ self.tcx().mk_projection(trait_ref, item_name)
}
}
// `where T:Foo`.
let def = astconv.tcx().type_parameter_def(node_id);
- let ty = ty::mk_param_from_def(astconv.tcx(), &def);
+ let ty = astconv.tcx().mk_param_from_def(&def);
let from_ty_params =
self.ty_params
ast::TupleVariantKind(ref args) if !args.is_empty() => {
let rs = ExplicitRscope;
let input_tys: Vec<_> = args.iter().map(|va| icx.to_ty(&rs, &*va.ty)).collect();
- ty::mk_ctor_fn(tcx, variant_def_id, &input_tys, enum_scheme.ty)
+ tcx.mk_ctor_fn(variant_def_id, &input_tys, enum_scheme.ty)
}
ast::TupleVariantKind(_) => {
ty: result_ty
};
- tcx.tcache.borrow_mut().insert(variant_def_id, variant_scheme.clone());
+ tcx.register_item_type(variant_def_id, variant_scheme.clone());
tcx.predicates.borrow_mut().insert(variant_def_id, enum_predicates.clone());
write_ty_to_tcx(tcx, variant.node.id, result_ty);
}
container,
None);
- let fty = ty::mk_bare_fn(ccx.tcx, Some(def_id),
- ccx.tcx.mk_bare_fn(ty_method.fty.clone()));
+ let fty = ccx.tcx.mk_fn(Some(def_id),
+ ccx.tcx.mk_bare_fn(ty_method.fty.clone()));
debug!("method {} (id {}) has type {:?}",
ident, id, fty);
- ccx.tcx.tcache.borrow_mut().insert(def_id,TypeScheme {
+ ccx.tcx.register_item_type(def_id, TypeScheme {
generics: ty_method.generics.clone(),
ty: fty
});
struct_predicates: &ty::GenericPredicates<'tcx>,
v: &ast::StructField,
origin: ast::DefId)
- -> ty::field_ty
+ -> ty::FieldTy
{
let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &*v.node.ty);
write_ty_to_tcx(ccx.tcx, v.node.id, tt);
/* add the field to the tcache */
- ccx.tcx.tcache.borrow_mut().insert(local_def(v.node.id),
- ty::TypeScheme {
- generics: struct_generics.clone(),
- ty: tt
- });
+ ccx.tcx.register_item_type(local_def(v.node.id),
+ ty::TypeScheme {
+ generics: struct_generics.clone(),
+ ty: tt
+ });
ccx.tcx.predicates.borrow_mut().insert(local_def(v.node.id),
struct_predicates.clone());
match v.node.kind {
ast::NamedField(ident, visibility) => {
- ty::field_ty {
+ ty::FieldTy {
name: ident.name,
id: local_def(v.node.id),
vis: visibility,
}
}
ast::UnnamedField(visibility) => {
- ty::field_ty {
+ ty::FieldTy {
name: special_idents::unnamed_field.name,
id: local_def(v.node.id),
vis: visibility,
rcvr_ty_generics,
rcvr_ty_predicates);
- let tcx = ccx.tcx;
- let mut seen_methods = FnvHashSet();
- for (sig, id, ident, vis, span) in methods {
- if !seen_methods.insert(ident.name) {
- let fn_desc = match sig.explicit_self.node {
- ast::SelfStatic => "associated function",
- _ => "method",
- };
- span_err!(tcx.sess, span, E0201, "duplicate {}", fn_desc);
- }
-
+ for (sig, id, ident, vis, _span) in methods {
convert_method(ccx,
container,
sig,
fn convert_item(ccx: &CrateCtxt, it: &ast::Item) {
let tcx = ccx.tcx;
- debug!("convert: item {} with id {}", token::get_ident(it.ident), it.id);
+ debug!("convert: item {} with id {}", it.ident, it.id);
match it.node {
// These don't define types.
ast::ItemExternCrate(_) | ast::ItemUse(_) |
ast_trait_ref,
None);
- ty::record_trait_has_default_impl(tcx, trait_ref.def_id);
+ tcx.record_trait_has_default_impl(trait_ref.def_id);
tcx.impl_trait_refs.borrow_mut().insert(local_def(it.id), Some(trait_ref));
}
let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &**selfty);
write_ty_to_tcx(tcx, it.id, selfty);
- tcx.tcache.borrow_mut().insert(local_def(it.id),
- TypeScheme { generics: ty_generics.clone(),
- ty: selfty });
+ tcx.register_item_type(local_def(it.id),
+ TypeScheme { generics: ty_generics.clone(),
+ ty: selfty });
tcx.predicates.borrow_mut().insert(local_def(it.id),
ty_predicates.clone());
+ if let &Some(ref ast_trait_ref) = opt_trait_ref {
+ tcx.impl_trait_refs.borrow_mut().insert(
+ local_def(it.id),
+ Some(astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
+ &ExplicitRscope,
+ ast_trait_ref,
+ Some(selfty)))
+ );
+ } else {
+ tcx.impl_trait_refs.borrow_mut().insert(local_def(it.id), None);
+ }
+
// If there is a trait reference, treat the methods as always public.
// This is to work around some incorrect behavior in privacy checking:
};
// Convert all the associated consts.
+ // Also, check if there are any duplicate associated items
+ let mut seen_type_items = FnvHashSet();
+ let mut seen_value_items = FnvHashSet();
+
for impl_item in impl_items {
+ let seen_items = match impl_item.node {
+ ast::TypeImplItem(_) => &mut seen_type_items,
+ _ => &mut seen_value_items,
+ };
+ if !seen_items.insert(impl_item.ident.name) {
+ let desc = match impl_item.node {
+ ast::ConstImplItem(_, _) => "associated constant",
+ ast::TypeImplItem(_) => "associated type",
+ ast::MethodImplItem(ref sig, _) =>
+ match sig.explicit_self.node {
+ ast::SelfStatic => "associated function",
+ _ => "method",
+ },
+ _ => "associated item",
+ };
+
+ span_err!(tcx.sess, impl_item.span, E0201, "duplicate {}", desc);
+ }
+
if let ast::ConstImplItem(ref ty, ref expr) = impl_item.node {
let ty = ccx.icx(&ty_predicates)
.to_ty(&ExplicitRscope, &*ty);
- tcx.tcache.borrow_mut().insert(local_def(impl_item.id),
- TypeScheme {
- generics: ty_generics.clone(),
- ty: ty,
- });
+ tcx.register_item_type(local_def(impl_item.id),
+ TypeScheme {
+ generics: ty_generics.clone(),
+ ty: ty,
+ });
convert_associated_const(ccx, ImplContainer(local_def(it.id)),
impl_item.ident, impl_item.id,
impl_item.vis.inherit_from(parent_visibility),
}
}
- if let &Some(ref ast_trait_ref) = opt_trait_ref {
- tcx.impl_trait_refs.borrow_mut().insert(
- local_def(it.id),
- Some(astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
- &ExplicitRscope,
- ast_trait_ref,
- Some(selfty)))
- );
- } else {
- tcx.impl_trait_refs.borrow_mut().insert(local_def(it.id), None);
- }
-
enforce_impl_params_are_constrained(tcx,
generics,
local_def(it.id),
let _: Result<(), ErrorReported> = // any error is already reported, can ignore
ccx.ensure_super_predicates(it.span, local_def(it.id));
convert_trait_predicates(ccx, it);
- let trait_predicates = ty::lookup_predicates(tcx, local_def(it.id));
+ let trait_predicates = tcx.lookup_predicates(local_def(it.id));
debug!("convert: trait_bounds={:?}", trait_predicates);
ast::ConstTraitItem(ref ty, ref default) => {
let ty = ccx.icx(&trait_predicates)
.to_ty(&ExplicitRscope, ty);
- tcx.tcache.borrow_mut().insert(local_def(trait_item.id),
- TypeScheme {
- generics: trait_def.generics.clone(),
- ty: ty,
- });
+ tcx.register_item_type(local_def(trait_item.id),
+ TypeScheme {
+ generics: trait_def.generics.clone(),
+ ty: ty,
+ });
convert_associated_const(ccx, TraitContainer(local_def(it.id)),
trait_item.ident, trait_item.id,
ast::Public, ty, default.as_ref().map(|d| &**d));
convert_methods(ccx,
TraitContainer(local_def(it.id)),
methods,
- ty::mk_self_type(tcx),
+ tcx.mk_self_type(),
&trait_def.generics,
&trait_predicates);
check_method_self_type(ccx,
&BindingRscope::new(),
ccx.method_ty(trait_item.id),
- ty::mk_self_type(tcx),
+ tcx.mk_self_type(),
&sig.explicit_self,
it.id)
}
Some(prev_span) => {
span_err!(tcx.sess, f.span, E0124,
"field `{}` is already declared",
- token::get_name(result.name));
+ result.name);
span_note!(tcx.sess, *prev_span, "previously declared here");
true
},
tcx.struct_fields.borrow_mut().insert(local_def(id), Rc::new(field_tys));
let substs = mk_item_substs(ccx, &scheme.generics);
- let selfty = ty::mk_struct(tcx, local_def(id), tcx.mk_substs(substs));
+ let selfty = tcx.mk_struct(local_def(id), tcx.mk_substs(substs));
// If this struct is enum-like or tuple-like, create the type of its
// constructor.
// Enum-like.
write_ty_to_tcx(tcx, ctor_id, selfty);
- tcx.tcache.borrow_mut().insert(local_def(ctor_id), scheme);
+ tcx.register_item_type(local_def(ctor_id), scheme);
tcx.predicates.borrow_mut().insert(local_def(ctor_id), predicates);
} else if struct_def.fields[0].node.kind.is_unnamed() {
// Tuple-like.
let inputs: Vec<_> =
struct_def.fields
.iter()
- .map(|field| tcx.tcache.borrow().get(&local_def(field.node.id))
- .unwrap()
- .ty)
+ .map(|field| tcx.lookup_item_type(
+ local_def(field.node.id)).ty)
.collect();
- let ctor_fn_ty = ty::mk_ctor_fn(tcx,
- local_def(ctor_id),
+ let ctor_fn_ty = tcx.mk_ctor_fn(local_def(ctor_id),
&inputs[..],
selfty);
write_ty_to_tcx(tcx, ctor_id, ctor_fn_ty);
- tcx.tcache.borrow_mut().insert(local_def(ctor_id),
- TypeScheme {
- generics: scheme.generics,
- ty: ctor_fn_ty
- });
+ tcx.register_item_type(local_def(ctor_id),
+ TypeScheme {
+ generics: scheme.generics,
+ ty: ctor_fn_ty
+ });
tcx.predicates.borrow_mut().insert(local_def(ctor_id), predicates);
}
}
let trait_def = trait_def_of_item(ccx, item);
let self_predicate = ty::GenericPredicates {
predicates: VecPerParamSpace::new(vec![],
- vec![trait_def.trait_ref.as_predicate()],
+ vec![trait_def.trait_ref.to_predicate()],
vec![])
};
let scope = &(generics, &self_predicate);
// Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`.
- let self_param_ty = ty::mk_self_type(tcx);
+ let self_param_ty = tcx.mk_self_type();
let superbounds1 = compute_bounds(&ccx.icx(scope),
self_param_ty,
bounds,
_ => tcx.sess.span_bug(it.span, "trait_def_of_item invoked on non-trait"),
};
- let paren_sugar = ty::has_attr(tcx, def_id, "rustc_paren_sugar");
+ let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar");
if paren_sugar && !ccx.tcx.sess.features.borrow().unboxed_closures {
ccx.tcx.sess.span_err(
it.span,
generics.ty_params
.iter()
.enumerate()
- .map(|(i, def)| ty::mk_param(tcx, TypeSpace,
+ .map(|(i, def)| tcx.mk_param(TypeSpace,
i as u32, def.ident.name))
.collect();
// ...and also create the `Self` parameter.
- let self_ty = ty::mk_self_type(tcx);
+ let self_ty = tcx.mk_self_type();
Substs::new_trait(types, regions, self_ty)
}
}
};
- let super_predicates = ty::lookup_super_predicates(ccx.tcx, def_id);
+ let super_predicates = ccx.tcx.lookup_super_predicates(def_id);
// `ty_generic_predicates` below will consider the bounds on the type
// parameters (including `Self`) and the explicit where-clauses,
// Add in a predicate that `Self:Trait` (where `Trait` is the
// current trait). This is needed for builtin bounds.
- let self_predicate = trait_def.trait_ref.to_poly_trait_ref().as_predicate();
+ let self_predicate = trait_def.trait_ref.to_poly_trait_ref().to_predicate();
base_predicates.predicates.push(SelfSpace, self_predicate);
// add in the explicit where-clauses
}
};
- let assoc_ty = ty::mk_projection(ccx.tcx,
- self_trait_ref,
- trait_item.ident.name);
+ let assoc_ty = ccx.tcx.mk_projection(self_trait_ref,
+ trait_item.ident.name);
let bounds = compute_bounds(&ccx.icx(&(ast_generics, trait_predicates)),
assoc_ty,
-> ty::TypeScheme<'tcx>
{
if def_id.krate != ast::LOCAL_CRATE {
- return ty::lookup_item_type(ccx.tcx, def_id);
+ return ccx.tcx.lookup_item_type(def_id);
}
match ccx.tcx.map.find(def_id.node) {
ast::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => {
let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty());
let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &**decl);
- let ty = ty::mk_bare_fn(tcx, Some(local_def(it.id)), tcx.mk_bare_fn(tofd));
+ let ty = tcx.mk_fn(Some(local_def(it.id)), tcx.mk_bare_fn(tofd));
ty::TypeScheme { ty: ty, generics: ty_generics }
}
ast::ItemTy(ref t, ref generics) => {
// Create a new generic polytype.
let ty_generics = ty_generics_for_type_or_impl(ccx, generics);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_enum(tcx, local_def(it.id), tcx.mk_substs(substs));
+ let t = tcx.mk_enum(local_def(it.id), tcx.mk_substs(substs));
ty::TypeScheme { ty: t, generics: ty_generics }
}
ast::ItemStruct(_, ref generics) => {
let ty_generics = ty_generics_for_type_or_impl(ccx, generics);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_struct(tcx, local_def(it.id), tcx.mk_substs(substs));
+ let t = tcx.mk_struct(local_def(it.id), tcx.mk_substs(substs));
ty::TypeScheme { ty: t, generics: ty_generics }
}
ast::ItemDefaultImpl(..) |
assert!(prev_predicates.is_none());
// Debugging aid.
- if ty::has_attr(tcx, local_def(it.id), "rustc_object_lifetime_default") {
+ if tcx.has_attr(local_def(it.id), "rustc_object_lifetime_default") {
let object_lifetime_default_reprs: String =
scheme.generics.types.iter()
.map(|t| match t.object_lifetime_default {
d => format!("{:?}", d),
})
.collect::<Vec<String>>()
- .connect(",");
+ .join(",");
tcx.sess.span_err(it.span, &object_lifetime_default_reprs);
}
// the node id for the Self type parameter.
let param_id = trait_id;
+ let parent = ccx.tcx.map.get_parent(param_id);
+
let def = ty::TypeParameterDef {
space: SelfSpace,
index: 0,
name: special_idents::type_self.name,
def_id: local_def(param_id),
+ default_def_id: local_def(parent),
default: None,
object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault,
};
match unbound {
Some(ref tpb) => {
// FIXME(#8559) currently requires the unbound to be built-in.
- let trait_def_id = ty::trait_ref_to_def_id(tcx, tpb);
+ let trait_def_id = tcx.trait_ref_to_def_id(tpb);
match kind_id {
Ok(kind_id) if trait_def_id != kind_id => {
tcx.sess.span_warn(span,
"default bound relaxed for a type parameter, but \
this does nothing because the given bound is not \
a default. Only `?Sized` is supported");
- ty::try_add_builtin_trait(tcx, kind_id, bounds);
+ tcx.try_add_builtin_trait(kind_id, bounds);
}
_ => {}
}
}
_ if kind_id.is_ok() => {
- ty::try_add_builtin_trait(tcx, kind_id.unwrap(), bounds);
+ tcx.try_add_builtin_trait(kind_id.unwrap(), bounds);
}
// No lang item for Sized, so we can't add it as a bound.
None => {}
for bound in ¶m.bounds {
let bound_region = ast_region_to_region(ccx.tcx, bound);
let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region));
- result.predicates.push(space, outlives.as_predicate());
+ result.predicates.push(space, outlives.to_predicate());
}
}
poly_trait_ref,
&mut projections);
- result.predicates.push(space, trait_ref.as_predicate());
+ result.predicates.push(space, trait_ref.to_predicate());
for projection in &projections {
- result.predicates.push(space, projection.as_predicate());
+ result.predicates.push(space, projection.to_predicate());
}
}
result
}
+fn convert_default_type_parameter<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ path: &P<ast::Ty>,
+ space: ParamSpace,
+ index: u32)
+ -> Ty<'tcx>
+{
+ let ty = ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &path);
+
+ for leaf_ty in ty.walk() {
+ if let ty::TyParam(p) = leaf_ty.sty {
+ if p.space == space && p.idx >= index {
+ span_err!(ccx.tcx.sess, path.span, E0128,
+ "type parameters with a default cannot use \
+ forward declared identifiers");
+
+ return ccx.tcx.types.err
+ }
+ }
+ }
+
+ ty
+}
+
fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
ast_generics: &ast::Generics,
space: ParamSpace,
None => { }
}
- let default = match param.default {
- None => None,
- Some(ref path) => {
- let ty = ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &**path);
- let cur_idx = index;
-
- ty::walk_ty(ty, |t| {
- match t.sty {
- ty::TyParam(p) => if p.idx > cur_idx {
- span_err!(tcx.sess, path.span, E0128,
- "type parameters with a default cannot use \
- forward declared identifiers");
- },
- _ => {}
- }
- });
-
- Some(ty)
- }
- };
+ let default = param.default.as_ref().map(
+ |def| convert_default_type_parameter(ccx, def, space, index)
+ );
let object_lifetime_default =
compute_object_lifetime_default(ccx, param.id,
¶m.bounds, &ast_generics.where_clause);
+ let parent = tcx.map.get_parent(param.id);
+
let def = ty::TypeParameterDef {
space: space,
index: index,
name: param.ident.name,
def_id: local_def(param.id),
+ default_def_id: local_def(parent),
default: default,
object_lifetime_default: object_lifetime_default,
};
let mut projections = Vec::new();
let pred = conv_poly_trait_ref(astconv, param_ty, tr, &mut projections);
projections.into_iter()
- .map(|p| p.as_predicate())
- .chain(Some(pred.as_predicate()))
+ .map(|p| p.to_predicate())
+ .chain(Some(pred.to_predicate()))
.collect()
}
ast::RegionTyParamBound(ref lifetime) => {
ast::Return(ref ty) =>
ty::FnConverging(ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &**ty)),
ast::DefaultReturn(..) =>
- ty::FnConverging(ty::mk_nil(ccx.tcx)),
+ ty::FnConverging(ccx.tcx.mk_nil()),
ast::NoReturn(..) =>
ty::FnDiverging
};
- let t_fn = ty::mk_bare_fn(
- ccx.tcx,
- None,
+ let t_fn = ccx.tcx.mk_fn(None,
ccx.tcx.mk_bare_fn(ty::BareFnTy {
abi: abi,
unsafety: ast::Unsafety::Unsafe,
{
let types =
ty_generics.types.map(
- |def| ty::mk_param_from_def(ccx.tcx, def));
+ |def| ccx.tcx.mk_param_from_def(def));
let regions =
ty_generics.regions.map(
let required_type_free =
liberate_early_bound_regions(
tcx, body_scope,
- &ty::liberate_late_bound_regions(
- tcx, body_scope, &ty::Binder(required_type)));
+ &tcx.liberate_late_bound_regions(body_scope, &ty::Binder(required_type)));
// The "base type" comes from the impl. It too may have late-bound
// regions from the method.
let base_type_free =
liberate_early_bound_regions(
tcx, body_scope,
- &ty::liberate_late_bound_regions(
- tcx, body_scope, &ty::Binder(base_type)));
+ &tcx.liberate_late_bound_regions(body_scope, &ty::Binder(base_type)));
debug!("required_type={:?} required_type_free={:?} \
base_type={:?} base_type_free={:?}",
base_type,
base_type_free);
- let infcx = infer::new_infer_ctxt(tcx);
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false);
drop(::require_same_types(tcx,
Some(&infcx),
false,
* before we really have a `ParameterEnvironment` to check.
*/
- ty_fold::fold_regions(tcx, value, |region, _| {
+ ty_fold::fold_regions(tcx, value, &mut false, |region, _| {
match region {
ty::ReEarlyBound(data) => {
let def_id = local_def(data.param_id);
impl_def_id: ast::DefId,
impl_items: &[P<ast::ImplItem>])
{
- let impl_scheme = ty::lookup_item_type(tcx, impl_def_id);
- let impl_predicates = ty::lookup_predicates(tcx, impl_def_id);
- let impl_trait_ref = ty::impl_trait_ref(tcx, impl_def_id);
+ let impl_scheme = tcx.lookup_item_type(impl_def_id);
+ let impl_predicates = tcx.lookup_predicates(impl_def_id);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
// The trait reference is an input, so find all type parameters
// reachable from there, to start (if this is an inherent impl,
let lifetimes_in_associated_types: HashSet<_> =
impl_items.iter()
- .map(|item| ty::impl_or_trait_item(tcx, local_def(item.id)))
+ .map(|item| tcx.impl_or_trait_item(local_def(item.id)))
.filter_map(|item| match item {
ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty,
ty::ConstTraitItem(..) | ty::MethodTraitItem(..) => None
/// by `ty` (see RFC 447).
pub fn parameters_for_type<'tcx>(ty: Ty<'tcx>) -> Vec<Parameter> {
let mut result = vec![];
- ty::maybe_walk_ty(ty, |t| {
+ ty.maybe_walk(|t| {
if let ty::TyProjection(..) = t.sty {
false // projections are not injective.
} else {
```
"##,
-E0030: r##"
-When matching against a range, the compiler verifies that the range is
-non-empty. Range patterns include both end-points, so this is equivalent to
-requiring the start of the range to be less than or equal to the end of the
-range.
-
-For example:
-
-```
-match 5u32 {
- // This range is ok, albeit pointless.
- 1 ... 1 => ...
- // This range is empty, and the compiler can tell.
- 1000 ... 5 => ...
-}
-```
-"##,
-
E0033: r##"
This error indicates that a pointer to a trait type cannot be implicitly
dereferenced by a pattern. Every trait defines a type, but because the
```
"##,
+E0044: r##"
+You can't use type parameters on foreign items. Example of erroneous code:
+
+```
+extern { fn some_func<T>(x: T); }
+```
+
+To fix this, replace the type parameter with the specializations that you
+need:
+
+```
+extern { fn some_func_i32(x: i32); }
+extern { fn some_func_i64(x: i64); }
+```
+"##,
+
E0045: r##"
Rust only supports variadic parameters for interoperability with C code in its
FFI. As such, variadic parameters can only be used with functions which are
```
Using this declaration, it must be called with at least one argument, so
-simply calling `printf()` is illegal. But the following uses are allowed:
+simply calling `printf()` is invalid. But the following uses are allowed:
```
unsafe {
```
"##,
+E0071: r##"
+You tried to use a structure initialization with a non-structure type.
+Example of erroneous code:
+
+```
+enum Foo { FirstValue };
+
+let u = Foo::FirstValue { value: 0i32 }; // error: Foo::FirstValue
+ // isn't a structure!
+// or even simpler, if the structure wasn't defined at all:
+let u = RandomName { random_field: 0i32 }; // error: RandomName
+ // isn't a structure!
+```
+
+To fix this, please check:
+ * Did you spell it right?
+ * Did you accidentally use an enum as a struct?
+ * Did you accidentally make an enum when you intended to use a struct?
+
+Here is the previous code with all missing information:
+
+```
+struct Inner {
+ value: i32
+}
+
+enum Foo {
+ FirstValue(Inner)
+}
+
+fn main() {
+ let u = Foo::FirstValue(Inner { value: 0i32 });
+
+ let t = Inner { value: 0i32 };
+}
+```
+"##,
+
E0072: r##"
When defining a recursive struct or enum, any use of the type being defined
from inside the definition must occur behind a pointer (like `Box` or `&`).
Consider the following erroneous definition of a type for a list of bytes:
```
-// error, illegal recursive struct type
+// error, invalid recursive struct type
struct ListNode {
head: u8,
tail: Option<ListNode>,
Now it's possible to create at least one instance of `Foo`: `Foo { x: None }`.
"##,
+E0074: r##"
+When using the `#[simd]` attribute on a tuple struct, the components of the
+tuple struct must all be of a concrete, nongeneric type so the compiler can
+reason about how to use SIMD with them. This error will occur if the types
+are generic.
+
+```
+#[simd]
+struct Bad<T>(T, T, T); // This will cause an error
+
+#[simd]
+struct Good(u32, u32, u32); // This will not
+```
+"##,
+
+E0075: r##"
+The `#[simd]` attribute can only be applied to non-empty tuple structs, because
+it doesn't make sense to try to use SIMD operations when there are no values to
+operate on.
+
+```
+#[simd]
+struct Bad; // This will cause an error
+
+#[simd]
+struct Good(u32); // This will not
+```
+"##,
+
+E0076: r##"
+When using the `#[simd]` attribute to automatically use SIMD operations in a
+tuple struct, the fields in the struct must all be of the same type, or the
+compiler will trigger this error.
+
+```
+#[simd]
+struct Bad(u16, u32, u32); // This will cause an error
+
+#[simd]
+struct Good(u32, u32, u32); // This will not
+```
+
+"##,
+
+E0077: r##"
+When using the `#[simd]` attribute on a tuple struct, the elements in the tuple
+must be machine types so SIMD operations can be applied to them.
+
+```
+#[simd]
+struct Bad(String); // This will cause an error
+
+#[simd]
+struct Good(u32, u32, u32); // This will not
+```
+"##,
+
E0081: r##"
Enum discriminants are used to differentiate enum variants stored in memory.
This error indicates that the same value was used for two or more variants,
parameters.
"##,
+E0088: r##"
+You gave too many lifetime parameters. Erroneous code example:
+
+```
+fn f() {}
+
+fn main() {
+ f::<'static>() // error: too many lifetime parameters provided
+}
+```
+
+Please check you give the right number of lifetime parameters. Example:
+
+```
+fn f() {}
+
+fn main() {
+ f() // ok!
+}
+```
+
+It's also important to note that the Rust compiler can generally
+determine the lifetime by itself. Example:
+
+```
+struct Foo {
+ value: String
+}
+
+impl Foo {
+ // it can be written like this
+ fn get_value<'a>(&'a self) -> &'a str { &self.value }
+ // but the compiler works fine with this too:
+ fn without_lifetime(&self) -> &str { &self.value }
+}
+
+fn main() {
+ let f = Foo { value: "hello".to_owned() };
+
+ println!("{}", f.get_value());
+ println!("{}", f.without_lifetime());
+}
+```
+"##,
+
E0089: r##"
Not enough type parameters were supplied for a function. For example:
```
"##,
+E0091: r##"
+You gave an unnecessary type parameter in a type alias. Erroneous code
+example:
+
+```
+type Foo<T> = u32; // error: type parameter `T` is unused
+// or:
+type Foo<A,B> = Box<A>; // error: type parameter `B` is unused
+```
+
+Please check you didn't write too many type parameters. Example:
+
+```
+type Foo = u32; // ok!
+type Foo<A> = Box<A>; // ok!
+```
+"##,
+
+E0092: r##"
+You tried to declare an undefined atomic operation function.
+Erroneous code example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_foo(); // error: unrecognized atomic operation
+ // function
+}
+```
+
+Please check you didn't make a mistake in the function's name. All intrinsic
+functions are defined in librustc_trans/trans/intrinsic.rs and in
+libcore/intrinsics.rs in the Rust source code. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_fence(); // ok!
+}
+```
+"##,
+
+E0093: r##"
+You declared an unknown intrinsic function. Erroneous code example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn foo(); // error: unrecognized intrinsic function: `foo`
+}
+
+fn main() {
+ unsafe {
+ foo();
+ }
+}
+```
+
+Please check you didn't make a mistake in the function's name. All intrinsic
+functions are defined in librustc_trans/trans/intrinsic.rs and in
+libcore/intrinsics.rs in the Rust source code. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_fence(); // ok!
+}
+
+fn main() {
+ unsafe {
+ atomic_fence();
+ }
+}
+```
+"##,
+
+E0094: r##"
+You gave an invalid number of type parameters to an intrinsic function.
+Erroneous code example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T, U>() -> usize; // error: intrinsic has wrong number
+ // of type parameters
+}
+```
+
+Please check that you provided the right number of type parameters
+and verify with the function declaration in the Rust source code.
+Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>() -> usize; // ok!
+}
+```
+"##,
+
+E0101: r##"
+You hit this error because the compiler lacks the information
+to determine a type for this expression. Erroneous code example:
+
+```
+fn main() {
+ let x = |_| {}; // error: cannot determine a type for this expression
+}
+```
+
+You have two possibilities to solve this situation:
+ * Give an explicit type to the expression
+ * Use the expression in a way that lets its type be inferred
+
+Examples:
+
+```
+fn main() {
+ let x = |_ : u32| {}; // ok!
+ // or:
+ let x = |_| {};
+ x(0u32);
+}
+```
+"##,
+
E0106: r##"
This error indicates that a lifetime is missing from a type. If it is an error
inside a function signature, the problem may be with failing to adhere to the
```
"##,
-E0121: r##"
-In order to be consistent with Rust's lack of global type inference, type
-placeholders are disallowed by design in item signatures.
+E0117: r##"
+This error indicates a violation of one of Rust's orphan rules for trait
+implementations. The rule prohibits any implementation of a foreign trait (a
+trait defined in another crate) where
-Examples of this error include:
+ - the type that is implementing the trait is foreign
+ - all of the parameters being passed to the trait (if there are any) are also
+ foreign.
-```
-fn foo() -> _ { 5 } // error, explicitly write out the return type instead
+Here's one example of this error:
-static BAR: _ = "test"; // error, explicitly write out the type instead
```
-"##,
-
-E0131: r##"
-It is not possible to define `main` with type parameters, or even with function
-parameters. When `main` is present, it must take no arguments and return `()`.
-"##,
+impl Drop for u32 {}
+```
-E0132: r##"
-It is not possible to declare type parameters on a function that has the `start`
-attribute. Such a function must have the following type signature:
+To avoid this kind of error, ensure that at least one local type is referenced
+by the `impl`:
```
-fn(isize, *const *const u8) -> isize
+pub struct Foo; // you define your type in your crate
+
+impl Drop for Foo { // and you can implement the trait on it!
+ // code of trait implementation here
+}
+
+impl From<Foo> for i32 { // or you use a type from your crate as
+ // a type parameter
+ fn from(i: Foo) -> i32 {
+ 0
+ }
+}
```
-"##,
-E0166: r##"
-This error means that the compiler found a return expression in a function
-marked as diverging. A function diverges if it has `!` in the place of the
-return type in its signature. For example:
+Alternatively, define a trait locally and implement that instead:
```
-fn foo() -> ! { return; } // error
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for u32 {
+ fn get(&self) -> usize { 0 }
+}
```
-For a function that diverges, every control path in the function must never
-return, for example with a `loop` that never breaks or a call to another
-diverging function (such as `panic!()`).
-"##,
+For information on the design of the orphan rules, see [RFC 1023].
-E0178: r##"
-In types, the `+` type operator has low precedence, so it is often necessary
-to use parentheses.
+[RFC 1023]: https://github.com/rust-lang/rfcs/pull/1023
+"##,
-For example:
+E0119: r##"
+There are conflicting trait implementations for the same type.
+Example of erroneous code:
```
-trait Foo {}
-
-struct Bar<'a> {
- w: &'a Foo + Copy, // error, use &'a (Foo + Copy)
- x: &'a Foo + 'a, // error, use &'a (Foo + 'a)
- y: &'a mut Foo + 'a, // error, use &'a mut (Foo + 'a)
- z: fn() -> Foo + 'a, // error, use fn() -> (Foo + 'a)
+trait MyTrait {
+ fn get(&self) -> usize;
}
-```
-More details can be found in [RFC 438].
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
-[RFC 438]: https://github.com/rust-lang/rfcs/pull/438
-"##,
+struct Foo {
+ value: usize
+}
-E0184: r##"
-Explicitly implementing both Drop and Copy for a type is currently disallowed.
-This feature can make some sense in theory, but the current implementation is
-incorrect and can lead to memory unsafety (see [issue #20126][iss20126]), so
-it has been disabled for now.
+impl MyTrait for Foo { // error: conflicting implementations for trait
+ // `MyTrait`
+ fn get(&self) -> usize { self.value }
+}
+```
-[iss20126]: https://github.com/rust-lang/rust/issues/20126
-"##,
+When looking for the implementation for the trait, the compiler finds
+both the `impl<T> MyTrait for T` where T is all types and the `impl
+MyTrait for Foo`. Since a trait cannot be implemented multiple times,
+this is an error. So, when you write:
-E0185: r##"
-An associated function for a trait was defined to be static, but an
-implementation of the trait declared the same function to be a method (i.e. to
-take a `self` parameter).
+```
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
+```
-Here's an example of this error:
+This makes the trait implemented on all types in the scope. So if you
+try to implement it on another one after that, the implementations will
+conflict. Example:
```
-trait Foo {
- fn foo();
+trait MyTrait {
+ fn get(&self) -> usize;
}
-struct Bar;
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
-impl Foo for Bar {
- // error, method `foo` has a `&self` declaration in the impl, but not in
- // the trait
- fn foo(&self) {}
+struct Foo;
+
+fn main() {
+ let f = Foo;
+
+ f.get(); // the trait is implemented so we can use it
}
+```
"##,
-E0186: r##"
-An associated function for a trait was defined to be a method (i.e. to take a
-`self` parameter), but an implementation of the trait declared the same function
-to be static.
-
-Here's an example of this error:
+E0120: r##"
+An attempt was made to implement Drop on a trait, which is not allowed: only
+structs and enums can implement Drop. An example causing this error:
```
-trait Foo {
- fn foo(&self);
+trait MyTrait {}
+
+impl Drop for MyTrait {
+ fn drop(&mut self) {}
+}
+```
+
+A workaround for this problem is to wrap the trait up in a struct, and implement
+Drop on that. An example is shown below:
+
+```
+trait MyTrait {}
+struct MyWrapper<T: MyTrait> { foo: T }
+
+impl <T: MyTrait> Drop for MyWrapper<T> {
+ fn drop(&mut self) {}
+}
+
+```
+
+Alternatively, wrapping trait objects requires something like the following:
+
+```
+trait MyTrait {}
+
+//or Box<MyTrait>, if you wanted an owned trait object
+struct MyWrapper<'a> { foo: &'a MyTrait }
+
+impl <'a> Drop for MyWrapper<'a> {
+ fn drop(&mut self) {}
+}
+```
+"##,
+
+E0121: r##"
+In order to be consistent with Rust's lack of global type inference, type
+placeholders are disallowed by design in item signatures.
+
+Examples of this error include:
+
+```
+fn foo() -> _ { 5 } // error, explicitly write out the return type instead
+
+static BAR: _ = "test"; // error, explicitly write out the type instead
+```
+"##,
+
+E0124: r##"
+You declared two fields of a struct with the same name. Erroneous code
+example:
+
+```
+struct Foo {
+ field1: i32,
+ field1: i32 // error: field is already declared
+}
+```
+
+Please verify that the field names have been correctly spelled. Example:
+
+```
+struct Foo {
+ field1: i32,
+ field2: i32 // ok!
+}
+```
+"##,
+
+E0128: r##"
+Type parameter defaults can only use parameters that occur before them.
+Erroneous code example:
+
+```
+pub struct Foo<T=U, U=()> {
+ field1: T,
+ field2: U,
+}
+// error: type parameters with a default cannot use forward declared
+// identifiers
+```
+
+Since type parameters are evaluated in-order, you may be able to fix this issue
+by doing:
+
+```
+pub struct Foo<U=(), T=U> {
+ field1: T,
+ field2: U,
+}
+```
+
+Please also verify that this wasn't because of a name-clash and rename the type
+parameter if so.
+"##,
+
+E0130: r##"
+You declared a pattern as an argument in a foreign function declaration.
+Erroneous code example:
+
+```
+extern {
+ fn foo((a, b): (u32, u32)); // error: patterns aren't allowed in foreign
+ // function declarations
+}
+```
+
+Please replace the pattern argument with a regular one. Example:
+
+```
+struct SomeStruct {
+ a: u32,
+ b: u32,
+}
+
+extern {
+ fn foo(s: SomeStruct); // ok!
+}
+// or
+extern {
+ fn foo(a: (u32, u32)); // ok!
+}
+```
+"##,
+
+E0131: r##"
+It is not possible to define `main` with type parameters, or even with function
+parameters. When `main` is present, it must take no arguments and return `()`.
+"##,
+
+E0132: r##"
+It is not possible to declare type parameters on a function that has the `start`
+attribute. Such a function must have the following type signature:
+
+```
+fn(isize, *const *const u8) -> isize
+```
+"##,
+
+E0159: r##"
+You tried to use a trait as a struct constructor. Erroneous code example:
+
+```
+trait TraitNotAStruct {}
+
+TraitNotAStruct{ value: 0 }; // error: use of trait `TraitNotAStruct` as a
+ // struct constructor
+```
+
+Please verify you used the correct type name or please implement the trait
+on a struct and use this struct constructor. Example:
+
+```
+trait TraitNotAStruct {}
+
+struct Foo {
+ value: i32
+}
+
+Foo{ value: 0 }; // ok!
+```
+"##,
+
+E0166: r##"
+This error means that the compiler found a return expression in a function
+marked as diverging. A function diverges if it has `!` in the place of the
+return type in its signature. For example:
+
+```
+fn foo() -> ! { return; } // error
+```
+
+For a function that diverges, every control path in the function must never
+return, for example with a `loop` that never breaks or a call to another
+diverging function (such as `panic!()`).
+"##,
+
+E0172: r##"
+This error means that an attempt was made to specify the type of a variable with
+a combination of a concrete type and a trait. Consider the following example:
+
+```
+fn foo(bar: i32+std::fmt::Display) {}
+```
+
+The code is trying to specify that we want to receive a signed 32-bit integer
+which also implements `Display`. This doesn't make sense: when we pass `i32`, a
+concrete type, it implicitly includes all of the traits that it implements.
+This includes `Display`, `Debug`, `Clone`, and a host of others.
+
+If `i32` implements the trait we desire, there's no need to specify the trait
+separately. If it does not, then we need to `impl` the trait for `i32` before
+passing it into `foo`. Either way, a fixed definition for `foo` will look like
+the following:
+
+```
+fn foo(bar: i32) {}
+```
+
+To learn more about traits, take a look at the Book:
+
+https://doc.rust-lang.org/book/traits.html
+"##,
+
+E0178: r##"
+In types, the `+` type operator has low precedence, so it is often necessary
+to use parentheses.
+
+For example:
+
+```
+trait Foo {}
+
+struct Bar<'a> {
+ w: &'a Foo + Copy, // error, use &'a (Foo + Copy)
+ x: &'a Foo + 'a, // error, use &'a (Foo + 'a)
+ y: &'a mut Foo + 'a, // error, use &'a mut (Foo + 'a)
+ z: fn() -> Foo + 'a, // error, use fn() -> (Foo + 'a)
+}
+```
+
+More details can be found in [RFC 438].
+
+[RFC 438]: https://github.com/rust-lang/rfcs/pull/438
+"##,
+
+E0184: r##"
+Explicitly implementing both Drop and Copy for a type is currently disallowed.
+This feature can make some sense in theory, but the current implementation is
+incorrect and can lead to memory unsafety (see [issue #20126][iss20126]), so
+it has been disabled for now.
+
+[iss20126]: https://github.com/rust-lang/rust/issues/20126
+"##,
+
+E0185: r##"
+An associated function for a trait was defined to be static, but an
+implementation of the trait declared the same function to be a method (i.e. to
+take a `self` parameter).
+
+Here's an example of this error:
+
+```
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ // error, method `foo` has a `&self` declaration in the impl, but not in
+ // the trait
+ fn foo(&self) {}
+}
+"##,
+
+E0186: r##"
+An associated function for a trait was defined to be a method (i.e. to take a
+`self` parameter), but an implementation of the trait declared the same function
+to be static.
+
+Here's an example of this error:
+
+```
+trait Foo {
+ fn foo(&self);
}
struct Bar;
// the impl
fn foo() {}
}
+```
+"##,
+
+E0191: r##"
+Trait objects need to have all associated types specified. Erroneous code
+example:
+
+```
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait; // error: the value of the associated type `Bar` (from
+ // the trait `Trait`) must be specified
+```
+
+Please verify you specified all associated types of the trait and that you
+used the right trait. Example:
+
+```
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait<Bar=i32>; // ok!
+```
"##,
E0192: r##"
rfcs/blob/master/text/0019-opt-in-builtin-traits.md).
"##,
+E0195: r##"
+Your method's lifetime parameters do not match the trait declaration.
+Erroneous code example:
+
+```
+trait Trait {
+ fn bar<'a,'b:'a>(x: &'a str, y: &'b str);
+}
+
+struct Foo;
+
+impl Trait for Foo {
+ fn bar<'a,'b>(x: &'a str, y: &'b str) {
+ // error: lifetime parameters or bounds on method `bar`
+ // do not match the trait declaration
+ }
+}
+```
+
+The lifetime constraint `'b` for bar() implementation does not match the
+trait declaration. Ensure lifetime declarations match exactly in both trait
+declaration and implementation. Example:
+
+```
+trait Trait {
+ fn t<'a,'b:'a>(x: &'a str, y: &'b str);
+}
+
+struct Foo;
+
+impl Trait for Foo {
+ fn t<'a,'b:'a>(x: &'a str, y: &'b str) { // ok!
+ }
+}
+```
+"##,
+
E0197: r##"
Inherent implementations (one that do not implement a trait but provide
methods associated with a type) are always safe because they are not
"##,
E0201: r##"
-It is an error to define an associated function more than once.
+It is an error to define two associated items (like methods, associated types,
+associated functions, etc.) with the same identifier.
For example:
impl Foo {
fn bar(&self) -> bool { self.0 > 5 }
-
- // error: duplicate associated function
- fn bar() {}
+ fn bar() {} // error: duplicate associated function
}
trait Baz {
+ type Quux;
fn baz(&self) -> bool;
}
impl Baz for Foo {
+ type Quux = u32;
+
fn baz(&self) -> bool { true }
// error: duplicate method
fn baz(&self) -> bool { self.0 > 5 }
+
+ // error: duplicate associated type
+ type Quux = u32;
}
```
"##,
```
"##,
+E0207: r##"
+You declared an unused type parameter when implementing a trait on a type.
+Erroneous code example:
+
+```
+trait MyTrait {
+ fn get(&self) -> usize;
+}
+
+struct Foo;
+
+impl<T> MyTrait for Foo {
+ fn get(&self) -> usize {
+ 0
+ }
+}
+```
+
+Please check your implementation and remove any unused type
+parameter(s). Example:
+
+```
+trait MyTrait {
+ fn get(&self) -> usize;
+}
+
+struct Foo;
+
+impl MyTrait for Foo {
+ fn get(&self) -> usize {
+ 0
+ }
+}
+```
+"##,
+
+E0210: r##"
+This error indicates a violation of one of Rust's orphan rules for trait
+implementations. The rule concerns the use of type parameters in an
+implementation of a foreign trait (a trait defined in another crate), and
+states that type parameters must be "covered" by a local type. To understand
+what this means, it is perhaps easiest to consider a few examples.
+
+If `ForeignTrait` is a trait defined in some external crate `foo`, then the
+following trait `impl` is an error:
+
+```
+extern crate foo;
+use foo::ForeignTrait;
+
+impl<T> ForeignTrait for T { ... } // error
+```
+
+To work around this, it can be covered with a local type, `MyType`:
+
+```
+struct MyType<T>(T);
+impl<T> ForeignTrait for MyType<T> { ... } // Ok
+```
+
+For another example of an error, suppose there's another trait defined in `foo`
+named `ForeignTrait2` that takes two type parameters. Then this `impl` results
+in the same rule violation:
+
+```
+struct MyType2;
+impl<T> ForeignTrait2<T, MyType<T>> for MyType2 { ... } // error
+```
+
+The reason for this is that there are two appearances of type parameter `T` in
+the `impl` header, both as parameters for `ForeignTrait2`. The first appearance
+is uncovered, and so runs afoul of the orphan rule.
+
+Consider one more example:
+
+```
+impl<T> ForeignTrait2<MyType<T>, T> for MyType2 { ... } // Ok
+```
+
+This only differs from the previous `impl` in that the parameters `T` and
+`MyType<T>` for `ForeignTrait2` have been swapped. This example does *not*
+violate the orphan rule; it is permitted.
+
+To see why that last example was allowed, you need to understand the general
+rule. Unfortunately this rule is a bit tricky to state. Consider an `impl`:
+
+```
+impl<P1, ..., Pm> ForeignTrait<T1, ..., Tn> for T0 { ... }
+```
+
+where `P1, ..., Pm` are the type parameters of the `impl` and `T0, ..., Tn`
+are types. One of the types `T0, ..., Tn` must be a local type (this is another
+orphan rule, see the explanation for E0117). Let `i` be the smallest integer
+such that `Ti` is a local type. Then no type parameter can appear in any of the
+`Tj` for `j < i`.
+
+For information on the design of the orphan rules, see [RFC 1023].
+
+[RFC 1023]: https://github.com/rust-lang/rfcs/pull/1023
+"##,
+
+E0211: r##"
+You declared an intrinsic function with a signature that doesn't match
+the one expected by the compiler. Erroneous code example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>(); // error: intrinsic has wrong type
+}
+```
+
+Please check the function definition. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>() -> usize;
+}
+```
+"##,
+
+E0220: r##"
+You used an associated type which isn't defined in the trait.
+Erroneous code example:
+
+```
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait<F=i32>; // error: associated type `F` not found for
+ // `Trait`
+```
+
+Please verify that you used the right trait and that you didn't misspell
+the associated type name. Example:
+
+```
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait<Bar=i32>; // ok!
+```
+"##,
+
+E0223: r##"
+An attempt was made to retrieve an associated type, but the type was ambiguous.
+For example:
+
+```
+trait MyTrait {type X; }
+
+fn main() {
+ let foo: MyTrait::X;
+}
+```
+
+The problem here is that we're attempting to take the type of X from MyTrait.
+Unfortunately, the type of X is not defined, because it's only made concrete in
+implementations of the trait. A working version of this code might look like:
+
+```
+trait MyTrait {type X; }
+struct MyStruct;
+
+impl MyTrait for MyStruct {
+ type X = u32;
+}
+
+fn main() {
+ let foo: <MyStruct as MyTrait>::X;
+}
+```
+
+This syntax specifies that we want the X type from MyTrait, as made concrete in
+MyStruct. The reason that we cannot simply use `MyStruct::X` is that MyStruct
+might implement two different traits with identically-named associated types.
+This syntax allows disambiguation between the two.
+"##,
+
+E0225: r##"
+You attempted to use multiple types as bounds for a closure or trait object.
+Rust does not currently support this. A simple example that causes this error:
+
+```
+fn main() {
+ let _: Box<std::io::Read+std::io::Write>;
+}
+```
+
+Builtin traits are an exception to this rule: it's possible to have bounds of
+one non-builtin type, plus any number of builtin types. For example, the
+following compiles correctly:
+
+```
+fn main() {
+ let _: Box<std::io::Read+Copy+Sync>;
+}
+```
+"##,
+
+E0232: r##"
+The attribute must have a value. Erroneous code example:
+
+```
+#[rustc_on_unimplemented] // error: this attribute must have a value
+trait Bar {}
+```
+
+Please supply the missing value of the attribute. Example:
+
+```
+#[rustc_on_unimplemented = "foo"] // ok!
+trait Bar {}
+```
+"##,
+
E0243: r##"
This error indicates that not enough type parameters were found in a type or
trait.
```
"##,
+E0327: r##"
+You cannot use associated items other than constant items as patterns. This
+includes method items. Example of erroneous code:
+
+```
+enum B {}
+
+impl B {
+ fn bb() -> i32 { 0 }
+}
+
+fn main() {
+ match 0 {
+ B::bb => {} // error: associated items in match patterns must
+ // be constants
+ }
+}
+```
+
+Please check that you're not using a method as a pattern. Example:
+
+```
+enum B {
+ ba,
+ bb
+}
+
+fn main() {
+ match B::ba {
+ B::bb => {} // ok!
+ _ => {}
+ }
+}
+```
+"##,
+
E0368: r##"
This error indicates that a binary assignment operator like `+=` or `^=` was
applied to the wrong types. For example:
Default impls are only allowed for traits with no methods or associated items.
For more information see the [opt-in builtin traits RFC](https://github.com/rust
-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md).
+"##,
+
+E0391: r##"
+This error indicates that some types or traits depend on each other
+and therefore cannot be constructed.
+
+The following example contains a circular dependency between two traits:
+
+```
+trait FirstTrait : SecondTrait {
+
+}
+
+trait SecondTrait : FirstTrait {
+
+}
+```
+"##,
+
+E0392: r##"
+This error indicates that a type or lifetime parameter has been declared
+but not actually used. Here is an example that demonstrates the error:
+
+```
+enum Foo<T> {
+ Bar
+}
+```
+
+If the type parameter was included by mistake, this error can be fixed
+by simply removing the type parameter, as shown below:
+
+```
+enum Foo {
+ Bar
+}
+```
+
+Alternatively, if the type parameter was intentionally inserted, it must be
+used. A simple fix is shown below:
+
+```
+enum Foo<T> {
+ Bar(T)
+}
+```
+
+This error may also commonly be found when working with unsafe code. For
+example, when using raw pointers one may wish to specify the lifetime for
+which the pointed-at data is valid. An initial attempt (below) causes this
+error:
+
+```
+struct Foo<'a, T> {
+ x: *const T
+}
+```
+
+We want to express the constraint that Foo should not outlive `'a`, because
+the data pointed to by `T` is only valid for that lifetime. The problem is
+that there are no actual uses of `'a`. It's possible to work around this
+by adding a PhantomData type to the struct, using it to tell the compiler
+to act as if the struct contained a borrowed reference `&'a T`:
+
+```
+use std::marker::PhantomData;
+
+struct Foo<'a, T: 'a> {
+ x: *const T,
+ phantom: PhantomData<&'a T>
+}
+```
+
+PhantomData can also be used to express information about unused type
+parameters. You can read more about it in the API documentation:
+
+https://doc.rust-lang.org/std/marker/struct.PhantomData.html
"##
}
register_diagnostics! {
- E0044, // foreign items may not have type parameters
E0068,
- E0071,
- E0074,
- E0075,
- E0076,
- E0077,
E0085,
E0086,
- E0088,
E0090,
- E0091,
- E0092,
- E0093,
- E0094,
- E0101,
E0102,
E0103,
E0104,
- E0117,
E0118,
- E0119,
- E0120,
E0122,
E0123,
- E0124,
E0127,
- E0128,
E0129,
- E0130,
E0141,
- E0159,
E0163,
E0164,
E0167,
E0168,
- E0172,
E0173, // manual implementations of unboxed closure traits are experimental
E0174, // explicit use of unboxed closure methods are experimental
E0182,
E0188, // can not cast a immutable reference to a mutable pointer
E0189, // deprecated: can only cast a boxed pointer to a boxed object
E0190, // deprecated: can only cast a &-pointer to an &-object
- E0191, // value of the associated type must be specified
E0193, // cannot bound type where clause bounds may only be attached to types
// involving type parameters
E0194,
- E0195, // lifetime parameters or bounds on method do not match the trait declaration
E0196, // cannot determine a type for this closure
E0203, // type parameter has more than one relaxed default bound,
// and only one is supported
- E0207, // type parameter is not constrained by the impl trait, self type, or predicate
E0208,
E0209, // builtin traits can only be implemented on structs or enums
- E0210, // type parameter is not constrained by any local type
- E0211,
E0212, // cannot extract an associated type from a higher-ranked trait bound
E0213, // associated types are not accepted in this context
E0214, // parenthesized parameters may only be used with a trait
- E0215, // angle-bracket notation is not stable with `Fn`
- E0216, // parenthetical notation is only stable with `Fn`
+// E0215, // angle-bracket notation is not stable with `Fn`
+// E0216, // parenthetical notation is only stable with `Fn`
E0217, // ambiguous associated type, defined in multiple supertraits
E0218, // no associated type defined
E0219, // associated type defined in higher-ranked supertrait
- E0220, // associated type not found for type parameter
E0221, // ambiguous associated type in bounds
- //E0222, // Error code E0045 (variadic function must have C calling
- // convention) duplicate
- E0223, // ambiguous associated type
+// E0222, // Error code E0045 (variadic function must have C calling
+ // convention) duplicate
E0224, // at least one non-builtin train is required for an object type
- E0225, // only the builtin traits can be used as closure or object bounds
E0226, // only a single explicit lifetime bound is permitted
E0227, // ambiguous lifetime bound, explicit lifetime bound required
E0228, // explicit lifetime bound required
E0229, // associated type bindings are not allowed here
E0230, // there is no type parameter on trait
E0231, // only named substitution parameters are allowed
- E0232, // this attribute must have a value
E0233,
E0234,
E0235, // structure constructor specifies a structure of type but
E0241,
E0242, // internal error looking up a definition
E0245, // not a trait
- E0246, // illegal recursive type
+ E0246, // invalid recursive type
E0247, // found module name used as a type
E0248, // found value name used as a type
E0319, // trait impls for defaulted traits allowed just for structs/enums
E0323, // implemented an associated const when another trait item expected
E0324, // implemented a method when another trait item expected
E0325, // implemented an associated type when another trait item expected
- E0327, // referred to method instead of constant in match pattern
E0328, // cannot implement Unsize explicitly
E0329, // associated const depends on type parameter or Self.
E0366, // dropck forbid specialization to concrete type or region
// between structures with the same definition
E0390, // only a single inherent implementation marked with
// `#[lang = \"{}\"]` is allowed for the `{}` primitive
- E0391, // unsupported cyclic reference between types/traits detected
- E0392, // parameter `{}` is never used
- E0393 // the type parameter `{}` must be explicitly specified in an object
+ E0393, // the type parameter `{}` must be explicitly specified in an object
// type because its default value `{}` references the type `Self`"
+ E0399, // trait items need to be implemented because the associated
+ // type `{}` was overridden
+ E0436 // functional record update requires a struct
}
#![feature(ref_slice)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
-#![feature(slice_extras)]
+#![feature(slice_splits)]
#![feature(staged_api)]
#![feature(vec_push_all)]
+#![feature(cell_extras)]
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
use middle::def;
use middle::infer;
use middle::subst;
-use middle::ty::{self, Ty};
+use middle::ty::{self, Ty, HasTypeFlags};
use rustc::ast_map;
use session::config;
use util::common::time;
// Functions that write types into the node type table
fn write_ty_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) {
debug!("write_ty_to_tcx({}, {:?})", node_id, ty);
- assert!(!ty::type_needs_infer(ty));
+ assert!(!ty.needs_infer());
tcx.node_type_insert(node_id, ty);
}
node_id,
item_substs);
- assert!(item_substs.substs.types.all(|t| !ty::type_needs_infer(*t)));
+ assert!(!item_substs.substs.types.needs_infer());
- tcx.item_substs.borrow_mut().insert(node_id, item_substs);
+ tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs);
}
}
}
}
+fn require_c_abi_if_variadic(tcx: &ty::ctxt,
+ decl: &ast::FnDecl,
+ abi: abi::Abi,
+ span: Span) {
+ if decl.variadic && abi != abi::C {
+ span_err!(tcx.sess, span, E0045,
+ "variadic function must have C calling convention");
+ }
+}
+
fn require_same_types<'a, 'tcx, M>(tcx: &ty::ctxt<'tcx>,
maybe_infcx: Option<&infer::InferCtxt<'a, 'tcx>>,
t1_is_expected: bool,
{
let result = match maybe_infcx {
None => {
- let infcx = infer::new_infer_ctxt(tcx);
+ let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false);
infer::mk_eqty(&infcx, t1_is_expected, infer::Misc(span), t1, t2)
}
Some(infcx) => {
Ok(_) => true,
Err(ref terr) => {
span_err!(tcx.sess, span, E0211, "{}: {}", msg(), terr);
- ty::note_and_explain_type_err(tcx, terr, span);
+ tcx.note_and_explain_type_err(terr, span);
false
}
}
main_id: ast::NodeId,
main_span: Span) {
let tcx = ccx.tcx;
- let main_t = ty::node_id_to_type(tcx, main_id);
+ let main_t = tcx.node_id_to_type(main_id);
match main_t.sty {
ty::TyBareFn(..) => {
match tcx.map.find(main_id) {
}
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), tcx.mk_bare_fn(ty::BareFnTy {
+ let se_ty = tcx.mk_fn(Some(local_def(main_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: Vec::new(),
- output: ty::FnConverging(ty::mk_nil(tcx)),
+ output: ty::FnConverging(tcx.mk_nil()),
variadic: false
})
}));
start_id: ast::NodeId,
start_span: Span) {
let tcx = ccx.tcx;
- let start_t = ty::node_id_to_type(tcx, start_id);
+ let start_t = tcx.node_id_to_type(start_id);
match start_t.sty {
ty::TyBareFn(..) => {
match tcx.map.find(start_id) {
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), tcx.mk_bare_fn(ty::BareFnTy {
+ let se_ty = tcx.mk_fn(Some(local_def(start_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec!(
tcx.types.isize,
- ty::mk_imm_ptr(tcx, ty::mk_imm_ptr(tcx, tcx.types.u8))
+ tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))
),
output: ty::FnConverging(tcx.types.isize),
variadic: false,
use middle::ty_fold;
use std::cell::Cell;
-use std::iter::repeat;
use syntax::codemap::Span;
+#[derive(Clone)]
+pub struct ElisionFailureInfo {
+ pub name: String,
+ pub lifetime_count: usize,
+ pub have_bound_regions: bool
+}
+
+pub type ElidedLifetime = Result<ty::Region, Option<Vec<ElisionFailureInfo>>>;
+
/// Defines strategies for handling regions that are omitted. For
/// example, if one writes the type `&Foo`, then the lifetime of
/// this reference has been omitted. When converting this
fn anon_regions(&self,
span: Span,
count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>>;
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>>;
/// If an object omits any explicit lifetime bound, and none can
/// be derived from the object traits, what should we use? If
/// computing `object_lifetime_default` (in particular, in legacy
/// modes, it may not be relevant).
fn base_object_lifetime_default(&self, span: Span) -> ty::Region;
-
- /// Used to issue warnings in Rust 1.2, not needed after that.
- /// True if the result of `object_lifetime_default` will change in 1.3.
- fn object_lifetime_default_will_change_in_1_3(&self) -> bool {
- false
- }
-
- /// Used to issue warnings in Rust 1.2, not needed after that.
- /// True if the result of `base_object_lifetime_default` differs
- /// from the result of `object_lifetime_default`.
- fn base_object_lifetime_default_differs(&self) -> bool {
- false
- }
}
// A scope in which all regions must be explicitly named. This is used
fn anon_regions(&self,
_span: Span,
_count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>> {
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
Err(None)
}
}
// Same as `ExplicitRscope`, but provides some extra information for diagnostics
-pub struct UnelidableRscope(Vec<(String, usize)>);
+pub struct UnelidableRscope(Option<Vec<ElisionFailureInfo>>);
impl UnelidableRscope {
- pub fn new(v: Vec<(String, usize)>) -> UnelidableRscope {
+ pub fn new(v: Option<Vec<ElisionFailureInfo>>) -> UnelidableRscope {
UnelidableRscope(v)
}
}
fn anon_regions(&self,
_span: Span,
_count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>> {
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
let UnelidableRscope(ref v) = *self;
- Err(Some(v.clone()))
+ Err(v.clone())
}
fn object_lifetime_default(&self, span: Span) -> Option<ty::Region> {
fn anon_regions(&self,
_span: Span,
count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>>
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>>
{
- Ok(repeat(self.default).take(count).collect())
+ Ok(vec![self.default; count])
}
}
fn anon_regions(&self,
_: Span,
count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>>
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>>
{
Ok((0..count).map(|_| self.next_region()).collect())
}
None,
ty::ObjectLifetimeDefault::BaseDefault =>
- if false { // this will become the behavior in Rust 1.3
- Some(self.base_object_lifetime_default(span))
- } else {
- self.base_scope.object_lifetime_default(span)
- },
+ // NB: This behavior changed in Rust 1.3.
+ Some(self.base_object_lifetime_default(span)),
ty::ObjectLifetimeDefault::Specific(r) =>
Some(r),
}
fn base_object_lifetime_default(&self, span: Span) -> ty::Region {
- assert!(false, "this code should not execute until Rust 1.3");
self.base_scope.base_object_lifetime_default(span)
}
- fn object_lifetime_default_will_change_in_1_3(&self) -> bool {
- debug!("object_lifetime_default_will_change_in_1_3: {:?}", self.default);
-
- match self.default {
- ty::ObjectLifetimeDefault::Ambiguous |
- ty::ObjectLifetimeDefault::Specific(_) =>
- false,
-
- ty::ObjectLifetimeDefault::BaseDefault =>
- self.base_scope.base_object_lifetime_default_differs()
- }
- }
-
- fn base_object_lifetime_default_differs(&self) -> bool {
- debug!("base_object_lifetime_default_differs: {:?}", self.default);
-
- match self.default {
- ty::ObjectLifetimeDefault::Ambiguous |
- ty::ObjectLifetimeDefault::Specific(_) =>
- true,
-
- ty::ObjectLifetimeDefault::BaseDefault =>
- self.base_scope.base_object_lifetime_default_differs(),
- }
- }
-
fn anon_regions(&self,
span: Span,
count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>>
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>>
{
self.base_scope.anon_regions(span, count)
}
fn anon_regions(&self,
span: Span,
count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<(String, usize)>>>
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>>
{
match self.base_scope.anon_regions(span, count) {
Ok(mut v) => {
param_id={}, \
inf_index={:?}, \
initial_variance={:?})",
- ty::item_path_str(self.tcx, ast_util::local_def(item_id)),
+ self.tcx.item_path_str(ast_util::local_def(item_id)),
item_id, kind, space, index, param_id, inf_index,
initial_variance);
}
match item.node {
ast::ItemEnum(ref enum_definition, _) => {
- let scheme = ty::lookup_item_type(tcx, did);
+ let scheme = tcx.lookup_item_type(did);
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
}
ast::ItemStruct(..) => {
- let scheme = ty::lookup_item_type(tcx, did);
+ let scheme = tcx.lookup_item_type(did);
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
//
// self.add_constraints_from_generics(&scheme.generics);
- let struct_fields = ty::lookup_struct_fields(tcx, did);
+ let struct_fields = tcx.lookup_struct_fields(did);
for field_info in &struct_fields {
assert_eq!(field_info.id.krate, ast::LOCAL_CRATE);
- let field_ty = ty::node_id_to_type(tcx, field_info.id.node);
+ let field_ty = tcx.node_id_to_type(field_info.id.node);
self.add_constraints_from_ty(&scheme.generics, field_ty, self.covariant);
}
}
ast::ItemTrait(..) => {
- let trait_def = ty::lookup_trait_def(tcx, did);
+ let trait_def = tcx.lookup_trait_def(did);
self.add_constraints_from_trait_ref(&trait_def.generics,
trait_def.trait_ref,
self.invariant);
} else {
// Parameter on an item defined within another crate:
// variance already inferred, just look it up.
- let variances = ty::item_variances(self.tcx(), item_def_id);
+ let variances = self.tcx().item_variances(item_def_id);
let variance = match kind {
TypeParam => *variances.types.get(space, index),
RegionParam => *variances.regions.get(space, index),
trait_ref,
variance);
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_ref.def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
self.add_constraints_from_substs(
generics,
ty::TyEnum(def_id, substs) |
ty::TyStruct(def_id, substs) => {
- let item_type = ty::lookup_item_type(self.tcx(), def_id);
+ let item_type = self.tcx().lookup_item_type(def_id);
// All type parameters on enums and structs should be
// in the TypeSpace.
ty::TyProjection(ref data) => {
let trait_ref = &data.trait_ref;
- let trait_def = ty::lookup_trait_def(self.tcx(), trait_ref.def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
self.add_constraints_from_substs(
generics,
trait_ref.def_id,
/// appearing in a context with ambient variance `variance`
fn add_constraints_from_mt(&mut self,
generics: &ty::Generics<'tcx>,
- mt: &ty::mt<'tcx>,
+ mt: &ty::TypeAndMut<'tcx>,
variance: VarianceTermPtr<'a>) {
match mt.mutbl {
ast::MutMutable => {
// For unit testing: check for a special "rustc_variance"
// attribute and report an error with various results if found.
- if ty::has_attr(tcx, item_def_id, "rustc_variance") {
+ if tcx.has_attr(item_def_id, "rustc_variance") {
span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{:?}", item_variances);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Character manipulation (`char` type, Unicode Scalar Value)
+//! A Unicode scalar value
//!
//! This module provides the `CharExt` trait, as well as its
//! implementation for the primitive `char` type, in order to allow
//! basic character manipulation.
//!
-//! A `char` actually represents a
-//! *[Unicode Scalar
-//! Value](http://www.unicode.org/glossary/#unicode_scalar_value)*, as it can
+//! A `char` represents a
+//! *[Unicode scalar
+//! value](http://www.unicode.org/glossary/#unicode_scalar_value)*, as it can
//! contain any Unicode code point except high-surrogate and low-surrogate code
//! points.
//!
//! (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
//! however the converse is not always true due to the above range limits
//! and, as such, should be performed via the `from_u32` function.
+//!
+//! *[See also the `char` primitive type](../primitive.char.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "char")]
use core::char::CharExt as C;
use core::option::Option::{self, Some, None};
use tables::{derived_property, property, general_category, conversions, charwidth};
// stable reexports
-pub use core::char::{MAX, from_u32, from_digit, EscapeUnicode, EscapeDefault};
+pub use core::char::{MAX, from_u32, from_u32_unchecked, from_digit, EscapeUnicode, EscapeDefault};
// unstable reexports
#[allow(deprecated)]
fn next(&mut self) -> Option<char> { self.0.next() }
}
-/// An iterator over the titlecase mapping of a given character, returned from
-/// the [`to_titlecase` method](../primitive.char.html#method.to_titlecase) on
-/// characters.
-#[unstable(feature = "unicode", reason = "recently added")]
-pub struct ToTitlecase(CaseMappingIter);
-
-#[stable(feature = "unicode_case_mapping", since = "1.2.0")]
-impl Iterator for ToTitlecase {
- type Item = char;
- fn next(&mut self) -> Option<char> { self.0.next() }
-}
-
enum CaseMappingIter {
Three(char, char, char),
/// In both of these examples, 'ß' takes two bytes to encode.
///
/// ```
- /// # #![feature(unicode)]
+ /// #![feature(unicode)]
+ ///
/// let mut b = [0; 2];
///
/// let result = 'ß'.encode_utf8(&mut b);
/// A buffer that's too small:
///
/// ```
- /// # #![feature(unicode)]
+ /// #![feature(unicode)]
+ ///
/// let mut b = [0; 1];
///
/// let result = 'ß'.encode_utf8(&mut b);
/// In both of these examples, 'ß' takes one `u16` to encode.
///
/// ```
- /// # #![feature(unicode)]
+ /// #![feature(unicode)]
+ ///
/// let mut b = [0; 1];
///
/// let result = 'ß'.encode_utf16(&mut b);
/// A buffer that's too small:
///
/// ```
- /// # #![feature(unicode)]
+ /// #![feature(unicode)]
+ ///
/// let mut b = [0; 0];
///
/// let result = 'ß'.encode_utf8(&mut b);
ToLowercase(CaseMappingIter::new(conversions::to_lower(self)))
}
- /// Converts a character to its titlecase equivalent.
- ///
- /// This performs complex unconditional mappings with no tailoring.
- /// See `to_uppercase()` for references and more information.
- ///
- /// This differs from `to_uppercase()` since Unicode contains
- /// digraphs and ligature characters.
- /// For example, U+01F3 “dz” and U+FB01 “fi”
- /// map to U+01F1 “DZ” and U+0046 U+0069 “Fi”, respectively.
- ///
- /// # Return value
- ///
- /// Returns an iterator which yields the characters corresponding to the
- /// titlecase equivalent of the character. If no conversion is possible then
- /// an iterator with just the input character is returned.
- #[unstable(feature = "unicode", reason = "recently added")]
- #[inline]
- pub fn to_titlecase(self) -> ToTitlecase {
- ToTitlecase(CaseMappingIter::new(conversions::to_title(self)))
- }
-
/// Converts a character to its uppercase equivalent.
///
/// This performs complex unconditional mappings with no tailoring:
}
}
- pub fn to_title(c: char) -> [char; 3] {
- match bsearch_case_table(c, to_titlecase_table) {
- None => [c, '\0', '\0'],
- Some(index) => to_titlecase_table[index].1
- }
- }
-
fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> {
match table.binary_search_by(|&(key, _)| {
if c == key { Equal }
['\u{118bf}', '\0', '\0'])
];
- const to_titlecase_table: &'static [(char, [char; 3])] = &[
- ('\u{61}', ['\u{41}', '\0', '\0']), ('\u{62}', ['\u{42}', '\0', '\0']), ('\u{63}',
- ['\u{43}', '\0', '\0']), ('\u{64}', ['\u{44}', '\0', '\0']), ('\u{65}', ['\u{45}', '\0',
- '\0']), ('\u{66}', ['\u{46}', '\0', '\0']), ('\u{67}', ['\u{47}', '\0', '\0']), ('\u{68}',
- ['\u{48}', '\0', '\0']), ('\u{69}', ['\u{49}', '\0', '\0']), ('\u{6a}', ['\u{4a}', '\0',
- '\0']), ('\u{6b}', ['\u{4b}', '\0', '\0']), ('\u{6c}', ['\u{4c}', '\0', '\0']), ('\u{6d}',
- ['\u{4d}', '\0', '\0']), ('\u{6e}', ['\u{4e}', '\0', '\0']), ('\u{6f}', ['\u{4f}', '\0',
- '\0']), ('\u{70}', ['\u{50}', '\0', '\0']), ('\u{71}', ['\u{51}', '\0', '\0']), ('\u{72}',
- ['\u{52}', '\0', '\0']), ('\u{73}', ['\u{53}', '\0', '\0']), ('\u{74}', ['\u{54}', '\0',
- '\0']), ('\u{75}', ['\u{55}', '\0', '\0']), ('\u{76}', ['\u{56}', '\0', '\0']), ('\u{77}',
- ['\u{57}', '\0', '\0']), ('\u{78}', ['\u{58}', '\0', '\0']), ('\u{79}', ['\u{59}', '\0',
- '\0']), ('\u{7a}', ['\u{5a}', '\0', '\0']), ('\u{b5}', ['\u{39c}', '\0', '\0']), ('\u{df}',
- ['\u{53}', '\u{73}', '\0']), ('\u{e0}', ['\u{c0}', '\0', '\0']), ('\u{e1}', ['\u{c1}', '\0',
- '\0']), ('\u{e2}', ['\u{c2}', '\0', '\0']), ('\u{e3}', ['\u{c3}', '\0', '\0']), ('\u{e4}',
- ['\u{c4}', '\0', '\0']), ('\u{e5}', ['\u{c5}', '\0', '\0']), ('\u{e6}', ['\u{c6}', '\0',
- '\0']), ('\u{e7}', ['\u{c7}', '\0', '\0']), ('\u{e8}', ['\u{c8}', '\0', '\0']), ('\u{e9}',
- ['\u{c9}', '\0', '\0']), ('\u{ea}', ['\u{ca}', '\0', '\0']), ('\u{eb}', ['\u{cb}', '\0',
- '\0']), ('\u{ec}', ['\u{cc}', '\0', '\0']), ('\u{ed}', ['\u{cd}', '\0', '\0']), ('\u{ee}',
- ['\u{ce}', '\0', '\0']), ('\u{ef}', ['\u{cf}', '\0', '\0']), ('\u{f0}', ['\u{d0}', '\0',
- '\0']), ('\u{f1}', ['\u{d1}', '\0', '\0']), ('\u{f2}', ['\u{d2}', '\0', '\0']), ('\u{f3}',
- ['\u{d3}', '\0', '\0']), ('\u{f4}', ['\u{d4}', '\0', '\0']), ('\u{f5}', ['\u{d5}', '\0',
- '\0']), ('\u{f6}', ['\u{d6}', '\0', '\0']), ('\u{f8}', ['\u{d8}', '\0', '\0']), ('\u{f9}',
- ['\u{d9}', '\0', '\0']), ('\u{fa}', ['\u{da}', '\0', '\0']), ('\u{fb}', ['\u{db}', '\0',
- '\0']), ('\u{fc}', ['\u{dc}', '\0', '\0']), ('\u{fd}', ['\u{dd}', '\0', '\0']), ('\u{fe}',
- ['\u{de}', '\0', '\0']), ('\u{ff}', ['\u{178}', '\0', '\0']), ('\u{101}', ['\u{100}', '\0',
- '\0']), ('\u{103}', ['\u{102}', '\0', '\0']), ('\u{105}', ['\u{104}', '\0', '\0']),
- ('\u{107}', ['\u{106}', '\0', '\0']), ('\u{109}', ['\u{108}', '\0', '\0']), ('\u{10b}',
- ['\u{10a}', '\0', '\0']), ('\u{10d}', ['\u{10c}', '\0', '\0']), ('\u{10f}', ['\u{10e}',
- '\0', '\0']), ('\u{111}', ['\u{110}', '\0', '\0']), ('\u{113}', ['\u{112}', '\0', '\0']),
- ('\u{115}', ['\u{114}', '\0', '\0']), ('\u{117}', ['\u{116}', '\0', '\0']), ('\u{119}',
- ['\u{118}', '\0', '\0']), ('\u{11b}', ['\u{11a}', '\0', '\0']), ('\u{11d}', ['\u{11c}',
- '\0', '\0']), ('\u{11f}', ['\u{11e}', '\0', '\0']), ('\u{121}', ['\u{120}', '\0', '\0']),
- ('\u{123}', ['\u{122}', '\0', '\0']), ('\u{125}', ['\u{124}', '\0', '\0']), ('\u{127}',
- ['\u{126}', '\0', '\0']), ('\u{129}', ['\u{128}', '\0', '\0']), ('\u{12b}', ['\u{12a}',
- '\0', '\0']), ('\u{12d}', ['\u{12c}', '\0', '\0']), ('\u{12f}', ['\u{12e}', '\0', '\0']),
- ('\u{131}', ['\u{49}', '\0', '\0']), ('\u{133}', ['\u{132}', '\0', '\0']), ('\u{135}',
- ['\u{134}', '\0', '\0']), ('\u{137}', ['\u{136}', '\0', '\0']), ('\u{13a}', ['\u{139}',
- '\0', '\0']), ('\u{13c}', ['\u{13b}', '\0', '\0']), ('\u{13e}', ['\u{13d}', '\0', '\0']),
- ('\u{140}', ['\u{13f}', '\0', '\0']), ('\u{142}', ['\u{141}', '\0', '\0']), ('\u{144}',
- ['\u{143}', '\0', '\0']), ('\u{146}', ['\u{145}', '\0', '\0']), ('\u{148}', ['\u{147}',
- '\0', '\0']), ('\u{149}', ['\u{2bc}', '\u{4e}', '\0']), ('\u{14b}', ['\u{14a}', '\0',
- '\0']), ('\u{14d}', ['\u{14c}', '\0', '\0']), ('\u{14f}', ['\u{14e}', '\0', '\0']),
- ('\u{151}', ['\u{150}', '\0', '\0']), ('\u{153}', ['\u{152}', '\0', '\0']), ('\u{155}',
- ['\u{154}', '\0', '\0']), ('\u{157}', ['\u{156}', '\0', '\0']), ('\u{159}', ['\u{158}',
- '\0', '\0']), ('\u{15b}', ['\u{15a}', '\0', '\0']), ('\u{15d}', ['\u{15c}', '\0', '\0']),
- ('\u{15f}', ['\u{15e}', '\0', '\0']), ('\u{161}', ['\u{160}', '\0', '\0']), ('\u{163}',
- ['\u{162}', '\0', '\0']), ('\u{165}', ['\u{164}', '\0', '\0']), ('\u{167}', ['\u{166}',
- '\0', '\0']), ('\u{169}', ['\u{168}', '\0', '\0']), ('\u{16b}', ['\u{16a}', '\0', '\0']),
- ('\u{16d}', ['\u{16c}', '\0', '\0']), ('\u{16f}', ['\u{16e}', '\0', '\0']), ('\u{171}',
- ['\u{170}', '\0', '\0']), ('\u{173}', ['\u{172}', '\0', '\0']), ('\u{175}', ['\u{174}',
- '\0', '\0']), ('\u{177}', ['\u{176}', '\0', '\0']), ('\u{17a}', ['\u{179}', '\0', '\0']),
- ('\u{17c}', ['\u{17b}', '\0', '\0']), ('\u{17e}', ['\u{17d}', '\0', '\0']), ('\u{17f}',
- ['\u{53}', '\0', '\0']), ('\u{180}', ['\u{243}', '\0', '\0']), ('\u{183}', ['\u{182}', '\0',
- '\0']), ('\u{185}', ['\u{184}', '\0', '\0']), ('\u{188}', ['\u{187}', '\0', '\0']),
- ('\u{18c}', ['\u{18b}', '\0', '\0']), ('\u{192}', ['\u{191}', '\0', '\0']), ('\u{195}',
- ['\u{1f6}', '\0', '\0']), ('\u{199}', ['\u{198}', '\0', '\0']), ('\u{19a}', ['\u{23d}',
- '\0', '\0']), ('\u{19e}', ['\u{220}', '\0', '\0']), ('\u{1a1}', ['\u{1a0}', '\0', '\0']),
- ('\u{1a3}', ['\u{1a2}', '\0', '\0']), ('\u{1a5}', ['\u{1a4}', '\0', '\0']), ('\u{1a8}',
- ['\u{1a7}', '\0', '\0']), ('\u{1ad}', ['\u{1ac}', '\0', '\0']), ('\u{1b0}', ['\u{1af}',
- '\0', '\0']), ('\u{1b4}', ['\u{1b3}', '\0', '\0']), ('\u{1b6}', ['\u{1b5}', '\0', '\0']),
- ('\u{1b9}', ['\u{1b8}', '\0', '\0']), ('\u{1bd}', ['\u{1bc}', '\0', '\0']), ('\u{1bf}',
- ['\u{1f7}', '\0', '\0']), ('\u{1c4}', ['\u{1c5}', '\0', '\0']), ('\u{1c5}', ['\u{1c5}',
- '\0', '\0']), ('\u{1c6}', ['\u{1c5}', '\0', '\0']), ('\u{1c7}', ['\u{1c8}', '\0', '\0']),
- ('\u{1c8}', ['\u{1c8}', '\0', '\0']), ('\u{1c9}', ['\u{1c8}', '\0', '\0']), ('\u{1ca}',
- ['\u{1cb}', '\0', '\0']), ('\u{1cb}', ['\u{1cb}', '\0', '\0']), ('\u{1cc}', ['\u{1cb}',
- '\0', '\0']), ('\u{1ce}', ['\u{1cd}', '\0', '\0']), ('\u{1d0}', ['\u{1cf}', '\0', '\0']),
- ('\u{1d2}', ['\u{1d1}', '\0', '\0']), ('\u{1d4}', ['\u{1d3}', '\0', '\0']), ('\u{1d6}',
- ['\u{1d5}', '\0', '\0']), ('\u{1d8}', ['\u{1d7}', '\0', '\0']), ('\u{1da}', ['\u{1d9}',
- '\0', '\0']), ('\u{1dc}', ['\u{1db}', '\0', '\0']), ('\u{1dd}', ['\u{18e}', '\0', '\0']),
- ('\u{1df}', ['\u{1de}', '\0', '\0']), ('\u{1e1}', ['\u{1e0}', '\0', '\0']), ('\u{1e3}',
- ['\u{1e2}', '\0', '\0']), ('\u{1e5}', ['\u{1e4}', '\0', '\0']), ('\u{1e7}', ['\u{1e6}',
- '\0', '\0']), ('\u{1e9}', ['\u{1e8}', '\0', '\0']), ('\u{1eb}', ['\u{1ea}', '\0', '\0']),
- ('\u{1ed}', ['\u{1ec}', '\0', '\0']), ('\u{1ef}', ['\u{1ee}', '\0', '\0']), ('\u{1f0}',
- ['\u{4a}', '\u{30c}', '\0']), ('\u{1f1}', ['\u{1f2}', '\0', '\0']), ('\u{1f2}', ['\u{1f2}',
- '\0', '\0']), ('\u{1f3}', ['\u{1f2}', '\0', '\0']), ('\u{1f5}', ['\u{1f4}', '\0', '\0']),
- ('\u{1f9}', ['\u{1f8}', '\0', '\0']), ('\u{1fb}', ['\u{1fa}', '\0', '\0']), ('\u{1fd}',
- ['\u{1fc}', '\0', '\0']), ('\u{1ff}', ['\u{1fe}', '\0', '\0']), ('\u{201}', ['\u{200}',
- '\0', '\0']), ('\u{203}', ['\u{202}', '\0', '\0']), ('\u{205}', ['\u{204}', '\0', '\0']),
- ('\u{207}', ['\u{206}', '\0', '\0']), ('\u{209}', ['\u{208}', '\0', '\0']), ('\u{20b}',
- ['\u{20a}', '\0', '\0']), ('\u{20d}', ['\u{20c}', '\0', '\0']), ('\u{20f}', ['\u{20e}',
- '\0', '\0']), ('\u{211}', ['\u{210}', '\0', '\0']), ('\u{213}', ['\u{212}', '\0', '\0']),
- ('\u{215}', ['\u{214}', '\0', '\0']), ('\u{217}', ['\u{216}', '\0', '\0']), ('\u{219}',
- ['\u{218}', '\0', '\0']), ('\u{21b}', ['\u{21a}', '\0', '\0']), ('\u{21d}', ['\u{21c}',
- '\0', '\0']), ('\u{21f}', ['\u{21e}', '\0', '\0']), ('\u{223}', ['\u{222}', '\0', '\0']),
- ('\u{225}', ['\u{224}', '\0', '\0']), ('\u{227}', ['\u{226}', '\0', '\0']), ('\u{229}',
- ['\u{228}', '\0', '\0']), ('\u{22b}', ['\u{22a}', '\0', '\0']), ('\u{22d}', ['\u{22c}',
- '\0', '\0']), ('\u{22f}', ['\u{22e}', '\0', '\0']), ('\u{231}', ['\u{230}', '\0', '\0']),
- ('\u{233}', ['\u{232}', '\0', '\0']), ('\u{23c}', ['\u{23b}', '\0', '\0']), ('\u{23f}',
- ['\u{2c7e}', '\0', '\0']), ('\u{240}', ['\u{2c7f}', '\0', '\0']), ('\u{242}', ['\u{241}',
- '\0', '\0']), ('\u{247}', ['\u{246}', '\0', '\0']), ('\u{249}', ['\u{248}', '\0', '\0']),
- ('\u{24b}', ['\u{24a}', '\0', '\0']), ('\u{24d}', ['\u{24c}', '\0', '\0']), ('\u{24f}',
- ['\u{24e}', '\0', '\0']), ('\u{250}', ['\u{2c6f}', '\0', '\0']), ('\u{251}', ['\u{2c6d}',
- '\0', '\0']), ('\u{252}', ['\u{2c70}', '\0', '\0']), ('\u{253}', ['\u{181}', '\0', '\0']),
- ('\u{254}', ['\u{186}', '\0', '\0']), ('\u{256}', ['\u{189}', '\0', '\0']), ('\u{257}',
- ['\u{18a}', '\0', '\0']), ('\u{259}', ['\u{18f}', '\0', '\0']), ('\u{25b}', ['\u{190}',
- '\0', '\0']), ('\u{25c}', ['\u{a7ab}', '\0', '\0']), ('\u{260}', ['\u{193}', '\0', '\0']),
- ('\u{261}', ['\u{a7ac}', '\0', '\0']), ('\u{263}', ['\u{194}', '\0', '\0']), ('\u{265}',
- ['\u{a78d}', '\0', '\0']), ('\u{266}', ['\u{a7aa}', '\0', '\0']), ('\u{268}', ['\u{197}',
- '\0', '\0']), ('\u{269}', ['\u{196}', '\0', '\0']), ('\u{26b}', ['\u{2c62}', '\0', '\0']),
- ('\u{26c}', ['\u{a7ad}', '\0', '\0']), ('\u{26f}', ['\u{19c}', '\0', '\0']), ('\u{271}',
- ['\u{2c6e}', '\0', '\0']), ('\u{272}', ['\u{19d}', '\0', '\0']), ('\u{275}', ['\u{19f}',
- '\0', '\0']), ('\u{27d}', ['\u{2c64}', '\0', '\0']), ('\u{280}', ['\u{1a6}', '\0', '\0']),
- ('\u{283}', ['\u{1a9}', '\0', '\0']), ('\u{287}', ['\u{a7b1}', '\0', '\0']), ('\u{288}',
- ['\u{1ae}', '\0', '\0']), ('\u{289}', ['\u{244}', '\0', '\0']), ('\u{28a}', ['\u{1b1}',
- '\0', '\0']), ('\u{28b}', ['\u{1b2}', '\0', '\0']), ('\u{28c}', ['\u{245}', '\0', '\0']),
- ('\u{292}', ['\u{1b7}', '\0', '\0']), ('\u{29e}', ['\u{a7b0}', '\0', '\0']), ('\u{345}',
- ['\u{399}', '\0', '\0']), ('\u{371}', ['\u{370}', '\0', '\0']), ('\u{373}', ['\u{372}',
- '\0', '\0']), ('\u{377}', ['\u{376}', '\0', '\0']), ('\u{37b}', ['\u{3fd}', '\0', '\0']),
- ('\u{37c}', ['\u{3fe}', '\0', '\0']), ('\u{37d}', ['\u{3ff}', '\0', '\0']), ('\u{390}',
- ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\0', '\0']), ('\u{3ad}',
- ['\u{388}', '\0', '\0']), ('\u{3ae}', ['\u{389}', '\0', '\0']), ('\u{3af}', ['\u{38a}',
- '\0', '\0']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']), ('\u{3b1}', ['\u{391}', '\0',
- '\0']), ('\u{3b2}', ['\u{392}', '\0', '\0']), ('\u{3b3}', ['\u{393}', '\0', '\0']),
- ('\u{3b4}', ['\u{394}', '\0', '\0']), ('\u{3b5}', ['\u{395}', '\0', '\0']), ('\u{3b6}',
- ['\u{396}', '\0', '\0']), ('\u{3b7}', ['\u{397}', '\0', '\0']), ('\u{3b8}', ['\u{398}',
- '\0', '\0']), ('\u{3b9}', ['\u{399}', '\0', '\0']), ('\u{3ba}', ['\u{39a}', '\0', '\0']),
- ('\u{3bb}', ['\u{39b}', '\0', '\0']), ('\u{3bc}', ['\u{39c}', '\0', '\0']), ('\u{3bd}',
- ['\u{39d}', '\0', '\0']), ('\u{3be}', ['\u{39e}', '\0', '\0']), ('\u{3bf}', ['\u{39f}',
- '\0', '\0']), ('\u{3c0}', ['\u{3a0}', '\0', '\0']), ('\u{3c1}', ['\u{3a1}', '\0', '\0']),
- ('\u{3c2}', ['\u{3a3}', '\0', '\0']), ('\u{3c3}', ['\u{3a3}', '\0', '\0']), ('\u{3c4}',
- ['\u{3a4}', '\0', '\0']), ('\u{3c5}', ['\u{3a5}', '\0', '\0']), ('\u{3c6}', ['\u{3a6}',
- '\0', '\0']), ('\u{3c7}', ['\u{3a7}', '\0', '\0']), ('\u{3c8}', ['\u{3a8}', '\0', '\0']),
- ('\u{3c9}', ['\u{3a9}', '\0', '\0']), ('\u{3ca}', ['\u{3aa}', '\0', '\0']), ('\u{3cb}',
- ['\u{3ab}', '\0', '\0']), ('\u{3cc}', ['\u{38c}', '\0', '\0']), ('\u{3cd}', ['\u{38e}',
- '\0', '\0']), ('\u{3ce}', ['\u{38f}', '\0', '\0']), ('\u{3d0}', ['\u{392}', '\0', '\0']),
- ('\u{3d1}', ['\u{398}', '\0', '\0']), ('\u{3d5}', ['\u{3a6}', '\0', '\0']), ('\u{3d6}',
- ['\u{3a0}', '\0', '\0']), ('\u{3d7}', ['\u{3cf}', '\0', '\0']), ('\u{3d9}', ['\u{3d8}',
- '\0', '\0']), ('\u{3db}', ['\u{3da}', '\0', '\0']), ('\u{3dd}', ['\u{3dc}', '\0', '\0']),
- ('\u{3df}', ['\u{3de}', '\0', '\0']), ('\u{3e1}', ['\u{3e0}', '\0', '\0']), ('\u{3e3}',
- ['\u{3e2}', '\0', '\0']), ('\u{3e5}', ['\u{3e4}', '\0', '\0']), ('\u{3e7}', ['\u{3e6}',
- '\0', '\0']), ('\u{3e9}', ['\u{3e8}', '\0', '\0']), ('\u{3eb}', ['\u{3ea}', '\0', '\0']),
- ('\u{3ed}', ['\u{3ec}', '\0', '\0']), ('\u{3ef}', ['\u{3ee}', '\0', '\0']), ('\u{3f0}',
- ['\u{39a}', '\0', '\0']), ('\u{3f1}', ['\u{3a1}', '\0', '\0']), ('\u{3f2}', ['\u{3f9}',
- '\0', '\0']), ('\u{3f3}', ['\u{37f}', '\0', '\0']), ('\u{3f5}', ['\u{395}', '\0', '\0']),
- ('\u{3f8}', ['\u{3f7}', '\0', '\0']), ('\u{3fb}', ['\u{3fa}', '\0', '\0']), ('\u{430}',
- ['\u{410}', '\0', '\0']), ('\u{431}', ['\u{411}', '\0', '\0']), ('\u{432}', ['\u{412}',
- '\0', '\0']), ('\u{433}', ['\u{413}', '\0', '\0']), ('\u{434}', ['\u{414}', '\0', '\0']),
- ('\u{435}', ['\u{415}', '\0', '\0']), ('\u{436}', ['\u{416}', '\0', '\0']), ('\u{437}',
- ['\u{417}', '\0', '\0']), ('\u{438}', ['\u{418}', '\0', '\0']), ('\u{439}', ['\u{419}',
- '\0', '\0']), ('\u{43a}', ['\u{41a}', '\0', '\0']), ('\u{43b}', ['\u{41b}', '\0', '\0']),
- ('\u{43c}', ['\u{41c}', '\0', '\0']), ('\u{43d}', ['\u{41d}', '\0', '\0']), ('\u{43e}',
- ['\u{41e}', '\0', '\0']), ('\u{43f}', ['\u{41f}', '\0', '\0']), ('\u{440}', ['\u{420}',
- '\0', '\0']), ('\u{441}', ['\u{421}', '\0', '\0']), ('\u{442}', ['\u{422}', '\0', '\0']),
- ('\u{443}', ['\u{423}', '\0', '\0']), ('\u{444}', ['\u{424}', '\0', '\0']), ('\u{445}',
- ['\u{425}', '\0', '\0']), ('\u{446}', ['\u{426}', '\0', '\0']), ('\u{447}', ['\u{427}',
- '\0', '\0']), ('\u{448}', ['\u{428}', '\0', '\0']), ('\u{449}', ['\u{429}', '\0', '\0']),
- ('\u{44a}', ['\u{42a}', '\0', '\0']), ('\u{44b}', ['\u{42b}', '\0', '\0']), ('\u{44c}',
- ['\u{42c}', '\0', '\0']), ('\u{44d}', ['\u{42d}', '\0', '\0']), ('\u{44e}', ['\u{42e}',
- '\0', '\0']), ('\u{44f}', ['\u{42f}', '\0', '\0']), ('\u{450}', ['\u{400}', '\0', '\0']),
- ('\u{451}', ['\u{401}', '\0', '\0']), ('\u{452}', ['\u{402}', '\0', '\0']), ('\u{453}',
- ['\u{403}', '\0', '\0']), ('\u{454}', ['\u{404}', '\0', '\0']), ('\u{455}', ['\u{405}',
- '\0', '\0']), ('\u{456}', ['\u{406}', '\0', '\0']), ('\u{457}', ['\u{407}', '\0', '\0']),
- ('\u{458}', ['\u{408}', '\0', '\0']), ('\u{459}', ['\u{409}', '\0', '\0']), ('\u{45a}',
- ['\u{40a}', '\0', '\0']), ('\u{45b}', ['\u{40b}', '\0', '\0']), ('\u{45c}', ['\u{40c}',
- '\0', '\0']), ('\u{45d}', ['\u{40d}', '\0', '\0']), ('\u{45e}', ['\u{40e}', '\0', '\0']),
- ('\u{45f}', ['\u{40f}', '\0', '\0']), ('\u{461}', ['\u{460}', '\0', '\0']), ('\u{463}',
- ['\u{462}', '\0', '\0']), ('\u{465}', ['\u{464}', '\0', '\0']), ('\u{467}', ['\u{466}',
- '\0', '\0']), ('\u{469}', ['\u{468}', '\0', '\0']), ('\u{46b}', ['\u{46a}', '\0', '\0']),
- ('\u{46d}', ['\u{46c}', '\0', '\0']), ('\u{46f}', ['\u{46e}', '\0', '\0']), ('\u{471}',
- ['\u{470}', '\0', '\0']), ('\u{473}', ['\u{472}', '\0', '\0']), ('\u{475}', ['\u{474}',
- '\0', '\0']), ('\u{477}', ['\u{476}', '\0', '\0']), ('\u{479}', ['\u{478}', '\0', '\0']),
- ('\u{47b}', ['\u{47a}', '\0', '\0']), ('\u{47d}', ['\u{47c}', '\0', '\0']), ('\u{47f}',
- ['\u{47e}', '\0', '\0']), ('\u{481}', ['\u{480}', '\0', '\0']), ('\u{48b}', ['\u{48a}',
- '\0', '\0']), ('\u{48d}', ['\u{48c}', '\0', '\0']), ('\u{48f}', ['\u{48e}', '\0', '\0']),
- ('\u{491}', ['\u{490}', '\0', '\0']), ('\u{493}', ['\u{492}', '\0', '\0']), ('\u{495}',
- ['\u{494}', '\0', '\0']), ('\u{497}', ['\u{496}', '\0', '\0']), ('\u{499}', ['\u{498}',
- '\0', '\0']), ('\u{49b}', ['\u{49a}', '\0', '\0']), ('\u{49d}', ['\u{49c}', '\0', '\0']),
- ('\u{49f}', ['\u{49e}', '\0', '\0']), ('\u{4a1}', ['\u{4a0}', '\0', '\0']), ('\u{4a3}',
- ['\u{4a2}', '\0', '\0']), ('\u{4a5}', ['\u{4a4}', '\0', '\0']), ('\u{4a7}', ['\u{4a6}',
- '\0', '\0']), ('\u{4a9}', ['\u{4a8}', '\0', '\0']), ('\u{4ab}', ['\u{4aa}', '\0', '\0']),
- ('\u{4ad}', ['\u{4ac}', '\0', '\0']), ('\u{4af}', ['\u{4ae}', '\0', '\0']), ('\u{4b1}',
- ['\u{4b0}', '\0', '\0']), ('\u{4b3}', ['\u{4b2}', '\0', '\0']), ('\u{4b5}', ['\u{4b4}',
- '\0', '\0']), ('\u{4b7}', ['\u{4b6}', '\0', '\0']), ('\u{4b9}', ['\u{4b8}', '\0', '\0']),
- ('\u{4bb}', ['\u{4ba}', '\0', '\0']), ('\u{4bd}', ['\u{4bc}', '\0', '\0']), ('\u{4bf}',
- ['\u{4be}', '\0', '\0']), ('\u{4c2}', ['\u{4c1}', '\0', '\0']), ('\u{4c4}', ['\u{4c3}',
- '\0', '\0']), ('\u{4c6}', ['\u{4c5}', '\0', '\0']), ('\u{4c8}', ['\u{4c7}', '\0', '\0']),
- ('\u{4ca}', ['\u{4c9}', '\0', '\0']), ('\u{4cc}', ['\u{4cb}', '\0', '\0']), ('\u{4ce}',
- ['\u{4cd}', '\0', '\0']), ('\u{4cf}', ['\u{4c0}', '\0', '\0']), ('\u{4d1}', ['\u{4d0}',
- '\0', '\0']), ('\u{4d3}', ['\u{4d2}', '\0', '\0']), ('\u{4d5}', ['\u{4d4}', '\0', '\0']),
- ('\u{4d7}', ['\u{4d6}', '\0', '\0']), ('\u{4d9}', ['\u{4d8}', '\0', '\0']), ('\u{4db}',
- ['\u{4da}', '\0', '\0']), ('\u{4dd}', ['\u{4dc}', '\0', '\0']), ('\u{4df}', ['\u{4de}',
- '\0', '\0']), ('\u{4e1}', ['\u{4e0}', '\0', '\0']), ('\u{4e3}', ['\u{4e2}', '\0', '\0']),
- ('\u{4e5}', ['\u{4e4}', '\0', '\0']), ('\u{4e7}', ['\u{4e6}', '\0', '\0']), ('\u{4e9}',
- ['\u{4e8}', '\0', '\0']), ('\u{4eb}', ['\u{4ea}', '\0', '\0']), ('\u{4ed}', ['\u{4ec}',
- '\0', '\0']), ('\u{4ef}', ['\u{4ee}', '\0', '\0']), ('\u{4f1}', ['\u{4f0}', '\0', '\0']),
- ('\u{4f3}', ['\u{4f2}', '\0', '\0']), ('\u{4f5}', ['\u{4f4}', '\0', '\0']), ('\u{4f7}',
- ['\u{4f6}', '\0', '\0']), ('\u{4f9}', ['\u{4f8}', '\0', '\0']), ('\u{4fb}', ['\u{4fa}',
- '\0', '\0']), ('\u{4fd}', ['\u{4fc}', '\0', '\0']), ('\u{4ff}', ['\u{4fe}', '\0', '\0']),
- ('\u{501}', ['\u{500}', '\0', '\0']), ('\u{503}', ['\u{502}', '\0', '\0']), ('\u{505}',
- ['\u{504}', '\0', '\0']), ('\u{507}', ['\u{506}', '\0', '\0']), ('\u{509}', ['\u{508}',
- '\0', '\0']), ('\u{50b}', ['\u{50a}', '\0', '\0']), ('\u{50d}', ['\u{50c}', '\0', '\0']),
- ('\u{50f}', ['\u{50e}', '\0', '\0']), ('\u{511}', ['\u{510}', '\0', '\0']), ('\u{513}',
- ['\u{512}', '\0', '\0']), ('\u{515}', ['\u{514}', '\0', '\0']), ('\u{517}', ['\u{516}',
- '\0', '\0']), ('\u{519}', ['\u{518}', '\0', '\0']), ('\u{51b}', ['\u{51a}', '\0', '\0']),
- ('\u{51d}', ['\u{51c}', '\0', '\0']), ('\u{51f}', ['\u{51e}', '\0', '\0']), ('\u{521}',
- ['\u{520}', '\0', '\0']), ('\u{523}', ['\u{522}', '\0', '\0']), ('\u{525}', ['\u{524}',
- '\0', '\0']), ('\u{527}', ['\u{526}', '\0', '\0']), ('\u{529}', ['\u{528}', '\0', '\0']),
- ('\u{52b}', ['\u{52a}', '\0', '\0']), ('\u{52d}', ['\u{52c}', '\0', '\0']), ('\u{52f}',
- ['\u{52e}', '\0', '\0']), ('\u{561}', ['\u{531}', '\0', '\0']), ('\u{562}', ['\u{532}',
- '\0', '\0']), ('\u{563}', ['\u{533}', '\0', '\0']), ('\u{564}', ['\u{534}', '\0', '\0']),
- ('\u{565}', ['\u{535}', '\0', '\0']), ('\u{566}', ['\u{536}', '\0', '\0']), ('\u{567}',
- ['\u{537}', '\0', '\0']), ('\u{568}', ['\u{538}', '\0', '\0']), ('\u{569}', ['\u{539}',
- '\0', '\0']), ('\u{56a}', ['\u{53a}', '\0', '\0']), ('\u{56b}', ['\u{53b}', '\0', '\0']),
- ('\u{56c}', ['\u{53c}', '\0', '\0']), ('\u{56d}', ['\u{53d}', '\0', '\0']), ('\u{56e}',
- ['\u{53e}', '\0', '\0']), ('\u{56f}', ['\u{53f}', '\0', '\0']), ('\u{570}', ['\u{540}',
- '\0', '\0']), ('\u{571}', ['\u{541}', '\0', '\0']), ('\u{572}', ['\u{542}', '\0', '\0']),
- ('\u{573}', ['\u{543}', '\0', '\0']), ('\u{574}', ['\u{544}', '\0', '\0']), ('\u{575}',
- ['\u{545}', '\0', '\0']), ('\u{576}', ['\u{546}', '\0', '\0']), ('\u{577}', ['\u{547}',
- '\0', '\0']), ('\u{578}', ['\u{548}', '\0', '\0']), ('\u{579}', ['\u{549}', '\0', '\0']),
- ('\u{57a}', ['\u{54a}', '\0', '\0']), ('\u{57b}', ['\u{54b}', '\0', '\0']), ('\u{57c}',
- ['\u{54c}', '\0', '\0']), ('\u{57d}', ['\u{54d}', '\0', '\0']), ('\u{57e}', ['\u{54e}',
- '\0', '\0']), ('\u{57f}', ['\u{54f}', '\0', '\0']), ('\u{580}', ['\u{550}', '\0', '\0']),
- ('\u{581}', ['\u{551}', '\0', '\0']), ('\u{582}', ['\u{552}', '\0', '\0']), ('\u{583}',
- ['\u{553}', '\0', '\0']), ('\u{584}', ['\u{554}', '\0', '\0']), ('\u{585}', ['\u{555}',
- '\0', '\0']), ('\u{586}', ['\u{556}', '\0', '\0']), ('\u{587}', ['\u{535}', '\u{582}',
- '\0']), ('\u{1d79}', ['\u{a77d}', '\0', '\0']), ('\u{1d7d}', ['\u{2c63}', '\0', '\0']),
- ('\u{1e01}', ['\u{1e00}', '\0', '\0']), ('\u{1e03}', ['\u{1e02}', '\0', '\0']), ('\u{1e05}',
- ['\u{1e04}', '\0', '\0']), ('\u{1e07}', ['\u{1e06}', '\0', '\0']), ('\u{1e09}', ['\u{1e08}',
- '\0', '\0']), ('\u{1e0b}', ['\u{1e0a}', '\0', '\0']), ('\u{1e0d}', ['\u{1e0c}', '\0',
- '\0']), ('\u{1e0f}', ['\u{1e0e}', '\0', '\0']), ('\u{1e11}', ['\u{1e10}', '\0', '\0']),
- ('\u{1e13}', ['\u{1e12}', '\0', '\0']), ('\u{1e15}', ['\u{1e14}', '\0', '\0']), ('\u{1e17}',
- ['\u{1e16}', '\0', '\0']), ('\u{1e19}', ['\u{1e18}', '\0', '\0']), ('\u{1e1b}', ['\u{1e1a}',
- '\0', '\0']), ('\u{1e1d}', ['\u{1e1c}', '\0', '\0']), ('\u{1e1f}', ['\u{1e1e}', '\0',
- '\0']), ('\u{1e21}', ['\u{1e20}', '\0', '\0']), ('\u{1e23}', ['\u{1e22}', '\0', '\0']),
- ('\u{1e25}', ['\u{1e24}', '\0', '\0']), ('\u{1e27}', ['\u{1e26}', '\0', '\0']), ('\u{1e29}',
- ['\u{1e28}', '\0', '\0']), ('\u{1e2b}', ['\u{1e2a}', '\0', '\0']), ('\u{1e2d}', ['\u{1e2c}',
- '\0', '\0']), ('\u{1e2f}', ['\u{1e2e}', '\0', '\0']), ('\u{1e31}', ['\u{1e30}', '\0',
- '\0']), ('\u{1e33}', ['\u{1e32}', '\0', '\0']), ('\u{1e35}', ['\u{1e34}', '\0', '\0']),
- ('\u{1e37}', ['\u{1e36}', '\0', '\0']), ('\u{1e39}', ['\u{1e38}', '\0', '\0']), ('\u{1e3b}',
- ['\u{1e3a}', '\0', '\0']), ('\u{1e3d}', ['\u{1e3c}', '\0', '\0']), ('\u{1e3f}', ['\u{1e3e}',
- '\0', '\0']), ('\u{1e41}', ['\u{1e40}', '\0', '\0']), ('\u{1e43}', ['\u{1e42}', '\0',
- '\0']), ('\u{1e45}', ['\u{1e44}', '\0', '\0']), ('\u{1e47}', ['\u{1e46}', '\0', '\0']),
- ('\u{1e49}', ['\u{1e48}', '\0', '\0']), ('\u{1e4b}', ['\u{1e4a}', '\0', '\0']), ('\u{1e4d}',
- ['\u{1e4c}', '\0', '\0']), ('\u{1e4f}', ['\u{1e4e}', '\0', '\0']), ('\u{1e51}', ['\u{1e50}',
- '\0', '\0']), ('\u{1e53}', ['\u{1e52}', '\0', '\0']), ('\u{1e55}', ['\u{1e54}', '\0',
- '\0']), ('\u{1e57}', ['\u{1e56}', '\0', '\0']), ('\u{1e59}', ['\u{1e58}', '\0', '\0']),
- ('\u{1e5b}', ['\u{1e5a}', '\0', '\0']), ('\u{1e5d}', ['\u{1e5c}', '\0', '\0']), ('\u{1e5f}',
- ['\u{1e5e}', '\0', '\0']), ('\u{1e61}', ['\u{1e60}', '\0', '\0']), ('\u{1e63}', ['\u{1e62}',
- '\0', '\0']), ('\u{1e65}', ['\u{1e64}', '\0', '\0']), ('\u{1e67}', ['\u{1e66}', '\0',
- '\0']), ('\u{1e69}', ['\u{1e68}', '\0', '\0']), ('\u{1e6b}', ['\u{1e6a}', '\0', '\0']),
- ('\u{1e6d}', ['\u{1e6c}', '\0', '\0']), ('\u{1e6f}', ['\u{1e6e}', '\0', '\0']), ('\u{1e71}',
- ['\u{1e70}', '\0', '\0']), ('\u{1e73}', ['\u{1e72}', '\0', '\0']), ('\u{1e75}', ['\u{1e74}',
- '\0', '\0']), ('\u{1e77}', ['\u{1e76}', '\0', '\0']), ('\u{1e79}', ['\u{1e78}', '\0',
- '\0']), ('\u{1e7b}', ['\u{1e7a}', '\0', '\0']), ('\u{1e7d}', ['\u{1e7c}', '\0', '\0']),
- ('\u{1e7f}', ['\u{1e7e}', '\0', '\0']), ('\u{1e81}', ['\u{1e80}', '\0', '\0']), ('\u{1e83}',
- ['\u{1e82}', '\0', '\0']), ('\u{1e85}', ['\u{1e84}', '\0', '\0']), ('\u{1e87}', ['\u{1e86}',
- '\0', '\0']), ('\u{1e89}', ['\u{1e88}', '\0', '\0']), ('\u{1e8b}', ['\u{1e8a}', '\0',
- '\0']), ('\u{1e8d}', ['\u{1e8c}', '\0', '\0']), ('\u{1e8f}', ['\u{1e8e}', '\0', '\0']),
- ('\u{1e91}', ['\u{1e90}', '\0', '\0']), ('\u{1e93}', ['\u{1e92}', '\0', '\0']), ('\u{1e95}',
- ['\u{1e94}', '\0', '\0']), ('\u{1e96}', ['\u{48}', '\u{331}', '\0']), ('\u{1e97}',
- ['\u{54}', '\u{308}', '\0']), ('\u{1e98}', ['\u{57}', '\u{30a}', '\0']), ('\u{1e99}',
- ['\u{59}', '\u{30a}', '\0']), ('\u{1e9a}', ['\u{41}', '\u{2be}', '\0']), ('\u{1e9b}',
- ['\u{1e60}', '\0', '\0']), ('\u{1ea1}', ['\u{1ea0}', '\0', '\0']), ('\u{1ea3}', ['\u{1ea2}',
- '\0', '\0']), ('\u{1ea5}', ['\u{1ea4}', '\0', '\0']), ('\u{1ea7}', ['\u{1ea6}', '\0',
- '\0']), ('\u{1ea9}', ['\u{1ea8}', '\0', '\0']), ('\u{1eab}', ['\u{1eaa}', '\0', '\0']),
- ('\u{1ead}', ['\u{1eac}', '\0', '\0']), ('\u{1eaf}', ['\u{1eae}', '\0', '\0']), ('\u{1eb1}',
- ['\u{1eb0}', '\0', '\0']), ('\u{1eb3}', ['\u{1eb2}', '\0', '\0']), ('\u{1eb5}', ['\u{1eb4}',
- '\0', '\0']), ('\u{1eb7}', ['\u{1eb6}', '\0', '\0']), ('\u{1eb9}', ['\u{1eb8}', '\0',
- '\0']), ('\u{1ebb}', ['\u{1eba}', '\0', '\0']), ('\u{1ebd}', ['\u{1ebc}', '\0', '\0']),
- ('\u{1ebf}', ['\u{1ebe}', '\0', '\0']), ('\u{1ec1}', ['\u{1ec0}', '\0', '\0']), ('\u{1ec3}',
- ['\u{1ec2}', '\0', '\0']), ('\u{1ec5}', ['\u{1ec4}', '\0', '\0']), ('\u{1ec7}', ['\u{1ec6}',
- '\0', '\0']), ('\u{1ec9}', ['\u{1ec8}', '\0', '\0']), ('\u{1ecb}', ['\u{1eca}', '\0',
- '\0']), ('\u{1ecd}', ['\u{1ecc}', '\0', '\0']), ('\u{1ecf}', ['\u{1ece}', '\0', '\0']),
- ('\u{1ed1}', ['\u{1ed0}', '\0', '\0']), ('\u{1ed3}', ['\u{1ed2}', '\0', '\0']), ('\u{1ed5}',
- ['\u{1ed4}', '\0', '\0']), ('\u{1ed7}', ['\u{1ed6}', '\0', '\0']), ('\u{1ed9}', ['\u{1ed8}',
- '\0', '\0']), ('\u{1edb}', ['\u{1eda}', '\0', '\0']), ('\u{1edd}', ['\u{1edc}', '\0',
- '\0']), ('\u{1edf}', ['\u{1ede}', '\0', '\0']), ('\u{1ee1}', ['\u{1ee0}', '\0', '\0']),
- ('\u{1ee3}', ['\u{1ee2}', '\0', '\0']), ('\u{1ee5}', ['\u{1ee4}', '\0', '\0']), ('\u{1ee7}',
- ['\u{1ee6}', '\0', '\0']), ('\u{1ee9}', ['\u{1ee8}', '\0', '\0']), ('\u{1eeb}', ['\u{1eea}',
- '\0', '\0']), ('\u{1eed}', ['\u{1eec}', '\0', '\0']), ('\u{1eef}', ['\u{1eee}', '\0',
- '\0']), ('\u{1ef1}', ['\u{1ef0}', '\0', '\0']), ('\u{1ef3}', ['\u{1ef2}', '\0', '\0']),
- ('\u{1ef5}', ['\u{1ef4}', '\0', '\0']), ('\u{1ef7}', ['\u{1ef6}', '\0', '\0']), ('\u{1ef9}',
- ['\u{1ef8}', '\0', '\0']), ('\u{1efb}', ['\u{1efa}', '\0', '\0']), ('\u{1efd}', ['\u{1efc}',
- '\0', '\0']), ('\u{1eff}', ['\u{1efe}', '\0', '\0']), ('\u{1f00}', ['\u{1f08}', '\0',
- '\0']), ('\u{1f01}', ['\u{1f09}', '\0', '\0']), ('\u{1f02}', ['\u{1f0a}', '\0', '\0']),
- ('\u{1f03}', ['\u{1f0b}', '\0', '\0']), ('\u{1f04}', ['\u{1f0c}', '\0', '\0']), ('\u{1f05}',
- ['\u{1f0d}', '\0', '\0']), ('\u{1f06}', ['\u{1f0e}', '\0', '\0']), ('\u{1f07}', ['\u{1f0f}',
- '\0', '\0']), ('\u{1f10}', ['\u{1f18}', '\0', '\0']), ('\u{1f11}', ['\u{1f19}', '\0',
- '\0']), ('\u{1f12}', ['\u{1f1a}', '\0', '\0']), ('\u{1f13}', ['\u{1f1b}', '\0', '\0']),
- ('\u{1f14}', ['\u{1f1c}', '\0', '\0']), ('\u{1f15}', ['\u{1f1d}', '\0', '\0']), ('\u{1f20}',
- ['\u{1f28}', '\0', '\0']), ('\u{1f21}', ['\u{1f29}', '\0', '\0']), ('\u{1f22}', ['\u{1f2a}',
- '\0', '\0']), ('\u{1f23}', ['\u{1f2b}', '\0', '\0']), ('\u{1f24}', ['\u{1f2c}', '\0',
- '\0']), ('\u{1f25}', ['\u{1f2d}', '\0', '\0']), ('\u{1f26}', ['\u{1f2e}', '\0', '\0']),
- ('\u{1f27}', ['\u{1f2f}', '\0', '\0']), ('\u{1f30}', ['\u{1f38}', '\0', '\0']), ('\u{1f31}',
- ['\u{1f39}', '\0', '\0']), ('\u{1f32}', ['\u{1f3a}', '\0', '\0']), ('\u{1f33}', ['\u{1f3b}',
- '\0', '\0']), ('\u{1f34}', ['\u{1f3c}', '\0', '\0']), ('\u{1f35}', ['\u{1f3d}', '\0',
- '\0']), ('\u{1f36}', ['\u{1f3e}', '\0', '\0']), ('\u{1f37}', ['\u{1f3f}', '\0', '\0']),
- ('\u{1f40}', ['\u{1f48}', '\0', '\0']), ('\u{1f41}', ['\u{1f49}', '\0', '\0']), ('\u{1f42}',
- ['\u{1f4a}', '\0', '\0']), ('\u{1f43}', ['\u{1f4b}', '\0', '\0']), ('\u{1f44}', ['\u{1f4c}',
- '\0', '\0']), ('\u{1f45}', ['\u{1f4d}', '\0', '\0']), ('\u{1f50}', ['\u{3a5}', '\u{313}',
- '\0']), ('\u{1f51}', ['\u{1f59}', '\0', '\0']), ('\u{1f52}', ['\u{3a5}', '\u{313}',
- '\u{300}']), ('\u{1f53}', ['\u{1f5b}', '\0', '\0']), ('\u{1f54}', ['\u{3a5}', '\u{313}',
- '\u{301}']), ('\u{1f55}', ['\u{1f5d}', '\0', '\0']), ('\u{1f56}', ['\u{3a5}', '\u{313}',
- '\u{342}']), ('\u{1f57}', ['\u{1f5f}', '\0', '\0']), ('\u{1f60}', ['\u{1f68}', '\0', '\0']),
- ('\u{1f61}', ['\u{1f69}', '\0', '\0']), ('\u{1f62}', ['\u{1f6a}', '\0', '\0']), ('\u{1f63}',
- ['\u{1f6b}', '\0', '\0']), ('\u{1f64}', ['\u{1f6c}', '\0', '\0']), ('\u{1f65}', ['\u{1f6d}',
- '\0', '\0']), ('\u{1f66}', ['\u{1f6e}', '\0', '\0']), ('\u{1f67}', ['\u{1f6f}', '\0',
- '\0']), ('\u{1f70}', ['\u{1fba}', '\0', '\0']), ('\u{1f71}', ['\u{1fbb}', '\0', '\0']),
- ('\u{1f72}', ['\u{1fc8}', '\0', '\0']), ('\u{1f73}', ['\u{1fc9}', '\0', '\0']), ('\u{1f74}',
- ['\u{1fca}', '\0', '\0']), ('\u{1f75}', ['\u{1fcb}', '\0', '\0']), ('\u{1f76}', ['\u{1fda}',
- '\0', '\0']), ('\u{1f77}', ['\u{1fdb}', '\0', '\0']), ('\u{1f78}', ['\u{1ff8}', '\0',
- '\0']), ('\u{1f79}', ['\u{1ff9}', '\0', '\0']), ('\u{1f7a}', ['\u{1fea}', '\0', '\0']),
- ('\u{1f7b}', ['\u{1feb}', '\0', '\0']), ('\u{1f7c}', ['\u{1ffa}', '\0', '\0']), ('\u{1f7d}',
- ['\u{1ffb}', '\0', '\0']), ('\u{1f80}', ['\u{1f88}', '\0', '\0']), ('\u{1f81}', ['\u{1f89}',
- '\0', '\0']), ('\u{1f82}', ['\u{1f8a}', '\0', '\0']), ('\u{1f83}', ['\u{1f8b}', '\0',
- '\0']), ('\u{1f84}', ['\u{1f8c}', '\0', '\0']), ('\u{1f85}', ['\u{1f8d}', '\0', '\0']),
- ('\u{1f86}', ['\u{1f8e}', '\0', '\0']), ('\u{1f87}', ['\u{1f8f}', '\0', '\0']), ('\u{1f90}',
- ['\u{1f98}', '\0', '\0']), ('\u{1f91}', ['\u{1f99}', '\0', '\0']), ('\u{1f92}', ['\u{1f9a}',
- '\0', '\0']), ('\u{1f93}', ['\u{1f9b}', '\0', '\0']), ('\u{1f94}', ['\u{1f9c}', '\0',
- '\0']), ('\u{1f95}', ['\u{1f9d}', '\0', '\0']), ('\u{1f96}', ['\u{1f9e}', '\0', '\0']),
- ('\u{1f97}', ['\u{1f9f}', '\0', '\0']), ('\u{1fa0}', ['\u{1fa8}', '\0', '\0']), ('\u{1fa1}',
- ['\u{1fa9}', '\0', '\0']), ('\u{1fa2}', ['\u{1faa}', '\0', '\0']), ('\u{1fa3}', ['\u{1fab}',
- '\0', '\0']), ('\u{1fa4}', ['\u{1fac}', '\0', '\0']), ('\u{1fa5}', ['\u{1fad}', '\0',
- '\0']), ('\u{1fa6}', ['\u{1fae}', '\0', '\0']), ('\u{1fa7}', ['\u{1faf}', '\0', '\0']),
- ('\u{1fb0}', ['\u{1fb8}', '\0', '\0']), ('\u{1fb1}', ['\u{1fb9}', '\0', '\0']), ('\u{1fb2}',
- ['\u{1fba}', '\u{345}', '\0']), ('\u{1fb3}', ['\u{1fbc}', '\0', '\0']), ('\u{1fb4}',
- ['\u{386}', '\u{345}', '\0']), ('\u{1fb6}', ['\u{391}', '\u{342}', '\0']), ('\u{1fb7}',
- ['\u{391}', '\u{342}', '\u{345}']), ('\u{1fbe}', ['\u{399}', '\0', '\0']), ('\u{1fc2}',
- ['\u{1fca}', '\u{345}', '\0']), ('\u{1fc3}', ['\u{1fcc}', '\0', '\0']), ('\u{1fc4}',
- ['\u{389}', '\u{345}', '\0']), ('\u{1fc6}', ['\u{397}', '\u{342}', '\0']), ('\u{1fc7}',
- ['\u{397}', '\u{342}', '\u{345}']), ('\u{1fd0}', ['\u{1fd8}', '\0', '\0']), ('\u{1fd1}',
- ['\u{1fd9}', '\0', '\0']), ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']), ('\u{1fd3}',
- ['\u{399}', '\u{308}', '\u{301}']), ('\u{1fd6}', ['\u{399}', '\u{342}', '\0']), ('\u{1fd7}',
- ['\u{399}', '\u{308}', '\u{342}']), ('\u{1fe0}', ['\u{1fe8}', '\0', '\0']), ('\u{1fe1}',
- ['\u{1fe9}', '\0', '\0']), ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']), ('\u{1fe3}',
- ['\u{3a5}', '\u{308}', '\u{301}']), ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\0']), ('\u{1fe5}',
- ['\u{1fec}', '\0', '\0']), ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\0']), ('\u{1fe7}',
- ['\u{3a5}', '\u{308}', '\u{342}']), ('\u{1ff2}', ['\u{1ffa}', '\u{345}', '\0']),
- ('\u{1ff3}', ['\u{1ffc}', '\0', '\0']), ('\u{1ff4}', ['\u{38f}', '\u{345}', '\0']),
- ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\0']), ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{345}']),
- ('\u{214e}', ['\u{2132}', '\0', '\0']), ('\u{2170}', ['\u{2160}', '\0', '\0']), ('\u{2171}',
- ['\u{2161}', '\0', '\0']), ('\u{2172}', ['\u{2162}', '\0', '\0']), ('\u{2173}', ['\u{2163}',
- '\0', '\0']), ('\u{2174}', ['\u{2164}', '\0', '\0']), ('\u{2175}', ['\u{2165}', '\0',
- '\0']), ('\u{2176}', ['\u{2166}', '\0', '\0']), ('\u{2177}', ['\u{2167}', '\0', '\0']),
- ('\u{2178}', ['\u{2168}', '\0', '\0']), ('\u{2179}', ['\u{2169}', '\0', '\0']), ('\u{217a}',
- ['\u{216a}', '\0', '\0']), ('\u{217b}', ['\u{216b}', '\0', '\0']), ('\u{217c}', ['\u{216c}',
- '\0', '\0']), ('\u{217d}', ['\u{216d}', '\0', '\0']), ('\u{217e}', ['\u{216e}', '\0',
- '\0']), ('\u{217f}', ['\u{216f}', '\0', '\0']), ('\u{2184}', ['\u{2183}', '\0', '\0']),
- ('\u{24d0}', ['\u{24b6}', '\0', '\0']), ('\u{24d1}', ['\u{24b7}', '\0', '\0']), ('\u{24d2}',
- ['\u{24b8}', '\0', '\0']), ('\u{24d3}', ['\u{24b9}', '\0', '\0']), ('\u{24d4}', ['\u{24ba}',
- '\0', '\0']), ('\u{24d5}', ['\u{24bb}', '\0', '\0']), ('\u{24d6}', ['\u{24bc}', '\0',
- '\0']), ('\u{24d7}', ['\u{24bd}', '\0', '\0']), ('\u{24d8}', ['\u{24be}', '\0', '\0']),
- ('\u{24d9}', ['\u{24bf}', '\0', '\0']), ('\u{24da}', ['\u{24c0}', '\0', '\0']), ('\u{24db}',
- ['\u{24c1}', '\0', '\0']), ('\u{24dc}', ['\u{24c2}', '\0', '\0']), ('\u{24dd}', ['\u{24c3}',
- '\0', '\0']), ('\u{24de}', ['\u{24c4}', '\0', '\0']), ('\u{24df}', ['\u{24c5}', '\0',
- '\0']), ('\u{24e0}', ['\u{24c6}', '\0', '\0']), ('\u{24e1}', ['\u{24c7}', '\0', '\0']),
- ('\u{24e2}', ['\u{24c8}', '\0', '\0']), ('\u{24e3}', ['\u{24c9}', '\0', '\0']), ('\u{24e4}',
- ['\u{24ca}', '\0', '\0']), ('\u{24e5}', ['\u{24cb}', '\0', '\0']), ('\u{24e6}', ['\u{24cc}',
- '\0', '\0']), ('\u{24e7}', ['\u{24cd}', '\0', '\0']), ('\u{24e8}', ['\u{24ce}', '\0',
- '\0']), ('\u{24e9}', ['\u{24cf}', '\0', '\0']), ('\u{2c30}', ['\u{2c00}', '\0', '\0']),
- ('\u{2c31}', ['\u{2c01}', '\0', '\0']), ('\u{2c32}', ['\u{2c02}', '\0', '\0']), ('\u{2c33}',
- ['\u{2c03}', '\0', '\0']), ('\u{2c34}', ['\u{2c04}', '\0', '\0']), ('\u{2c35}', ['\u{2c05}',
- '\0', '\0']), ('\u{2c36}', ['\u{2c06}', '\0', '\0']), ('\u{2c37}', ['\u{2c07}', '\0',
- '\0']), ('\u{2c38}', ['\u{2c08}', '\0', '\0']), ('\u{2c39}', ['\u{2c09}', '\0', '\0']),
- ('\u{2c3a}', ['\u{2c0a}', '\0', '\0']), ('\u{2c3b}', ['\u{2c0b}', '\0', '\0']), ('\u{2c3c}',
- ['\u{2c0c}', '\0', '\0']), ('\u{2c3d}', ['\u{2c0d}', '\0', '\0']), ('\u{2c3e}', ['\u{2c0e}',
- '\0', '\0']), ('\u{2c3f}', ['\u{2c0f}', '\0', '\0']), ('\u{2c40}', ['\u{2c10}', '\0',
- '\0']), ('\u{2c41}', ['\u{2c11}', '\0', '\0']), ('\u{2c42}', ['\u{2c12}', '\0', '\0']),
- ('\u{2c43}', ['\u{2c13}', '\0', '\0']), ('\u{2c44}', ['\u{2c14}', '\0', '\0']), ('\u{2c45}',
- ['\u{2c15}', '\0', '\0']), ('\u{2c46}', ['\u{2c16}', '\0', '\0']), ('\u{2c47}', ['\u{2c17}',
- '\0', '\0']), ('\u{2c48}', ['\u{2c18}', '\0', '\0']), ('\u{2c49}', ['\u{2c19}', '\0',
- '\0']), ('\u{2c4a}', ['\u{2c1a}', '\0', '\0']), ('\u{2c4b}', ['\u{2c1b}', '\0', '\0']),
- ('\u{2c4c}', ['\u{2c1c}', '\0', '\0']), ('\u{2c4d}', ['\u{2c1d}', '\0', '\0']), ('\u{2c4e}',
- ['\u{2c1e}', '\0', '\0']), ('\u{2c4f}', ['\u{2c1f}', '\0', '\0']), ('\u{2c50}', ['\u{2c20}',
- '\0', '\0']), ('\u{2c51}', ['\u{2c21}', '\0', '\0']), ('\u{2c52}', ['\u{2c22}', '\0',
- '\0']), ('\u{2c53}', ['\u{2c23}', '\0', '\0']), ('\u{2c54}', ['\u{2c24}', '\0', '\0']),
- ('\u{2c55}', ['\u{2c25}', '\0', '\0']), ('\u{2c56}', ['\u{2c26}', '\0', '\0']), ('\u{2c57}',
- ['\u{2c27}', '\0', '\0']), ('\u{2c58}', ['\u{2c28}', '\0', '\0']), ('\u{2c59}', ['\u{2c29}',
- '\0', '\0']), ('\u{2c5a}', ['\u{2c2a}', '\0', '\0']), ('\u{2c5b}', ['\u{2c2b}', '\0',
- '\0']), ('\u{2c5c}', ['\u{2c2c}', '\0', '\0']), ('\u{2c5d}', ['\u{2c2d}', '\0', '\0']),
- ('\u{2c5e}', ['\u{2c2e}', '\0', '\0']), ('\u{2c61}', ['\u{2c60}', '\0', '\0']), ('\u{2c65}',
- ['\u{23a}', '\0', '\0']), ('\u{2c66}', ['\u{23e}', '\0', '\0']), ('\u{2c68}', ['\u{2c67}',
- '\0', '\0']), ('\u{2c6a}', ['\u{2c69}', '\0', '\0']), ('\u{2c6c}', ['\u{2c6b}', '\0',
- '\0']), ('\u{2c73}', ['\u{2c72}', '\0', '\0']), ('\u{2c76}', ['\u{2c75}', '\0', '\0']),
- ('\u{2c81}', ['\u{2c80}', '\0', '\0']), ('\u{2c83}', ['\u{2c82}', '\0', '\0']), ('\u{2c85}',
- ['\u{2c84}', '\0', '\0']), ('\u{2c87}', ['\u{2c86}', '\0', '\0']), ('\u{2c89}', ['\u{2c88}',
- '\0', '\0']), ('\u{2c8b}', ['\u{2c8a}', '\0', '\0']), ('\u{2c8d}', ['\u{2c8c}', '\0',
- '\0']), ('\u{2c8f}', ['\u{2c8e}', '\0', '\0']), ('\u{2c91}', ['\u{2c90}', '\0', '\0']),
- ('\u{2c93}', ['\u{2c92}', '\0', '\0']), ('\u{2c95}', ['\u{2c94}', '\0', '\0']), ('\u{2c97}',
- ['\u{2c96}', '\0', '\0']), ('\u{2c99}', ['\u{2c98}', '\0', '\0']), ('\u{2c9b}', ['\u{2c9a}',
- '\0', '\0']), ('\u{2c9d}', ['\u{2c9c}', '\0', '\0']), ('\u{2c9f}', ['\u{2c9e}', '\0',
- '\0']), ('\u{2ca1}', ['\u{2ca0}', '\0', '\0']), ('\u{2ca3}', ['\u{2ca2}', '\0', '\0']),
- ('\u{2ca5}', ['\u{2ca4}', '\0', '\0']), ('\u{2ca7}', ['\u{2ca6}', '\0', '\0']), ('\u{2ca9}',
- ['\u{2ca8}', '\0', '\0']), ('\u{2cab}', ['\u{2caa}', '\0', '\0']), ('\u{2cad}', ['\u{2cac}',
- '\0', '\0']), ('\u{2caf}', ['\u{2cae}', '\0', '\0']), ('\u{2cb1}', ['\u{2cb0}', '\0',
- '\0']), ('\u{2cb3}', ['\u{2cb2}', '\0', '\0']), ('\u{2cb5}', ['\u{2cb4}', '\0', '\0']),
- ('\u{2cb7}', ['\u{2cb6}', '\0', '\0']), ('\u{2cb9}', ['\u{2cb8}', '\0', '\0']), ('\u{2cbb}',
- ['\u{2cba}', '\0', '\0']), ('\u{2cbd}', ['\u{2cbc}', '\0', '\0']), ('\u{2cbf}', ['\u{2cbe}',
- '\0', '\0']), ('\u{2cc1}', ['\u{2cc0}', '\0', '\0']), ('\u{2cc3}', ['\u{2cc2}', '\0',
- '\0']), ('\u{2cc5}', ['\u{2cc4}', '\0', '\0']), ('\u{2cc7}', ['\u{2cc6}', '\0', '\0']),
- ('\u{2cc9}', ['\u{2cc8}', '\0', '\0']), ('\u{2ccb}', ['\u{2cca}', '\0', '\0']), ('\u{2ccd}',
- ['\u{2ccc}', '\0', '\0']), ('\u{2ccf}', ['\u{2cce}', '\0', '\0']), ('\u{2cd1}', ['\u{2cd0}',
- '\0', '\0']), ('\u{2cd3}', ['\u{2cd2}', '\0', '\0']), ('\u{2cd5}', ['\u{2cd4}', '\0',
- '\0']), ('\u{2cd7}', ['\u{2cd6}', '\0', '\0']), ('\u{2cd9}', ['\u{2cd8}', '\0', '\0']),
- ('\u{2cdb}', ['\u{2cda}', '\0', '\0']), ('\u{2cdd}', ['\u{2cdc}', '\0', '\0']), ('\u{2cdf}',
- ['\u{2cde}', '\0', '\0']), ('\u{2ce1}', ['\u{2ce0}', '\0', '\0']), ('\u{2ce3}', ['\u{2ce2}',
- '\0', '\0']), ('\u{2cec}', ['\u{2ceb}', '\0', '\0']), ('\u{2cee}', ['\u{2ced}', '\0',
- '\0']), ('\u{2cf3}', ['\u{2cf2}', '\0', '\0']), ('\u{2d00}', ['\u{10a0}', '\0', '\0']),
- ('\u{2d01}', ['\u{10a1}', '\0', '\0']), ('\u{2d02}', ['\u{10a2}', '\0', '\0']), ('\u{2d03}',
- ['\u{10a3}', '\0', '\0']), ('\u{2d04}', ['\u{10a4}', '\0', '\0']), ('\u{2d05}', ['\u{10a5}',
- '\0', '\0']), ('\u{2d06}', ['\u{10a6}', '\0', '\0']), ('\u{2d07}', ['\u{10a7}', '\0',
- '\0']), ('\u{2d08}', ['\u{10a8}', '\0', '\0']), ('\u{2d09}', ['\u{10a9}', '\0', '\0']),
- ('\u{2d0a}', ['\u{10aa}', '\0', '\0']), ('\u{2d0b}', ['\u{10ab}', '\0', '\0']), ('\u{2d0c}',
- ['\u{10ac}', '\0', '\0']), ('\u{2d0d}', ['\u{10ad}', '\0', '\0']), ('\u{2d0e}', ['\u{10ae}',
- '\0', '\0']), ('\u{2d0f}', ['\u{10af}', '\0', '\0']), ('\u{2d10}', ['\u{10b0}', '\0',
- '\0']), ('\u{2d11}', ['\u{10b1}', '\0', '\0']), ('\u{2d12}', ['\u{10b2}', '\0', '\0']),
- ('\u{2d13}', ['\u{10b3}', '\0', '\0']), ('\u{2d14}', ['\u{10b4}', '\0', '\0']), ('\u{2d15}',
- ['\u{10b5}', '\0', '\0']), ('\u{2d16}', ['\u{10b6}', '\0', '\0']), ('\u{2d17}', ['\u{10b7}',
- '\0', '\0']), ('\u{2d18}', ['\u{10b8}', '\0', '\0']), ('\u{2d19}', ['\u{10b9}', '\0',
- '\0']), ('\u{2d1a}', ['\u{10ba}', '\0', '\0']), ('\u{2d1b}', ['\u{10bb}', '\0', '\0']),
- ('\u{2d1c}', ['\u{10bc}', '\0', '\0']), ('\u{2d1d}', ['\u{10bd}', '\0', '\0']), ('\u{2d1e}',
- ['\u{10be}', '\0', '\0']), ('\u{2d1f}', ['\u{10bf}', '\0', '\0']), ('\u{2d20}', ['\u{10c0}',
- '\0', '\0']), ('\u{2d21}', ['\u{10c1}', '\0', '\0']), ('\u{2d22}', ['\u{10c2}', '\0',
- '\0']), ('\u{2d23}', ['\u{10c3}', '\0', '\0']), ('\u{2d24}', ['\u{10c4}', '\0', '\0']),
- ('\u{2d25}', ['\u{10c5}', '\0', '\0']), ('\u{2d27}', ['\u{10c7}', '\0', '\0']), ('\u{2d2d}',
- ['\u{10cd}', '\0', '\0']), ('\u{a641}', ['\u{a640}', '\0', '\0']), ('\u{a643}', ['\u{a642}',
- '\0', '\0']), ('\u{a645}', ['\u{a644}', '\0', '\0']), ('\u{a647}', ['\u{a646}', '\0',
- '\0']), ('\u{a649}', ['\u{a648}', '\0', '\0']), ('\u{a64b}', ['\u{a64a}', '\0', '\0']),
- ('\u{a64d}', ['\u{a64c}', '\0', '\0']), ('\u{a64f}', ['\u{a64e}', '\0', '\0']), ('\u{a651}',
- ['\u{a650}', '\0', '\0']), ('\u{a653}', ['\u{a652}', '\0', '\0']), ('\u{a655}', ['\u{a654}',
- '\0', '\0']), ('\u{a657}', ['\u{a656}', '\0', '\0']), ('\u{a659}', ['\u{a658}', '\0',
- '\0']), ('\u{a65b}', ['\u{a65a}', '\0', '\0']), ('\u{a65d}', ['\u{a65c}', '\0', '\0']),
- ('\u{a65f}', ['\u{a65e}', '\0', '\0']), ('\u{a661}', ['\u{a660}', '\0', '\0']), ('\u{a663}',
- ['\u{a662}', '\0', '\0']), ('\u{a665}', ['\u{a664}', '\0', '\0']), ('\u{a667}', ['\u{a666}',
- '\0', '\0']), ('\u{a669}', ['\u{a668}', '\0', '\0']), ('\u{a66b}', ['\u{a66a}', '\0',
- '\0']), ('\u{a66d}', ['\u{a66c}', '\0', '\0']), ('\u{a681}', ['\u{a680}', '\0', '\0']),
- ('\u{a683}', ['\u{a682}', '\0', '\0']), ('\u{a685}', ['\u{a684}', '\0', '\0']), ('\u{a687}',
- ['\u{a686}', '\0', '\0']), ('\u{a689}', ['\u{a688}', '\0', '\0']), ('\u{a68b}', ['\u{a68a}',
- '\0', '\0']), ('\u{a68d}', ['\u{a68c}', '\0', '\0']), ('\u{a68f}', ['\u{a68e}', '\0',
- '\0']), ('\u{a691}', ['\u{a690}', '\0', '\0']), ('\u{a693}', ['\u{a692}', '\0', '\0']),
- ('\u{a695}', ['\u{a694}', '\0', '\0']), ('\u{a697}', ['\u{a696}', '\0', '\0']), ('\u{a699}',
- ['\u{a698}', '\0', '\0']), ('\u{a69b}', ['\u{a69a}', '\0', '\0']), ('\u{a723}', ['\u{a722}',
- '\0', '\0']), ('\u{a725}', ['\u{a724}', '\0', '\0']), ('\u{a727}', ['\u{a726}', '\0',
- '\0']), ('\u{a729}', ['\u{a728}', '\0', '\0']), ('\u{a72b}', ['\u{a72a}', '\0', '\0']),
- ('\u{a72d}', ['\u{a72c}', '\0', '\0']), ('\u{a72f}', ['\u{a72e}', '\0', '\0']), ('\u{a733}',
- ['\u{a732}', '\0', '\0']), ('\u{a735}', ['\u{a734}', '\0', '\0']), ('\u{a737}', ['\u{a736}',
- '\0', '\0']), ('\u{a739}', ['\u{a738}', '\0', '\0']), ('\u{a73b}', ['\u{a73a}', '\0',
- '\0']), ('\u{a73d}', ['\u{a73c}', '\0', '\0']), ('\u{a73f}', ['\u{a73e}', '\0', '\0']),
- ('\u{a741}', ['\u{a740}', '\0', '\0']), ('\u{a743}', ['\u{a742}', '\0', '\0']), ('\u{a745}',
- ['\u{a744}', '\0', '\0']), ('\u{a747}', ['\u{a746}', '\0', '\0']), ('\u{a749}', ['\u{a748}',
- '\0', '\0']), ('\u{a74b}', ['\u{a74a}', '\0', '\0']), ('\u{a74d}', ['\u{a74c}', '\0',
- '\0']), ('\u{a74f}', ['\u{a74e}', '\0', '\0']), ('\u{a751}', ['\u{a750}', '\0', '\0']),
- ('\u{a753}', ['\u{a752}', '\0', '\0']), ('\u{a755}', ['\u{a754}', '\0', '\0']), ('\u{a757}',
- ['\u{a756}', '\0', '\0']), ('\u{a759}', ['\u{a758}', '\0', '\0']), ('\u{a75b}', ['\u{a75a}',
- '\0', '\0']), ('\u{a75d}', ['\u{a75c}', '\0', '\0']), ('\u{a75f}', ['\u{a75e}', '\0',
- '\0']), ('\u{a761}', ['\u{a760}', '\0', '\0']), ('\u{a763}', ['\u{a762}', '\0', '\0']),
- ('\u{a765}', ['\u{a764}', '\0', '\0']), ('\u{a767}', ['\u{a766}', '\0', '\0']), ('\u{a769}',
- ['\u{a768}', '\0', '\0']), ('\u{a76b}', ['\u{a76a}', '\0', '\0']), ('\u{a76d}', ['\u{a76c}',
- '\0', '\0']), ('\u{a76f}', ['\u{a76e}', '\0', '\0']), ('\u{a77a}', ['\u{a779}', '\0',
- '\0']), ('\u{a77c}', ['\u{a77b}', '\0', '\0']), ('\u{a77f}', ['\u{a77e}', '\0', '\0']),
- ('\u{a781}', ['\u{a780}', '\0', '\0']), ('\u{a783}', ['\u{a782}', '\0', '\0']), ('\u{a785}',
- ['\u{a784}', '\0', '\0']), ('\u{a787}', ['\u{a786}', '\0', '\0']), ('\u{a78c}', ['\u{a78b}',
- '\0', '\0']), ('\u{a791}', ['\u{a790}', '\0', '\0']), ('\u{a793}', ['\u{a792}', '\0',
- '\0']), ('\u{a797}', ['\u{a796}', '\0', '\0']), ('\u{a799}', ['\u{a798}', '\0', '\0']),
- ('\u{a79b}', ['\u{a79a}', '\0', '\0']), ('\u{a79d}', ['\u{a79c}', '\0', '\0']), ('\u{a79f}',
- ['\u{a79e}', '\0', '\0']), ('\u{a7a1}', ['\u{a7a0}', '\0', '\0']), ('\u{a7a3}', ['\u{a7a2}',
- '\0', '\0']), ('\u{a7a5}', ['\u{a7a4}', '\0', '\0']), ('\u{a7a7}', ['\u{a7a6}', '\0',
- '\0']), ('\u{a7a9}', ['\u{a7a8}', '\0', '\0']), ('\u{fb00}', ['\u{46}', '\u{66}', '\0']),
- ('\u{fb01}', ['\u{46}', '\u{69}', '\0']), ('\u{fb02}', ['\u{46}', '\u{6c}', '\0']),
- ('\u{fb03}', ['\u{46}', '\u{66}', '\u{69}']), ('\u{fb04}', ['\u{46}', '\u{66}', '\u{6c}']),
- ('\u{fb05}', ['\u{53}', '\u{74}', '\0']), ('\u{fb06}', ['\u{53}', '\u{74}', '\0']),
- ('\u{fb13}', ['\u{544}', '\u{576}', '\0']), ('\u{fb14}', ['\u{544}', '\u{565}', '\0']),
- ('\u{fb15}', ['\u{544}', '\u{56b}', '\0']), ('\u{fb16}', ['\u{54e}', '\u{576}', '\0']),
- ('\u{fb17}', ['\u{544}', '\u{56d}', '\0']), ('\u{ff41}', ['\u{ff21}', '\0', '\0']),
- ('\u{ff42}', ['\u{ff22}', '\0', '\0']), ('\u{ff43}', ['\u{ff23}', '\0', '\0']), ('\u{ff44}',
- ['\u{ff24}', '\0', '\0']), ('\u{ff45}', ['\u{ff25}', '\0', '\0']), ('\u{ff46}', ['\u{ff26}',
- '\0', '\0']), ('\u{ff47}', ['\u{ff27}', '\0', '\0']), ('\u{ff48}', ['\u{ff28}', '\0',
- '\0']), ('\u{ff49}', ['\u{ff29}', '\0', '\0']), ('\u{ff4a}', ['\u{ff2a}', '\0', '\0']),
- ('\u{ff4b}', ['\u{ff2b}', '\0', '\0']), ('\u{ff4c}', ['\u{ff2c}', '\0', '\0']), ('\u{ff4d}',
- ['\u{ff2d}', '\0', '\0']), ('\u{ff4e}', ['\u{ff2e}', '\0', '\0']), ('\u{ff4f}', ['\u{ff2f}',
- '\0', '\0']), ('\u{ff50}', ['\u{ff30}', '\0', '\0']), ('\u{ff51}', ['\u{ff31}', '\0',
- '\0']), ('\u{ff52}', ['\u{ff32}', '\0', '\0']), ('\u{ff53}', ['\u{ff33}', '\0', '\0']),
- ('\u{ff54}', ['\u{ff34}', '\0', '\0']), ('\u{ff55}', ['\u{ff35}', '\0', '\0']), ('\u{ff56}',
- ['\u{ff36}', '\0', '\0']), ('\u{ff57}', ['\u{ff37}', '\0', '\0']), ('\u{ff58}', ['\u{ff38}',
- '\0', '\0']), ('\u{ff59}', ['\u{ff39}', '\0', '\0']), ('\u{ff5a}', ['\u{ff3a}', '\0',
- '\0']), ('\u{10428}', ['\u{10400}', '\0', '\0']), ('\u{10429}', ['\u{10401}', '\0', '\0']),
- ('\u{1042a}', ['\u{10402}', '\0', '\0']), ('\u{1042b}', ['\u{10403}', '\0', '\0']),
- ('\u{1042c}', ['\u{10404}', '\0', '\0']), ('\u{1042d}', ['\u{10405}', '\0', '\0']),
- ('\u{1042e}', ['\u{10406}', '\0', '\0']), ('\u{1042f}', ['\u{10407}', '\0', '\0']),
- ('\u{10430}', ['\u{10408}', '\0', '\0']), ('\u{10431}', ['\u{10409}', '\0', '\0']),
- ('\u{10432}', ['\u{1040a}', '\0', '\0']), ('\u{10433}', ['\u{1040b}', '\0', '\0']),
- ('\u{10434}', ['\u{1040c}', '\0', '\0']), ('\u{10435}', ['\u{1040d}', '\0', '\0']),
- ('\u{10436}', ['\u{1040e}', '\0', '\0']), ('\u{10437}', ['\u{1040f}', '\0', '\0']),
- ('\u{10438}', ['\u{10410}', '\0', '\0']), ('\u{10439}', ['\u{10411}', '\0', '\0']),
- ('\u{1043a}', ['\u{10412}', '\0', '\0']), ('\u{1043b}', ['\u{10413}', '\0', '\0']),
- ('\u{1043c}', ['\u{10414}', '\0', '\0']), ('\u{1043d}', ['\u{10415}', '\0', '\0']),
- ('\u{1043e}', ['\u{10416}', '\0', '\0']), ('\u{1043f}', ['\u{10417}', '\0', '\0']),
- ('\u{10440}', ['\u{10418}', '\0', '\0']), ('\u{10441}', ['\u{10419}', '\0', '\0']),
- ('\u{10442}', ['\u{1041a}', '\0', '\0']), ('\u{10443}', ['\u{1041b}', '\0', '\0']),
- ('\u{10444}', ['\u{1041c}', '\0', '\0']), ('\u{10445}', ['\u{1041d}', '\0', '\0']),
- ('\u{10446}', ['\u{1041e}', '\0', '\0']), ('\u{10447}', ['\u{1041f}', '\0', '\0']),
- ('\u{10448}', ['\u{10420}', '\0', '\0']), ('\u{10449}', ['\u{10421}', '\0', '\0']),
- ('\u{1044a}', ['\u{10422}', '\0', '\0']), ('\u{1044b}', ['\u{10423}', '\0', '\0']),
- ('\u{1044c}', ['\u{10424}', '\0', '\0']), ('\u{1044d}', ['\u{10425}', '\0', '\0']),
- ('\u{1044e}', ['\u{10426}', '\0', '\0']), ('\u{1044f}', ['\u{10427}', '\0', '\0']),
- ('\u{118c0}', ['\u{118a0}', '\0', '\0']), ('\u{118c1}', ['\u{118a1}', '\0', '\0']),
- ('\u{118c2}', ['\u{118a2}', '\0', '\0']), ('\u{118c3}', ['\u{118a3}', '\0', '\0']),
- ('\u{118c4}', ['\u{118a4}', '\0', '\0']), ('\u{118c5}', ['\u{118a5}', '\0', '\0']),
- ('\u{118c6}', ['\u{118a6}', '\0', '\0']), ('\u{118c7}', ['\u{118a7}', '\0', '\0']),
- ('\u{118c8}', ['\u{118a8}', '\0', '\0']), ('\u{118c9}', ['\u{118a9}', '\0', '\0']),
- ('\u{118ca}', ['\u{118aa}', '\0', '\0']), ('\u{118cb}', ['\u{118ab}', '\0', '\0']),
- ('\u{118cc}', ['\u{118ac}', '\0', '\0']), ('\u{118cd}', ['\u{118ad}', '\0', '\0']),
- ('\u{118ce}', ['\u{118ae}', '\0', '\0']), ('\u{118cf}', ['\u{118af}', '\0', '\0']),
- ('\u{118d0}', ['\u{118b0}', '\0', '\0']), ('\u{118d1}', ['\u{118b1}', '\0', '\0']),
- ('\u{118d2}', ['\u{118b2}', '\0', '\0']), ('\u{118d3}', ['\u{118b3}', '\0', '\0']),
- ('\u{118d4}', ['\u{118b4}', '\0', '\0']), ('\u{118d5}', ['\u{118b5}', '\0', '\0']),
- ('\u{118d6}', ['\u{118b6}', '\0', '\0']), ('\u{118d7}', ['\u{118b7}', '\0', '\0']),
- ('\u{118d8}', ['\u{118b8}', '\0', '\0']), ('\u{118d9}', ['\u{118b9}', '\0', '\0']),
- ('\u{118da}', ['\u{118ba}', '\0', '\0']), ('\u{118db}', ['\u{118bb}', '\0', '\0']),
- ('\u{118dc}', ['\u{118bc}', '\0', '\0']), ('\u{118dd}', ['\u{118bd}', '\0', '\0']),
- ('\u{118de}', ['\u{118be}', '\0', '\0']), ('\u{118df}', ['\u{118bf}', '\0', '\0'])
- ];
-
}
pub mod charwidth {
/// # Examples
///
/// ```
-/// # #![feature(unicode)]
+/// #![feature(unicode)]
+///
/// extern crate rustc_unicode;
///
/// use rustc_unicode::str::Utf16Item::{ScalarValue, LoneSurrogate};
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
- def::DefConst(did) | def::DefAssociatedConst(did, _) => {
+ def::DefConst(did) | def::DefAssociatedConst(did) => {
record_extern_fqn(cx, did, clean::TypeConst);
clean::ConstantItem(build_const(cx, tcx, did))
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
- let def = ty::lookup_trait_def(tcx, did);
- let trait_items = ty::trait_items(tcx, did).clean(cx);
- let predicates = ty::lookup_predicates(tcx, did);
+ let def = tcx.lookup_trait_def(did);
+ let trait_items = tcx.trait_items(did).clean(cx);
+ let predicates = tcx.lookup_predicates(did);
let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx);
let generics = filter_non_trait_generics(did, generics);
let (generics, supertrait_bounds) = separate_supertrait_bounds(generics);
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Function {
- let t = ty::lookup_item_type(tcx, did);
+ let t = tcx.lookup_item_type(did);
let (decl, style, abi) = match t.ty.sty {
ty::TyBareFn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi),
_ => panic!("bad function"),
};
- let predicates = ty::lookup_predicates(tcx, did);
+ let predicates = tcx.lookup_predicates(did);
clean::Function {
decl: decl,
generics: (&t.generics, &predicates, subst::FnSpace).clean(cx),
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
- let t = ty::lookup_item_type(tcx, did);
- let predicates = ty::lookup_predicates(tcx, did);
- let fields = ty::lookup_struct_fields(tcx, did);
+ let t = tcx.lookup_item_type(did);
+ let predicates = tcx.lookup_predicates(did);
+ let fields = tcx.lookup_struct_fields(did);
clean::Struct {
struct_type: match &*fields {
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
- let t = ty::lookup_item_type(tcx, did);
- let predicates = ty::lookup_predicates(tcx, did);
+ let t = tcx.lookup_item_type(did);
+ let predicates = tcx.lookup_predicates(did);
match t.ty.sty {
ty::TyEnum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx),
variants_stripped: false,
- variants: ty::enum_variants(tcx, edid).clean(cx),
+ variants: tcx.enum_variants(edid).clean(cx),
})
}
_ => {}
pub fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
- ty::populate_inherent_implementations_for_type_if_necessary(tcx, did);
+ tcx.populate_inherent_implementations_for_type_if_necessary(did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().get(&did) {
});
}
- let predicates = ty::lookup_predicates(tcx, did);
+ let predicates = tcx.lookup_predicates(did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
- let impl_item = ty::impl_or_trait_item(tcx, did);
+ let impl_item = tcx.impl_or_trait_item(did);
match impl_item {
ty::ConstTraitItem(ref assoc_const) => {
let did = assoc_const.def_id;
- let type_scheme = ty::lookup_item_type(tcx, did);
+ let type_scheme = tcx.lookup_item_type(did);
let default = match assoc_const.default {
Some(_) => Some(const_eval::lookup_const_by_id(tcx, did, None)
.unwrap().span.to_src(cx)),
}
}).collect::<Vec<_>>();
let polarity = csearch::get_impl_polarity(tcx, did);
- let ty = ty::lookup_item_type(tcx, did);
+ let ty = tcx.lookup_item_type(did);
let trait_ = associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(polyt, _) => polyt.trait_,
debug!("got snippet {}", sn);
clean::Constant {
- type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
+ type_: tcx.lookup_item_type(did).ty.clean(cx),
expr: sn
}
}
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
- type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
+ type_: tcx.lookup_item_type(did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
impl Clean<Lifetime> for ast::Lifetime {
fn clean(&self, _: &DocContext) -> Lifetime {
- Lifetime(token::get_name(self.name).to_string())
+ Lifetime(self.name.to_string())
}
}
impl Clean<Lifetime> for ast::LifetimeDef {
fn clean(&self, _: &DocContext) -> Lifetime {
- Lifetime(token::get_name(self.lifetime.name).to_string())
+ Lifetime(self.lifetime.name.to_string())
}
}
impl Clean<Lifetime> for ty::RegionParameterDef {
fn clean(&self, _: &DocContext) -> Lifetime {
- Lifetime(token::get_name(self.name).to_string())
+ Lifetime(self.name.to_string())
}
}
match *self {
ty::ReStatic => Some(Lifetime::statik()),
ty::ReLateBound(_, ty::BrNamed(_, name)) =>
- Some(Lifetime(token::get_name(name).to_string())),
+ Some(Lifetime(name.to_string())),
ty::ReEarlyBound(ref data) => Some(Lifetime(data.name.clean(cx))),
ty::ReLateBound(..) |
let provided = match self.container {
ty::ImplContainer(..) => false,
ty::TraitContainer(did) => {
- ty::provided_trait_methods(cx.tcx(), did).iter().any(|m| {
+ cx.tcx().provided_trait_methods(did).iter().any(|m| {
m.def_id == self.def_id
})
}
ty::TyProjection(ref data) => data.clean(cx),
- ty::TyParam(ref p) => Generic(token::get_name(p.name).to_string()),
+ ty::TyParam(ref p) => Generic(p.name.to_string()),
ty::TyClosure(..) => Tuple(vec![]), // FIXME(pcwalton)
}
}
-impl Clean<Item> for ty::field_ty {
+impl Clean<Item> for ty::FieldTy {
fn clean(&self, cx: &DocContext) -> Item {
use syntax::parse::token::special_idents::unnamed_field;
use rustc::metadata::csearch;
(Some(self.name), Some(attr_map.get(&self.id.node).unwrap()))
};
- let ty = ty::lookup_item_type(cx.tcx(), self.id);
+ let ty = cx.tcx().lookup_item_type(self.id);
Item {
name: name.clean(cx),
impl Clean<Span> for syntax::codemap::Span {
fn clean(&self, cx: &DocContext) -> Span {
+ if *self == DUMMY_SP {
+ return Span::empty();
+ }
+
let cm = cx.sess().codemap();
let filename = cm.span_to_filename(*self);
let lo = cm.lookup_char_pos(self.lo);
fn path_to_string(p: &ast::Path) -> String {
let mut s = String::new();
let mut first = true;
- for i in p.segments.iter().map(|x| token::get_ident(x.identifier)) {
+ for i in p.segments.iter().map(|x| x.identifier.name.as_str()) {
if !first || p.global {
s.push_str("::");
} else {
impl Clean<String> for ast::Ident {
fn clean(&self, _: &DocContext) -> String {
- token::get_ident(*self).to_string()
+ self.to_string()
}
}
impl Clean<String> for ast::Name {
fn clean(&self, _: &DocContext) -> String {
- token::get_name(*self).to_string()
+ self.to_string()
}
}
match p.node {
PatWild(PatWildSingle) => "_".to_string(),
PatWild(PatWildMulti) => "..".to_string(),
- PatIdent(_, ref p, _) => token::get_ident(p.node).to_string(),
+ PatIdent(_, ref p, _) => p.node.to_string(),
PatEnum(ref p, _) => path_to_string(p),
PatQPath(..) => panic!("tried to get argument name from PatQPath, \
which is not allowed in function arguments"),
PatStruct(ref name, ref fields, etc) => {
format!("{} {{ {}{} }}", path_to_string(name),
fields.iter().map(|&Spanned { node: ref fp, .. }|
- format!("{}: {}", fp.ident.as_str(), name_from_pat(&*fp.pat)))
- .collect::<Vec<String>>().connect(", "),
+ format!("{}: {}", fp.ident, name_from_pat(&*fp.pat)))
+ .collect::<Vec<String>>().join(", "),
if etc { ", ..." } else { "" }
)
},
PatTup(ref elts) => format!("({})", elts.iter().map(|p| name_from_pat(&**p))
- .collect::<Vec<String>>().connect(", ")),
+ .collect::<Vec<String>>().join(", ")),
PatBox(ref p) => name_from_pat(&**p),
PatRegion(ref p, _) => name_from_pat(&**p),
PatLit(..) => {
let begin = begin.iter().map(|p| name_from_pat(&**p));
let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter();
let end = end.iter().map(|p| name_from_pat(&**p));
- format!("[{}]", begin.chain(mid).chain(end).collect::<Vec<_>>().connect(", "))
+ format!("[{}]", begin.chain(mid).chain(end).collect::<Vec<_>>().join(", "))
},
PatMac(..) => {
warn!("can't document the name of a function argument \
ast::TyFloat(ast::TyF64) => return Primitive(F64),
},
def::DefSelfTy(..) if path.segments.len() == 1 => {
- return Generic(token::get_name(special_idents::type_self.name).to_string());
+ return Generic(special_idents::type_self.name.to_string());
}
def::DefSelfTy(..) | def::DefTyParam(..) => true,
_ => false,
// are actually located on the trait/impl itself, so we need to load
// all of the generics from there and then look for bounds that are
// applied to this associated type in question.
- let def = ty::lookup_trait_def(cx.tcx(), did);
- let predicates = ty::lookup_predicates(cx.tcx(), did);
+ let def = cx.tcx().lookup_trait_def(did);
+ let predicates = cx.tcx().lookup_predicates(did);
let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx);
generics.where_predicates.iter().filter_map(|pred| {
let (name, self_type, trait_, bounds) = match *pred {
use std::collections::HashMap;
use rustc::middle::subst;
-use rustc::middle::ty;
use syntax::ast;
use clean::PathParameters as PP;
if child == trait_ {
return true
}
- let def = ty::lookup_trait_def(cx.tcx(), child);
- let predicates = ty::lookup_predicates(cx.tcx(), child);
+ let def = cx.tcx().lookup_trait_def(child);
+ let predicates = cx.tcx().lookup_predicates(child);
let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx);
generics.where_predicates.iter().filter_map(|pred| {
match *pred {
#[cfg(any(target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod os {
use libc;
match href(did) {
Some((url, shortty, fqp)) => {
try!(write!(w, "<a class='{}' href='{}' title='{}'>{}</a>",
- shortty, url, fqp.connect("::"), last.name));
+ shortty, url, fqp.join("::"), last.name));
}
_ => try!(write!(w, "{}", last.name)),
}
}
}
+impl fmt::Display for clean::Impl {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "impl{} ", self.generics));
+ if let Some(ref ty) = self.trait_ {
+ try!(write!(f, "{}{} for ",
+ if self.polarity == Some(clean::ImplPolarity::Negative) { "!" } else { "" },
+ *ty));
+ }
+ try!(write!(f, "{}{}", self.for_, WhereClause(&self.generics)));
+ Ok(())
+ }
+}
+
impl fmt::Display for clean::Arguments {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (i, input) in self.values.iter().enumerate() {
continue
},
token::Shebang(s) => {
- try!(write!(out, "{}", Escape(s.as_str())));
+ try!(write!(out, "{}", Escape(&s.as_str())));
continue
},
// If this '&' token is directly adjacent to another token, assume
// keywords are also included in the identifier set
token::Ident(ident, _is_mod_sep) => {
- match &token::get_ident(ident)[..] {
+ match &*ident.name.as_str() {
"ref" | "mut" => "kw-2",
"self" => "self",
<div class="search-container">
<input class="search-input" name="search"
autocomplete="off"
- placeholder="Click or press 'S' to search, '?' for more options..."
+ placeholder="Click or press ‘S’ to search, ‘?’ for more options…"
type="search">
</div>
</form>
<section class="footer"></section>
<div id="help" class="hidden">
- <div class="shortcuts">
- <h1>Keyboard shortcuts</h1>
- <dl>
- <dt>?</dt>
- <dd>Show this help dialog</dd>
- <dt>S</dt>
- <dd>Focus the search field</dd>
- <dt>⇤</dt>
- <dd>Move up in search results</dd>
- <dt>⇥</dt>
- <dd>Move down in search results</dd>
- <dt>⏎</dt>
- <dd>Go to active search result</dd>
- </dl>
- </div>
- <div class="infos">
- <h1>Search tricks</h1>
- <p>
- Prefix searches with a type followed by a colon (e.g.
- <code>fn:</code>) to restrict the search to a given type.
- </p>
- <p>
- Accepted types are: <code>fn</code>, <code>mod</code>,
- <code>struct</code>, <code>enum</code>,
- <code>trait</code>, <code>typedef</code> (or
- <code>tdef</code>).
- </p>
- <p>
- Search functions by type signature (e.g.
- <code>vec -> usize</code>)
- </p>
+ <div>
+ <div class="shortcuts">
+ <h1>Keyboard Shortcuts</h1>
+
+ <dl>
+ <dt>?</dt>
+ <dd>Show this help dialog</dd>
+ <dt>S</dt>
+ <dd>Focus the search field</dd>
+ <dt>⇤</dt>
+ <dd>Move up in search results</dd>
+ <dt>⇥</dt>
+ <dd>Move down in search results</dd>
+ <dt>⏎</dt>
+ <dd>Go to active search result</dd>
+ </dl>
+ </div>
+
+ <div class="infos">
+ <h1>Search Tricks</h1>
+
+ <p>
+ Prefix searches with a type followed by a colon (e.g.
+ <code>fn:</code>) to restrict the search to a given type.
+ </p>
+
+ <p>
+ Accepted types are: <code>fn</code>, <code>mod</code>,
+ <code>struct</code>, <code>enum</code>,
+ <code>trait</code>, <code>typedef</code> (or
+ <code>tdef</code>).
+ </p>
+
+ <p>
+ Search functions by type signature (e.g.
+ <code>vec -> usize</code>)
+ </p>
+ </div>
</div>
</div>
fn collapse_whitespace(s: &str) -> String {
s.split(|c: char| c.is_whitespace()).filter(|s| {
!s.is_empty()
- }).collect::<Vec<_>>().connect(" ")
+ }).collect::<Vec<_>>().join(" ")
}
thread_local!(static USED_HEADER_MAP: RefCell<HashMap<String, usize>> = {
let lines = origtext.lines().filter(|l| {
stripped_filtered_line(*l).is_none()
});
- let text = lines.collect::<Vec<&str>>().connect("\n");
+ let text = lines.collect::<Vec<&str>>().join("\n");
if rendered { return }
PLAYGROUND_KRATE.with(|krate| {
let mut s = String::new();
krate.borrow().as_ref().map(|krate| {
let test = origtext.lines().map(|l| {
stripped_filtered_line(l).unwrap_or(l)
- }).collect::<Vec<&str>>().connect("\n");
+ }).collect::<Vec<&str>>().join("\n");
let krate = krate.as_ref().map(|s| &**s);
let test = test::maketest(&test, krate, false,
&Default::default());
// Transform the contents of the header into a hyphenated string
let id = s.split_whitespace().map(|s| s.to_ascii_lowercase())
- .collect::<Vec<String>>().connect("-");
+ .collect::<Vec<String>>().join("-");
// This is a terrible hack working around how hoedown gives us rendered
// html for text rather than the raw text.
let lines = text.lines().map(|l| {
stripped_filtered_line(l).unwrap_or(l)
});
- let text = lines.collect::<Vec<&str>>().connect("\n");
+ let text = lines.collect::<Vec<&str>>().join("\n");
tests.add_test(text.to_string(),
block_info.should_panic, block_info.no_run,
block_info.ignore, block_info.test_harness);
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
//! both occur before the crate is rendered.
pub use self::ExternalLocation::*;
-use std::ascii::OwnedAsciiExt;
+use std::ascii::AsciiExt;
use std::cell::RefCell;
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap, HashSet};
/// Metadata about an implementor of a trait.
pub struct Implementor {
pub def_id: ast::DefId,
- pub generics: clean::Generics,
- pub trait_: clean::Type,
- pub for_: clean::Type,
pub stability: Option<clean::Stability>,
- pub polarity: Option<clean::ImplPolarity>,
+ pub impl_: clean::Impl,
}
/// Metadata about implementations for a type.
let inputs: Vec<String> = self.inputs.iter().map(|ref t| {
format!("{}", t)
}).collect();
- try!(write!(f, "{{\"inputs\":[{}],\"output\":", inputs.connect(",")));
+ try!(write!(f, "{{\"inputs\":[{}],\"output\":", inputs.join(",")));
match self.output {
Some(ref t) => try!(write!(f, "{}", t)),
search_index.push(IndexItem {
ty: shortty(item),
name: item.name.clone().unwrap(),
- path: fqp[..fqp.len() - 1].connect("::"),
+ path: fqp[..fqp.len() - 1].join("::"),
desc: shorter(item.doc_value()),
parent: Some(did),
search_type: get_index_search_type(&item, parent_basename),
// Add all the static files. These may already exist, but we just
// overwrite them anyway to make sure that they're fresh and up-to-date.
try!(write(cx.dst.join("jquery.js"),
- include_bytes!("static/jquery-2.1.0.min.js")));
+ include_bytes!("static/jquery-2.1.4.min.js")));
try!(write(cx.dst.join("main.js"), include_bytes!("static/main.js")));
try!(write(cx.dst.join("playpen.js"), include_bytes!("static/playpen.js")));
try!(write(cx.dst.join("main.css"), include_bytes!("static/main.css")));
// going on). If they're in different crates then the crate defining
// the trait will be interested in our implementation.
if imp.def_id.krate == did.krate { continue }
- try!(write!(&mut f, r#""impl{} {}{} for {}","#,
- imp.generics,
- if imp.polarity == Some(clean::ImplPolarity::Negative) { "!" } else { "" },
- imp.trait_, imp.for_));
+ try!(write!(&mut f, r#""{}","#, imp.impl_));
}
try!(writeln!(&mut f, r"];"));
try!(writeln!(&mut f, "{}", r"
Some(clean::ResolvedPath{ did, .. }) => {
self.implementors.entry(did).or_insert(vec![]).push(Implementor {
def_id: item.def_id,
- generics: i.generics.clone(),
- trait_: i.trait_.as_ref().unwrap().clone(),
- for_: i.for_.clone(),
stability: item.stability.clone(),
- polarity: i.polarity.clone(),
+ impl_: i.clone(),
});
}
Some(..) | None => {}
self.search_index.push(IndexItem {
ty: shortty(&item),
name: s.to_string(),
- path: path.connect("::").to_string(),
+ path: path.join("::").to_string(),
desc: shorter(item.doc_value()),
parent: parent,
search_type: get_index_search_type(&item, parent_basename),
*slot.borrow_mut() = cx.current.clone();
});
- let mut title = cx.current.connect("::");
+ let mut title = cx.current.join("::");
if pushname {
if !title.is_empty() {
title.push_str("::");
Some(format!("{root}src/{krate}/{path}.html#{href}",
root = self.cx.root_path,
krate = self.cx.layout.krate,
- path = path.connect("/"),
+ path = path.join("/"),
href = href))
// If this item is not part of the local crate, then things get a little
};
Some(format!("{root}{path}/{file}?gotosrc={goto}",
root = root,
- path = path[..path.len() - 1].connect("/"),
+ path = path[..path.len() - 1].join("/"),
file = item_path(self.item),
goto = self.item.def_id.node))
}
}
fn full_path(cx: &Context, item: &clean::Item) -> String {
- let mut s = cx.current.connect("::");
+ let mut s = cx.current.join("::");
s.push_str("::");
s.push_str(item.name.as_ref().unwrap());
return s
(*line).chars().any(|chr|{
!chr.is_whitespace()
})
- }).collect::<Vec<_>>().connect("\n"),
+ }).collect::<Vec<_>>().join("\n"),
None => "".to_string()
}
}
match cache.implementors.get(&it.def_id) {
Some(implementors) => {
for i in implementors {
- try!(writeln!(w, "<li><code>impl{} {} for {}{}</code></li>",
- i.generics, i.trait_, i.for_, WhereClause(&i.generics)));
+ try!(writeln!(w, "<li><code>{}</code></li>", i.impl_));
}
}
None => {}
try!(write!(w, r#"<script type="text/javascript" async
src="{root_path}/implementors/{path}/{ty}.{name}.js">
</script>"#,
- root_path = repeat("..").take(cx.current.len()).collect::<Vec<_>>().connect("/"),
+ root_path = vec![".."; cx.current.len()].join("/"),
path = if ast_util::is_local(it.def_id) {
- cx.current.connect("/")
+ cx.current.join("/")
} else {
let path = &cache.external_paths[&it.def_id];
- path[..path.len() - 1].connect("/")
+ path[..path.len() - 1].join("/")
},
ty = shortty(it).to_static_str(),
name = *it.name.as_ref().unwrap()));
fn render_impl(w: &mut fmt::Formatter, i: &Impl, link: AssocItemLink,
render_header: bool) -> fmt::Result {
if render_header {
- try!(write!(w, "<h3 class='impl'><code>impl{} ",
- i.impl_.generics));
- if let Some(clean::ImplPolarity::Negative) = i.impl_.polarity {
- try!(write!(w, "!"));
- }
- if let Some(ref ty) = i.impl_.trait_ {
- try!(write!(w, "{} for ", *ty));
- }
- try!(write!(w, "{}{}</code></h3>", i.impl_.for_,
- WhereClause(&i.impl_.generics)));
+ try!(write!(w, "<h3 class='impl'><code>{}</code></h3>", i.impl_));
if let Some(ref dox) = i.dox {
try!(write!(w, "<div class='docblock'>{}</div>", Markdown(dox)));
}
// Consider `self` an argument as well.
if let Some(name) = parent {
- inputs.push(Type { name: Some(name.into_ascii_lowercase()) });
+ inputs.push(Type { name: Some(name.to_ascii_lowercase()) });
}
inputs.extend(&mut decl.inputs.values.iter().map(|arg| {
}
fn get_index_type(clean_type: &clean::Type) -> Type {
- Type { name: get_index_type_name(clean_type).map(|s| s.into_ascii_lowercase()) }
+ Type { name: get_index_type_name(clean_type).map(|s| s.to_ascii_lowercase()) }
}
fn get_index_type_name(clean_type: &clean::Type) -> Option<String> {
+++ /dev/null
-/*! jQuery v2.1.0 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
-!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k="".trim,l={},m=a.document,n="2.1.0",o=function(a,b){return new o.fn.init(a,b)},p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};o.fn=o.prototype={jquery:n,constructor:o,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=o.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return o.each(this,a,b)},map:function(a){return this.pushStack(o.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},o.extend=o.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||o.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(o.isPlainObject(d)||(e=o.isArray(d)))?(e?(e=!1,f=c&&o.isArray(c)?c:[]):f=c&&o.isPlainObject(c)?c:{},g[b]=o.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},o.extend({expando:"jQuery"+(n+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===o.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return 
a-parseFloat(a)>=0},isPlainObject:function(a){if("object"!==o.type(a)||a.nodeType||o.isWindow(a))return!1;try{if(a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(b){return!1}return!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=o.trim(a),a&&(1===a.indexOf("use strict")?(b=m.createElement("script"),b.text=a,m.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":k.call(a)},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?o.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),o.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||o.guid++,f):void 0},now:Date.now,support:l}),o.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var 
b=a.length,c=o.type(a);return"function"===c||o.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s="sizzle"+-new Date,t=a.document,u=0,v=0,w=eb(),x=eb(),y=eb(),z=function(a,b){return a===b&&(j=!0),0},A="undefined",B=1<<31,C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=D.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",M=L.replace("w","w#"),N="\\["+K+"*("+L+")"+K+"*(?:([*^$|!~]?=)"+K+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+M+")|)|)"+K+"*\\]",O=":("+L+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+N.replace(3,8)+")*)|.*)\\)|)",P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(O),U=new RegExp("^"+M+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L.replace("w","w*")+")"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=/'|\\/g,ab=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),bb=function(a,b,c){var d="0x"+b-65536;return 
d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{G.apply(D=H.call(t.childNodes),t.childNodes),D[t.childNodes.length].nodeType}catch(cb){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function db(a,b,d,e){var f,g,h,i,j,m,p,q,u,v;if((b?b.ownerDocument||b:t)!==l&&k(b),b=b||l,d=d||[],!a||"string"!=typeof a)return d;if(1!==(i=b.nodeType)&&9!==i)return[];if(n&&!e){if(f=Z.exec(a))if(h=f[1]){if(9===i){if(g=b.getElementById(h),!g||!g.parentNode)return d;if(g.id===h)return d.push(g),d}else if(b.ownerDocument&&(g=b.ownerDocument.getElementById(h))&&r(b,g)&&g.id===h)return d.push(g),d}else{if(f[2])return G.apply(d,b.getElementsByTagName(a)),d;if((h=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(h)),d}if(c.qsa&&(!o||!o.test(a))){if(q=p=s,u=b,v=9===i&&a,1===i&&"object"!==b.nodeName.toLowerCase()){m=ob(a),(p=b.getAttribute("id"))?q=p.replace(_,"\\$&"):b.setAttribute("id",q),q="[id='"+q+"'] ",j=m.length;while(j--)m[j]=q+pb(m[j]);u=$.test(a)&&mb(b.parentNode)||b,v=m.join(",")}if(v)try{return G.apply(d,u.querySelectorAll(v)),d}catch(w){}finally{p||b.removeAttribute("id")}}}return xb(a.replace(P,"$1"),b,d,e)}function eb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function fb(a){return a[s]=!0,a}function gb(a){var b=l.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function hb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function ib(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||B)-(~a.sourceIndex||B);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function jb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function kb(a){return function(b){var 
c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function lb(a){return fb(function(b){return b=+b,fb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function mb(a){return a&&typeof a.getElementsByTagName!==A&&a}c=db.support={},f=db.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},k=db.setDocument=function(a){var b,e=a?a.ownerDocument||a:t,g=e.defaultView;return e!==l&&9===e.nodeType&&e.documentElement?(l=e,m=e.documentElement,n=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){k()},!1):g.attachEvent&&g.attachEvent("onunload",function(){k()})),c.attributes=gb(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=gb(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(e.getElementsByClassName)&&gb(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=gb(function(a){return m.appendChild(a).id=s,!e.getElementsByName||!e.getElementsByName(s).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==A&&n){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){var c=typeof a.getAttributeNode!==A&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==A?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof 
b.getElementsByClassName!==A&&n?b.getElementsByClassName(a):void 0},p=[],o=[],(c.qsa=Y.test(e.querySelectorAll))&&(gb(function(a){a.innerHTML="<select t=''><option selected=''></option></select>",a.querySelectorAll("[t^='']").length&&o.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||o.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll(":checked").length||o.push(":checked")}),gb(function(a){var b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&o.push("name"+K+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||o.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),o.push(",.*:")})),(c.matchesSelector=Y.test(q=m.webkitMatchesSelector||m.mozMatchesSelector||m.oMatchesSelector||m.msMatchesSelector))&&gb(function(a){c.disconnectedMatch=q.call(a,"div"),q.call(a,"[s!='']:x"),p.push("!=",O)}),o=o.length&&new RegExp(o.join("|")),p=p.length&&new RegExp(p.join("|")),b=Y.test(m.compareDocumentPosition),r=b||Y.test(m.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},z=b?function(a,b){if(a===b)return j=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===t&&r(t,a)?-1:b===e||b.ownerDocument===t&&r(t,b)?1:i?I.call(i,a)-I.call(i,b):0:4&d?-1:1)}:function(a,b){if(a===b)return j=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],k=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:i?I.call(i,a)-I.call(i,b):0;if(f===g)return ib(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)k.unshift(c);while(h[d]===k[d])d++;return 
d?ib(h[d],k[d]):h[d]===t?-1:k[d]===t?1:0},e):l},db.matches=function(a,b){return db(a,null,null,b)},db.matchesSelector=function(a,b){if((a.ownerDocument||a)!==l&&k(a),b=b.replace(S,"='$1']"),!(!c.matchesSelector||!n||p&&p.test(b)||o&&o.test(b)))try{var d=q.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return db(b,l,null,[a]).length>0},db.contains=function(a,b){return(a.ownerDocument||a)!==l&&k(a),r(a,b)},db.attr=function(a,b){(a.ownerDocument||a)!==l&&k(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!n):void 0;return void 0!==f?f:c.attributes||!n?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},db.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},db.uniqueSort=function(a){var b,d=[],e=0,f=0;if(j=!c.detectDuplicates,i=!c.sortStable&&a.slice(0),a.sort(z),j){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return i=null,a},e=db.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=db.selectors={cacheLength:50,createPseudo:fb,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ab,bb),a[3]=(a[4]||a[5]||"").replace(ab,bb),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||db.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&db.error(a[0]),a},PSEUDO:function(a){var b,c=!a[5]&&a[2];return V.CHILD.test(a[0])?null:(a[3]&&void 
0!==a[4]?a[2]=a[4]:c&&T.test(c)&&(b=ob(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ab,bb).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=w[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&w(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==A&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=db.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),t=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&t){k=q[s]||(q[s]={}),j=k[a]||[],n=j[0]===u&&j[1],m=j[0]===u&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[u,n,m];break}}else if(t&&(j=(b[s]||(b[s]={}))[a])&&j[0]===u)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(t&&((l[s]||(l[s]={}))[a]=[u,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||db.error("unsupported pseudo: "+a);return e[s]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?fb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return 
e(a,0,c)}):e}},pseudos:{not:fb(function(a){var b=[],c=[],d=g(a.replace(P,"$1"));return d[s]?fb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:fb(function(a){return function(b){return db(a,b).length>0}}),contains:fb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:fb(function(a){return U.test(a||"")||db.error("unsupported lang: "+a),a=a.replace(ab,bb).toLowerCase(),function(b){var c;do if(c=n?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===m},focus:function(a){return a===l.activeElement&&(!l.hasFocus||l.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:lb(function(){return[0]}),last:lb(function(a,b){return[b-1]}),eq:lb(function(a,b,c){return[0>c?c+b:c]}),even:lb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:lb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:lb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return 
a}),gt:lb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=jb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=kb(b);function nb(){}nb.prototype=d.filters=d.pseudos,d.setFilters=new nb;function ob(a,b){var c,e,f,g,h,i,j,k=x[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=Q.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=R.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(P," ")}),h=h.slice(c.length));for(g in d.filter)!(e=V[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?db.error(a):x(a,i).slice(0)}function pb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function qb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=v++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[u,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[s]||(b[s]={}),(h=i[d])&&h[0]===u&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function rb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function sb(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function tb(a,b,c,d,e,f){return d&&!d[s]&&(d=tb(d)),e&&!e[s]&&(e=tb(e,f)),fb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||wb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:sb(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=sb(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else 
r=sb(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ub(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],i=g||d.relative[" "],j=g?1:0,k=qb(function(a){return a===b},i,!0),l=qb(function(a){return I.call(b,a)>-1},i,!0),m=[function(a,c,d){return!g&&(d||c!==h)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>j;j++)if(c=d.relative[a[j].type])m=[qb(rb(m),c)];else{if(c=d.filter[a[j].type].apply(null,a[j].matches),c[s]){for(e=++j;f>e;e++)if(d.relative[a[e].type])break;return tb(j>1&&rb(m),j>1&&pb(a.slice(0,j-1).concat({value:" "===a[j-2].type?"*":""})).replace(P,"$1"),c,e>j&&ub(a.slice(j,e)),f>e&&ub(a=a.slice(e)),f>e&&pb(a))}m.push(c)}return rb(m)}function vb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,i,j,k){var m,n,o,p=0,q="0",r=f&&[],s=[],t=h,v=f||e&&d.find.TAG("*",k),w=u+=null==t?1:Math.random()||.1,x=v.length;for(k&&(h=g!==l&&g);q!==x&&null!=(m=v[q]);q++){if(e&&m){n=0;while(o=a[n++])if(o(m,g,i)){j.push(m);break}k&&(u=w)}c&&((m=!o&&m)&&p--,f&&r.push(m))}if(p+=q,c&&q!==p){n=0;while(o=b[n++])o(r,s,g,i);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=E.call(j));s=sb(s)}G.apply(j,s),k&&!f&&s.length>0&&p+b.length>1&&db.uniqueSort(j)}return k&&(u=w,h=t),r};return c?fb(f):f}g=db.compile=function(a,b){var c,d=[],e=[],f=y[a+" "];if(!f){b||(b=ob(a)),c=b.length;while(c--)f=ub(b[c]),f[s]?d.push(f):e.push(f);f=y(a,vb(e,d))}return f};function wb(a,b,c){for(var d=0,e=b.length;e>d;d++)db(a,b[d],c);return c}function xb(a,b,e,f){var h,i,j,k,l,m=ob(a);if(!f&&1===m.length){if(i=m[0]=m[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&c.getById&&9===b.nodeType&&n&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(ab,bb),b)||[])[0],!b)return e;a=a.slice(i.shift().value.length)}h=V.needsContext.test(a)?0:i.length;while(h--){if(j=i[h],d.relative[k=j.type])break;if((l=d.find[k])&&(f=l(j.matches[0].replace(ab,bb),$.test(i[0].type)&&mb(b.parentNode)||b))){if(i.splice(h,1),a=f.length&&pb(i),!a)return G.apply(e,f),e;break}}}return 
g(a,m)(f,b,!n,e,$.test(a)&&mb(b.parentNode)||b),e}return c.sortStable=s.split("").sort(z).join("")===s,c.detectDuplicates=!!j,k(),c.sortDetached=gb(function(a){return 1&a.compareDocumentPosition(l.createElement("div"))}),gb(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||hb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&gb(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||hb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),gb(function(a){return null==a.getAttribute("disabled")})||hb(J,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),db}(a);o.find=t,o.expr=t.selectors,o.expr[":"]=o.expr.pseudos,o.unique=t.uniqueSort,o.text=t.getText,o.isXMLDoc=t.isXML,o.contains=t.contains;var u=o.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(o.isFunction(b))return o.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return o.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return o.filter(b,a,c);b=o.filter(b,a)}return o.grep(a,function(a){return g.call(b,a)>=0!==c})}o.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?o.find.matchesSelector(d,a)?[d]:[]:o.find.matches(a,o.grep(b,function(a){return 1===a.nodeType}))},o.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(o(a).filter(function(){for(b=0;c>b;b++)if(o.contains(e[b],this))return!0}));for(b=0;c>b;b++)o.find(a,e[b],d);return d=this.pushStack(c>1?o.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return 
this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?o(a):a||[],!1).length}});var y,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=o.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof o?b[0]:b,o.merge(this,o.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:m,!0)),v.test(c[1])&&o.isPlainObject(b))for(c in b)o.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=m.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=m,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):o.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(o):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),o.makeArray(a,this))};A.prototype=o.fn,y=o(m);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};o.extend({dir:function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&o(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),o.fn.extend({has:function(a){var b=o(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(o.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?o(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&o.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?o.unique(f):f)},index:function(a){return a?"string"==typeof a?g.call(o(a),this[0]):g.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(o.unique(o.merge(this.get(),o(a,b))))},addBack:function(a){return 
this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){while((a=a[b])&&1!==a.nodeType);return a}o.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return o.dir(a,"parentNode")},parentsUntil:function(a,b,c){return o.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return o.dir(a,"nextSibling")},prevAll:function(a){return o.dir(a,"previousSibling")},nextUntil:function(a,b,c){return o.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return o.dir(a,"previousSibling",c)},siblings:function(a){return o.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return o.sibling(a.firstChild)},contents:function(a){return a.contentDocument||o.merge([],a.childNodes)}},function(a,b){o.fn[a]=function(c,d){var e=o.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=o.filter(d,e)),this.length>1&&(C[a]||o.unique(e),B.test(a)&&e.reverse()),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return o.each(a.match(E)||[],function(a,c){b[c]=!0}),b}o.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):o.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(b=a.memory&&l,c=!0,g=e||0,e=0,f=h.length,d=!0;h&&f>g;g++)if(h[g].apply(l[0],l[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,h&&(i?i.length&&j(i.shift()):b?h=[]:k.disable())},k={add:function(){if(h){var c=h.length;!function g(b){o.each(b,function(b,c){var d=o.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&g(c)})}(arguments),d?f=h.length:b&&(e=c,j(b))}return this},remove:function(){return h&&o.each(arguments,function(a,b){var c;while((c=o.inArray(b,h,c))>-1)h.splice(c,1),d&&(f>=c&&f--,g>=c&&g--)}),this},has:function(a){return a?o.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],f=0,this},disable:function(){return h=i=b=void 
0,this},disabled:function(){return!h},lock:function(){return i=void 0,b||k.disable(),this},locked:function(){return!i},fireWith:function(a,b){return!h||c&&!i||(b=b||[],b=[a,b.slice?b.slice():b],d?i.push(b):j(b)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!c}};return k},o.extend({Deferred:function(a){var b=[["resolve","done",o.Callbacks("once memory"),"resolved"],["reject","fail",o.Callbacks("once memory"),"rejected"],["notify","progress",o.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return o.Deferred(function(c){o.each(b,function(b,f){var g=o.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&o.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?o.extend(a,d):d}},e={};return d.pipe=d.then,o.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&o.isFunction(a.promise)?e:0,g=1===f?a:o.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&o.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;o.fn.ready=function(a){return 
o.ready.promise().done(a),this},o.extend({isReady:!1,readyWait:1,holdReady:function(a){a?o.readyWait++:o.ready(!0)},ready:function(a){(a===!0?--o.readyWait:o.isReady)||(o.isReady=!0,a!==!0&&--o.readyWait>0||(H.resolveWith(m,[o]),o.fn.trigger&&o(m).trigger("ready").off("ready")))}});function I(){m.removeEventListener("DOMContentLoaded",I,!1),a.removeEventListener("load",I,!1),o.ready()}o.ready.promise=function(b){return H||(H=o.Deferred(),"complete"===m.readyState?setTimeout(o.ready):(m.addEventListener("DOMContentLoaded",I,!1),a.addEventListener("load",I,!1))),H.promise(b)},o.ready.promise();var J=o.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===o.type(c)){e=!0;for(h in c)o.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,o.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(o(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};o.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function K(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=o.expando+Math.random()}K.uid=1,K.accepts=o.acceptData,K.prototype={key:function(a){if(!K.accepts(a))return 0;var b={},c=a[this.expando];if(!c){c=K.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,o.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else if(o.isEmptyObject(f))o.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,o.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 
0===b)this.cache[f]={};else{o.isArray(b)?d=b.concat(b.map(o.camelCase)):(e=o.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(E)||[])),c=d.length;while(c--)delete g[d[c]]}},hasData:function(a){return!o.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var L=new K,M=new K,N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(O,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?o.parseJSON(c):c}catch(e){}M.set(a,b,c)}else c=void 0;return c}o.extend({hasData:function(a){return M.hasData(a)||L.hasData(a)},data:function(a,b,c){return M.access(a,b,c)},removeData:function(a,b){M.remove(a,b)},_data:function(a,b,c){return L.access(a,b,c)},_removeData:function(a,b){L.remove(a,b)}}),o.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=M.get(f),1===f.nodeType&&!L.get(f,"hasDataAttrs"))){c=g.length;
-while(c--)d=g[c].name,0===d.indexOf("data-")&&(d=o.camelCase(d.slice(5)),P(f,d,e[d]));L.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){M.set(this,a)}):J(this,function(b){var c,d=o.camelCase(a);if(f&&void 0===b){if(c=M.get(f,a),void 0!==c)return c;if(c=M.get(f,d),void 0!==c)return c;if(c=P(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=M.get(this,d);M.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&M.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){M.remove(this,a)})}}),o.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=L.get(a,b),c&&(!d||o.isArray(c)?d=L.access(a,b,o.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=o.queue(a,b),d=c.length,e=c.shift(),f=o._queueHooks(a,b),g=function(){o.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return L.get(a,c)||L.access(a,c,{empty:o.Callbacks("once memory").add(function(){L.remove(a,[b+"queue",c])})})}}),o.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?o.queue(this[0],a):void 0===b?this:this.each(function(){var c=o.queue(this,a,b);o._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&o.dequeue(this,a)})},dequeue:function(a){return this.each(function(){o.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=o.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=L.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var Q=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,R=["Top","Right","Bottom","Left"],S=function(a,b){return a=b||a,"none"===o.css(a,"display")||!o.contains(a.ownerDocument,a)},T=/^(?:checkbox|radio)$/i;!function(){var 
a=m.createDocumentFragment(),b=a.appendChild(m.createElement("div"));b.innerHTML="<input type='radio' checked='checked' name='t'/>",l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var U="undefined";l.focusinBubbles="onfocusin"in a;var V=/^key/,W=/^(?:mouse|contextmenu)|click/,X=/^(?:focusinfocus|focusoutblur)$/,Y=/^([^.]*)(?:\.(.+)|)$/;function Z(){return!0}function $(){return!1}function _(){try{return m.activeElement}catch(a){}}o.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,p,q,r=L.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=o.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return typeof o!==U&&o.event.triggered!==b.type?o.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(E)||[""],j=b.length;while(j--)h=Y.exec(b[j])||[],n=q=h[1],p=(h[2]||"").split(".").sort(),n&&(l=o.event.special[n]||{},n=(e?l.delegateType:l.bindType)||n,l=o.event.special[n]||{},k=o.extend({type:n,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&o.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[n])||(m=i[n]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(n,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),o.event.global[n]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,p,q,r=L.hasData(a)&&L.get(a);if(r&&(i=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=Y.exec(b[j])||[],n=q=h[1],p=(h[2]||"").split(".").sort(),n){l=o.event.special[n]||{},n=(d?l.delegateType:l.bindType)||n,m=i[n]||[],h=h[2]&&new 
RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||o.removeEvent(a,n,r.handle),delete i[n])}else for(n in i)o.event.remove(a,n+b[j],c,d,!0);o.isEmptyObject(i)&&(delete r.handle,L.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,p=[d||m],q=j.call(b,"type")?b.type:b,r=j.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||m,3!==d.nodeType&&8!==d.nodeType&&!X.test(q+o.event.triggered)&&(q.indexOf(".")>=0&&(r=q.split("."),q=r.shift(),r.sort()),k=q.indexOf(":")<0&&"on"+q,b=b[o.expando]?b:new o.Event(q,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=r.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:o.makeArray(c,[b]),n=o.event.special[q]||{},e||!n.trigger||n.trigger.apply(d,c)!==!1)){if(!e&&!n.noBubble&&!o.isWindow(d)){for(i=n.delegateType||q,X.test(i+q)||(g=g.parentNode);g;g=g.parentNode)p.push(g),h=g;h===(d.ownerDocument||m)&&p.push(h.defaultView||h.parentWindow||a)}f=0;while((g=p[f++])&&!b.isPropagationStopped())b.type=f>1?i:n.bindType||q,l=(L.get(g,"events")||{})[b.type]&&L.get(g,"handle"),l&&l.apply(g,c),l=k&&g[k],l&&l.apply&&o.acceptData(g)&&(b.result=l.apply(g,c),b.result===!1&&b.preventDefault());return b.type=q,e||b.isDefaultPrevented()||n._default&&n._default.apply(p.pop(),c)!==!1||!o.acceptData(d)||k&&o.isFunction(d[q])&&!o.isWindow(d)&&(h=d[k],h&&(d[k]=null),o.event.triggered=q,d[q](),o.event.triggered=void 0,h&&(d[k]=h)),b.result}},dispatch:function(a){a=o.event.fix(a);var 
b,c,e,f,g,h=[],i=d.call(arguments),j=(L.get(this,"events")||{})[a.type]||[],k=o.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=o.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(g.namespace))&&(a.handleObj=g,a.data=g.data,e=((o.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(a.result=e)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?o(e,this).index(i)>=0:o.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||m,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[o.expando])return a;var 
b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=W.test(e)?this.mouseHooks:V.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new o.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=m),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==_()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===_()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&o.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return o.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=o.extend(new o.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?o.event.trigger(e,null,b):o.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},o.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},o.Event=function(a,b){return this instanceof o.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.getPreventDefault&&a.getPreventDefault()?Z:$):this.type=a,b&&o.extend(this,b),this.timeStamp=a&&a.timeStamp||o.now(),void(this[o.expando]=!0)):new o.Event(a,b)},o.Event.prototype={isDefaultPrevented:$,isPropagationStopped:$,isImmediatePropagationStopped:$,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=Z,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var 
a=this.originalEvent;this.isPropagationStopped=Z,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z,this.stopPropagation()}},o.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){o.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!o.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),l.focusinBubbles||o.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){o.event.simulate(b,a.target,o.event.fix(a),!0)};o.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=L.access(d,b);e||d.addEventListener(a,c,!0),L.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=L.access(d,b)-1;e?L.access(d,b,e):(d.removeEventListener(a,c,!0),L.remove(d,b))}}}),o.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=$;else if(!d)return this;return 1===e&&(f=d,d=function(a){return o().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=o.guid++)),this.each(function(){o.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,o(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=$),this.each(function(){o.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){o.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?o.event.trigger(a,b,c,!0):void 0}});var 
ab=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bb=/<([\w:]+)/,cb=/<|&#?\w+;/,db=/<(?:script|style|link)/i,eb=/checked\s*(?:[^=]|=\s*.checked.)/i,fb=/^$|\/(?:java|ecma)script/i,gb=/^true\/(.*)/,hb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ib={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ib.optgroup=ib.option,ib.tbody=ib.tfoot=ib.colgroup=ib.caption=ib.thead,ib.th=ib.td;function jb(a,b){return o.nodeName(a,"table")&&o.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function kb(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function lb(a){var b=gb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function mb(a,b){for(var c=0,d=a.length;d>c;c++)L.set(a[c],"globalEval",!b||L.get(b[c],"globalEval"))}function nb(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(L.hasData(a)&&(f=L.access(a),g=L.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)o.event.add(b,e,j[e][c])}M.hasData(a)&&(h=M.access(a),i=o.extend({},h),M.set(b,i))}}function ob(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&o.nodeName(a,b)?o.merge([a],c):c}function pb(a,b){var c=b.nodeName.toLowerCase();"input"===c&&T.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}o.extend({clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=o.contains(a.ownerDocument,a);if(!(l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||o.isXMLDoc(a)))for(g=ob(h),f=ob(a),d=0,e=f.length;e>d;d++)pb(f[d],g[d]);if(b)if(c)for(f=f||ob(a),g=g||ob(h),d=0,e=f.length;e>d;d++)nb(f[d],g[d]);else nb(a,h);return 
g=ob(h,"script"),g.length>0&&mb(g,!i&&ob(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,n=a.length;n>m;m++)if(e=a[m],e||0===e)if("object"===o.type(e))o.merge(l,e.nodeType?[e]:e);else if(cb.test(e)){f=f||k.appendChild(b.createElement("div")),g=(bb.exec(e)||["",""])[1].toLowerCase(),h=ib[g]||ib._default,f.innerHTML=h[1]+e.replace(ab,"<$1></$2>")+h[2],j=h[0];while(j--)f=f.lastChild;o.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));k.textContent="",m=0;while(e=l[m++])if((!d||-1===o.inArray(e,d))&&(i=o.contains(e.ownerDocument,e),f=ob(k.appendChild(e),"script"),i&&mb(f),c)){j=0;while(e=f[j++])fb.test(e.type||"")&&c.push(e)}return k},cleanData:function(a){for(var b,c,d,e,f,g,h=o.event.special,i=0;void 0!==(c=a[i]);i++){if(o.acceptData(c)&&(f=c[L.expando],f&&(b=L.cache[f]))){if(d=Object.keys(b.events||{}),d.length)for(g=0;void 0!==(e=d[g]);g++)h[e]?o.event.remove(c,e):o.removeEvent(c,e,b.handle);L.cache[f]&&delete L.cache[f]}delete M.cache[c[M.expando]]}}}),o.fn.extend({text:function(a){return J(this,function(a){return void 0===a?o.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?o.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||o.cleanData(ob(c)),c.parentNode&&(b&&o.contains(c.ownerDocument,c)&&mb(ob(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(o.cleanData(ob(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return o.clone(this,a,b)})},html:function(a){return J(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ab,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(o.cleanData(ob(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,o.cleanData(ob(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,k=this.length,m=this,n=k-1,p=a[0],q=o.isFunction(p);if(q||k>1&&"string"==typeof p&&!l.checkClone&&eb.test(p))return this.each(function(c){var d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(k&&(c=o.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(f=o.map(ob(c,"script"),kb),g=f.length;k>j;j++)h=c,j!==n&&(h=o.clone(h,!0,!0),g&&o.merge(f,ob(h,"script"))),b.call(this[j],h,j);if(g)for(i=f[f.length-1].ownerDocument,o.map(f,lb),j=0;g>j;j++)h=f[j],fb.test(h.type||"")&&!L.access(h,"globalEval")&&o.contains(i,h)&&(h.src?o._evalUrl&&o._evalUrl(h.src):o.globalEval(h.textContent.replace(hb,"")))}return this}}),o.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){o.fn[a]=function(a){for(var 
c,d=[],e=o(a),g=e.length-1,h=0;g>=h;h++)c=h===g?this:this.clone(!0),o(e[h])[b](c),f.apply(d,c.get());return this.pushStack(d)}});var qb,rb={};function sb(b,c){var d=o(c.createElement(b)).appendTo(c.body),e=a.getDefaultComputedStyle?a.getDefaultComputedStyle(d[0]).display:o.css(d[0],"display");return d.detach(),e}function tb(a){var b=m,c=rb[a];return c||(c=sb(a,b),"none"!==c&&c||(qb=(qb||o("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=qb[0].contentDocument,b.write(),b.close(),c=sb(a,b),qb.detach()),rb[a]=c),c}var ub=/^margin/,vb=new RegExp("^("+Q+")(?!px)[a-z%]+$","i"),wb=function(a){return a.ownerDocument.defaultView.getComputedStyle(a,null)};function xb(a,b,c){var d,e,f,g,h=a.style;return c=c||wb(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||o.contains(a.ownerDocument,a)||(g=o.style(a,b)),vb.test(g)&&ub.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function yb(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d="padding:0;margin:0;border:0;display:block;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box",e=m.documentElement,f=m.createElement("div"),g=m.createElement("div");g.style.backgroundClip="content-box",g.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===g.style.backgroundClip,f.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",f.appendChild(g);function h(){g.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%",e.appendChild(f);var d=a.getComputedStyle(g,null);b="1%"!==d.top,c="4px"===d.width,e.removeChild(f)}a.getComputedStyle&&o.extend(l,{pixelPosition:function(){return h(),b},boxSizingReliable:function(){return 
null==c&&h(),c},reliableMarginRight:function(){var b,c=g.appendChild(m.createElement("div"));return c.style.cssText=g.style.cssText=d,c.style.marginRight=c.style.width="0",g.style.width="1px",e.appendChild(f),b=!parseFloat(a.getComputedStyle(c,null).marginRight),e.removeChild(f),g.innerHTML="",b}})}(),o.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var zb=/^(none|table(?!-c[ea]).+)/,Ab=new RegExp("^("+Q+")(.*)$","i"),Bb=new RegExp("^([+-])=("+Q+")","i"),Cb={position:"absolute",visibility:"hidden",display:"block"},Db={letterSpacing:0,fontWeight:400},Eb=["Webkit","O","Moz","ms"];function Fb(a,b){if(b in a)return b;var c=b[0].toUpperCase()+b.slice(1),d=b,e=Eb.length;while(e--)if(b=Eb[e]+c,b in a)return b;return d}function Gb(a,b,c){var d=Ab.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Hb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=o.css(a,c+R[f],!0,e)),d?("content"===c&&(g-=o.css(a,"padding"+R[f],!0,e)),"margin"!==c&&(g-=o.css(a,"border"+R[f]+"Width",!0,e))):(g+=o.css(a,"padding"+R[f],!0,e),"padding"!==c&&(g+=o.css(a,"border"+R[f]+"Width",!0,e)));return g}function Ib(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=wb(a),g="border-box"===o.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=xb(a,b,f),(0>e||null==e)&&(e=a.style[b]),vb.test(e))return e;d=g&&(l.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Hb(a,b,c||(g?"border":"content"),d,f)+"px"}function Jb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=L.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&S(d)&&(f[g]=L.access(d,"olddisplay",tb(d.nodeName)))):f[g]||(e=S(d),(c&&"none"!==c||!e)&&L.set(d,"olddisplay",e?c:o.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}o.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=xb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=o.camelCase(b),i=a.style;return b=o.cssProps[h]||(o.cssProps[h]=Fb(i,h)),g=o.cssHooks[b]||o.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Bb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(o.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||o.cssNumber[h]||(c+="px"),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]="",i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=o.camelCase(b);return b=o.cssProps[h]||(o.cssProps[h]=Fb(a.style,h)),g=o.cssHooks[b]||o.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=xb(a,b,d)),"normal"===e&&b in Db&&(e=Db[b]),""===c||c?(f=parseFloat(e),c===!0||o.isNumeric(f)?f||0:e):e}}),o.each(["height","width"],function(a,b){o.cssHooks[b]={get:function(a,c,d){return c?0===a.offsetWidth&&zb.test(o.css(a,"display"))?o.swap(a,Cb,function(){return Ib(a,b,d)}):Ib(a,b,d):void 0},set:function(a,c,d){var e=d&&wb(a);return Gb(a,c,d?Hb(a,b,d,"border-box"===o.css(a,"boxSizing",!1,e),e):0)}}}),o.cssHooks.marginRight=yb(l.reliableMarginRight,function(a,b){return b?o.swap(a,{display:"inline-block"},xb,[a,"marginRight"]):void 
0}),o.each({margin:"",padding:"",border:"Width"},function(a,b){o.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+R[d]+b]=f[d]||f[d-2]||f[0];return e}},ub.test(a)||(o.cssHooks[a+b].set=Gb)}),o.fn.extend({css:function(a,b){return J(this,function(a,b,c){var d,e,f={},g=0;if(o.isArray(b)){for(d=wb(a),e=b.length;e>g;g++)f[b[g]]=o.css(a,b[g],!1,d);return f}return void 0!==c?o.style(a,b,c):o.css(a,b)},a,b,arguments.length>1)},show:function(){return Jb(this,!0)},hide:function(){return Jb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){S(this)?o(this).show():o(this).hide()})}});function Kb(a,b,c,d,e){return new Kb.prototype.init(a,b,c,d,e)}o.Tween=Kb,Kb.prototype={constructor:Kb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(o.cssNumber[c]?"":"px")},cur:function(){var a=Kb.propHooks[this.prop];return a&&a.get?a.get(this):Kb.propHooks._default.get(this)},run:function(a){var b,c=Kb.propHooks[this.prop];return this.pos=b=this.options.duration?o.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Kb.propHooks._default.set(this),this}},Kb.prototype.init.prototype=Kb.prototype,Kb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=o.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){o.fx.step[a.prop]?o.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[o.cssProps[a.prop]]||o.cssHooks[a.prop])?o.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Kb.propHooks.scrollTop=Kb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},o.easing={linear:function(a){return 
a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},o.fx=Kb.prototype.init,o.fx.step={};var Lb,Mb,Nb=/^(?:toggle|show|hide)$/,Ob=new RegExp("^(?:([+-])=|)("+Q+")([a-z%]*)$","i"),Pb=/queueHooks$/,Qb=[Vb],Rb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=Ob.exec(b),f=e&&e[3]||(o.cssNumber[a]?"":"px"),g=(o.cssNumber[a]||"px"!==f&&+d)&&Ob.exec(o.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,o.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function Sb(){return setTimeout(function(){Lb=void 0}),Lb=o.now()}function Tb(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=R[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ub(a,b,c){for(var d,e=(Rb[b]||[]).concat(Rb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Vb(a,b,c){var d,e,f,g,h,i,j,k=this,l={},m=a.style,n=a.nodeType&&S(a),p=L.get(a,"fxshow");c.queue||(h=o._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,k.always(function(){k.always(function(){h.unqueued--,o.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[m.overflow,m.overflowX,m.overflowY],j=o.css(a,"display"),"none"===j&&(j=tb(a.nodeName)),"inline"===j&&"none"===o.css(a,"float")&&(m.display="inline-block")),c.overflow&&(m.overflow="hidden",k.always(function(){m.overflow=c.overflow[0],m.overflowX=c.overflow[1],m.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Nb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(n?"hide":"show")){if("show"!==e||!p||void 0===p[d])continue;n=!0}l[d]=p&&p[d]||o.style(a,d)}if(!o.isEmptyObject(l)){p?"hidden"in p&&(n=p.hidden):p=L.access(a,"fxshow",{}),f&&(p.hidden=!n),n?o(a).show():k.done(function(){o(a).hide()}),k.done(function(){var b;L.remove(a,"fxshow");for(b in l)o.style(a,b,l[b])});for(d in l)g=Ub(n?p[d]:0,d,k),d in 
p||(p[d]=g.start,n&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function Wb(a,b){var c,d,e,f,g;for(c in a)if(d=o.camelCase(c),e=b[d],f=a[c],o.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=o.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function Xb(a,b,c){var d,e,f=0,g=Qb.length,h=o.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Lb||Sb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:o.extend({},b),opts:o.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:Lb||Sb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=o.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(Wb(k,j.opts.specialEasing);g>f;f++)if(d=Qb[f].call(j,a,k,j.opts))return d;return o.map(k,Ub,j),o.isFunction(j.opts.start)&&j.opts.start.call(a,j),o.fx.timer(o.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}o.Animation=o.extend(Xb,{tweener:function(a,b){o.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],Rb[c]=Rb[c]||[],Rb[c].unshift(b)},prefilter:function(a,b){b?Qb.unshift(a):Qb.push(a)}}),o.speed=function(a,b,c){var d=a&&"object"==typeof a?o.extend({},a):{complete:c||!c&&b||o.isFunction(a)&&a,duration:a,easing:c&&b||b&&!o.isFunction(b)&&b};return d.duration=o.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in 
o.fx.speeds?o.fx.speeds[d.duration]:o.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){o.isFunction(d.old)&&d.old.call(this),d.queue&&o.dequeue(this,d.queue)},d},o.fn.extend({fadeTo:function(a,b,c,d){return this.filter(S).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=o.isEmptyObject(a),f=o.speed(b,c,d),g=function(){var b=Xb(this,o.extend({},a),f);(e||L.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=o.timers,g=L.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Pb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&o.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=L.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=o.timers,g=d?d.length:0;for(c.finish=!0,o.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),o.each(["toggle","show","hide"],function(a,b){var c=o.fn[b];o.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Tb(b,!0),a,d,e)}}),o.each({slideDown:Tb("show"),slideUp:Tb("hide"),slideToggle:Tb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){o.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),o.timers=[],o.fx.tick=function(){var a,b=0,c=o.timers;for(Lb=o.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||o.fx.stop(),Lb=void 
0},o.fx.timer=function(a){o.timers.push(a),a()?o.fx.start():o.timers.pop()},o.fx.interval=13,o.fx.start=function(){Mb||(Mb=setInterval(o.fx.tick,o.fx.interval))},o.fx.stop=function(){clearInterval(Mb),Mb=null},o.fx.speeds={slow:600,fast:200,_default:400},o.fn.delay=function(a,b){return a=o.fx?o.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=m.createElement("input"),b=m.createElement("select"),c=b.appendChild(m.createElement("option"));a.type="checkbox",l.checkOn=""!==a.value,l.optSelected=c.selected,b.disabled=!0,l.optDisabled=!c.disabled,a=m.createElement("input"),a.value="t",a.type="radio",l.radioValue="t"===a.value}();var Yb,Zb,$b=o.expr.attrHandle;o.fn.extend({attr:function(a,b){return J(this,o.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){o.removeAttr(this,a)})}}),o.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===U?o.prop(a,b,c):(1===f&&o.isXMLDoc(a)||(b=b.toLowerCase(),d=o.attrHooks[b]||(o.expr.match.bool.test(b)?Zb:Yb)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=o.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void o.removeAttr(a,b))},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=o.propFix[c]||c,o.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&o.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),Zb={set:function(a,b,c){return b===!1?o.removeAttr(a,c):a.setAttribute(c,c),c}},o.each(o.expr.match.bool.source.match(/\w+/g),function(a,b){var c=$b[b]||o.find.attr;$b[b]=function(a,b,d){var e,f;
-return d||(f=$b[b],$b[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,$b[b]=f),e}});var _b=/^(?:input|select|textarea|button)$/i;o.fn.extend({prop:function(a,b){return J(this,o.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[o.propFix[a]||a]})}}),o.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!o.isXMLDoc(a),f&&(b=o.propFix[b]||b,e=o.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||_b.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),l.optSelected||(o.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),o.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){o.propFix[this.toLowerCase()]=this});var ac=/[\t\r\n\f]/g;o.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(o.isFunction(a))return this.each(function(b){o(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=o.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(o.isFunction(a))return this.each(function(b){o(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?o.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof 
a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(o.isFunction(a)?function(c){o(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=o(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===U||"boolean"===c)&&(this.className&&L.set(this,"__className__",this.className),this.className=this.className||a===!1?"":L.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ac," ").indexOf(b)>=0)return!0;return!1}});var bc=/\r/g;o.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=o.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,o(this).val()):a,null==e?e="":"number"==typeof e?e+="":o.isArray(e)&&(e=o.map(e,function(a){return null==a?"":a+""})),b=o.valHooks[this.type]||o.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=o.valHooks[e.type]||o.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(bc,""):null==c?"":c)}}}),o.extend({valHooks:{select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(l.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&o.nodeName(c.parentNode,"optgroup"))){if(b=o(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=o.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=o.inArray(o(d).val(),f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),o.each(["radio","checkbox"],function(){o.valHooks[this]={set:function(a,b){return o.isArray(b)?a.checked=o.inArray(o(a).val(),b)>=0:void 0}},l.checkOn||(o.valHooks[this].get=function(a){return 
null===a.getAttribute("value")?"on":a.value})}),o.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){o.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),o.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var cc=o.now(),dc=/\?/;o.parseJSON=function(a){return JSON.parse(a+"")},o.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&o.error("Invalid XML: "+a),b};var ec,fc,gc=/#.*$/,hc=/([?&])_=[^&]*/,ic=/^(.*?):[ \t]*([^\r\n]*)$/gm,jc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,kc=/^(?:GET|HEAD)$/,lc=/^\/\//,mc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,nc={},oc={},pc="*/".concat("*");try{fc=location.href}catch(qc){fc=m.createElement("a"),fc.href="",fc=fc.href}ec=mc.exec(fc.toLowerCase())||[];function rc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(o.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function sc(a,b,c,d){var e={},f=a===oc;function g(h){var i;return e[h]=!0,o.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function tc(a,b){var c,d,e=o.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&o.extend(!0,a,d),a}function 
uc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function vc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}o.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:fc,type:"GET",isLocal:jc.test(ec[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":pc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":o.parseJSON,"text xml":o.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?tc(tc(a,o.ajaxSettings),b):tc(o.ajaxSettings,a)},ajaxPrefilter:rc(nc),ajaxTransport:rc(oc),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=o.ajaxSetup({},b),l=k.context||k,m=k.context&&(l.nodeType||l.jquery)?o(l):o.event,n=o.Deferred(),p=o.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var 
b;if(2===t){if(!f){f={};while(b=ic.exec(e))f[b[1].toLowerCase()]=b[2]}b=f[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?e:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return c&&c.abort(b),x(0,b),this}};if(n.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||fc)+"").replace(gc,"").replace(lc,ec[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=o.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(h=mc.exec(k.url.toLowerCase()),k.crossDomain=!(!h||h[1]===ec[1]&&h[2]===ec[2]&&(h[3]||("http:"===h[1]?"80":"443"))===(ec[3]||("http:"===ec[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=o.param(k.data,k.traditional)),sc(nc,k,b,v),2===t)return v;i=k.global,i&&0===o.active++&&o.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!kc.test(k.type),d=k.url,k.hasContent||(k.data&&(d=k.url+=(dc.test(d)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=hc.test(d)?d.replace(hc,"$1_="+cc++):d+(dc.test(d)?"&":"?")+"_="+cc++)),k.ifModified&&(o.lastModified[d]&&v.setRequestHeader("If-Modified-Since",o.lastModified[d]),o.etag[d]&&v.setRequestHeader("If-None-Match",o.etag[d])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+pc+"; q=0.01":""):k.accepts["*"]);for(j in k.headers)v.setRequestHeader(j,k.headers[j]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(j 
in{success:1,error:1,complete:1})v[j](k[j]);if(c=sc(oc,k,b,v)){v.readyState=1,i&&m.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,c.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,f,h){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),c=void 0,e=h||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,f&&(u=uc(k,v,f)),u=vc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(o.lastModified[d]=w),w=v.getResponseHeader("etag"),w&&(o.etag[d]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?n.resolveWith(l,[r,x,v]):n.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,i&&m.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),i&&(m.trigger("ajaxComplete",[v,k]),--o.active||o.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return o.get(a,b,c,"json")},getScript:function(a,b){return o.get(a,void 0,b,"script")}}),o.each(["get","post"],function(a,b){o[b]=function(a,c,d,e){return o.isFunction(c)&&(e=e||d,d=c,c=void 0),o.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),o.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){o.fn[b]=function(a){return this.on(b,a)}}),o._evalUrl=function(a){return o.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},o.fn.extend({wrapAll:function(a){var b;return o.isFunction(a)?this.each(function(b){o(this).wrapAll(a.call(this,b))}):(this[0]&&(b=o(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return this.each(o.isFunction(a)?function(b){o(this).wrapInner(a.call(this,b))}:function(){var 
b=o(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=o.isFunction(a);return this.each(function(c){o(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){o.nodeName(this,"body")||o(this).replaceWith(this.childNodes)}).end()}}),o.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},o.expr.filters.visible=function(a){return!o.expr.filters.hidden(a)};var wc=/%20/g,xc=/\[\]$/,yc=/\r?\n/g,zc=/^(?:submit|button|image|reset|file)$/i,Ac=/^(?:input|select|textarea|keygen)/i;function Bc(a,b,c,d){var e;if(o.isArray(b))o.each(b,function(b,e){c||xc.test(a)?d(a,e):Bc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==o.type(b))d(a,b);else for(e in b)Bc(a+"["+e+"]",b[e],c,d)}o.param=function(a,b){var c,d=[],e=function(a,b){b=o.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=o.ajaxSettings&&o.ajaxSettings.traditional),o.isArray(a)||a.jquery&&!o.isPlainObject(a))o.each(a,function(){e(this.name,this.value)});else for(c in a)Bc(c,a[c],b,e);return d.join("&").replace(wc,"+")},o.fn.extend({serialize:function(){return o.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=o.prop(this,"elements");return a?o.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!o(this).is(":disabled")&&Ac.test(this.nodeName)&&!zc.test(a)&&(this.checked||!T.test(a))}).map(function(a,b){var c=o(this).val();return null==c?null:o.isArray(c)?o.map(c,function(a){return{name:b.name,value:a.replace(yc,"\r\n")}}):{name:b.name,value:c.replace(yc,"\r\n")}}).get()}}),o.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var Cc=0,Dc={},Ec={0:200,1223:204},Fc=o.ajaxSettings.xhr();a.ActiveXObject&&o(a).on("unload",function(){for(var a in Dc)Dc[a]()}),l.cors=!!Fc&&"withCredentials"in Fc,l.ajax=Fc=!!Fc,o.ajaxTransport(function(a){var b;return 
l.cors||Fc&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Cc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Dc[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Ec[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Dc[g]=b("abort"),f.send(a.hasContent&&a.data||null)},abort:function(){b&&b()}}:void 0}),o.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return o.globalEval(a),a}}}),o.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),o.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=o("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),m.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Gc=[],Hc=/(=)\?(?=&|$)|\?\?/;o.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Gc.pop()||o.expando+"_"+cc++;return this[a]=!0,a}}),o.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Hc.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Hc.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=o.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Hc,"$1"+e):b.jsonp!==!1&&(b.url+=(dc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||o.error(e+" 
was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Gc.push(e)),g&&o.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),o.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||m;var d=v.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=o.buildFragment([a],b,e),e&&e.length&&o(e).remove(),o.merge([],d.childNodes))};var Ic=o.fn.load;o.fn.load=function(a,b,c){if("string"!=typeof a&&Ic)return Ic.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=a.slice(h),a=a.slice(0,h)),o.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&o.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?o("<div>").append(o.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},o.expr.filters.animated=function(a){return o.grep(o.timers,function(b){return a===b.elem}).length};var Jc=a.document.documentElement;function Kc(a){return o.isWindow(a)?a:9===a.nodeType&&a.defaultView}o.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=o.css(a,"position"),l=o(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=o.css(a,"top"),i=o.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),o.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},o.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){o.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,o.contains(b,d)?(typeof 
d.getBoundingClientRect!==U&&(e=d.getBoundingClientRect()),c=Kc(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===o.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),o.nodeName(a[0],"html")||(d=a.offset()),d.top+=o.css(a[0],"borderTopWidth",!0),d.left+=o.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-o.css(c,"marginTop",!0),left:b.left-d.left-o.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||Jc;while(a&&!o.nodeName(a,"html")&&"static"===o.css(a,"position"))a=a.offsetParent;return a||Jc})}}),o.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var d="pageYOffset"===c;o.fn[b]=function(e){return J(this,function(b,e,f){var g=Kc(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),o.each(["top","left"],function(a,b){o.cssHooks[b]=yb(l.pixelPosition,function(a,c){return c?(c=xb(a,b),vb.test(c)?o(a).position()[b]+"px":c):void 0})}),o.each({Height:"height",Width:"width"},function(a,b){o.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){o.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return J(this,function(b,c,d){var e;return o.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?o.css(b,c,g):o.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),o.fn.size=function(){return this.length},o.fn.andSelf=o.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return o});var Lc=a.jQuery,Mc=a.$;return o.noConflict=function(b){return a.$===o&&(a.$=Mc),b&&a.jQuery===o&&(a.jQuery=Lc),o},typeof b===U&&(a.jQuery=a.$=o),o});
--- /dev/null
+/*! jQuery v2.1.4 | (c) 2005, 2015 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l=a.document,m="2.1.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return n.each(this,a,b)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return 
null!=a&&a===a.window},isNumeric:function(a){return!n.isArray(a)&&a-parseFloat(a)+1>=0},isPlainObject:function(a){return"object"!==n.type(a)||a.nodeType||n.isWindow(a)?!1:a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf")?!1:!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=l.createElement("script"),b.text=a,l.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:k}),n.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object 
"+b+"]"]=b.toLowerCase()});function s(a){var b="length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,aa=/[+~]/,ba=/'|\\/g,ca=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),da=function(a,b,c){var d="0x"+b-65536;return 
d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ea=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fa){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],k=b.nodeType,"string"!=typeof a||!a||1!==k&&9!==k&&11!==k)return d;if(!e&&p){if(11!==k&&(f=_.exec(a)))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return H.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName)return H.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=1!==k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(ba,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+ra(o[l]);w=aa.test(a)&&pa(b.parentNode)||b,x=o.join(",")}if(x)try{return H.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var 
c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function pa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=g.documentElement,e=g.defaultView,e&&e!==e.top&&(e.addEventListener?e.addEventListener("unload",ea,!1):e.attachEvent&&e.attachEvent("onunload",ea)),p=!f(g),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(g.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(g.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!g.getElementsByName||!g.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(g.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\f]' 
msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){var b=g.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",P)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===g||a.ownerDocument===v&&t(v,a)?-1:b===g||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,h=[a],i=[b];if(!e||!f)return a===g?-1:b===g?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return 
d?la(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},g):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ca,da),a[3]=(a[3]||a[4]||a[5]||"").replace(ca,da),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ca,da).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(Q," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var 
d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(ca,da),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return W.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(ca,da).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:oa(function(){return[0]}),last:oa(function(a,b){return[b-1]}),eq:oa(function(a,b,c){return[0>c?c+b:c]}),even:oa(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return 
a}),odd:oa(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:oa(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:oa(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=ma(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=na(b);function qa(){}qa.prototype=d.filters=d.pseudos,d.setFilters=new qa,g=ga.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?ga.error(a):z(a,i).slice(0)};function ra(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function sa(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function ta(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ua(a,b,c){for(var d=0,e=b.length;e>d;d++)ga(a,b[d],c);return c}function va(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function wa(a,b,c,d,e,f){return d&&!d[u]&&(d=wa(d)),e&&!e[u]&&(e=wa(e,f)),ia(function(f,g,h,i){var 
j,k,l,m=[],n=[],o=g.length,p=f||ua(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:va(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=va(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=va(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function xa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=sa(function(a){return a===b},h,!0),l=sa(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[sa(ta(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return wa(i>1&&ta(m),i>1&&ra(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&xa(a.slice(i,e)),f>e&&xa(a=a.slice(e)),f>e&&ra(a))}m.push(c)}return ta(m)}function ya(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=F.call(i));s=va(s)}H.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&ga.uniqueSort(i)}return k&&(w=v,j=t),r};return c?ia(f):f}return h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=xa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,ya(e,d)),f.selector=a}return f},i=ga.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof 
a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ca,da),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ca,da),aa.test(j[0].type)&&pa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&ra(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,aa.test(a)&&pa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ja(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),ga}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=n.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return g.call(b,a)>=0!==c})}n.filter=function(a,b,c){var d=b[0];return 
c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?n(a):a||[],!1).length}});var y,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=n.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:l,!0)),v.test(c[1])&&n.isPlainObject(b))for(c in b)n.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=l.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=l,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};A.prototype=n.fn,y=n(l);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};n.extend({dir:function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var 
c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.unique(f):f)},index:function(a){return a?"string"==typeof a?g.call(n(a),this[0]):g.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.unique(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return n.dir(a,"parentNode")},parentsUntil:function(a,b,c){return n.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return n.dir(a,"nextSibling")},prevAll:function(a){return n.dir(a,"previousSibling")},nextUntil:function(a,b,c){return n.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return n.dir(a,"previousSibling",c)},siblings:function(a){return n.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return n.sibling(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(C[a]||n.unique(e),B.test(a)&&e.reverse()),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return n.each(a.match(E)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):n.extend({},a);var 
b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(b=a.memory&&l,c=!0,g=e||0,e=0,f=h.length,d=!0;h&&f>g;g++)if(h[g].apply(l[0],l[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,h&&(i?i.length&&j(i.shift()):b?h=[]:k.disable())},k={add:function(){if(h){var c=h.length;!function g(b){n.each(b,function(b,c){var d=n.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&g(c)})}(arguments),d?f=h.length:b&&(e=c,j(b))}return this},remove:function(){return h&&n.each(arguments,function(a,b){var c;while((c=n.inArray(b,h,c))>-1)h.splice(c,1),d&&(f>=c&&f--,g>=c&&g--)}),this},has:function(a){return a?n.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],f=0,this},disable:function(){return h=i=b=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,b||k.disable(),this},locked:function(){return!i},fireWith:function(a,b){return!h||c&&!i||(b=b||[],b=[a,b.slice?b.slice():b],d?i.push(b):j(b)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!c}};return k},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return 
e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&n.isFunction(a.promise)?e:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(H.resolveWith(l,[n]),n.fn.triggerHandler&&(n(l).triggerHandler("ready"),n(l).off("ready"))))}});function I(){l.removeEventListener("DOMContentLoaded",I,!1),a.removeEventListener("load",I,!1),n.ready()}n.ready.promise=function(b){return H||(H=n.Deferred(),"complete"===l.readyState?setTimeout(n.ready):(l.addEventListener("DOMContentLoaded",I,!1),a.addEventListener("load",I,!1))),H.promise(b)},n.ready.promise();var J=n.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)n.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};n.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function K(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=n.expando+K.uid++}K.uid=1,K.accepts=n.acceptData,K.prototype={key:function(a){if(!K.accepts(a))return 0;var 
b={},c=a[this.expando];if(!c){c=K.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,n.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else if(n.isEmptyObject(f))n.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 0===b)this.cache[f]={};else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(E)||[])),c=d.length;while(c--)delete g[d[c]]}},hasData:function(a){return!n.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var L=new K,M=new K,N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(O,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}M.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return M.hasData(a)||L.hasData(a)},data:function(a,b,c){
+return M.access(a,b,c)},removeData:function(a,b){M.remove(a,b)},_data:function(a,b,c){return L.access(a,b,c)},_removeData:function(a,b){L.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=M.get(f),1===f.nodeType&&!L.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));L.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){M.set(this,a)}):J(this,function(b){var c,d=n.camelCase(a);if(f&&void 0===b){if(c=M.get(f,a),void 0!==c)return c;if(c=M.get(f,d),void 0!==c)return c;if(c=P(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=M.get(this,d);M.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&M.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){M.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=L.get(a,b),c&&(!d||n.isArray(c)?d=L.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return L.get(a,c)||L.access(a,c,{empty:n.Callbacks("once memory").add(function(){L.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 
0),a=a||"fx";while(g--)c=L.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var Q=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,R=["Top","Right","Bottom","Left"],S=function(a,b){return a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)},T=/^(?:checkbox|radio)$/i;!function(){var a=l.createDocumentFragment(),b=a.appendChild(l.createElement("div")),c=l.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var U="undefined";k.focusinBubbles="onfocusin"in a;var V=/^key/,W=/^(?:mouse|pointer|contextmenu)|click/,X=/^(?:focusinfocus|focusoutblur)$/,Y=/^([^.]*)(?:\.(.+)|)$/;function Z(){return!0}function $(){return!1}function _(){try{return l.activeElement}catch(a){}}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=L.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return typeof n!==U&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(E)||[""],j=b.length;while(j--)h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var 
f,g,h,i,j,k,l,m,o,p,q,r=L.hasData(a)&&L.get(a);if(r&&(i=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=Y.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&(delete r.handle,L.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,m,o,p=[d||l],q=j.call(b,"type")?b.type:b,r=j.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||l,3!==d.nodeType&&8!==d.nodeType&&!X.test(q+n.event.triggered)&&(q.indexOf(".")>=0&&(r=q.split("."),q=r.shift(),r.sort()),k=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=r.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:n.makeArray(c,[b]),o=n.event.special[q]||{},e||!o.trigger||o.trigger.apply(d,c)!==!1)){if(!e&&!o.noBubble&&!n.isWindow(d)){for(i=o.delegateType||q,X.test(i+q)||(g=g.parentNode);g;g=g.parentNode)p.push(g),h=g;h===(d.ownerDocument||l)&&p.push(h.defaultView||h.parentWindow||a)}f=0;while((g=p[f++])&&!b.isPropagationStopped())b.type=f>1?i:o.bindType||q,m=(L.get(g,"events")||{})[b.type]&&L.get(g,"handle"),m&&m.apply(g,c),m=k&&g[k],m&&m.apply&&n.acceptData(g)&&(b.result=m.apply(g,c),b.result===!1&&b.preventDefault());return 
b.type=q,e||b.isDefaultPrevented()||o._default&&o._default.apply(p.pop(),c)!==!1||!n.acceptData(d)||k&&n.isFunction(d[q])&&!n.isWindow(d)&&(h=d[k],h&&(d[k]=null),n.event.triggered=q,d[q](),n.event.triggered=void 0,h&&(d[k]=h)),b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(L.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(g.namespace))&&(a.handleObj=g,a.data=g.data,e=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(a.result=e)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>=0:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return 
null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||l,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[n.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=W.test(e)?this.mouseHooks:V.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new n.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=l),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==_()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===_()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&n.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=n.extend(new n.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?n.event.trigger(e,null,b):n.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?Z:$):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={isDefaultPrevented:$,isPropagationStopped:$,isImmediatePropagationStopped:$,preventDefault:function(){var 
a=this.originalEvent;this.isDefaultPrevented=Z,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=Z,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=Z,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!n.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.focusinBubbles||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a),!0)};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=L.access(d,b);e||d.addEventListener(a,c,!0),L.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=L.access(d,b)-1;e?L.access(d,b,e):(d.removeEventListener(a,c,!0),L.remove(d,b))}}}),n.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=$;else if(!d)return this;return 1===e&&(f=d,d=function(a){return n().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=n.guid++)),this.each(function(){n.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 
0),c===!1&&(c=$),this.each(function(){n.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var aa=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ba=/<([\w:]+)/,ca=/<|&#?\w+;/,da=/<(?:script|style|link)/i,ea=/checked\s*(?:[^=]|=\s*.checked.)/i,fa=/^$|\/(?:java|ecma)script/i,ga=/^true\/(.*)/,ha=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ia={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ia.optgroup=ia.option,ia.tbody=ia.tfoot=ia.colgroup=ia.caption=ia.thead,ia.th=ia.td;function ja(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function ka(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function la(a){var b=ga.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function ma(a,b){for(var c=0,d=a.length;d>c;c++)L.set(a[c],"globalEval",!b||L.get(b[c],"globalEval"))}function na(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(L.hasData(a)&&(f=L.access(a),g=L.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}M.hasData(a)&&(h=M.access(a),i=n.extend({},h),M.set(b,i))}}function oa(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function pa(a,b){var c=b.nodeName.toLowerCase();"input"===c&&T.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}n.extend({clone:function(a,b,c){var 
d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=oa(h),f=oa(a),d=0,e=f.length;e>d;d++)pa(f[d],g[d]);if(b)if(c)for(f=f||oa(a),g=g||oa(h),d=0,e=f.length;e>d;d++)na(f[d],g[d]);else na(a,h);return g=oa(h,"script"),g.length>0&&ma(g,!i&&oa(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,o=a.length;o>m;m++)if(e=a[m],e||0===e)if("object"===n.type(e))n.merge(l,e.nodeType?[e]:e);else if(ca.test(e)){f=f||k.appendChild(b.createElement("div")),g=(ba.exec(e)||["",""])[1].toLowerCase(),h=ia[g]||ia._default,f.innerHTML=h[1]+e.replace(aa,"<$1></$2>")+h[2],j=h[0];while(j--)f=f.lastChild;n.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));k.textContent="",m=0;while(e=l[m++])if((!d||-1===n.inArray(e,d))&&(i=n.contains(e.ownerDocument,e),f=oa(k.appendChild(e),"script"),i&&ma(f),c)){j=0;while(e=f[j++])fa.test(e.type||"")&&c.push(e)}return k},cleanData:function(a){for(var b,c,d,e,f=n.event.special,g=0;void 0!==(c=a[g]);g++){if(n.acceptData(c)&&(e=c[L.expando],e&&(b=L.cache[e]))){if(b.events)for(d in b.events)f[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);L.cache[e]&&delete L.cache[e]}delete M.cache[c[M.expando]]}}}),n.fn.extend({text:function(a){return J(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=ja(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=ja(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return 
this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?n.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||n.cleanData(oa(c)),c.parentNode&&(b&&n.contains(c.ownerDocument,c)&&ma(oa(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(oa(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return J(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!da.test(a)&&!ia[(ba.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(aa,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(oa(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,n.cleanData(oa(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,m=this,o=l-1,p=a[0],q=n.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&ea.test(p))return this.each(function(c){var 
d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(c=n.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(f=n.map(oa(c,"script"),ka),g=f.length;l>j;j++)h=c,j!==o&&(h=n.clone(h,!0,!0),g&&n.merge(f,oa(h,"script"))),b.call(this[j],h,j);if(g)for(i=f[f.length-1].ownerDocument,n.map(f,la),j=0;g>j;j++)h=f[j],fa.test(h.type||"")&&!L.access(h,"globalEval")&&n.contains(i,h)&&(h.src?n._evalUrl&&n._evalUrl(h.src):n.globalEval(h.textContent.replace(ha,"")))}return this}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),g=e.length-1,h=0;g>=h;h++)c=h===g?this:this.clone(!0),n(e[h])[b](c),f.apply(d,c.get());return this.pushStack(d)}});var qa,ra={};function sa(b,c){var d,e=n(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:n.css(e[0],"display");return e.detach(),f}function ta(a){var b=l,c=ra[a];return c||(c=sa(a,b),"none"!==c&&c||(qa=(qa||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=qa[0].contentDocument,b.write(),b.close(),c=sa(a,b),qa.detach()),ra[a]=c),c}var ua=/^margin/,va=new RegExp("^("+Q+")(?!px)[a-z%]+$","i"),wa=function(b){return b.ownerDocument.defaultView.opener?b.ownerDocument.defaultView.getComputedStyle(b,null):a.getComputedStyle(b,null)};function xa(a,b,c){var d,e,f,g,h=a.style;return c=c||wa(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),va.test(g)&&ua.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function ya(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var 
b,c,d=l.documentElement,e=l.createElement("div"),f=l.createElement("div");if(f.style){f.style.backgroundClip="content-box",f.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===f.style.backgroundClip,e.style.cssText="border:0;width:0;height:0;top:0;left:-9999px;margin-top:1px;position:absolute",e.appendChild(f);function g(){f.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",f.innerHTML="",d.appendChild(e);var g=a.getComputedStyle(f,null);b="1%"!==g.top,c="4px"===g.width,d.removeChild(e)}a.getComputedStyle&&n.extend(k,{pixelPosition:function(){return g(),b},boxSizingReliable:function(){return null==c&&g(),c},reliableMarginRight:function(){var b,c=f.appendChild(l.createElement("div"));return c.style.cssText=f.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",c.style.marginRight=c.style.width="0",f.style.width="1px",d.appendChild(e),b=!parseFloat(a.getComputedStyle(c,null).marginRight),d.removeChild(e),f.removeChild(c),b}})}}(),n.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var za=/^(none|table(?!-c[ea]).+)/,Aa=new RegExp("^("+Q+")(.*)$","i"),Ba=new RegExp("^([+-])=("+Q+")","i"),Ca={position:"absolute",visibility:"hidden",display:"block"},Da={letterSpacing:"0",fontWeight:"400"},Ea=["Webkit","O","Moz","ms"];function Fa(a,b){if(b in a)return b;var c=b[0].toUpperCase()+b.slice(1),d=b,e=Ea.length;while(e--)if(b=Ea[e]+c,b in a)return b;return d}function Ga(a,b,c){var d=Aa.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Ha(a,b,c,d,e){for(var 
f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+R[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+R[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+R[f]+"Width",!0,e))):(g+=n.css(a,"padding"+R[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+R[f]+"Width",!0,e)));return g}function Ia(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=wa(a),g="border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=xa(a,b,f),(0>e||null==e)&&(e=a.style[b]),va.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Ha(a,b,c||(g?"border":"content"),d,f)+"px"}function Ja(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=L.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&S(d)&&(f[g]=L.access(d,"olddisplay",ta(d.nodeName)))):(e=S(d),"none"===c&&e||L.set(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=xa(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;return b=n.cssProps[h]||(n.cssProps[h]=Fa(i,h)),g=n.cssHooks[b]||n.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Ba.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(n.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||n.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return 
b=n.cssProps[h]||(n.cssProps[h]=Fa(a.style,h)),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=xa(a,b,d)),"normal"===e&&b in Da&&(e=Da[b]),""===c||c?(f=parseFloat(e),c===!0||n.isNumeric(f)?f||0:e):e}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?za.test(n.css(a,"display"))&&0===a.offsetWidth?n.swap(a,Ca,function(){return Ia(a,b,d)}):Ia(a,b,d):void 0},set:function(a,c,d){var e=d&&wa(a);return Ga(a,c,d?Ha(a,b,d,"border-box"===n.css(a,"boxSizing",!1,e),e):0)}}}),n.cssHooks.marginRight=ya(k.reliableMarginRight,function(a,b){return b?n.swap(a,{display:"inline-block"},xa,[a,"marginRight"]):void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+R[d]+b]=f[d]||f[d-2]||f[0];return e}},ua.test(a)||(n.cssHooks[a+b].set=Ga)}),n.fn.extend({css:function(a,b){return J(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=wa(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return Ja(this,!0)},hide:function(){return Ja(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){S(this)?n(this).show():n(this).hide()})}});function Ka(a,b,c,d,e){return new Ka.prototype.init(a,b,c,d,e)}n.Tween=Ka,Ka.prototype={constructor:Ka,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=Ka.propHooks[this.prop];return a&&a.get?a.get(this):Ka.propHooks._default.get(this)},run:function(a){var b,c=Ka.propHooks[this.prop];return 
this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Ka.propHooks._default.set(this),this}},Ka.prototype.init.prototype=Ka.prototype,Ka.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[n.cssProps[a.prop]]||n.cssHooks[a.prop])?n.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Ka.propHooks.scrollTop=Ka.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},n.fx=Ka.prototype.init,n.fx.step={};var La,Ma,Na=/^(?:toggle|show|hide)$/,Oa=new RegExp("^(?:([+-])=|)("+Q+")([a-z%]*)$","i"),Pa=/queueHooks$/,Qa=[Va],Ra={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=Oa.exec(b),f=e&&e[3]||(n.cssNumber[a]?"":"px"),g=(n.cssNumber[a]||"px"!==f&&+d)&&Oa.exec(n.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,n.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function Sa(){return setTimeout(function(){La=void 0}),La=n.now()}function Ta(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=R[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ua(a,b,c){for(var d,e=(Ra[b]||[]).concat(Ra["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Va(a,b,c){var 
d,e,f,g,h,i,j,k,l=this,m={},o=a.style,p=a.nodeType&&S(a),q=L.get(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,l.always(function(){l.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=n.css(a,"display"),k="none"===j?L.get(a,"olddisplay")||ta(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(o.display="inline-block")),c.overflow&&(o.overflow="hidden",l.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Na.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}m[d]=q&&q[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(m))"inline"===("none"===j?ta(a.nodeName):j)&&(o.display=j);else{q?"hidden"in q&&(p=q.hidden):q=L.access(a,"fxshow",{}),f&&(q.hidden=!p),p?n(a).show():l.done(function(){n(a).hide()}),l.done(function(){var b;L.remove(a,"fxshow");for(b in m)n.style(a,b,m[b])});for(d in m)g=Ua(p?q[d]:0,d,l),d in q||(q[d]=g.start,p&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function Wa(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function Xa(a,b,c){var d,e,f=0,g=Qa.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=La||Sa(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return 
h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:La||Sa(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(Wa(k,j.opts.specialEasing);g>f;f++)if(d=Qa[f].call(j,a,k,j.opts))return d;return n.map(k,Ua,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(Xa,{tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],Ra[c]=Ra[c]||[],Ra[c].unshift(b)},prefilter:function(a,b){b?Qa.unshift(a):Qa.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(S).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=Xa(this,n.extend({},a),f);(e||L.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 
0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=L.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Pa.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=L.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Ta(b,!0),a,d,e)}}),n.each({slideDown:Ta("show"),slideUp:Ta("hide"),slideToggle:Ta("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=0,c=n.timers;for(La=n.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||n.fx.stop(),La=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){Ma||(Ma=setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){clearInterval(Ma),Ma=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(a,b){return a=n.fx?n.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=l.createElement("input"),b=l.createElement("select"),c=b.appendChild(l.createElement("option"));a.type="checkbox",k.checkOn=""!==a.value,k.optSelected=c.selected,b.disabled=!0,k.optDisabled=!c.disabled,a=l.createElement("input"),a.value="t",a.type="radio",k.radioValue="t"===a.value}();var 
Ya,Za,$a=n.expr.attrHandle;n.fn.extend({attr:function(a,b){return J(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===U?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),d=n.attrHooks[b]||(n.expr.match.bool.test(b)?Za:Ya)),
+void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=n.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void n.removeAttr(a,b))},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),Za={set:function(a,b,c){return b===!1?n.removeAttr(a,c):a.setAttribute(c,c),c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=$a[b]||n.find.attr;$a[b]=function(a,b,d){var e,f;return d||(f=$a[b],$a[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,$a[b]=f),e}});var _a=/^(?:input|select|textarea|button)$/i;n.fn.extend({prop:function(a,b){return J(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[n.propFix[a]||a]})}}),n.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!n.isXMLDoc(a),f&&(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||_a.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),k.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this});var ab=/[\t\r\n\f]/g;n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return 
this.each(function(b){n(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ab," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=n.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ab," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?n.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(n.isFunction(a)?function(c){n(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=n(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===U||"boolean"===c)&&(this.className&&L.set(this,"__className__",this.className),this.className=this.className||a===!1?"":L.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ab," ").indexOf(b)>=0)return!0;return!1}});var bb=/\r/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 
0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(bb,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=n.inArray(d.value,f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>=0:void 0}},k.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var cb=n.now(),db=/\?/;n.parseJSON=function(a){return JSON.parse(a+"")},n.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&n.error("Invalid XML: "+a),b};var eb=/#.*$/,fb=/([?&])_=[^&]*/,gb=/^(.*?):[ 
\t]*([^\r\n]*)$/gm,hb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,ib=/^(?:GET|HEAD)$/,jb=/^\/\//,kb=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,lb={},mb={},nb="*/".concat("*"),ob=a.location.href,pb=kb.exec(ob.toLowerCase())||[];function qb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(n.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function rb(a,b,c,d){var e={},f=a===mb;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function sb(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&n.extend(!0,a,d),a}function tb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function ub(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to 
"+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:ob,type:"GET",isLocal:hb.test(pb[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":nb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?sb(sb(a,n.ajaxSettings),b):sb(n.ajaxSettings,a)},ajaxPrefilter:qb(lb),ajaxTransport:qb(mb),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=n.ajaxSetup({},b),l=k.context||k,m=k.context&&(l.nodeType||l.jquery)?n(l):n.event,o=n.Deferred(),p=n.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!f){f={};while(b=gb.exec(e))f[b[1].toLowerCase()]=b[2]}b=f[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?e:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return c&&c.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||ob)+"").replace(eb,"").replace(jb,pb[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=n.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(h=kb.exec(k.url.toLowerCase()),k.crossDomain=!(!h||h[1]===pb[1]&&h[2]===pb[2]&&(h[3]||("http:"===h[1]?"80":"443"))===(pb[3]||("http:"===pb[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof 
k.data&&(k.data=n.param(k.data,k.traditional)),rb(lb,k,b,v),2===t)return v;i=n.event&&k.global,i&&0===n.active++&&n.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!ib.test(k.type),d=k.url,k.hasContent||(k.data&&(d=k.url+=(db.test(d)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=fb.test(d)?d.replace(fb,"$1_="+cb++):d+(db.test(d)?"&":"?")+"_="+cb++)),k.ifModified&&(n.lastModified[d]&&v.setRequestHeader("If-Modified-Since",n.lastModified[d]),n.etag[d]&&v.setRequestHeader("If-None-Match",n.etag[d])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+nb+"; q=0.01":""):k.accepts["*"]);for(j in k.headers)v.setRequestHeader(j,k.headers[j]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(j in{success:1,error:1,complete:1})v[j](k[j]);if(c=rb(mb,k,b,v)){v.readyState=1,i&&m.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,c.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,f,h){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),c=void 0,e=h||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,f&&(u=tb(k,v,f)),u=ub(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(n.lastModified[d]=w),w=v.getResponseHeader("etag"),w&&(n.etag[d]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,i&&m.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),i&&(m.trigger("ajaxComplete",[v,k]),--n.active||n.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return 
n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){var b;return n.isFunction(a)?this.each(function(b){n(this).wrapAll(a.call(this,b))}):(this[0]&&(b=n(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return this.each(n.isFunction(a)?function(b){n(this).wrapInner(a.call(this,b))}:function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}}),n.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var vb=/%20/g,wb=/\[\]$/,xb=/\r?\n/g,yb=/^(?:submit|button|image|reset|file)$/i,zb=/^(?:input|select|textarea|keygen)/i;function Ab(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||wb.test(a)?d(a,e):Ab(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))d(a,b);else for(e in b)Ab(a+"["+e+"]",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)Ab(c,a[c],b,e);return d.join("&").replace(vb,"+")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return 
this.map(function(){var a=n.prop(this,"elements");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(":disabled")&&zb.test(this.nodeName)&&!yb.test(a)&&(this.checked||!T.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(xb,"\r\n")}}):{name:b.name,value:c.replace(xb,"\r\n")}}).get()}}),n.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var Bb=0,Cb={},Db={0:200,1223:204},Eb=n.ajaxSettings.xhr();a.attachEvent&&a.attachEvent("onunload",function(){for(var a in Cb)Cb[a]()}),k.cors=!!Eb&&"withCredentials"in Eb,k.ajax=Eb=!!Eb,n.ajaxTransport(function(a){var b;return k.cors||Eb&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Bb;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Cb[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Db[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Cb[g]=b("abort");try{f.send(a.hasContent&&a.data||null)}catch(h){if(b)throw h}},abort:function(){b&&b()}}:void 0}),n.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),n.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=n("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load 
error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),l.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Fb=[],Gb=/(=)\?(?=&|$)|\?\?/;n.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Fb.pop()||n.expando+"_"+cb++;return this[a]=!0,a}}),n.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Gb.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Gb.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Gb,"$1"+e):b.jsonp!==!1&&(b.url+=(db.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||n.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Fb.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),n.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||l;var d=v.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=n.buildFragment([a],b,e),e&&e.length&&n(e).remove(),n.merge([],d.childNodes))};var Hb=n.fn.load;n.fn.load=function(a,b,c){if("string"!=typeof a&&Hb)return Hb.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=n.trim(a.slice(h)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&n.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?n("<div>").append(n.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},n.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};var Ib=a.document.documentElement;function Jb(a){return 
n.isWindow(a)?a:9===a.nodeType&&a.defaultView}n.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=n.css(a,"position"),l=n(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=n.css(a,"top"),i=n.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,n.contains(b,d)?(typeof d.getBoundingClientRect!==U&&(e=d.getBoundingClientRect()),c=Jb(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===n.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],"html")||(d=a.offset()),d.top+=n.css(a[0],"borderTopWidth",!0),d.left+=n.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-n.css(c,"marginTop",!0),left:b.left-d.left-n.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||Ib;while(a&&!n.nodeName(a,"html")&&"static"===n.css(a,"position"))a=a.offsetParent;return a||Ib})}}),n.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var d="pageYOffset"===c;n.fn[b]=function(e){return J(this,function(b,e,f){var g=Jb(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),n.each(["top","left"],function(a,b){n.cssHooks[b]=ya(k.pixelPosition,function(a,c){return c?(c=xa(a,b),va.test(c)?n(a).position()[b]+"px":c):void 
0})}),n.each({Height:"height",Width:"width"},function(a,b){n.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return J(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.size=function(){return this.length},n.fn.andSelf=n.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return n});var Kb=a.jQuery,Lb=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=Lb),b&&a.jQuery===n&&(a.jQuery=Kb),n},typeof b===U&&(a.jQuery=a.$=n),n});
border-radius: 3px;
padding: 0 0.2em;
}
+.docblock pre code {
+ padding: 0;
+}
pre {
background-color: #F5F5F5;
padding: 14px;
/* Everything else */
-.js-only, .hidden { display: none; }
+.js-only, .hidden { display: none !important; }
.sidebar {
padding: 10px;
tr.result span.primitive::after { content: ' (primitive type)'; font-style: italic; }
+body.blur > :not(#help) {
+ filter: blur(8px);
+ -webkit-filter: blur(8px);
+ opacity: .7;
+}
+
#help {
+ width: 100%;
+ height: 100vh;
+ position: fixed;
+ top: 0;
+ left: 0;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+}
+#help > div {
+ flex: 0 0 auto;
background: #e9e9e9;
- border-radius: 4px;
box-shadow: 0 0 6px rgba(0,0,0,.2);
- position: absolute;
- top: 300px;
- left: 50%;
- margin-top: -125px;
- margin-left: -275px;
width: 550px;
height: 300px;
border: 1px solid #bfbfbf;
}
-
#help dt {
float: left;
- border-radius: 3px;
+ border-radius: 4px;
border: 1px solid #bfbfbf;
background: #fff;
width: 23px;
#help dd { margin: 5px 33px; }
#help .infos { padding-left: 0; }
#help h1 { margin-top: 0; }
-#help div {
+#help > div div {
width: 50%;
float: left;
padding: 20px;
highlightSourceLines(null);
$(window).on('hashchange', highlightSourceLines);
- $(document).on('keyup', function handleKeyboardShortcut(e) {
- if (document.activeElement.tagName === 'INPUT') {
+ // Gets the human-readable string for the virtual-key code of the
+ // given KeyboardEvent, ev.
+ //
+ // This function is meant as a polyfill for KeyboardEvent#key,
+ // since it is not supported in Trident. We also test for
+ // KeyboardEvent#keyCode because the handleShortcut handler is
+ // also registered for the keydown event, because Blink doesn't fire
+ // keypress on hitting the Escape key.
+ //
+ // So I guess you could say things are getting pretty interoperable.
+ function getVirtualKey(ev) {
+ if ("key" in ev && typeof ev.key != "undefined")
+ return ev.key;
+
+ var c = ev.charCode || ev.keyCode;
+ if (c == 27)
+ return "Escape";
+ return String.fromCharCode(c);
+ }
+
+ function handleShortcut(ev) {
+ if (document.activeElement.tagName == "INPUT")
return;
- }
- if (e.which === 191) { // question mark
- if (e.shiftKey && $('#help').hasClass('hidden')) {
- e.preventDefault();
- $('#help').removeClass('hidden');
+ switch (getVirtualKey(ev)) {
+ case "Escape":
+ if (!$("#help").hasClass("hidden")) {
+ ev.preventDefault();
+ $("#help").addClass("hidden");
+ $("body").removeClass("blur");
+ } else if (!$("#search").hasClass("hidden")) {
+ ev.preventDefault();
+ $("#search").addClass("hidden");
+ $("#main").removeClass("hidden");
}
- } else if (e.which === 27) { // esc
- if (!$('#help').hasClass('hidden')) {
- e.preventDefault();
- $('#help').addClass('hidden');
- } else if (!$('#search').hasClass('hidden')) {
- e.preventDefault();
- $('#search').addClass('hidden');
- $('#main').removeClass('hidden');
+ break;
+
+ case "s":
+ case "S":
+ ev.preventDefault();
+ focusSearchBar();
+ break;
+
+ case "?":
+ if (ev.shiftKey && $("#help").hasClass("hidden")) {
+ ev.preventDefault();
+ $("#help").removeClass("hidden");
+ $("body").addClass("blur");
}
- } else if (e.which === 83) { // S
- e.preventDefault();
- $('.search-input').focus();
+ break;
}
- }).on('click', function(e) {
- if (!$(e.target).closest('#help').length) {
- $('#help').addClass('hidden');
+ }
+
+ $(document).on("keypress", handleShortcut);
+ $(document).on("keydown", handleShortcut);
+ $(document).on("click", function(ev) {
+ if (!$(e.target).closest("#help > div").length) {
+ $("#help").addClass("hidden");
+ $("body").removeClass("blur");
}
});
document.location.href = url;
});
+
/**
* A function to compute the Levenshtein distance between two strings
* Licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported
// Push and pop states are used to add search results to the browser
// history.
if (browserSupportsHistoryApi()) {
+ // Store the previous <title> so we can revert back to it later.
+ var previousTitle = $(document).prop("title");
+
$(window).on('popstate', function(e) {
var params = getQueryStringParams();
// When browsing back from search results the main page
$('#main.content').removeClass('hidden');
$('#search.content').addClass('hidden');
}
+ // Revert to the previous title manually since the History
+ // API ignores the title parameter.
+ $(document).prop("title", previousTitle);
// When browsing forward to search results the previous
// search will be repeated, so the currentResults are
// cleared to ensure the search is successful.
}());
}());
+
+// Sets the focus on the search bar at the top of the page
+function focusSearchBar() {
+ $('.search-input').focus();
+}
return;
}
- var elements = document.querySelectorAll('pre.rust');
+ var featureRegexp = new RegExp('^\s*#!\\[feature\\(\.*?\\)\\]');
+ var elements = document.querySelectorAll('pre.rust-example-rendered');
Array.prototype.forEach.call(elements, function(el) {
el.onmouseover = function(e) {
a.setAttribute('class', 'test-arrow');
var code = el.previousElementSibling.textContent;
+
+ var channel = '';
+ if (featureRegexp.test(code)) {
+ channel = '&version=nightly';
+ }
+
a.setAttribute('href', window.playgroundUrl + '?code=' +
- encodeURIComponent(code));
+ encodeURIComponent(code) + channel);
a.setAttribute('target', '_blank');
el.appendChild(a);
#![feature(box_syntax)]
#![feature(dynamic_lib)]
#![feature(libc)]
-#![feature(owned_ascii_ext)]
#![feature(path_ext)]
#![feature(path_relative_from)]
#![feature(rustc_private)]
#![feature(set_stdio)]
-#![feature(slice_extras)]
#![feature(slice_patterns)]
#![feature(staged_api)]
-#![feature(subslice_offset)]
#![feature(test)]
#![feature(unicode)]
#![feature(vec_push_all)]
let res = std::thread::Builder::new().stack_size(STACK_SIZE).spawn(move || {
let s = env::args().collect::<Vec<_>>();
main_args(&s)
- }).unwrap().join().unwrap();
+ }).unwrap().join().unwrap_or(101);
process::exit(res as i32);
}
}
pub fn main_args(args: &[String]) -> isize {
- let matches = match getopts::getopts(args.tail(), &opts()) {
+ let matches = match getopts::getopts(&args[1..], &opts()) {
Ok(m) => m,
Err(err) => {
println!("{}", err);
!matches.opt_present("markdown-no-toc")),
(false, false) => {}
}
-
let out = match acquire_input(input, externs, &matches) {
Ok(out) => out,
Err(s) => {
info!("starting to run rustc");
let (tx, rx) = channel();
- std::thread::spawn(move || {
+ rustc_driver::monitor(move || {
use rustc::session::config::Input;
tx.send(core::run_core(paths, cfgs, externs, Input::File(cr),
triple)).unwrap();
- }).join().map_err(|_| "rustc failed").unwrap();
+ });
let (mut krate, analysis) = rx.recv().unwrap();
info!("finished with rustc");
let mut analysis = Some(analysis);
/// Separate any lines at the start of the file that begin with `%`.
fn extract_leading_metadata<'a>(s: &'a str) -> (Vec<&'a str>, &'a str) {
let mut metadata = Vec::new();
+ let mut count = 0;
for line in s.lines() {
if line.starts_with("%") {
// remove %<whitespace>
- metadata.push(line[1..].trim_left())
+ metadata.push(line[1..].trim_left());
+ count += line.len() + 1;
} else {
- let line_start_byte = s.subslice_offset(line);
- return (metadata, &s[line_start_byte..]);
+ return (metadata, &s[count..]);
}
}
// if we're here, then all lines were metadata % lines.
if !lines.is_empty() {
let mut unindented = vec![ lines[0].trim().to_string() ];
- unindented.push_all(&lines.tail().iter().map(|&line| {
+ unindented.push_all(&lines[1..].iter().map(|&line| {
if line.chars().all(|c| c.is_whitespace()) {
line.to_string()
} else {
line[min_indent..].to_string()
}
}).collect::<Vec<_>>());
- unindented.connect("\n")
+ unindented.join("\n")
} else {
s.to_string()
}
let s = self.current_header.as_ref().map(|s| &**s).unwrap_or("");
format!("{}_{}", s, self.cnt)
} else {
- format!("{}_{}", self.names.connect("::"), self.cnt)
+ format!("{}_{}", self.names.join("::"), self.cnt)
};
self.cnt += 1;
let libs = self.libs.clone();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// VecMap
+#![allow(deprecated)]
+
//! Implementations of serialization for structures found in libcollections
use std::usize;
/// # Examples
///
/// ```
- /// # #![feature(rustc_private)]
+ /// #![feature(rustc_private)]
+ ///
/// extern crate serialize;
/// use serialize::hex::ToHex;
///
/// This converts a string literal to hexadecimal and back.
///
/// ```
- /// # #![feature(rustc_private)]
+ /// #![feature(rustc_private)]
+ ///
/// extern crate serialize;
/// use serialize::hex::{FromHex, ToHex};
///
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The fixed-size array type (`[T; n]`).
-
-#![doc(primitive = "array")]
/// Extension methods for ASCII-subset only operations on owned strings
#[unstable(feature = "owned_ascii_ext",
reason = "would prefer to do this in a more general way")]
+#[deprecated(since = "1.3.0",
+ reason = "hasn't yet proved essential to be in the standard library")]
+#[allow(deprecated)]
pub trait OwnedAsciiExt {
/// Converts the string to ASCII upper case:
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// # Examples
///
/// ```
- /// # #![feature(ascii)]
+ /// #![feature(ascii)]
+ ///
/// use std::ascii::AsciiExt;
///
/// let mut ascii = 'a';
/// # Examples
///
/// ```
- /// # #![feature(ascii)]
+ /// #![feature(ascii)]
+ ///
/// use std::ascii::AsciiExt;
///
/// let mut ascii = 'A';
}
#[inline]
+ #[allow(deprecated)]
fn to_ascii_uppercase(&self) -> String {
self.to_string().into_ascii_uppercase()
}
#[inline]
+ #[allow(deprecated)]
fn to_ascii_lowercase(&self) -> String {
self.to_string().into_ascii_lowercase()
}
}
}
+#[allow(deprecated)]
impl OwnedAsciiExt for String {
#[inline]
fn into_ascii_uppercase(self) -> String {
}
#[inline]
+ #[allow(deprecated)]
fn to_ascii_uppercase(&self) -> Vec<u8> {
self.to_vec().into_ascii_uppercase()
}
#[inline]
+ #[allow(deprecated)]
fn to_ascii_lowercase(&self) -> Vec<u8> {
self.to_vec().into_ascii_lowercase()
}
}
}
+#[allow(deprecated)]
impl OwnedAsciiExt for Vec<u8> {
#[inline]
fn into_ascii_uppercase(mut self) -> Vec<u8> {
use char::from_u32;
#[test]
- fn test_ascii() {
- assert!("banana".chars().all(|c| c.is_ascii()));
- assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii()));
- }
+ fn test_is_ascii() {
+ assert!(b"".is_ascii());
+ assert!(b"banana\0\x7F".is_ascii());
+ assert!(b"banana\0\x7F".iter().all(|b| b.is_ascii()));
+ assert!(!b"Vi\xe1\xbb\x87t Nam".is_ascii());
+ assert!(!b"Vi\xe1\xbb\x87t Nam".iter().all(|b| b.is_ascii()));
+ assert!(!b"\xe1\xbb\x87".iter().any(|b| b.is_ascii()));
- #[test]
- fn test_ascii_vec() {
assert!("".is_ascii());
- assert!("a".is_ascii());
- assert!(!"\u{2009}".is_ascii());
+ assert!("banana\0\u{7F}".is_ascii());
+ assert!("banana\0\u{7F}".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华ệ ".chars().any(|c| c.is_ascii()));
}
#[test]
}
}
+ #[test]
+ fn test_make_ascii_lower_case() {
+ macro_rules! test {
+ ($from: expr, $to: expr) => {
+ {
+ let mut x = $from;
+ x.make_ascii_lowercase();
+ assert_eq!(x, $to);
+ }
+ }
+ }
+ test!(b'A', b'a');
+ test!(b'a', b'a');
+ test!(b'!', b'!');
+ test!('A', 'a');
+ test!('À', 'À');
+ test!('a', 'a');
+ test!('!', '!');
+ test!(b"H\xc3\x89".to_vec(), b"h\xc3\x89");
+ test!("HİKß".to_string(), "hİKß");
+ }
+
+
+ #[test]
+ fn test_make_ascii_upper_case() {
+ macro_rules! test {
+ ($from: expr, $to: expr) => {
+ {
+ let mut x = $from;
+ x.make_ascii_uppercase();
+ assert_eq!(x, $to);
+ }
+ }
+ }
+ test!(b'a', b'A');
+ test!(b'A', b'A');
+ test!(b'!', b'!');
+ test!('a', 'A');
+ test!('à', 'à');
+ test!('A', 'A');
+ test!('!', '!');
+ test!(b"h\xc3\xa9".to_vec(), b"H\xc3\xa9");
+ test!("hıKß".to_string(), "HıKß");
+
+ let mut x = "Hello".to_string();
+ x[..3].make_ascii_uppercase(); // Test IndexMut on String.
+ assert_eq!(x, "HELlo")
+ }
+
#[test]
fn test_eq_ignore_ascii_case() {
assert!("url()URL()uRl()Ürl".eq_ignore_ascii_case("url()url()url()Ürl"));
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The boolean type
-
-#![doc(primitive = "bool")]
-#![stable(feature = "rust1", since = "1.0.0")]
/// # Examples
///
/// ```
- /// # #![feature(hashmap_hasher)]
+ /// #![feature(hashmap_hasher)]
+ ///
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// # Examples
///
/// ```
- /// # #![feature(hashmap_hasher)]
+ /// #![feature(hashmap_hasher)]
+ ///
/// use std::collections::HashMap;
/// use std::collections::hash_map::RandomState;
///
/// # Examples
///
/// ```
- /// # #![feature(drain)]
+ /// #![feature(drain)]
+ ///
/// use std::collections::HashMap;
///
/// let mut a = HashMap::new();
use super::HashMap;
use super::Entry::{Occupied, Vacant};
- use iter::{range_inclusive, repeat};
+ use iter::range_inclusive;
use cell::RefCell;
use rand::{thread_rng, Rng};
#[test]
fn test_drops() {
DROP_VECTOR.with(|slot| {
- *slot.borrow_mut() = repeat(0).take(200).collect();
+ *slot.borrow_mut() = vec![0; 200];
});
{
#[test]
fn test_move_iter_drops() {
DROP_VECTOR.with(|v| {
- *v.borrow_mut() = repeat(0).take(200).collect();
+ *v.borrow_mut() = vec![0; 200];
});
let hm = {
/// # Examples
///
/// ```
- /// # #![feature(hashmap_hasher)]
+ /// #![feature(hashmap_hasher)]
+ ///
/// use std::collections::HashSet;
/// use std::collections::hash_map::RandomState;
///
/// # Examples
///
/// ```
- /// # #![feature(hashmap_hasher)]
+ /// #![feature(hashmap_hasher)]
+ ///
/// use std::collections::HashSet;
/// use std::collections::hash_map::RandomState;
///
/// A structure which is a factory for instances of `Hasher` which implement the
/// default trait.
///
-/// This struct has is 0-sized and does not need construction.
+/// This struct is 0-sized and does not need construction.
pub struct DefaultState<H>(marker::PhantomData<H>);
impl<H: Default + hash::Hasher> HashState for DefaultState<H> {
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
fn test_errors_do_not_crash() {
// Open /dev/null as a library to get an error, and make sure
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod dl {
use prelude::v1::*;
use sys::os;
use os::windows::prelude::*;
use ptr;
- use sys::c::compat::kernel32::SetThreadErrorMode;
+ use sys::c::SetThreadErrorMode;
pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
// disable "dll load failed" error dialog.
//! Inspection and manipulation of the process's environment.
//!
//! This module contains methods to inspect various aspects such as
-//! environment varibles, process arguments, the current directory, and various
+//! environment variables, process arguments, the current directory, and various
//! other important directories.
#![stable(feature = "env", since = "1.0.0")]
///
/// * Current directory does not exist.
/// * There are insufficient permissions to access the current directory.
-/// * The internal buffer is not large enough to hold the path.
///
/// # Examples
///
/// Returns the page size of the current architecture in bytes.
#[unstable(feature = "page_size", reason = "naming and/or location may change")]
+#[deprecated(since = "1.3.0",
+ reason = "hasn't seen enough usage to justify inclusion")]
pub fn page_size() -> usize {
os_imp::page_size()
}
/// - freebsd
/// - dragonfly
/// - bitrig
+ /// - netbsd
/// - openbsd
/// - android
/// - windows
pub const EXE_EXTENSION: &'static str = "";
}
+#[cfg(target_os = "netbsd")]
+mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "netbsd";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
#[cfg(target_os = "openbsd")]
mod os {
pub const FAMILY: &'static str = "unix";
// copied from any.rs
impl Error + 'static {
/// Returns true if the boxed type is the same as `T`
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn is<T: Error + 'static>(&self) -> bool {
// Get TypeId of the type this function is instantiated with
/// Returns some reference to the boxed value if it is of type `T`, or
/// `None` if it isn't.
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
if self.is::<T>() {
/// Returns some mutable reference to the boxed value if it is of type `T`, or
/// `None` if it isn't.
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
impl Error + 'static + Send {
/// Forwards to the method defined on the type `Any`.
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn is<T: Error + 'static>(&self) -> bool {
<Error + 'static>::is::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
<Error + 'static>::downcast_ref::<T>(self)
}
/// Forwards to the method defined on the type `Any`.
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ <Error + 'static>::downcast_mut::<T>(self)
+ }
+}
+
+impl Error + 'static + Send + Sync {
+ /// Forwards to the method defined on the type `Any`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <Error + 'static>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ <Error + 'static>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
#[inline]
pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
<Error + 'static>::downcast_mut::<T>(self)
impl Error {
#[inline]
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
/// Attempt to downcast the box to a concrete type.
pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Error>> {
if self.is::<T>() {
impl Error + Send {
#[inline]
- #[unstable(feature = "error_downcast", reason = "recently added")]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
/// Attempt to downcast the box to a concrete type.
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Error + Send>> {
+ pub fn downcast<T: Error + 'static>(self: Box<Self>)
+ -> Result<Box<T>, Box<Error + Send>> {
let err: Box<Error> = self;
<Error>::downcast(err).map_err(|s| unsafe {
// reapply the Send marker
})
}
}
+
+impl Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ /// Attempt to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>)
+ -> Result<Box<T>, Box<Self>> {
+ let err: Box<Error> = self;
+ <Error>::downcast(err).map_err(|s| unsafe {
+ // reapply the Send+Sync marker
+ transmute::<Box<Error>, Box<Error + Send + Sync>>(s)
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use prelude::v1::*;
+ use super::Error;
+ use fmt;
+
+ #[derive(Debug, PartialEq)]
+ struct A;
+ #[derive(Debug, PartialEq)]
+ struct B;
+
+ impl fmt::Display for A {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "A")
+ }
+ }
+ impl fmt::Display for B {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "B")
+ }
+ }
+
+ impl Error for A {
+ fn description(&self) -> &str { "A-desc" }
+ }
+ impl Error for B {
+ // NOTE(review): was "A-desc", a copy-paste from A's impl above; each test
+ // type should report its own description. The visible downcasting test does
+ // not assert on description(), so this is behavior-safe.
+ fn description(&self) -> &str { "B-desc" }
+ }
+
+ #[test]
+ fn downcasting() {
+ let mut a = A;
+ let mut a = &mut a as &mut (Error + 'static);
+ assert_eq!(a.downcast_ref::<A>(), Some(&A));
+ assert_eq!(a.downcast_ref::<B>(), None);
+ assert_eq!(a.downcast_mut::<A>(), Some(&mut A));
+ assert_eq!(a.downcast_mut::<B>(), None);
+
+ let a: Box<Error> = Box::new(A);
+ match a.downcast::<B>() {
+ Ok(..) => panic!("expected error"),
+ Err(e) => assert_eq!(*e.downcast::<A>().unwrap(), A),
+ }
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use borrow::{Cow, ToOwned};
+use ascii;
+use borrow::{Cow, ToOwned, Borrow};
use boxed::Box;
-use clone::Clone;
use convert::{Into, From};
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use error::Error;
-use fmt;
+use fmt::{self, Write};
use io;
use iter::Iterator;
use libc;
/// }
/// # }
/// ```
-#[derive(PartialEq, PartialOrd, Eq, Ord, Hash)]
+#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct CString {
inner: Box<[u8]>,
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Clone for CString {
- fn clone(&self) -> Self {
- CString { inner: self.inner.to_owned().into_boxed_slice() }
- }
-}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl Deref for CString {
type Target = CStr;
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for CString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- fmt::Debug::fmt(&String::from_utf8_lossy(self.as_bytes()), f)
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "cstr_debug", since = "1.3.0")]
+impl fmt::Debug for CStr {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "\""));
+ for byte in self.to_bytes().iter().flat_map(|&b| ascii::escape_default(b)) {
+ try!(f.write_char(byte as char));
+ }
+ write!(f, "\"")
}
}
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl Borrow<CStr> for CString {
+ fn borrow(&self) -> &CStr { self }
+}
+
impl NulError {
/// Returns the position of the nul byte in the slice that was provided to
/// `CString::new`.
}
}
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl ToOwned for CStr {
+ type Owned = CString;
+
+ fn to_owned(&self) -> CString {
+ unsafe { CString::from_vec_unchecked(self.to_bytes().to_vec()) }
+ }
+}
+
#[cfg(test)]
mod tests {
use prelude::v1::*;
#[test]
fn formatted() {
- let s = CString::new(&b"12"[..]).unwrap();
- assert_eq!(format!("{:?}", s), "\"12\"");
+ let s = CString::new(&b"abc\x01\x02\n\xE2\x80\xA6\xFF"[..]).unwrap();
+ assert_eq!(format!("{:?}", s), r#""abc\x01\x02\n\xe2\x80\xa6\xff""#);
}
#[test]
assert_eq!(CStr::from_ptr(ptr).to_string_lossy(), Owned::<str>(format!("123\u{FFFD}")));
}
}
+
+ #[test]
+ fn to_owned() {
+ let data = b"123\0";
+ let ptr = data.as_ptr() as *const libc::c_char;
+
+ let owned = unsafe { CStr::from_ptr(ptr).to_owned() };
+ assert_eq!(owned.as_bytes_with_nul(), data);
+ }
+
+ #[test]
+ fn equal_hash() {
+ use hash;
+
+ let data = b"123\xE2\xFA\xA6\0";
+ let ptr = data.as_ptr() as *const libc::c_char;
+ let cstr: &'static CStr = unsafe { CStr::from_ptr(ptr) };
+
+ let cstr_hash = hash::hash::<_, hash::SipHasher>(&cstr);
+ let cstring_hash =
+ hash::hash::<_, hash::SipHasher>(&CString::new(&data[..data.len() - 1]).unwrap());
+
+ assert_eq!(cstr_hash, cstring_hash);
+ }
}
use fmt;
use ffi::OsString;
-use io::{self, Error, ErrorKind, SeekFrom, Seek, Read, Write};
+use io::{self, SeekFrom, Seek, Read, Write};
use path::{Path, PathBuf};
use sys::fs as fs_imp;
-use sys_common::{AsInnerMut, FromInner, AsInner};
+use sys_common::io::read_to_end_uninitialized;
+use sys_common::{AsInnerMut, FromInner, AsInner, IntoInner};
use vec::Vec;
/// A reference to an open file on the filesystem.
/// will be extended to `size` and have all of the intermediate data filled
/// in with 0s.
///
+ /// # Errors
+ ///
+ /// This function will return an error if the file is not opened for writing.
+ ///
/// # Examples
///
/// ```no_run
/// use std::fs::File;
///
/// # fn foo() -> std::io::Result<()> {
- /// let mut f = try!(File::open("foo.txt"));
- /// try!(f.set_len(0));
+ /// let mut f = try!(File::create("foo.txt"));
+ /// try!(f.set_len(10));
/// # Ok(())
/// # }
/// ```
File { inner: f }
}
}
+impl IntoInner<fs_imp::File> for File {
+ fn into_inner(self) -> fs_imp::File {
+ self.inner
+ }
+}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ unsafe { read_to_end_uninitialized(self, buf) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for File {
/// ```no_run
/// use std::fs::OpenOptions;
///
- /// let file = OpenOptions::new().append(true).open("foo.txt");
+ /// let file = OpenOptions::new().write(true).append(true).open("foo.txt");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
/// ```no_run
/// use std::fs::OpenOptions;
///
- /// let file = OpenOptions::new().truncate(true).open("foo.txt");
+ /// let file = OpenOptions::new().write(true).truncate(true).open("foo.txt");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
pub fn is_symlink(&self) -> bool { self.0.is_symlink() }
}
+impl AsInner<fs_imp::FileType> for FileType {
+ fn as_inner(&self) -> &fs_imp::FileType { &self.0 }
+}
+
impl FromInner<fs_imp::FilePermissions> for Permissions {
fn from_inner(f: fs_imp::FilePermissions) -> Permissions {
Permissions(f)
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> {
- let from = from.as_ref();
- let to = to.as_ref();
- if !from.is_file() {
- return Err(Error::new(ErrorKind::InvalidInput,
- "the source path is not an existing file"))
- }
-
- let mut reader = try!(File::open(from));
- let mut writer = try!(File::create(to));
- let perm = try!(reader.metadata()).permissions();
-
- let ret = try!(io::copy(&mut reader, &mut writer));
- try!(set_permissions(to, perm));
- Ok(ret)
+ fs_imp::copy(from.as_ref(), to.as_ref())
}
/// Creates a new hard link on the filesystem.
reason = "the argument type of u64 is not quite appropriate for \
this function and may change if the standard library \
gains a type to represent a moment in time")]
+#[deprecated(since = "1.3.0",
+ reason = "will never be stabilized as-is and its replacement will \
+ likely have a totally new API")]
pub fn set_file_times<P: AsRef<Path>>(path: P, accessed: u64,
modified: u64) -> io::Result<()> {
fs_imp::utimes(path.as_ref(), accessed, modified)
}
}
+    #[test]
+    fn copy_src_does_not_exist() {
+        let tmpdir = tmpdir();
+        let from = Path2::new("test/nonexistent-bogus-path");
+        let to = tmpdir.join("out.txt");
+        // Pre-populate the destination so we can verify that a failed copy
+        // leaves the existing destination contents untouched.
+        check!(check!(File::create(&to)).write(b"hello"));
+        assert!(fs::copy(&from, &to).is_err());
+        assert!(!from.exists());
+        let mut v = Vec::new();
+        check!(check!(File::open(&to)).read_to_end(&mut v));
+        assert_eq!(v, b"hello");
+    }
+
#[test]
fn copy_file_ok() {
let tmpdir = tmpdir();
check!(fs::set_permissions(&out, attr.permissions()));
}
+    #[cfg(windows)]
+    #[test]
+    fn copy_file_preserves_streams() {
+        let tmp = tmpdir();
+        // Write all the data into an NTFS alternate data stream named
+        // "bunny"; the file's main stream stays empty.
+        check!(check!(File::create(tmp.join("in.txt:bunny"))).write("carrot".as_bytes()));
+        assert_eq!(check!(fs::copy(tmp.join("in.txt"), tmp.join("out.txt"))), 6);
+        // The copy's main stream is still empty...
+        assert_eq!(check!(tmp.join("out.txt").metadata()).len(), 0);
+        let mut v = Vec::new();
+        // ...but the alternate stream was carried across by the copy.
+        check!(check!(File::open(tmp.join("out.txt:bunny"))).read_to_end(&mut v));
+        assert_eq!(v, b"carrot".to_vec());
+    }
+
#[cfg(not(windows))] // FIXME(#10264) operation not permitted?
#[test]
fn symlinks_work() {
use error;
use fmt;
use io::{self, DEFAULT_BUF_SIZE, Error, ErrorKind, SeekFrom};
-use ptr;
-use iter;
-/// Wraps a `Read` and buffers input from it
+/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a `Read` instance.
/// For example, every call to `read` on `TcpStream` results in a system call.
///
/// # Examples
///
-/// ```no_run
+/// ```
/// use std::io::prelude::*;
/// use std::io::BufReader;
/// use std::fs::File;
}
impl<R: Read> BufReader<R> {
- /// Creates a new `BufReader` with a default buffer capacity
+ /// Creates a new `BufReader` with a default buffer capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut f = try!(File::open("log.txt"));
+ /// let mut reader = BufReader::new(f);
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
- /// Creates a new `BufReader` with the specified buffer capacity
+ /// Creates a new `BufReader` with the specified buffer capacity.
+ ///
+ /// # Examples
+ ///
+ /// Creating a buffer with ten bytes of capacity:
+ ///
+ /// ```
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut f = try!(File::open("log.txt"));
+ /// let mut reader = BufReader::with_capacity(10, f);
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(cap: usize, inner: R) -> BufReader<R> {
- let mut buf = Vec::with_capacity(cap);
- buf.extend(iter::repeat(0).take(cap));
BufReader {
inner: inner,
- buf: buf,
+ buf: vec![0; cap],
pos: 0,
cap: 0,
}
}
/// Gets a reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut f1 = try!(File::open("log.txt"));
+ /// let mut reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.get_ref();
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_ref(&self) -> &R { &self.inner }
/// Gets a mutable reference to the underlying reader.
///
- /// # Warning
- ///
/// It is inadvisable to directly read from the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut f1 = try!(File::open("log.txt"));
+ /// let mut reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.get_mut();
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut R { &mut self.inner }
/// Unwraps this `BufReader`, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut f1 = try!(File::open("log.txt"));
+ /// let mut reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.into_inner();
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> R { self.inner }
}
}
}
-/// Wraps a Writer and buffers output to it
+/// Wraps a writer and buffers its output.
///
-/// It can be excessively inefficient to work directly with a `Write`. For
-/// example, every call to `write` on `TcpStream` results in a system call. A
-/// `BufWriter` keeps an in memory buffer of data and writes it to the
-/// underlying `Write` in large, infrequent batches.
+/// It can be excessively inefficient to work directly with something that
+/// implements `Write`. For example, every call to `write` on `TcpStream`
+/// results in a system call. A `BufWriter` keeps an in-memory buffer of data
+/// and writes it to an underlying writer in large, infrequent batches.
///
/// The buffer will be written out when the writer is dropped.
+///
+/// # Examples
+///
+/// Let's write the numbers one through ten to a `TcpStream`:
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::net::TcpStream;
+///
+/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
+///
+/// for i in 1..11 {
+/// stream.write(&[i]).unwrap();
+/// }
+/// ```
+///
+/// Because we're not buffering, we write each one in turn, incurring the
+/// overhead of a system call per byte written. We can fix this with a
+/// `BufWriter`:
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::io::BufWriter;
+/// use std::net::TcpStream;
+///
+/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+///
+/// for i in 1..11 {
+/// stream.write(&[i]).unwrap();
+/// }
+/// ```
+///
+/// By wrapping the stream with a `BufWriter`, these ten writes are all grouped
+/// together by the buffer, and will all be written out in one system call when
+/// the `stream` is dropped.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: Write> {
inner: Option<W>,
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::BufWriter;
+/// use std::net::TcpStream;
+///
+/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+///
+/// // do stuff with the stream
+///
+/// // we want to get our `TcpStream` back, so let's try:
+///
+/// let stream = match stream.into_inner() {
+/// Ok(s) => s,
+/// Err(e) => {
+/// // Here, e is an IntoInnerError
+/// panic!("An error occurred");
+/// }
+/// };
+/// ```
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
- /// Creates a new `BufWriter` with a default buffer capacity
+ /// Creates a new `BufWriter` with a default buffer capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
- /// Creates a new `BufWriter` with the specified buffer capacity
+ /// Creates a new `BufWriter` with the specified buffer capacity.
+ ///
+ /// # Examples
+ ///
+    /// Creating a `BufWriter` with a hundred bytes of buffer capacity.
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
+ /// let mut buffer = BufWriter::with_capacity(100, stream);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(cap: usize, inner: W) -> BufWriter<W> {
BufWriter {
}
}
if written > 0 {
- // NB: would be better expressed as .remove(0..n) if it existed
- unsafe {
- ptr::copy(self.buf.as_ptr().offset(written as isize),
- self.buf.as_mut_ptr(),
- len - written);
- }
+ self.buf.drain(..written);
}
- self.buf.truncate(len - written);
ret
}
/// Gets a reference to the underlying writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // we can use reference just like buffer
+ /// let reference = buffer.get_ref();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() }
- /// Gets a mutable reference to the underlying write.
+ /// Gets a mutable reference to the underlying writer.
///
- /// # Warning
+ /// It is inadvisable to directly write to the underlying writer.
///
- /// It is inadvisable to directly read from the underlying writer.
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // we can use reference just like buffer
+ /// let reference = buffer.get_mut();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() }
/// Unwraps this `BufWriter`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // unwrap the TcpStream and flush the buffer
+ /// let stream = buffer.into_inner().unwrap();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
}
impl<W> IntoInnerError<W> {
- /// Returns the error which caused the call to `into_inner` to fail.
+ /// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // do stuff with the stream
+ ///
+ /// // we want to get our `TcpStream` back, so let's try:
+ ///
+ /// let stream = match stream.into_inner() {
+ /// Ok(s) => s,
+ /// Err(e) => {
+ /// // Here, e is an IntoInnerError, let's log the inner error.
+ /// //
+ /// // We'll just 'log' to stdout for this example.
+ /// println!("{}", e.error());
+ ///
+ /// panic!("An unexpected error occurred.");
+ /// }
+ /// };
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn error(&self) -> &Error { &self.1 }
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // do stuff with the stream
+ ///
+ /// // we want to get our `TcpStream` back, so let's try:
+ ///
+ /// let stream = match stream.into_inner() {
+ /// Ok(s) => s,
+ /// Err(e) => {
+ /// // Here, e is a IntoInnerError, let's re-examine the buffer:
+ /// let buffer = e.into_inner();
+ ///
+ /// // do stuff to try to recover
+ ///
+ /// // afterwards, let's just return the stream
+ /// buffer.into_inner().unwrap()
+ /// }
+ /// };
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> W { self.0 }
}
}
}
-/// Wraps a Writer and buffers output to it, flushing whenever a newline
+/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
-/// The buffer will be written out when the writer is dropped.
+/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
+/// But it only does this batched write when it goes out of scope, or when the
+/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
+/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
+/// does exactly that.
+///
+/// [bufwriter]: struct.BufWriter.html
+///
+/// If there's still a partial line in the buffer when the `LineWriter` is
+/// dropped, it will flush those contents.
+///
+/// # Examples
+///
+/// We can use `LineWriter` to write one line at a time, significantly
+/// reducing the number of actual writes to the file.
+///
+/// ```
+/// use std::fs::File;
+/// use std::io::prelude::*;
+/// use std::io::LineWriter;
+///
+/// # fn foo() -> std::io::Result<()> {
+/// let road_not_taken = b"I shall be telling this with a sigh
+/// Somewhere ages and ages hence:
+/// Two roads diverged in a wood, and I -
+/// I took the one less traveled by,
+/// And that has made all the difference.";
+///
+/// let file = try!(File::create("poem.txt"));
+/// let mut file = LineWriter::new(file);
+///
+/// for &byte in road_not_taken.iter() {
+/// file.write(&[byte]).unwrap();
+/// }
+///
+/// // let's check we did the right thing.
+/// let mut file = try!(File::open("poem.txt"));
+/// let mut contents = String::new();
+///
+/// try!(file.read_to_string(&mut contents));
+///
+/// assert_eq!(contents.as_bytes(), &road_not_taken[..]);
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
}
impl<W: Write> LineWriter<W> {
- /// Creates a new `LineWriter`
+ /// Creates a new `LineWriter`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let file = try!(File::create("poem.txt"));
+ /// let file = LineWriter::new(file);
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let file = try!(File::create("poem.txt"));
+ /// let file = LineWriter::with_capacity(100, file);
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(cap: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(cap, inner) }
}
/// Gets a reference to the underlying writer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let file = try!(File::create("poem.txt"));
+ /// let file = LineWriter::new(file);
+ ///
+ /// let reference = file.get_ref();
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_ref(&self) -> &W { self.inner.get_ref() }
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let file = try!(File::create("poem.txt"));
+ /// let mut file = LineWriter::new(file);
+ ///
+ /// // we can use reference just like file
+ /// let reference = file.get_mut();
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() }
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let file = try!(File::create("poem.txt"));
+ ///
+ /// let writer: LineWriter<File> = LineWriter::new(file);
+ ///
+ /// let file: File = try!(writer.into_inner());
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write> Write for LineWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- match buf.rposition_elem(&b'\n') {
+ match buf.iter().rposition(|b| *b == b'\n') {
Some(i) => {
let n = try!(self.inner.write(&buf[..i + 1]));
if n != i + 1 { return Ok(n) }
/// Gets a mutable reference to the underlying stream.
///
- /// # Warning
- ///
/// It is inadvisable to read directly from or write directly to the
/// underlying stream.
pub fn get_mut(&mut self) -> &mut S {
use cmp;
use io::{self, SeekFrom, Error, ErrorKind};
-use iter::repeat;
use slice;
-/// A `Cursor` is a type which wraps a non-I/O object to provide a `Seek`
-/// implementation.
+/// A `Cursor` wraps another type and provides it with a
+/// [`Seek`](trait.Seek.html) implementation.
///
-/// Cursors are typically used with memory buffer objects in order to allow
-/// `Seek`, `Read`, and `Write` implementations. For example, common cursor types
-/// include `Cursor<Vec<u8>>` and `Cursor<&[u8]>`.
+/// Cursors are typically used with in-memory buffers to allow them to
+/// implement `Read` and/or `Write`, allowing these buffers to be used
+/// anywhere you might use a reader or writer that does actual I/O.
///
-/// Implementations of the I/O traits for `Cursor<T>` are currently not generic
-/// over `T` itself. Instead, specific implementations are provided for various
-/// in-memory buffer types like `Vec<u8>` and `&[u8]`.
+/// The standard library implements some I/O traits on various types which
+/// are commonly used as a buffer, like `Cursor<Vec<u8>>` and `Cursor<&[u8]>`.
+///
+/// # Examples
+///
+/// We may want to write bytes to a [`File`][file] in our production
+/// code, but use an in-memory buffer in our tests. We can do this with
+/// `Cursor`:
+///
+/// [file]: ../fs/struct.File.html
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::io::{self, SeekFrom};
+/// use std::fs::File;
+///
+/// // a library function we've written
+/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
+/// try!(writer.seek(SeekFrom::End(-10)));
+///
+/// for i in 0..10 {
+/// try!(writer.write(&[i]));
+/// }
+///
+/// // all went well
+/// Ok(())
+/// }
+///
+/// # fn foo() -> io::Result<()> {
+/// // Here's some code that uses this library function.
+/// //
+/// // We might want to use a BufReader here for efficiency, but let's
+/// // keep this example focused.
+/// let mut file = try!(File::create("foo.txt"));
+///
+/// try!(write_ten_bytes_at_end(&mut file));
+/// # Ok(())
+/// # }
+///
+/// // now let's write a test
+/// #[test]
+/// fn test_writes_bytes() {
+///     // setting up a real File is much slower than an in-memory buffer,
+/// // let's use a cursor instead
+/// use std::io::Cursor;
+/// let mut buff = Cursor::new(vec![0; 15]);
+///
+///     write_ten_bytes_at_end(&mut buff).unwrap();
+///
+/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone, Debug)]
pub struct Cursor<T> {
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying I/O object.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner: inner }
}
/// Consumes this cursor, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let vec = buff.into_inner();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> T { self.inner }
/// Gets a reference to the underlying value in this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_ref();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_ref(&self) -> &T { &self.inner }
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_mut();
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
- /// Returns the current value of this cursor
+ /// Returns the current position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ /// use std::io::prelude::*;
+ /// use std::io::SeekFrom;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.seek(SeekFrom::Current(2)).unwrap();
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.seek(SeekFrom::Current(-1)).unwrap();
+ /// assert_eq!(buff.position(), 1);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn position(&self) -> u64 { self.pos }
- /// Sets the value of this cursor
+ /// Sets the position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.set_position(2);
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.set_position(4);
+ /// assert_eq!(buff.position(), 4);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn set_position(&mut self, pos: u64) { self.pos = pos; }
}
// currently are
let pos = self.position();
let amt = pos.saturating_sub(self.inner.len() as u64);
- self.inner.extend(repeat(0).take(amt as usize));
+ // use `resize` so that the zero filling is as efficient as possible
+ let len = self.inner.len();
+ self.inner.resize(len + amt as usize, 0);
// Figure out what bytes will be used to overwrite what's currently
// there (left), and what will be appended on the end (right)
use result;
use sys;
-/// A type for results generated by I/O related functions where the `Err` type
-/// is hard-wired to `io::Error`.
+/// A specialized [`Result`][result] type for I/O operations.
+///
+/// [result]: ../result/enum.Result.html
+///
+/// This type is broadly used across `std::io` for any operation which may
+/// produce an error.
///
/// This typedef is generally used to avoid writing out `io::Error` directly and
-/// is otherwise a direct mapping to `std::result::Result`.
+/// is otherwise a direct mapping to `Result`.
+///
+/// While usual Rust style is to import types directly, aliases of `Result`
+/// often are not, to make it easier to distinguish between them. `Result` is
+/// generally assumed to be `std::result::Result`, and so users of this alias
+/// will generally use `io::Result` instead of shadowing the prelude's import
+/// of `std::result::Result`.
+///
+/// # Examples
+///
+/// A convenience function that bubbles an `io::Result` to its caller:
+///
+/// ```
+/// use std::io;
+///
+/// fn get_string() -> io::Result<String> {
+/// let mut buffer = String::new();
+///
+/// try!(io::stdin().read_line(&mut buffer));
+///
+/// Ok(buffer)
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub type Result<T> = result::Result<T, Error>;
///
/// If this `Error` was constructed via `new` then this function will
/// return `Some`, otherwise it will return `None`.
- #[unstable(feature = "io_error_inner",
- reason = "recently added and requires UFCS to downcast")]
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
pub fn get_ref(&self) -> Option<&(error::Error+Send+Sync+'static)> {
match self.repr {
Repr::Os(..) => None,
///
/// If this `Error` was constructed via `new` then this function will
/// return `Some`, otherwise it will return `None`.
- #[unstable(feature = "io_error_inner",
- reason = "recently added and requires UFCS to downcast")]
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
pub fn get_mut(&mut self) -> Option<&mut (error::Error+Send+Sync+'static)> {
match self.repr {
Repr::Os(..) => None,
///
/// If this `Error` was constructed via `new` then this function will
/// return `Some`, otherwise it will return `None`.
- #[unstable(feature = "io_error_inner",
- reason = "recently added and requires UFCS to downcast")]
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
pub fn into_inner(self) -> Option<Box<error::Error+Send+Sync>> {
match self.repr {
Repr::Os(..) => None,
// we have to call all of these UFCS style right now since method
// resolution won't implicitly drop the Send+Sync bounds
let mut err = Error::new(ErrorKind::Other, TestError);
- assert!(error::Error::is::<TestError>(err.get_ref().unwrap()));
+ assert!(err.get_ref().unwrap().is::<TestError>());
assert_eq!("asdf", err.get_ref().unwrap().description());
- assert!(error::Error::is::<TestError>(err.get_mut().unwrap()));
+ assert!(err.get_mut().unwrap().is::<TestError>());
let extracted = err.into_inner().unwrap();
- error::Error::downcast::<TestError>(extracted).unwrap();
+ extracted.downcast::<TestError>().unwrap();
}
}
// except according to those terms.
//! Traits, helpers, and type definitions for core I/O functionality.
+//!
+//! The `std::io` module contains a number of common things you'll need
+//! when doing input and output. The most core part of this module is
+//! the [`Read`][read] and [`Write`][write] traits, which provide the
+//! most general interface for reading and writing input and output.
+//!
+//! [read]: trait.Read.html
+//! [write]: trait.Write.html
+//!
+//! # Read and Write
+//!
+//! Because they are traits, they're implemented by a number of other types,
+//! and you can implement them for your types too. As such, you'll see a
+//! few different types of I/O throughout the documentation in this module:
+//! `File`s, `TcpStream`s, and somtimes even `Vec<T>`s. For example, `Read`
+//! adds a `read()` method, which we can use on `File`s:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::fs::File;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let mut f = try!(File::open("foo.txt"));
+//! let mut buffer = [0; 10];
+//!
+//! // read up to 10 bytes
+//! try!(f.read(&mut buffer));
+//!
+//! println!("The bytes: {:?}", buffer);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! `Read` and `Write` are so important, implementors of the two traits have a
+//! nickname: readers and writers. So you'll sometimes see 'a reader' instead
+//! of 'a type that implements the `Read` trait'. Much easier!
+//!
+//! ## Seek and BufRead
+//!
+//! Beyond that, there are two important traits that are provided: [`Seek`][seek]
+//! and [`BufRead`][bufread]. Both of these build on top of a reader to control
+//! how the reading happens. `Seek` lets you control where the next byte is
+//! coming from:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::SeekFrom;
+//! use std::fs::File;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let mut f = try!(File::open("foo.txt"));
+//! let mut buffer = [0; 10];
+//!
+//! // skip to the last 10 bytes of the file
+//! try!(f.seek(SeekFrom::End(-10)));
+//!
+//! // read up to 10 bytes
+//! try!(f.read(&mut buffer));
+//!
+//! println!("The bytes: {:?}", buffer);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! [seek]: trait.Seek.html
+//! [bufread]: trait.BufRead.html
+//!
+//! `BufRead` uses an internal buffer to provide a number of other ways to read, but
+//! to show it off, we'll need to talk about buffers in general. Keep reading!
+//!
+//! ## BufReader and BufWriter
+//!
+//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be
+//! making near-constant calls to the operating system. To help with this,
+//! `std::io` comes with two structs, `BufReader` and `BufWriter`, which wrap
+//! readers and writers. The wrapper uses a buffer, reducing the number of
+//! calls and providing nicer methods for accessing exactly what you want.
+//!
+//! For example, `BufReader` works with the `BufRead` trait to add extra
+//! methods to any reader:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufReader;
+//! use std::fs::File;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let f = try!(File::open("foo.txt"));
+//! let mut reader = BufReader::new(f);
+//! let mut buffer = String::new();
+//!
+//! // read a line into buffer
+//! try!(reader.read_line(&mut buffer));
+//!
+//! println!("{}", buffer);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! `BufWriter` doesn't add any new ways of writing, it just buffers every call
+//! to [`write()`][write]:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufWriter;
+//! use std::fs::File;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let f = try!(File::create("foo.txt"));
+//! {
+//! let mut writer = BufWriter::new(f);
+//!
+//! // write a byte to the buffer
+//! try!(writer.write(&[42]));
+//!
+//! } // the buffer is flushed once writer goes out of scope
+//!
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! [write]: trait.Write.html#tymethod.write
+//!
+//! ## Standard input and output
+//!
+//! A very common source of input is standard input:
+//!
+//! ```
+//! use std::io;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let mut input = String::new();
+//!
+//! try!(io::stdin().read_line(&mut input));
+//!
+//! println!("You typed: {}", input.trim());
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! And a very common source of output is standard output:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//!
+//! # fn foo() -> io::Result<()> {
+//! try!(io::stdout().write(&[42]));
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Of course, using `io::stdout()` directly is less common than something like
+//! `println!`.
+//!
+//! ## Iterator types
+//!
+//! A large number of the structures provided by `std::io` are for various
+//! ways of iterating over I/O. For example, `Lines` is used to split over
+//! lines:
+//!
+//! ```
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufReader;
+//! use std::fs::File;
+//!
+//! # fn foo() -> io::Result<()> {
+//! let f = try!(File::open("foo.txt"));
+//! let mut reader = BufReader::new(f);
+//!
+//! for line in reader.lines() {
+//! let line = try!(line);
+//! println!("{}", line);
+//! }
+//!
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! ## Functions
+//!
+//! There are a number of [functions][functions] that offer access to various
+//! features. For example, we can use three of these functions to copy everything
+//! from standard input to standard output:
+//!
+//! ```
+//! use std::io;
+//!
+//! # fn foo() -> io::Result<()> {
+//! try!(io::copy(&mut io::stdin(), &mut io::stdout()));
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! [functions]: #functions
+//!
+//! ## io::Result
+//!
+//! Last, but certainly not least, is [`io::Result`][result]. This type is used
+//! as the return type of many `std::io` functions that can cause an error, and
+//! can be returned from your own functions as well. Many of the examples in this
+//! module use the [`try!`][try] macro:
+//!
+//! ```
+//! use std::io;
+//!
+//! fn read_input() -> io::Result<()> {
+//! let mut input = String::new();
+//!
+//! try!(io::stdin().read_line(&mut input));
+//!
+//! println!("You typed: {}", input.trim());
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! The return type of `read_input()`, `io::Result<()>`, is a very common type
+//! for functions which don't have a 'real' return value, but do want to return
+//! errors if they happen. In this case, the only purpose of this function is
+//! to read the line and print it, so we use `()`.
+//!
+//! [result]: type.Result.html
+//! [try]: macro.try!.html
#![stable(feature = "rust1", since = "1.0.0")]
use rustc_unicode::str as core_str;
use error as std_error;
use fmt;
-use iter::{self, Iterator, Extend};
+use iter::{Iterator};
use marker::Sized;
use ops::{Drop, FnOnce};
use option::Option::{self, Some, None};
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
- buf.extend(iter::repeat(0).take(new_write_size));
+ buf.resize(len + new_write_size, 0);
}
match r.read(&mut buf[len..]) {
ret
}
-/// A trait for objects which are byte-oriented sources.
+/// The `Read` trait allows for reading bytes from a source.
///
-/// Readers are defined by one method, `read`. Each call to `read` will attempt
-/// to pull bytes from this source into a provided buffer.
+/// Implementors of the `Read` trait are sometimes called 'readers'.
///
-/// Readers are intended to be composable with one another. Many objects
-/// throughout the I/O and related libraries take and provide types which
-/// implement the `Read` trait.
+/// Readers are defined by one required method, `read()`. Each call to `read`
+/// will attempt to pull bytes from this source into a provided buffer. A
+/// number of other methods are implemented in terms of `read()`, giving
+/// implementors a number of ways to read bytes while only needing to implement
+/// a single method.
+///
+/// Readers are intended to be composable with one another. Many implementors
+/// throughout `std::io` take and provide types which implement the `Read`
+/// trait.
+///
+/// # Examples
+///
+/// [`File`][file]s implement `Read`:
+///
+/// [file]: ../std/fs/struct.File.html
+///
+/// ```
+/// use std::io;
+/// use std::io::prelude::*;
+/// use std::fs::File;
+///
+/// # fn foo() -> io::Result<()> {
+/// let mut f = try!(File::open("foo.txt"));
+/// let mut buffer = [0; 10];
+///
+/// // read up to 10 bytes
+/// try!(f.read(&mut buffer));
+///
+/// let mut buffer = Vec::new();
+/// // read the whole file
+/// try!(f.read_to_end(&mut buffer));
+///
+/// // read into a String, so that you don't need to do the conversion.
+/// let mut buffer = String::new();
+/// try!(f.read_to_string(&mut buffer));
+///
+/// // and more! See the other methods for more details.
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Read {
/// Pull some bytes from this source into the specified buffer, returning
/// If this function encounters any form of I/O or other error, an error
/// variant will be returned. If an error is returned then it must be
/// guaranteed that no bytes were read.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer = [0; 10];
+ ///
+    /// // read up to 10 bytes
+ /// try!(f.read(&mut buffer[..]));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read(&mut self, buf: &mut [u8]) -> Result<usize>;
/// If any other read error is encountered then this function immediately
/// returns. Any bytes which have already been read will be appended to
/// `buf`.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer = Vec::new();
+ ///
+ /// // read the whole file
+ /// try!(f.read_to_end(&mut buffer));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> {
read_to_end(self, buf)
/// If the data in this stream is *not* valid UTF-8 then an error is
/// returned and `buf` is unchanged.
///
- /// See `read_to_end` for other error semantics.
+ /// See [`read_to_end()`][readtoend] for other error semantics.
+ ///
+ /// [readtoend]: #method.read_to_end
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer = String::new();
+ ///
+ /// try!(f.read_to_string(&mut buffer));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_to_string(&mut self, buf: &mut String) -> Result<usize> {
// Note that we do *not* call `.read_to_end()` here. We are passing
///
/// The returned adaptor also implements `Read` and will simply borrow this
/// current reader.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::Read;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer = Vec::new();
+ /// let mut other_buffer = Vec::new();
+ ///
+ /// {
+ /// let reference = f.by_ref();
+ ///
+ /// // read at most 5 bytes
+ /// try!(reference.take(5).read_to_end(&mut buffer));
+ ///
+ /// } // drop our &mut reference so we can use f again
+ ///
+ /// // original file still usable, read the rest
+ /// try!(f.read_to_end(&mut other_buffer));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self where Self: Sized { self }
/// R::Err>`. The yielded item is `Ok` if a byte was successfully read and
/// `Err` otherwise for I/O errors. EOF is mapped to returning `None` from
/// this iterator.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ ///
+ /// for byte in f.bytes() {
+ /// println!("{}", byte.unwrap());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn bytes(self) -> Bytes<Self> where Self: Sized {
Bytes { inner: self }
///
/// Currently this adaptor will discard intermediate data read, and should
/// be avoided if this is not desired.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// #![feature(io)]
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ ///
+ /// for c in f.chars() {
+ /// println!("{}", c.unwrap());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
#[unstable(feature = "io", reason = "the semantics of a partial read/write \
of where errors happen is currently \
unclear and may change")]
/// The returned `Read` instance will first read all bytes from this object
/// until EOF is encountered. Afterwards the output is equivalent to the
/// output of `next`.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f1 = try!(File::open("foo.txt"));
+ /// let mut f2 = try!(File::open("bar.txt"));
+ ///
+ /// let mut handle = f1.chain(f2);
+ /// let mut buffer = String::new();
+ ///
+ /// // read the value into a String. We could use any Read method here,
+ /// // this is just one example.
+ /// try!(handle.read_to_string(&mut buffer));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn chain<R: Read>(self, next: R) -> Chain<Self, R> where Self: Sized {
Chain { first: self, second: next, done_first: false }
/// `limit` bytes, after which it will always return EOF (`Ok(0)`). Any
/// read errors will not count towards the number of bytes read and future
/// calls to `read` may succeed.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer = [0; 5];
+ ///
+ /// // read at most five bytes
+ /// let mut handle = f.take(5);
+ ///
+ /// try!(handle.read(&mut buffer));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn take(self, limit: u64) -> Take<Self> where Self: Sized {
Take { inner: self, limit: limit }
/// Whenever the returned `Read` instance is read it will write the read
/// data to `out`. The current semantics of this implementation imply that
/// a `write` error will not report how much data was initially read.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][file]s implement `Read`:
+ ///
+ /// [file]: ../std/fs/struct.File.html
+ ///
+ /// ```
+ /// #![feature(io)]
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut f = try!(File::open("foo.txt"));
+ /// let mut buffer1 = Vec::with_capacity(10);
+    /// let mut buffer2 = vec![0; 10];
+ ///
+ /// // write the output to buffer1 as we read
+ /// let mut handle = f.tee(&mut buffer1);
+ ///
+ /// try!(handle.read(&mut buffer2));
+ /// # Ok(())
+ /// # }
+ /// ```
#[unstable(feature = "io", reason = "the semantics of a partial read/write \
of where errors happen is currently \
unclear and may change")]
/// A trait for objects which are byte-oriented sinks.
///
-/// The `write` method will attempt to write some data into the object,
-/// returning how many bytes were successfully written.
+/// Implementors of the `Write` trait are sometimes called 'writers'.
+///
+/// Writers are defined by two required methods, `write()` and `flush()`:
+///
+/// * The `write()` method will attempt to write some data into the object,
+/// returning how many bytes were successfully written.
+///
+/// * The `flush()` method is useful for adaptors and explicit buffers
+/// themselves for ensuring that all buffered data has been pushed out to the
+/// 'true sink'.
+///
+/// Writers are intended to be composable with one another. Many implementors
+/// throughout `std::io` take and provide types which implement the `Write`
+/// trait.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::prelude::*;
+/// use std::fs::File;
///
-/// The `flush` method is useful for adaptors and explicit buffers themselves
-/// for ensuring that all buffered data has been pushed out to the "true sink".
+/// # fn foo() -> std::io::Result<()> {
+/// let mut buffer = try!(File::create("foo.txt"));
///
-/// Writers are intended to be composable with one another. Many objects
-/// throughout the I/O and related libraries take and provide types which
-/// implement the `Write` trait.
+/// try!(buffer.write(b"some bytes"));
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Write {
/// Write a buffer into this object, returning how many bytes were written.
///
/// It is **not** considered an error if the entire buffer could not be
/// written to this writer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer = try!(File::create("foo.txt"));
+ ///
+ /// try!(buffer.write(b"some bytes"));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write(&mut self, buf: &[u8]) -> Result<usize>;
///
/// It is considered an error if not all bytes could be written due to
/// I/O errors or EOF being reached.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::prelude::*;
+ /// use std::io::BufWriter;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer = BufWriter::new(try!(File::create("foo.txt")));
+ ///
+ /// try!(buffer.write(b"some bytes"));
+ /// try!(buffer.flush());
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn flush(&mut self) -> Result<()>;
/// # Errors
///
/// This function will return the first error that `write` returns.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer = try!(File::create("foo.txt"));
+ ///
+ /// try!(buffer.write_all(b"some bytes"));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write_all(&mut self, mut buf: &[u8]) -> Result<()> {
while !buf.is_empty() {
/// Writes a formatted string into this writer, returning any error
/// encountered.
///
- /// This method is primarily used to interface with the `format_args!`
- /// macro, but it is rare that this should explicitly be called. The
- /// `write!` macro should be favored to invoke this method instead.
+ /// This method is primarily used to interface with the
+ /// [`format_args!`][formatargs] macro, but it is rare that this should
+ /// explicitly be called. The [`write!`][write] macro should be favored to
+ /// invoke this method instead.
+ ///
+ /// [formatargs]: ../std/macro.format_args!.html
+ /// [write]: ../std/macro.write!.html
///
- /// This function internally uses the `write_all` method on this trait and
- /// hence will continuously write data so long as no errors are received.
- /// This also means that partial writes are not indicated in this signature.
+ /// This function internally uses the [`write_all`][writeall] method on
+ /// this trait and hence will continuously write data so long as no errors
+ /// are received. This also means that partial writes are not indicated in
+ /// this signature.
+ ///
+ /// [writeall]: #method.write_all
///
/// # Errors
///
/// This function will return any I/O error reported while formatting.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer = try!(File::create("foo.txt"));
+ ///
+ /// // this call
+ /// try!(write!(buffer, "{:.*}", 2, 1.234567));
+ /// // turns into this:
+ /// try!(buffer.write_fmt(format_args!("{:.*}", 2, 1.234567)));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<()> {
// Create a shim which translates a Write to a fmt::Write and saves
///
/// The returned adaptor also implements `Write` and will simply borrow this
/// current writer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Write;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer = try!(File::create("foo.txt"));
+ ///
+ /// let reference = buffer.by_ref();
+ ///
+ /// // we can use reference just like our original buffer
+ /// try!(reference.write_all(b"some bytes"));
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self where Self: Sized { self }
/// implementation do not precisely track where errors happen. For example
/// an error on the second call to `write` will not report that the first
/// call to `write` succeeded.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io)]
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut buffer1 = try!(File::create("foo.txt"));
+ /// let mut buffer2 = Vec::new();
+ ///
+    /// // write the bytes to buffer1 and buffer2 at the same time
+ /// let mut handle = buffer1.broadcast(&mut buffer2);
+ ///
+ /// try!(handle.write(b"some bytes"));
+ /// # Ok(())
+ /// # }
+ /// ```
#[unstable(feature = "io", reason = "the semantics of a partial read/write \
of where errors happen is currently \
unclear and may change")]
}
}
-/// An object implementing `Seek` internally has some form of cursor which can
-/// be moved within a stream of bytes.
+/// The `Seek` trait provides a cursor which can be moved within a stream of
+/// bytes.
///
/// The stream typically has a fixed size, allowing seeking relative to either
/// end or the current offset.
+///
+/// # Examples
+///
+/// [`File`][file]s implement `Seek`:
+///
+/// [file]: ../std/fs/struct.File.html
+///
+/// ```
+/// use std::io;
+/// use std::io::prelude::*;
+/// use std::fs::File;
+/// use std::io::SeekFrom;
+///
+/// # fn foo() -> io::Result<()> {
+/// let mut f = try!(File::open("foo.txt"));
+///
+/// // move the cursor 42 bytes from the start of the file
+/// try!(f.seek(SeekFrom::Start(42)));
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Seek {
- /// Seek to an offset, in bytes, in a stream
+ /// Seek to an offset, in bytes, in a stream.
///
- /// A seek beyond the end of a stream is allowed, but seeking before offset
- /// 0 is an error.
+    /// A seek beyond the end of a stream is allowed.
///
/// The behavior when seeking past the end of the stream is implementation
/// defined.
///
- /// This method returns the new position within the stream if the seek
- /// operation completed successfully.
+ /// If the seek operation completed successfully,
+ /// this method returns the new position from the start of the stream.
+ /// That position can be used later with `SeekFrom::Start`.
///
/// # Errors
///
- /// Seeking to a negative offset is considered an error
+ /// Seeking to a negative offset is considered an error.
#[stable(feature = "rust1", since = "1.0.0")]
fn seek(&mut self, pos: SeekFrom) -> Result<u64>;
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e)
};
- match available.position_elem(&delim) {
+ match available.iter().position(|x| *x == delim) {
Some(i) => {
buf.push_all(&available[..i + 1]);
(true, i + 1)
}
}
-/// A `BufRead` is a type of reader which has some form of internal buffering to
-/// allow certain kinds of reading operations to be more optimized than others.
+/// A `BufRead` is a type of `Read`er which has an internal buffer, allowing it
+/// to perform additional kinds of reading efficiently.
+///
+/// For example, reading line-by-line is inefficient without using a buffer, so
+/// if you want to read by line, you'll need `BufRead`, which includes a
+/// [`read_line()`][readline] method as well as a [`lines()`][lines] iterator.
+///
+/// [readline]: #method.read_line
+/// [lines]: #method.lines
+///
+/// # Examples
+///
+/// A locked standard input implements `BufRead`:
+///
+/// ```
+/// use std::io;
+/// use std::io::prelude::*;
+///
+/// let stdin = io::stdin();
+/// for line in stdin.lock().lines() {
+/// println!("{}", line.unwrap());
+/// }
+/// ```
+///
+/// If you have something that implements `Read`, you can use the [`BufReader`
+/// type][bufreader] to turn it into a `BufRead`.
+///
+/// For example, [`File`][file] implements `Read`, but not `BufRead`.
+/// `BufReader` to the rescue!
///
-/// This type extends the `Read` trait with a few methods that are not
-/// possible to reasonably implement with purely a read interface.
+/// [bufreader]: struct.BufReader.html
+/// [file]: ../std/fs/struct.File.html
+///
+/// ```
+/// use std::io::{self, BufReader};
+/// use std::io::prelude::*;
+/// use std::fs::File;
+///
+/// # fn foo() -> io::Result<()> {
+/// let f = try!(File::open("foo.txt"));
+/// let f = BufReader::new(f);
+///
+/// for line in f.lines() {
+/// println!("{}", line.unwrap());
+/// }
+///
+/// # Ok(())
+/// # }
+/// ```
///
-/// You can use the [`BufReader` wrapper type](struct.BufReader.html) to turn any
-/// reader into a buffered reader.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BufRead: Read {
/// Fills the internal buffer of this object, returning the buffer contents.
///
- /// None of the contents will be "read" in the sense that later calling
- /// `read` may return the same contents.
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`consume`][consume] method to function properly. When calling this
+ /// method, none of the contents will be "read" in the sense that later
+ /// calling `read` may return the same contents. As such, `consume` must be
+ /// called with the number of bytes that are consumed from this buffer to
+ /// ensure that the bytes are never returned twice.
///
- /// The `consume` function must be called with the number of bytes that are
- /// consumed from this buffer returned to ensure that the bytes are never
- /// returned twice.
+ /// [consume]: #tymethod.consume
///
/// An empty buffer returned indicates that the stream has reached EOF.
///
///
/// This function will return an I/O error if the underlying reader was
/// read, but returned an error.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`:
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ /// let mut stdin = stdin.lock();
+ ///
+ /// // we can't have two `&mut` references to `stdin`, so use a block
+ /// // to end the borrow early.
+ /// let length = {
+ /// let buffer = stdin.fill_buf().unwrap();
+ ///
+ /// // work with buffer
+ /// println!("{:?}", buffer);
+ ///
+ /// buffer.len()
+ /// };
+ ///
+ /// // ensure the bytes we worked with aren't returned again later
+ /// stdin.consume(length);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn fill_buf(&mut self) -> Result<&[u8]>;
/// Tells this buffer that `amt` bytes have been consumed from the buffer,
/// so they should no longer be returned in calls to `read`.
///
- /// This function does not perform any I/O, it simply informs this object
- /// that some amount of its buffer, returned from `fill_buf`, has been
- /// consumed and should no longer be returned.
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`fill_buf`][fillbuf] method to function properly. This function does
+ /// not perform any I/O, it simply informs this object that some amount of
+ /// its buffer, returned from `fill_buf`, has been consumed and should no
+ /// longer be returned. As such, this function may do odd things if
+ /// `fill_buf` isn't called before calling it.
+ ///
+    /// [fillbuf]: #tymethod.fill_buf
+ ///
+ /// The `amt` must be `<=` the number of bytes in the buffer returned by
+ /// `fill_buf`.
///
- /// This function is used to tell the buffer how many bytes you've consumed
- /// from the return value of `fill_buf`, and so may do odd things if
- /// `fill_buf` isn't called before calling this.
+ /// # Examples
///
- /// The `amt` must be `<=` the number of bytes in the buffer returned by `fill_buf`.
+ /// Since `consume()` is meant to be used with [`fill_buf()`][fillbuf],
+ /// that method's example includes an example of `consume()`.
#[stable(feature = "rust1", since = "1.0.0")]
fn consume(&mut self, amt: usize);
- /// Read all bytes until the delimiter `byte` is reached.
+ /// Read all bytes into `buf` until the delimiter `byte` is reached.
///
- /// This function will continue to read (and buffer) bytes from the
- /// underlying stream until the delimiter or EOF is found. Once found, all
- /// bytes up to, and including, the delimiter (if found) will be appended to
- /// `buf`.
+ /// This function will read bytes from the underlying stream until the
+ /// delimiter or EOF is found. Once found, all bytes up to, and including,
+ /// the delimiter (if found) will be appended to `buf`.
///
- /// If this buffered reader is currently at EOF, then this function will not
- /// place any more bytes into `buf` and will return `Ok(n)` where `n` is the
- /// number of bytes which were read.
+ /// If this reader is currently at EOF then this function will not modify
+ /// `buf` and will return `Ok(n)` where `n` is the number of bytes which
+ /// were read.
///
/// # Errors
///
///
/// If an I/O error is encountered then all bytes read so far will be
/// present in `buf` and its length will have been adjusted appropriately.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`. In this example, we'll
+ /// read from standard input until we see an `a` byte.
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+    /// # fn foo() -> io::Result<()> {
+ /// let stdin = io::stdin();
+ /// let mut stdin = stdin.lock();
+ /// let mut buffer = Vec::new();
+ ///
+ /// try!(stdin.read_until(b'a', &mut buffer));
+ ///
+ /// println!("{:?}", buffer);
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize> {
read_until(self, byte, buf)
}
- /// Read all bytes until a newline (the 0xA byte) is reached, and
- /// append them to the provided buffer.
+ /// Read all bytes until a newline (the 0xA byte) is reached, and append
+ /// them to the provided buffer.
///
- /// This function will continue to read (and buffer) bytes from the
- /// underlying stream until the newline delimiter (the 0xA byte) or EOF is
- /// found. Once found, all bytes up to, and including, the delimiter (if
- /// found) will be appended to `buf`.
+ /// This function will read bytes from the underlying stream until the
+ /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes
+ /// up to, and including, the delimiter (if found) will be appended to
+ /// `buf`.
///
/// If this reader is currently at EOF then this function will not modify
/// `buf` and will return `Ok(n)` where `n` is the number of bytes which
/// return an error if the read bytes are not valid UTF-8. If an I/O error
/// is encountered then `buf` may contain some bytes already read in the
/// event that all data read so far was valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`. In this example, we'll
+ /// read all of the lines from standard input. If we were to do this in
+ /// an actual project, the [`lines()`][lines] method would be easier, of
+ /// course.
+ ///
+ /// [lines]: #method.lines
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ /// let mut stdin = stdin.lock();
+ /// let mut buffer = String::new();
+ ///
+ /// while stdin.read_line(&mut buffer).unwrap() > 0 {
+ /// // work with buffer
+ /// println!("{:?}", buffer);
+ ///
+ /// buffer.clear();
+ /// }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_line(&mut self, buf: &mut String) -> Result<usize> {
// Note that we are not calling the `.read_until` method here, but
///
/// This function will yield errors whenever `read_until` would have also
/// yielded an error.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`. In this example, we'll
+ /// read some input from standard input, splitting on commas.
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ ///
+ /// for content in stdin.lock().split(b',') {
+ /// println!("{:?}", content.unwrap());
+ /// }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn split(self, byte: u8) -> Split<Self> where Self: Sized {
Split { buf: self, delim: byte }
/// The iterator returned from this function will yield instances of
/// `io::Result<String>`. Each string returned will *not* have a newline
/// byte (the 0xA byte) at the end.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`:
+ ///
+ /// ```
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ ///
+ /// for line in stdin.lock().lines() {
+ /// println!("{}", line.unwrap());
+ /// }
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn lines(self) -> Lines<Self> where Self: Sized {
Lines { buf: self }
/// A `Write` adaptor which will write data to multiple locations.
///
-/// For more information, see `Write::broadcast`.
+/// This struct is generally created by calling [`broadcast()`][broadcast] on a
+/// writer. Please see the documentation of `broadcast()` for more details.
+///
+/// [broadcast]: trait.Write.html#method.broadcast
#[unstable(feature = "io", reason = "awaiting stability of Write::broadcast")]
pub struct Broadcast<T, U> {
first: T,
}
}
-/// Adaptor to chain together two instances of `Read`.
+/// Adaptor to chain together two readers.
+///
+/// This struct is generally created by calling [`chain()`][chain] on a reader.
+/// Please see the documentation of `chain()` for more details.
///
-/// For more information, see `Read::chain`.
+/// [chain]: trait.Read.html#method.chain
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chain<T, U> {
first: T,
/// Reader adaptor which limits the bytes read from an underlying reader.
///
-/// For more information, see `Read::take`.
+/// This struct is generally created by calling [`take()`][take] on a reader.
+/// Please see the documentation of `take()` for more details.
+///
+/// [take]: trait.Read.html#method.take
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Take<T> {
inner: T,
/// An adaptor which will emit all read data to a specified writer as well.
///
-/// For more information see `Read::tee`
+/// This struct is generally created by calling [`tee()`][tee] on a reader.
+/// Please see the documentation of `tee()` for more details.
+///
+/// [tee]: trait.Read.html#method.tee
#[unstable(feature = "io", reason = "awaiting stability of Read::tee")]
pub struct Tee<R, W> {
reader: R,
}
}
-/// A bridge from implementations of `Read` to an `Iterator` of `u8`.
+/// An iterator over `u8` values of a reader.
+///
+/// This struct is generally created by calling [`bytes()`][bytes] on a reader.
+/// Please see the documentation of `bytes()` for more details.
///
-/// See `Read::bytes` for more information.
+/// [bytes]: trait.Read.html#method.bytes
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Bytes<R> {
inner: R,
}
}
-/// A bridge from implementations of `Read` to an `Iterator` of `char`.
+/// An iterator over the `char`s of a reader.
///
-/// See `Read::chars` for more information.
+/// This struct is generally created by calling [`chars()`][chars] on a reader.
+/// Please see the documentation of `chars()` for more details.
+///
+/// [chars]: trait.Read.html#method.chars
#[unstable(feature = "io", reason = "awaiting stability of Read::chars")]
pub struct Chars<R> {
inner: R,
/// An iterator over the contents of an instance of `BufRead` split on a
/// particular byte.
///
-/// See `BufRead::split` for more information.
+/// This struct is generally created by calling [`split()`][split] on a
+/// `BufRead`. Please see the documentation of `split()` for more details.
+///
+/// [split]: trait.BufRead.html#method.split
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Split<B> {
buf: B,
}
}
-/// An iterator over the lines of an instance of `BufRead` split on a newline
-/// byte.
+/// An iterator over the lines of an instance of `BufRead`.
///
-/// See `BufRead::lines` for more information.
+/// This struct is generally created by calling [`lines()`][lines] on a
+/// `BufRead`. Please see the documentation of `lines()` for more details.
+///
+/// [lines]: trait.BufRead.html#method.lines
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Lines<B> {
buf: B,
use io::prelude::*;
use io;
use super::Cursor;
+ use test;
+ use super::repeat;
#[test]
fn read_until() {
let mut v = Vec::new();
assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
assert_eq!(v, b"1");
+
+ let cap = 1024 * 1024;
+ let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
+ let mut v = Vec::new();
+ let (a, b) = data.split_at(data.len() / 2);
+ assert_eq!(Cursor::new(a).read_to_end(&mut v).unwrap(), a.len());
+ assert_eq!(Cursor::new(b).read_to_end(&mut v).unwrap(), b.len());
+ assert_eq!(v, data);
}
#[test]
let mut buf = [0; 1];
assert_eq!(0, R.take(0).read(&mut buf).unwrap());
}
+
+ #[bench]
+ fn bench_read_to_end(b: &mut test::Bencher) {
+ b.iter(|| {
+ let mut lr = repeat(1).take(10000000);
+ let mut vec = Vec::with_capacity(1024);
+ super::read_to_end(&mut lr, &mut vec);
+ });
+ }
}
//! # #![allow(unused_imports)]
//! use std::io::prelude::*;
//! ```
-//!
-//! This module contains reexports of many core I/O traits such as `Read`,
-//! `Write` and `BufRead`. Structures and functions are not
-//! contained in this module.
#![stable(feature = "rust1", since = "1.0.0")]
use io::{self, BufReader, LineWriter};
use sync::{Arc, Mutex, MutexGuard};
use sys::stdio;
+use sys_common::io::{read_to_end_uninitialized};
use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use libc;
inner: MutexGuard<'a, BufReader<Maybe<StdinRaw>>>,
}
-/// Creates a new handle to the global standard input stream of this process.
+/// Constructs a new handle to the standard input of the current process.
///
-/// The handle returned refers to a globally shared buffer between all threads.
-/// Access is synchronized and can be explicitly controlled with the `lock()`
-/// method.
+/// Each handle returned is a reference to a shared global buffer whose access
+/// is synchronized via a mutex. If you need more explicit control over
+/// locking, see the [lock() method][lock].
+///
+/// [lock]: struct.Stdin.html#method.lock
+///
+/// # Examples
+///
+/// Using implicit synchronization:
+///
+/// ```
+/// use std::io::{self, Read};
+///
+/// # fn foo() -> io::Result<String> {
+/// let mut buffer = String::new();
+/// try!(io::stdin().read_to_string(&mut buffer));
+/// # Ok(buffer)
+/// # }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```
+/// use std::io::{self, Read};
+///
+/// # fn foo() -> io::Result<String> {
+/// let mut buffer = String::new();
+/// let stdin = io::stdin();
+/// let mut handle = stdin.lock();
///
-/// The `Read` trait is implemented for the returned value but the `BufRead`
-/// trait is not due to the global nature of the standard input stream. The
-/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
+/// try!(handle.read_to_string(&mut buffer));
+/// # Ok(buffer)
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
static INSTANCE: Lazy<Mutex<BufReader<Maybe<StdinRaw>>>> = Lazy::new(stdin_init);
///
/// For detailed semantics of this method, see the documentation on
/// `BufRead::read_line`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ ///
+ /// let mut input = String::new();
+ /// match io::stdin().read_line(&mut input) {
+ /// Ok(n) => {
+ /// println!("{} bytes read", n);
+ /// println!("{}", input);
+ /// }
+ /// Err(error) => println!("error: {}", error),
+ /// }
+ /// ```
+ ///
+ /// You can run the example one of two ways:
+ ///
+ /// - Pipe some text to it, e.g. `printf foo | path/to/executable`
+ /// - Give it text interactively by running the executable directly,
+    ///   in which case it will wait for the Enter key to be pressed before
+ /// continuing
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
+ pub fn read_line(&self, buf: &mut String) -> io::Result<usize> {
self.lock().read_line(buf)
}
}
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ unsafe { read_to_end_uninitialized(self, buf) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
inner: ReentrantMutexGuard<'a, RefCell<LineWriter<Maybe<StdoutRaw>>>>,
}
-/// Constructs a new reference to the standard output of the current process.
+/// Constructs a new handle to the standard output of the current process.
///
/// Each handle returned is a reference to a shared global buffer whose access
-/// is synchronized via a mutex. Explicit control over synchronization is
-/// provided via the `lock` method.
+/// is synchronized via a mutex. If you need more explicit control over
+/// locking, see the [lock() method][lock].
+///
+/// [lock]: struct.Stdout.html#method.lock
+///
+/// # Examples
///
-/// The returned handle implements the `Write` trait.
+/// Using implicit synchronization:
+///
+/// ```
+/// use std::io::{self, Write};
+///
+/// # fn foo() -> io::Result<()> {
+/// try!(io::stdout().write(b"hello world"));
+///
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```
+/// use std::io::{self, Write};
+///
+/// # fn foo() -> io::Result<()> {
+/// let stdout = io::stdout();
+/// let mut handle = stdout.lock();
+///
+/// try!(handle.write(b"hello world"));
+///
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<Maybe<StdoutRaw>>>>>
inner: ReentrantMutexGuard<'a, RefCell<Maybe<StderrRaw>>>,
}
-/// Constructs a new reference to the standard error stream of a process.
+/// Constructs a new handle to the standard error of the current process.
+///
+/// This handle is not buffered.
+///
+/// # Examples
+///
+/// Using implicit synchronization:
+///
+/// ```
+/// use std::io::{self, Write};
+///
+/// # fn foo() -> io::Result<()> {
+/// try!(io::stderr().write(b"hello world"));
+///
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```
+/// use std::io::{self, Write};
+///
+/// # fn foo() -> io::Result<()> {
+/// let stderr = io::stderr();
+/// let mut handle = stderr.lock();
///
-/// Each returned handle is synchronized amongst all other handles created from
-/// this function. No handles are buffered, however.
+/// try!(handle.write(b"hello world"));
///
-/// The returned handle implements the `Write` trait.
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
static INSTANCE: Lazy<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> = Lazy::new(stderr_init);
/// This function will return an error immediately if any call to `read` or
/// `write` returns an error. All instances of `ErrorKind::Interrupted` are
/// handled by this function and the underlying operation is retried.
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+///
+/// # fn foo() -> io::Result<()> {
+/// let mut reader: &[u8] = b"hello";
+/// let mut writer: Vec<u8> = vec![];
+///
+/// try!(io::copy(&mut reader, &mut writer));
+///
+/// assert_eq!(reader, &writer[..]);
+/// # Ok(())
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<R: Read, W: Write>(reader: &mut R, writer: &mut W) -> io::Result<u64> {
let mut buf = [0; super::DEFAULT_BUF_SIZE];
}
/// A reader which is always at EOF.
+///
+/// This struct is generally created by calling [`empty()`][empty]. Please see
+/// the documentation of `empty()` for more details.
+///
+/// [empty]: fn.empty.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Empty { _priv: () }
-/// Creates an instance of an empty reader.
+/// Constructs a new handle to an empty reader.
///
/// All reads from the returned reader will return `Ok(0)`.
+///
+/// # Examples
+///
+/// A slightly sad example of not reading anything into a buffer:
+///
+/// ```
+/// use std::io;
+/// use std::io::Read;
+///
+/// # fn foo() -> io::Result<String> {
+/// let mut buffer = String::new();
+/// try!(io::empty().read_to_string(&mut buffer));
+/// # Ok(buffer)
+/// # }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn empty() -> Empty { Empty { _priv: () } }
fn consume(&mut self, _n: usize) {}
}
-/// A reader which infinitely yields one byte.
+/// A reader which yields one byte over and over and over and over and over and...
+///
+/// This struct is generally created by calling [`repeat()`][repeat]. Please
+/// see the documentation of `repeat()` for more details.
+///
+/// [repeat]: fn.repeat.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Repeat { byte: u8 }
}
/// A writer which will move data into the void.
+///
+/// This struct is generally created by calling [`sink()`][sink]. Please
+/// see the documentation of `sink()` for more details.
+///
+/// [sink]: fn.sink.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Sink { _priv: () }
//! # The Rust Standard Library
//!
-//! The Rust Standard Library provides the essential runtime
-//! functionality for building portable Rust software.
+//! The Rust Standard Library is the foundation of portable Rust
+//! software, a set of minimal and battle-tested shared abstractions
+//! for the [broader Rust ecosystem](https://crates.io). It offers
+//! core types, like [`Vec`](vec/index.html)
+//! and [`Option`](option/index.html), library-defined [operations on
+//! language primitives](#primitives), [standard macros](#macros),
+//! [I/O](io/index.html) and [multithreading](thread/index.html), among
+//! [many other
+//! things](#what-is-in-the-standard-library-documentation?).
//!
-//! The rust standard library is available to all rust crates by
-//! default, just as if contained an `extern crate std` import at the
-//! crate root. Therefore the standard library can be accessed in
-//! `use` statements through the path `std`, as in `use std::thread`,
-//! or in expressions through the absolute path `::std`, as in
-//! `::std::thread::sleep_ms(100)`.
+//! `std` is available to all Rust crates by default, just as if each
+//! one contained an `extern crate std` import at the [crate
+//! root][book-crate-root]. Therefore the standard library can be
+//! accessed in [`use`][book-use] statements through the path `std`,
+//! as in [`use std::env`](env/index.html), or in expressions
+//! through the absolute path `::std`, as in
+//! [`::std::env::args()`](env/fn.args.html).
//!
-//! Furthermore, the standard library defines [The Rust
-//! Prelude](prelude/index.html), a small collection of items, mostly
-//! traits, that are imported into and available in every module.
+//! [book-crate-root]: ../book/crates-and-modules.html#basic-terminology:-crates-and-modules
+//! [book-use]: ../book/crates-and-modules.html#importing-modules-with-use
//!
-//! ## What is in the standard library
+//! # How to read this documentation
//!
-//! The standard library is a set of minimal, battle-tested
-//! core types and shared abstractions for the [broader Rust
-//! ecosystem](https://crates.io) to build on.
+//! If you already know the name of what you are looking for the
+//! fastest way to find it is to use the <a href="#"
+//! onclick="focusSearchBar();">search bar</a> at the top of the page.
//!
-//! The [primitive types](#primitives), though not defined in the
-//! standard library, are documented here, as are the predefined
-//! [macros](#macros).
+//! Otherwise, you may want to jump to one of these useful sections:
+//!
+//! * [`std::*` modules](#modules)
+//! * [Primitive types](#primitives)
+//! * [Standard macros](#macros)
+//! * [The Rust Prelude](prelude/index.html)
+//!
+//! If this is your first time, the documentation for the standard
+//! library is written to be casually perused. Clicking on interesting
+//! things should generally lead you to interesting places. Still,
+//! there are important bits you don't want to miss, so read on for a
+//! tour of the standard library and its documentation!
+//!
+//! Once you are familiar with the contents of the standard library
+//! you may begin to find the verbosity of the prose distracting. At
+//! this stage in your development you may want to press the **[-]**
+//! button near the top of the page to collapse it into a more
+//! skimmable view.
+//!
+//! While you are looking at that **[-]** button also notice the
+//! **[src]** button. Rust's API documentation comes with the source
+//! code and you are encouraged to read it. The standard library
+//! source is generally high quality and a peek behind the curtains is
+//! often enlightening.
+//!
+//! # What is in the standard library documentation?
+//!
+//! First of all, The Rust Standard Library is divided into a number
+//! of focused modules, [all listed further down this page](#modules).
+//! These modules are the bedrock upon which all of Rust is forged,
+//! and they have mighty names like [`std::slice`](slice/index.html)
+//! and [`std::cmp`](cmp/index.html). Modules' documentation typically
+//! includes an overview of the module along with examples, and are
+//! a smart place to start familiarizing yourself with the library.
+//!
+//! Second, implicit methods on [primitive
+//! types](../book/primitive-types.html) are documented here. This can
+//! be a source of confusion for two reasons:
+//!
+//! 1. While primitives are implemented by the compiler, the standard
+//! library implements methods directly on the primitive types (and
+//! it is the only library that does so), which are [documented in
+//! the section on primitives](#primitives).
+//! 2. The standard library exports many modules *with the same name
+//! as primitive types*. These define additional items related
+//! to the primitive type, but not the all-important methods.
+//!
+//! So for example there is a [page for the primitive type
+//! `i32`](primitive.i32.html) that lists all the methods that can be
+//! called on 32-bit integers (very useful), and there is a [page for
+//! the module `std::i32`](i32/index.html) that documents the constant
+//! values `MIN` and `MAX` (rarely useful).
+//!
+//! Note the documentation for the primitives
+//! [`str`](primitive.str.html) and [`[T]`](primitive.slice.html)
+//! (also called 'slice'). Many method calls on
+//! [`String`](string/struct.String.html) and
+//! [`Vec`](vec/struct.Vec.html) are actually calls to methods on
+//! `str` and `[T]` respectively, via [deref
+//! coercions](../book/deref-coercions.html).
+//!
+//! Third, the standard library defines [The Rust
+//! Prelude](prelude/index.html), a small collection of items - mostly
+//! traits - that are imported into every module of every crate. The
+//! traits in the prelude are pervasive, making the prelude
+//! documentation a good entry point to learning about the library.
+//!
+//! And finally, the standard library exports a number of standard
+//! macros, and [lists them on this page](#macros) (technically, not
+//! all of the standard macros are defined by the standard library -
+//! some are defined by the compiler - but they are documented here
+//! the same). Like the prelude, the standard macros are imported by
+//! default into all crates.
+//!
+//! # A Tour of The Rust Standard Library
+//!
+//! The rest of this crate documentation is dedicated to pointing
+//! out notable features of The Rust Standard Library.
//!
//! ## Containers and collections
//!
//! [`Iterator`](iter/trait.Iterator.html), which works with the `for`
//! loop to access collections.
//!
-//! The common container type, `Vec`, a growable vector backed by an array,
-//! lives in the [`vec`](vec/index.html) module. Contiguous, unsized regions
-//! of memory, `[T]`, commonly called "slices", and their borrowed versions,
-//! `&[T]`, commonly called "borrowed slices", are built-in types for which the
-//! [`slice`](slice/index.html) module defines many methods.
+//! The standard library exposes 3 common ways to deal with contiguous
+//! regions of memory:
+//!
+//! * [`Vec<T>`](vec/index.html) - A heap-allocated *vector* that is
+//! resizable at runtime.
+//! * [`[T; n]`](primitive.array.html) - An inline *array* with a
+//! fixed size at compile time.
+//! * [`[T]`](primitive.slice.html) - A dynamically sized *slice* into
+//! any other kind of contiguous storage, whether heap-allocated or
+//! not.
+//!
+//! Slices can only be handled through some kind of *pointer*, and as
+//! such come in many flavours such as:
//!
-//! `&str`, a UTF-8 string, is a built-in type, and the standard library
-//! defines methods for it on a variety of traits in the
-//! [`str`](str/index.html) module. Rust strings are immutable;
-//! use the `String` type defined in [`string`](string/index.html)
-//! for a mutable string builder.
+//! * `&[T]` - *shared slice*
+//! * `&mut [T]` - *mutable slice*
+//! * [`Box<[T]>`](boxed/index.html) - *owned slice*
+//!
+//! `str`, a UTF-8 string slice, is a primitive type, and the standard
+//! library defines [many methods for it](primitive.str.html). Rust
+//! `str`s are typically accessed as immutable references: `&str`. Use
+//! the owned `String` type defined in [`string`](string/index.html)
+//! for building and mutating strings.
//!
//! For converting to strings use the [`format!`](fmt/index.html)
//! macro, and for converting from strings use the
//! [`atomic`](sync/atomic/index.html) and
//! [`mpsc`](sync/mpsc/index.html), which contains the channel types
//! for message passing.
+//!
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![feature(borrow_state)]
#![feature(box_raw)]
#![feature(box_syntax)]
+#![feature(char_from_unchecked)]
#![feature(char_internals)]
#![feature(clone_from_slice)]
#![feature(collections)]
#![feature(core_intrinsics)]
#![feature(core_prelude)]
#![feature(core_simd)]
+#![feature(drain)]
#![feature(fnbox)]
#![feature(heap_api)]
#![feature(int_error_internals)]
#![feature(linkage, thread_local, asm)]
#![feature(macro_reexport)]
#![feature(slice_concat_ext)]
-#![feature(slice_position_elem)]
#![feature(no_std)]
#![feature(oom)]
#![feature(optin_builtin_traits)]
+#![feature(placement_in_syntax)]
#![feature(rand)]
#![feature(raw)]
#![feature(reflect_marker)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(vec_push_all)]
+#![feature(vec_resize)]
#![feature(wrapping)]
#![feature(zero_one)]
#![cfg_attr(windows, feature(str_utf16))]
-#![cfg_attr(test, feature(float_from_str_radix, range_inclusive, float_extras))]
+#![cfg_attr(test, feature(float_from_str_radix, range_inclusive, float_extras, hash_default))]
#![cfg_attr(test, feature(test, rustc_private, float_consts))]
+#![cfg_attr(target_env = "msvc", feature(link_args))]
// Don't link to std. We are std.
#![no_std]
#[macro_use] #[no_link] extern crate rustc_bitflags;
-// Make std testable by not duplicating lang items. See #2912
+// Make std testable by not duplicating lang items and other globals. See #2912
#[cfg(test)] extern crate std as realstd;
-#[cfg(test)] pub use realstd::marker;
-#[cfg(test)] pub use realstd::ops;
-#[cfg(test)] pub use realstd::cmp;
-#[cfg(test)] pub use realstd::boxed;
-
// NB: These reexports are in the order they should be listed in rustdoc
pub use core::any;
pub use core::cell;
pub use core::clone;
-#[cfg(not(test))] pub use core::cmp;
+pub use core::cmp;
pub use core::convert;
pub use core::default;
pub use core::hash;
pub use core::intrinsics;
pub use core::iter;
-#[cfg(not(test))] pub use core::marker;
+pub use core::marker;
pub use core::mem;
-#[cfg(not(test))] pub use core::ops;
+pub use core::ops;
pub use core::ptr;
pub use core::raw;
pub use core::simd;
pub use core::option;
pub mod error;
-#[cfg(not(test))] pub use alloc::boxed;
+pub use alloc::boxed;
pub use alloc::rc;
pub use core_collections::borrow;
pub use rand::{thread_rng, ThreadRng, Rng};
}
-// Modules that exist purely to document + host impl docs for primitive types
+// Include a number of private modules that exist solely to provide
+// the rustdoc documentation for primitive types. Using `include!`
+// because rustdoc only looks for these modules at the crate level.
+include!("primitive_docs.rs");
-mod array;
-mod bool;
-mod unit;
-mod tuple;
-
-// A curious inner-module that's not exported that contains the binding
-// 'std' so that macro-expanded references to std::error and such
-// can be resolved within libstd.
-#[doc(hidden)]
+// The expansion of --test has a few references to `::std::$foo` so this module
+// is necessary to get things to compile.
+#[cfg(test)]
mod std {
- pub use sync; // used for select!()
- pub use error; // used for try!()
- pub use fmt; // used for any formatting strings
- pub use option; // used for thread_local!{}
- pub use rt; // used for panic!()
- pub use vec; // used for vec![]
- pub use cell; // used for tls!
- pub use thread; // used for thread_local!
- pub use marker; // used for tls!
-
- // The test runner calls ::std::env::args() but really wants realstd
- #[cfg(test)] pub use realstd::env as env;
- // The test runner requires std::slice::Vector, so re-export std::slice just for it.
- //
- // It is also used in vec![]
- pub use slice;
-
- pub use boxed; // used for vec![]
+ pub use option;
+ pub use realstd::env;
}
//! library. Each macro is available for use when linking against the standard
//! library.
-/// The entry point for panic of Rust threads.
-///
-/// This macro is used to inject panic into a Rust thread, causing the thread to
-/// unwind and panic entirely. Each thread's panic can be reaped as the
-/// `Box<Any>` type, and the single-argument form of the `panic!` macro will be
-/// the value which is transmitted.
-///
-/// The multi-argument form of this macro panics with a string and has the
-/// `format!` syntax for building a string.
-///
-/// # Examples
-///
-/// ```should_panic
-/// # #![allow(unreachable_code)]
-/// panic!();
-/// panic!("this is a terrible mistake!");
-/// panic!(4); // panic with the value of 4 to be collected elsewhere
-/// panic!("this is a {} {message}", "fancy", message = "message");
-/// ```
-#[macro_export]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow_internal_unstable]
/// The entry point for panic of Rust threads.
///
/// This macro is used to inject panic into a Rust thread, causing the thread to
/// # Examples
///
/// ```
-/// # #![feature(mpsc_select)]
+/// #![feature(mpsc_select)]
+///
/// use std::thread;
/// use std::sync::mpsc;
///
/// - 203.0.113.0/24 (TEST-NET-3)
pub fn is_documentation(&self) -> bool {
match(self.octets()[0], self.octets()[1], self.octets()[2], self.octets()[3]) {
- (192, _, 2, _) => true,
+ (192, 0, 2, _) => true,
(198, 51, 100, _) => true,
- (203, _, 113, _) => true,
+ (203, 0, 113, _) => true,
_ => false
}
}
.iter()
.map(|&seg| format!("{:x}", seg))
.collect::<Vec<String>>()
- .connect(":")
+ .join(":")
}
write!(fmt, "{}::{}",
check(&[127, 1, 2, 3], false, true, false, false, false, false, false, false);
check(&[172, 31, 254, 253], false, false, true, false, false, false, false, false);
check(&[169, 254, 253, 242], false, false, false, true, false, false, false, false);
+ check(&[192, 0, 2, 183], false, false, false, false, false, false, false, true);
+ check(&[192, 1, 2, 183], false, false, false, false, true, false, false, false);
check(&[192, 168, 254, 253], false, false, true, false, false, false, false, false);
+ check(&[198, 51, 100, 0], false, false, false, false, false, false, false, true);
+ check(&[203, 0, 113, 0], false, false, false, false, false, false, false, true);
+ check(&[203, 2, 113, 0], false, false, false, false, true, false, false, false);
check(&[224, 0, 0, 0], false, false, false, false, true, true, false, false);
check(&[239, 255, 255, 255], false, false, false, false, true, true, false, false);
- check(&[255, 255, 255, 255], false, false, false, false, false, false, true, false);
- check(&[198, 51, 100, 0], false, false, false, false, false, false, false, true);
+ check(&[255, 255, 255, 255], false, false, false, false, false, false, true, false);
}
#[test]
/// # Examples
///
/// ```no_run
-/// # #![feature(lookup_host)]
+/// #![feature(lookup_host)]
+///
/// use std::net;
///
/// # fn foo() -> std::io::Result<()> {
use fmt;
use io;
use net::{ToSocketAddrs, SocketAddr, Shutdown};
+use sys_common::io::read_to_end_uninitialized;
use sys_common::net as net_imp;
-use sys_common::{AsInner, FromInner};
+use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A structure which represents a TCP stream between a local socket and a
}
/// Sets the nodelay flag on this connection to the boolean specified.
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "tcp_extras", reason = "available externally")]
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
self.0.set_nodelay(nodelay)
}
/// If the value specified is `None`, then the keepalive flag is cleared on
/// this connection. Otherwise, the keepalive timeout will be set to the
/// specified time, in seconds.
+ #[unstable(feature = "tcp_extras", reason = "available externally")]
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
pub fn set_keepalive(&self, seconds: Option<u32>) -> io::Result<()> {
self.0.set_keepalive(seconds)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ unsafe { read_to_end_uninitialized(self, buf) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for TcpStream {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for &'a TcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ unsafe { read_to_end_uninitialized(self, buf) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for &'a TcpStream {
fn from_inner(inner: net_imp::TcpStream) -> TcpStream { TcpStream(inner) }
}
+impl IntoInner<net_imp::TcpStream> for TcpStream {
+ fn into_inner(self) -> net_imp::TcpStream { self.0 }
+}
+
impl fmt::Debug for TcpStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
+impl IntoInner<net_imp::TcpListener> for TcpListener {
+ fn into_inner(self) -> net_imp::TcpListener { self.0 }
+}
+
impl fmt::Debug for TcpListener {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
// FIXME: re-enabled bitrig/openbsd tests once their socket timeout code
// no longer has rounding errors.
- #[cfg_attr(any(target_os = "bitrig", target_os = "openbsd"), ignore)]
+ #[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
#[test]
fn timeouts() {
let addr = next_test_ip4();
use io::{self, Error, ErrorKind};
use net::{ToSocketAddrs, SocketAddr, IpAddr};
use sys_common::net as net_imp;
-use sys_common::{AsInner, FromInner};
+use sys_common::{AsInner, FromInner, IntoInner};
use time::Duration;
/// A User Datagram Protocol socket.
}
/// Sets the broadcast flag on or off.
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
self.0.set_broadcast(on)
}
/// Sets the multicast loop flag to the specified value.
///
/// This lets multicast packets loop back to local sockets (if enabled)
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn set_multicast_loop(&self, on: bool) -> io::Result<()> {
self.0.set_multicast_loop(on)
}
/// Joins a multicast IP address (becomes a member of it).
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn join_multicast(&self, multi: &IpAddr) -> io::Result<()> {
self.0.join_multicast(multi)
}
/// Leaves a multicast IP address (drops membership from it).
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn leave_multicast(&self, multi: &IpAddr) -> io::Result<()> {
self.0.leave_multicast(multi)
}
/// Sets the multicast TTL.
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn set_multicast_time_to_live(&self, ttl: i32) -> io::Result<()> {
self.0.multicast_time_to_live(ttl)
}
/// Sets this socket's TTL.
+ #[deprecated(since = "1.3.0",
+ reason = "available through the `net2` crate on crates.io")]
+ #[unstable(feature = "udp_extras", reason = "available externally")]
pub fn set_time_to_live(&self, ttl: i32) -> io::Result<()> {
self.0.time_to_live(ttl)
}
fn from_inner(inner: net_imp::UdpSocket) -> UdpSocket { UdpSocket(inner) }
}
+impl IntoInner<net_imp::UdpSocket> for UdpSocket {
+ fn into_inner(self) -> net_imp::UdpSocket { self.0 }
+}
+
impl fmt::Debug for UdpSocket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
assert_eq!(format!("{:?}", udpsock), compare);
}
- // FIXME: re-enabled bitrig/openbsd tests once their socket timeout code
+ // FIXME: re-enabled bitrig/openbsd/netbsd tests once their socket timeout code
// no longer has rounding errors.
- #[cfg_attr(any(target_os = "bitrig", target_os = "openbsd"), ignore)]
+ #[cfg_attr(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"), ignore)]
#[test]
fn timeouts() {
let addr = next_test_ip4();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for 32-bits floats (`f32` type)
+//! The 32-bit floating point type.
+//!
+//! *[See also the `f32` primitive type](../primitive.f32.html).*
#![stable(feature = "rust1", since = "1.0.0")]
#![allow(missing_docs)]
-#![allow(unsigned_negation)]
-#![doc(primitive = "f32")]
use prelude::v1::*;
use core::num;
+#[cfg(not(target_env = "msvc"))]
use intrinsics;
use libc::c_int;
use num::{FpCategory, ParseFloatError};
use libc::{c_float, c_int};
extern {
- pub fn acosf(n: c_float) -> c_float;
- pub fn asinf(n: c_float) -> c_float;
- pub fn atanf(n: c_float) -> c_float;
- pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn cbrtf(n: c_float) -> c_float;
- pub fn coshf(n: c_float) -> c_float;
pub fn erff(n: c_float) -> c_float;
pub fn erfcf(n: c_float) -> c_float;
pub fn expm1f(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
- pub fn sinhf(n: c_float) -> c_float;
- pub fn tanf(n: c_float) -> c_float;
- pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
#[cfg_attr(all(windows, target_env = "msvc"), link_name = "__lgammaf_r")]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg_attr(all(windows, target_env = "msvc"), link_name = "_hypotf")]
pub fn hypotf(x: c_float, y: c_float) -> c_float;
+ }
- #[cfg(any(unix, all(windows, not(target_env = "msvc"))))]
+ // See the comments in `core::float::Float::floor` for why MSVC is special
+ // here.
+ #[cfg(not(target_env = "msvc"))]
+ extern {
+ pub fn acosf(n: c_float) -> c_float;
+ pub fn asinf(n: c_float) -> c_float;
+ pub fn atan2f(a: c_float, b: c_float) -> c_float;
+ pub fn atanf(n: c_float) -> c_float;
+ pub fn coshf(n: c_float) -> c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
- #[cfg(any(unix, all(windows, not(target_env = "msvc"))))]
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
+ pub fn sinhf(n: c_float) -> c_float;
+ pub fn tanf(n: c_float) -> c_float;
+ pub fn tanhf(n: c_float) -> c_float;
}
- #[cfg(all(windows, target_env = "msvc"))]
- pub unsafe fn ldexpf(x: c_float, n: c_int) -> c_float {
- f64::ldexp(x as f64, n as isize) as c_float
- }
+ #[cfg(target_env = "msvc")]
+ pub use self::shims::*;
+ #[cfg(target_env = "msvc")]
+ mod shims {
+ use libc::{c_float, c_int};
+
+ pub unsafe fn acosf(n: c_float) -> c_float {
+ f64::acos(n as f64) as c_float
+ }
+
+ pub unsafe fn asinf(n: c_float) -> c_float {
+ f64::asin(n as f64) as c_float
+ }
- #[cfg(all(windows, target_env = "msvc"))]
- pub unsafe fn frexpf(x: c_float, value: &mut c_int) -> c_float {
- let (a, b) = f64::frexp(x as f64);
- *value = b as c_int;
- a as c_float
+ pub unsafe fn atan2f(n: c_float, b: c_float) -> c_float {
+ f64::atan2(n as f64, b as f64) as c_float
+ }
+
+ pub unsafe fn atanf(n: c_float) -> c_float {
+ f64::atan(n as f64) as c_float
+ }
+
+ pub unsafe fn coshf(n: c_float) -> c_float {
+ f64::cosh(n as f64) as c_float
+ }
+
+ pub unsafe fn frexpf(x: c_float, value: &mut c_int) -> c_float {
+ let (a, b) = f64::frexp(x as f64);
+ *value = b as c_int;
+ a as c_float
+ }
+
+ pub unsafe fn ldexpf(x: c_float, n: c_int) -> c_float {
+ f64::ldexp(x as f64, n as isize) as c_float
+ }
+
+ pub unsafe fn sinhf(n: c_float) -> c_float {
+ f64::sinh(n as f64) as c_float
+ }
+
+ pub unsafe fn tanf(n: c_float) -> c_float {
+ f64::tan(n as f64) as c_float
+ }
+
+ pub unsafe fn tanhf(n: c_float) -> c_float {
+ f64::tanh(n as f64) as c_float
+ }
}
}
/// The floating point encoding is documented in the [Reference][floating-point].
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32;
///
/// let num = 2.0f32;
/// Converts radians to degrees.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32::{self, consts};
///
/// let angle = consts::PI;
/// Converts degrees to radians.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32::{self, consts};
///
/// let angle = 180.0f32;
/// Constructs a floating point number of `x*2^exp`.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32;
/// // 3*2^2 - 12 == 0
/// let abs_difference = (f32::ldexp(3.0, 2) - 12.0).abs();
/// * `0.5 <= abs(x) < 1.0`
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32;
///
/// let x = 4.0f32;
/// `other`.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// use std::f32;
///
/// let x = 1.0f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn sin(self) -> f32 {
- unsafe { intrinsics::sinf32(self) }
+ return sinf(self);
+
+ // see notes in `core::f32::Float::floor`
+ #[cfg(target_env = "msvc")]
+ fn sinf(f: f32) -> f32 { (f as f64).sin() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn sinf(f: f32) -> f32 { unsafe { intrinsics::sinf32(f) } }
}
/// Computes the cosine of a number (in radians).
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn cos(self) -> f32 {
- unsafe { intrinsics::cosf32(self) }
+ return cosf(self);
+
+ // see notes in `core::f32::Float::floor`
+ #[cfg(target_env = "msvc")]
+ fn cosf(f: f32) -> f32 { (f as f64).cos() as f32 }
+ #[cfg(not(target_env = "msvc"))]
+ fn cosf(f: f32) -> f32 { unsafe { intrinsics::cosf32(f) } }
}
/// Computes the tangent of a number (in radians).
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for 64-bits floats (`f64` type)
+//! The 64-bit floating point type.
+//!
+//! *[See also the `f64` primitive type](../primitive.f64.html).*
#![stable(feature = "rust1", since = "1.0.0")]
#![allow(missing_docs)]
-#![doc(primitive = "f64")]
use prelude::v1::*;
/// The floating point encoding is documented in the [Reference][floating-point].
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// let num = 2.0f64;
///
/// // (8388608, -22, 1)
/// Constructs a floating point number of `x*2^exp`.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// // 3*2^2 - 12 == 0
/// let abs_difference = (f64::ldexp(3.0, 2) - 12.0).abs();
///
/// * `0.5 <= abs(x) < 1.0`
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
+ ///
/// let x = 4.0_f64;
///
/// // (1/2)*2^3 -> 1 * 8/2 -> 4.0
/// `other`.
///
/// ```
- /// # #![feature(float_extras)]
+ /// #![feature(float_extras)]
///
/// let x = 1.0f32;
///
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 16-bits integers (`i16` type)
+//! The 16-bit signed integer type.
+//!
+//! *[See also the `i16` primitive type](../primitive.i16.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i16")]
pub use core::i16::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 32-bits integers (`i32` type)
+//! The 32-bit signed integer type.
+//!
+//! *[See also the `i32` primitive type](../primitive.i32.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i32")]
pub use core::i32::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 64-bits integers (`i64` type)
+//! The 64-bit signed integer type.
+//!
+//! *[See also the `i64` primitive type](../primitive.i64.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i64")]
pub use core::i64::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 8-bits integers (`i8` type)
+//! The 8-bit signed integer type.
+//!
+//! *[See also the `i8` primitive type](../primitive.i8.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "i8")]
pub use core::i8::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for pointer-sized signed integers (`isize` type)
+//! The pointer-sized signed integer type.
+//!
+//! *[See also the `isize` primitive type](../primitive.isize.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "isize")]
pub use core::isize::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 16-bits integers (`u16` type)
+//! The 16-bit unsigned integer type.
+//!
+//! *[See also the `u16` primitive type](../primitive.u16.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u16")]
pub use core::u16::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 32-bits integers (`u32` type)
+//! The 32-bit unsigned integer type.
+//!
+//! *[See also the `u32` primitive type](../primitive.u32.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u32")]
pub use core::u32::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 64-bits integer (`u64` type)
+//! The 64-bit unsigned integer type.
+//!
+//! *[See also the `u64` primitive type](../primitive.u64.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u64")]
pub use core::u64::{BITS, BYTES, MIN, MAX};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 8-bits integers (`u8` type)
+//! The 8-bit unsigned integer type.
+//!
+//! *[See also the `u8` primitive type](../primitive.u8.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "u8")]
pub use core::u8::{BITS, BYTES, MIN, MAX};
// except according to those terms.
#![doc(hidden)]
-#![allow(unsigned_negation)]
macro_rules! uint_module { ($T:ident) => (
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for pointer-sized unsigned integers (`usize` type)
+//! The pointer-sized unsigned integer type.
+//!
+//! *[See also the `usize` primitive type](../primitive.usize.html).*
#![stable(feature = "rust1", since = "1.0.0")]
-#![doc(primitive = "usize")]
pub use core::usize::{BITS, BYTES, MIN, MAX};
#![stable(feature = "raw_ext", since = "1.1.0")]
-use os::raw::c_long;
-use os::unix::raw::{uid_t, gid_t};
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u16;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u16;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type fflags_t = u32;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type blkcnt_t = i64;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type blksize_t = i64;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type dev_t = u32;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type fflags_t = u32;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type ino_t = u32;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type mode_t = u16;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type nlink_t = u16;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type off_t = i64;
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub type time_t = i64;
+#[doc(inline)]
+pub use self::arch::{stat, time_t};
-#[repr(C)]
-#[stable(feature = "raw_ext", since = "1.1.0")]
-pub struct stat {
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_dev: dev_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ino: ino_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mode: mode_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_nlink: nlink_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_uid: uid_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gid: gid_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_rdev: dev_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_size: off_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blocks: blkcnt_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blksize: blksize_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_flags: fflags_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gen: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_lspare: i32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub __unused: [u8; 2],
+#[cfg(target_arch = "x86")]
+mod arch {
+ use super::{off_t, dev_t, ino_t, mode_t, nlink_t, blksize_t, blkcnt_t, fflags_t};
+ use os::raw::c_long;
+ use os::unix::raw::{uid_t, gid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i32;
+
+ #[repr(C)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: fflags_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [u8; 8],
+ }
}
+
+#[cfg(target_arch = "x86_64")]
+mod arch {
+ use super::{off_t, dev_t, ino_t, mode_t, nlink_t, blksize_t, blkcnt_t, fflags_t};
+ use os::raw::c_long;
+ use os::unix::raw::{uid_t, gid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64;
+
+ #[repr(C)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: fflags_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ }
+}
+
+
#[cfg(target_os = "linux")] pub mod linux;
#[cfg(target_os = "macos")] pub mod macos;
#[cfg(target_os = "nacl")] pub mod nacl;
+#[cfg(target_os = "netbsd")] pub mod netbsd;
#[cfg(target_os = "openbsd")] pub mod openbsd;
pub mod raw;
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! OpenBSD-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod raw;
+
+pub mod fs {
+ #![stable(feature = "raw_ext", since = "1.1.0")]
+ pub use sys::fs::MetadataExt;
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! NetBSD/OpenBSD-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+use os::raw::c_long;
+use os::unix::raw::{uid_t, gid_t};
+
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64;
+
+#[repr(C)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: fflags_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+}
return Some(VerbatimUNC(server, share));
} else {
// \\?\path
- let idx = path.position_elem(&b'\\');
+ let idx = path.iter().position(|&b| b == b'\\');
if idx == Some(2) && path[1] == b':' {
let c = path[0];
if c.is_ascii() && (c as char).is_alphabetic() {
} else if path.starts_with(b".\\") {
// \\.\path
path = &path[2..];
- let slice = &path[.. path.position_elem(&b'\\').unwrap_or(path.len())];
+ let pos = path.iter().position(|&b| b == b'\\');
+ let slice = &path[..pos.unwrap_or(path.len())];
return Some(DeviceNS(u8_slice_as_os_str(slice)));
}
match parse_two_comps(path, is_sep_byte) {
//! The Rust Prelude
//!
//! Because `std` is required by most serious Rust software, it is
-//! imported at the topmost level of every crate by default, as if the
-//! first line of each crate was
+//! imported at the topmost level of every crate by default, as if
+//! each crate contains the following:
//!
//! ```ignore
//! extern crate std;
//! with the `std::` path prefix, as in `use std::vec`, `use std::thread::spawn`,
//! etc.
//!
-//! Additionally, `std` contains a `prelude` module that reexports many of the
-//! most common traits, types and functions. The contents of the prelude are
-//! imported into every *module* by default. Implicitly, all modules behave as if
-//! they contained the following prologue:
+//! Additionally, `std` contains a versioned *prelude* that reexports many of the
+//! most common traits, types, and functions. *The contents of the prelude are
+//! imported into every module by default*. Implicitly, all modules behave as if
+//! they contained the following [`use` statement][book-use]:
+//!
+//! [book-use]: ../../book/crates-and-modules.html#importing-modules-with-use
//!
//! ```ignore
//! use std::prelude::v1::*;
//! ```
//!
-//! The prelude is primarily concerned with exporting *traits* that are so
-//! pervasive that it would be obnoxious to import for every use, particularly
-//! those that define methods on primitive types.
+//! The prelude is primarily concerned with exporting *traits* that
+//! are so pervasive that they would be onerous to import for every use,
+//! particularly those that are commonly mentioned in [generic type
+//! bounds][book-traits].
+//!
+//! The current version of the prelude (version 1) lives in
+//! [`std::prelude::v1`](v1/index.html), and reexports the following.
+//!
+//! * `std::marker::`{
+//! [`Copy`](../marker/trait.Copy.html),
+//! [`Send`](../marker/trait.Send.html),
+//! [`Sized`](../marker/trait.Sized.html),
+//! [`Sync`](../marker/trait.Sync.html)
+//! }.
+//! The marker traits indicate fundamental properties of types.
+//! * `std::ops::`{
+//! [`Drop`](../ops/trait.Drop.html),
+//! [`Fn`](../ops/trait.Fn.html),
+//! [`FnMut`](../ops/trait.FnMut.html),
+//! [`FnOnce`](../ops/trait.FnOnce.html)
+//! }.
+//! The [destructor][book-dtor] trait and the
+//! [closure][book-closures] traits, reexported from the same
+//! [module that also defines overloaded
+//! operators](../ops/index.html).
+//! * `std::mem::`[`drop`](../mem/fn.drop.html).
+//! A convenience function for explicitly dropping a value.
+//! * `std::boxed::`[`Box`](../boxed/struct.Box.html).
+//! The owned heap pointer.
+//! * `std::borrow::`[`ToOwned`](../borrow/trait.ToOwned.html).
+//! The conversion trait that defines `to_owned`, the generic method
+//! for creating an owned type from a borrowed type.
+//! * `std::clone::`[`Clone`](../clone/trait.Clone.html).
+//! The ubiquitous trait that defines `clone`, the method for
+//! producing copies of values that are considered expensive to copy.
+//! * `std::cmp::`{
+//! [`PartialEq`](../cmp/trait.PartialEq.html),
+//! [`PartialOrd`](../cmp/trait.PartialOrd.html),
+//! [`Eq`](../cmp/trait.Eq.html),
+//! [`Ord`](../cmp/trait.Ord.html)
+//! }.
+//! The comparison traits, which implement the comparison operators
+//! and are often seen in trait bounds.
+//! * `std::convert::`{
+//! [`AsRef`](../convert/trait.AsRef.html),
+//! [`AsMut`](../convert/trait.AsMut.html),
+//! [`Into`](../convert/trait.Into.html),
+//! [`From`](../convert/trait.From.html)
+//! }.
+//! Generic conversions, used by savvy API authors to create
+//! overloaded methods.
+//! * `std::default::`[`Default`](../default/trait.Default.html).
+//! Types that have default values.
+//! * `std::iter::`{
+//! [`Iterator`](../iter/trait.Iterator.html),
+//! [`Extend`](../iter/trait.Extend.html),
+//! [`IntoIterator`](../iter/trait.IntoIterator.html),
+//! [`DoubleEndedIterator`](../iter/trait.DoubleEndedIterator.html),
+//! [`ExactSizeIterator`](../iter/trait.ExactSizeIterator.html)
+//! }.
+//! [Iterators][book-iter].
+//! * `std::option::Option::`{
+//! [`self`](../option/enum.Option.html),
+//! [`Some`](../option/enum.Option.html),
+//! [`None`](../option/enum.Option.html)
+//! }.
+//! The ubiquitous `Option` type and its two [variants][book-enums],
+//! `Some` and `None`.
+//! * `std::result::Result::`{
+//! [`self`](../result/enum.Result.html),
+//! [`Ok`](../result/enum.Result.html),
+//! [`Err`](../result/enum.Result.html)
+//! }.
+//! The ubiquitous `Result` type and its two [variants][book-enums],
+//! `Ok` and `Err`.
+//! * `std::slice::`[`SliceConcatExt`](../slice/trait.SliceConcatExt.html).
+//! An unstable extension to slices that shouldn't have to exist.
+//! * `std::string::`{
+//! [`String`](../string/struct.String.html),
+//! [`ToString`](../string/trait.ToString.html)
+//! }.
+//! Heap allocated strings.
+//! * `std::vec::`[`Vec`](../vec/struct.Vec.html).
+//! Heap allocated vectors.
+//!
+//! [book-traits]: ../../book/traits.html
+//! [book-closures]: ../../book/closures.html
+//! [book-dtor]: ../../book/drop.html
+//! [book-iter]: ../../book/iterators.html
+//! [book-enums]: ../../book/enums.html
#![stable(feature = "rust1", since = "1.0.0")]
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[doc(primitive = "bool")]
+//
+/// The boolean type.
+///
+mod prim_bool { }
+
+#[doc(primitive = "char")]
+//
+/// A Unicode scalar value.
+///
+/// A `char` represents a
+/// *[Unicode scalar
+/// value](http://www.unicode.org/glossary/#unicode_scalar_value)*, as it can
+/// contain any Unicode code point except high-surrogate and low-surrogate code
+/// points.
+///
+/// As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\]
+/// (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
+/// however the converse is not always true due to the above range limits
+/// and, as such, should be performed via the `from_u32` function.
+///
+/// *[See also the `std::char` module](char/index.html).*
+///
+mod prim_char { }
+
+#[doc(primitive = "unit")]
+//
+/// The `()` type, sometimes called "unit" or "nil".
+///
+/// The `()` type has exactly one value `()`, and is used when there
+/// is no other meaningful value that could be returned. `()` is most
+/// commonly seen implicitly: functions without a `-> ...` implicitly
+/// have return type `()`, that is, these are equivalent:
+///
+/// ```rust
+/// fn long() -> () {}
+///
+/// fn short() {}
+/// ```
+///
+/// The semicolon `;` can be used to discard the result of an
+/// expression at the end of a block, making the expression (and thus
+/// the block) evaluate to `()`. For example,
+///
+/// ```rust
+/// fn returns_i64() -> i64 {
+/// 1i64
+/// }
+/// fn returns_unit() {
+/// 1i64;
+/// }
+///
+/// let is_i64 = {
+/// returns_i64()
+/// };
+/// let is_unit = {
+/// returns_i64();
+/// };
+/// ```
+///
+mod prim_unit { }
+
+#[doc(primitive = "pointer")]
+//
+/// Raw, unsafe pointers, `*const T`, and `*mut T`.
+///
+/// Working with raw pointers in Rust is uncommon,
+/// typically limited to a few patterns.
+///
+/// Use the `null` function to create null pointers, and the `is_null` method
+/// of the `*const T` type to check for null. The `*const T` type also defines
+/// the `offset` method, for pointer math.
+///
+/// # Common ways to create raw pointers
+///
+/// ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
+///
+/// ```
+/// let my_num: i32 = 10;
+/// let my_num_ptr: *const i32 = &my_num;
+/// let mut my_speed: i32 = 88;
+/// let my_speed_ptr: *mut i32 = &mut my_speed;
+/// ```
+///
+/// To get a pointer to a boxed value, dereference the box:
+///
+/// ```
+/// let my_num: Box<i32> = Box::new(10);
+/// let my_num_ptr: *const i32 = &*my_num;
+/// let mut my_speed: Box<i32> = Box::new(88);
+/// let my_speed_ptr: *mut i32 = &mut *my_speed;
+/// ```
+///
+/// This does not take ownership of the original allocation
+/// and requires no resource management later,
+/// but you must not use the pointer after its lifetime.
+///
+/// ## 2. Consume a box (`Box<T>`).
+///
+/// The `into_raw` function consumes a box and returns
+/// the raw pointer. It doesn't destroy `T` or deallocate any memory.
+///
+/// ```
+/// #![feature(box_raw)]
+///
+/// let my_speed: Box<i32> = Box::new(88);
+/// let my_speed: *mut i32 = Box::into_raw(my_speed);
+///
+/// // By taking ownership of the original `Box<T>` though
+/// // we are obligated to put it together later to be destroyed.
+/// unsafe {
+/// drop(Box::from_raw(my_speed));
+/// }
+/// ```
+///
+/// Note that here the call to `drop` is for clarity - it indicates
+/// that we are done with the given value and it should be destroyed.
+///
+/// ## 3. Get it from C.
+///
+/// ```
+/// # #![feature(libc)]
+/// extern crate libc;
+///
+/// use std::mem;
+///
+/// fn main() {
+/// unsafe {
+/// let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>() as libc::size_t) as *mut i32;
+/// if my_num.is_null() {
+/// panic!("failed to allocate memory");
+/// }
+/// libc::free(my_num as *mut libc::c_void);
+/// }
+/// }
+/// ```
+///
+/// Usually you wouldn't literally use `malloc` and `free` from Rust,
+/// but C APIs hand out a lot of pointers generally, so are a common source
+/// of raw pointers in Rust.
+///
+/// *[See also the `std::ptr` module](ptr/index.html).*
+///
+mod prim_pointer { }
+
+#[doc(primitive = "array")]
+//
+/// A fixed-size array, denoted `[T; N]`, for the element type, `T`, and
+/// the non-negative compile time constant size, `N`.
+///
+/// Array values are created either with an explicit expression that lists
+/// each element: `[x, y, z]` or a repeat expression: `[x; N]`. The repeat
+/// expression requires that the element type is `Copy`.
+///
+/// The type `[T; N]` is `Copy` if `T: Copy`.
+///
+/// Arrays of sizes from 0 to 32 (inclusive) implement the following traits
+/// if the element type allows it:
+///
+/// - `Clone`
+/// - `Debug`
+/// - `IntoIterator` (implemented for `&[T; N]` and `&mut [T; N]`)
+/// - `PartialEq`, `PartialOrd`, `Ord`, `Eq`
+/// - `Hash`
+/// - `AsRef`, `AsMut`
+///
+/// Arrays dereference to [slices (`[T]`)][slice], so their methods can be called
+/// on arrays.
+///
+/// [slice]: primitive.slice.html
+///
+/// Rust does not currently support generics over the size of an array type.
+///
+/// # Examples
+///
+/// ```
+/// let mut array: [i32; 3] = [0; 3];
+///
+/// array[1] = 1;
+/// array[2] = 2;
+///
+/// assert_eq!([1, 2], &array[1..]);
+///
+/// // This loop prints: 0 1 2
+/// for x in &array {
+/// print!("{} ", x);
+/// }
+///
+/// ```
+///
+mod prim_array { }
+
+#[doc(primitive = "slice")]
+//
+/// A dynamically-sized view into a contiguous sequence, `[T]`.
+///
+/// Slices are a view into a block of memory represented as a pointer and a
+/// length.
+///
+/// ```
+/// // slicing a Vec
+/// let vec = vec![1, 2, 3];
+/// let int_slice = &vec[..];
+/// // coercing an array to a slice
+/// let str_slice: &[&str] = &["one", "two", "three"];
+/// ```
+///
+/// Slices are either mutable or shared. The shared slice type is `&[T]`,
+/// while the mutable slice type is `&mut [T]`, where `T` represents the element
+/// type. For example, you can mutate the block of memory that a mutable slice
+/// points to:
+///
+/// ```
+/// let x = &mut [1, 2, 3];
+/// x[1] = 7;
+/// assert_eq!(x, &[1, 7, 3]);
+/// ```
+///
+/// *[See also the `std::slice` module](slice/index.html).*
+///
+mod prim_slice { }
+
+#[doc(primitive = "str")]
+//
+/// Unicode string slices.
+///
+/// Rust's `str` type is one of the core primitive types of the language. `&str`
+/// is the borrowed string type. This type of string can only be created from
+/// other strings, unless it is a `&'static str` (see below). It is not possible
+/// to move out of borrowed strings because they are owned elsewhere.
+///
+/// # Examples
+///
+/// Here's some code that uses a `&str`:
+///
+/// ```
+/// let s = "Hello, world.";
+/// ```
+///
+/// This `&str` is a `&'static str`, which is the type of string literals.
+/// They're `'static` because literals are available for the entire lifetime of
+/// the program.
+///
+/// You can get a non-`'static` `&str` by taking a slice of a `String`:
+///
+/// ```
+/// let some_string = "Hello, world.".to_string();
+/// let s = &some_string;
+/// ```
+///
+/// # Representation
+///
+/// Rust's string type, `str`, is a sequence of Unicode scalar values encoded as
+/// a stream of UTF-8 bytes. All [strings](../../reference.html#literals) are
+/// guaranteed to be validly encoded UTF-8 sequences. Additionally, strings are
+/// not null-terminated and can thus contain null bytes.
+///
+/// The actual representation of `str`s have direct mappings to slices: `&str`
+/// is the same as `&[u8]`.
+///
+/// *[See also the `std::str` module](str/index.html).*
+///
+mod prim_str { }
+
+#[doc(primitive = "tuple")]
+//
+/// A finite heterogeneous sequence, `(T, U, ..)`.
+///
+/// To access the _N_-th element of a tuple one can use `N` itself
+/// as a field of the tuple.
+///
+/// Indexing starts from zero, so `0` returns first value, `1`
+/// returns second value, and so on. In general, a tuple with _S_
+/// elements provides aforementioned fields from `0` to `S-1`.
+///
+/// If every type inside a tuple implements one of the following
+/// traits, then a tuple itself also implements it.
+///
+/// * `Clone`
+/// * `PartialEq`
+/// * `Eq`
+/// * `PartialOrd`
+/// * `Ord`
+/// * `Debug`
+/// * `Default`
+/// * `Hash`
+///
+/// # Examples
+///
+/// Accessing elements of a tuple at specified indices:
+///
+/// ```
+/// let x = ("colorless", "green", "ideas", "sleep", "furiously");
+/// assert_eq!(x.3, "sleep");
+///
+/// let v = (3, 3);
+/// let u = (1, -5);
+/// assert_eq!(v.0 * u.0 + v.1 * u.1, -12);
+/// ```
+///
+/// Using traits implemented for tuples:
+///
+/// ```
+/// let a = (1, 2);
+/// let b = (3, 4);
+/// assert!(a != b);
+///
+/// let c = b.clone();
+/// assert!(b == c);
+///
+/// let d : (u32, f32) = Default::default();
+/// assert_eq!(d, (0, 0.0f32));
+/// ```
+///
+mod prim_tuple { }
+
+#[doc(primitive = "f32")]
+/// The 32-bit floating point type.
+///
+/// *[See also the `std::f32` module](f32/index.html).*
+///
+mod prim_f32 { }
+
+#[doc(primitive = "f64")]
+//
+/// The 64-bit floating point type.
+///
+/// *[See also the `std::f64` module](f64/index.html).*
+///
+mod prim_f64 { }
+
+#[doc(primitive = "i8")]
+//
+/// The 8-bit signed integer type.
+///
+/// *[See also the `std::i8` module](i8/index.html).*
+///
+mod prim_i8 { }
+
+#[doc(primitive = "i16")]
+//
+/// The 16-bit signed integer type.
+///
+/// *[See also the `std::i16` module](i16/index.html).*
+///
+mod prim_i16 { }
+
+#[doc(primitive = "i32")]
+//
+/// The 32-bit signed integer type.
+///
+/// *[See also the `std::i32` module](i32/index.html).*
+///
+mod prim_i32 { }
+
+#[doc(primitive = "i64")]
+//
+/// The 64-bit signed integer type.
+///
+/// *[See also the `std::i64` module](i64/index.html).*
+///
+mod prim_i64 { }
+
+#[doc(primitive = "u8")]
+//
+/// The 8-bit unsigned integer type.
+///
+/// *[See also the `std::u8` module](u8/index.html).*
+///
+mod prim_u8 { }
+
+#[doc(primitive = "u16")]
+//
+/// The 16-bit unsigned integer type.
+///
+/// *[See also the `std::u16` module](u16/index.html).*
+///
+mod prim_u16 { }
+
+#[doc(primitive = "u32")]
+//
+/// The 32-bit unsigned integer type.
+///
+/// *[See also the `std::u32` module](u32/index.html).*
+///
+mod prim_u32 { }
+
+#[doc(primitive = "u64")]
+//
+/// The 64-bit unsigned integer type.
+///
+/// *[See also the `std::u64` module](u64/index.html).*
+///
+mod prim_u64 { }
+
+#[doc(primitive = "isize")]
+//
+/// The pointer-sized signed integer type.
+///
+/// *[See also the `std::isize` module](isize/index.html).*
+///
+mod prim_isize { }
+
+#[doc(primitive = "usize")]
+//
+/// The pointer-sized unsigned integer type.
+///
+/// *[See also the `std::usize` module](usize/index.html).*
+///
+mod prim_usize { }
+
use sync::mpsc::{channel, Receiver};
use sys::pipe::{self, AnonPipe};
use sys::process as imp;
-use sys_common::{AsInner, AsInnerMut, FromInner};
+use sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
use thread;
/// Representation of a running or exited child process.
fn as_inner(&self) -> &imp::Process { &self.handle }
}
+impl IntoInner<imp::Process> for Child {
+ fn into_inner(self) -> imp::Process { self.handle }
+}
+
/// A handle to a child procesess's stdin
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdin {
fn as_inner(&self) -> &AnonPipe { &self.inner }
}
+impl IntoInner<AnonPipe> for ChildStdin {
+ fn into_inner(self) -> AnonPipe { self.inner }
+}
+
/// A handle to a child procesess's stdout
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdout {
fn as_inner(&self) -> &AnonPipe { &self.inner }
}
+impl IntoInner<AnonPipe> for ChildStdout {
+ fn into_inner(self) -> AnonPipe { self.inner }
+}
+
/// A handle to a child procesess's stderr
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStderr {
fn as_inner(&self) -> &AnonPipe { &self.inner }
}
+impl IntoInner<AnonPipe> for ChildStderr {
+ fn into_inner(self) -> AnonPipe { self.inner }
+}
+
/// The `Command` type acts as a process builder, providing fine-grained control
/// over how a new process should be spawned. A default configuration can be
/// generated using `Command::new(program)`, where `program` gives a path to the
}
/// Returns the OS-assigned process identifier associated with this child.
- #[unstable(feature = "process_id", reason = "api recently added")]
+ #[stable(feature = "process_id", since = "1.3.0")]
pub fn id(&self) -> u32 {
self.handle.id()
}
use io::prelude::*;
use io::ErrorKind;
- use rt::running_on_valgrind;
use str;
use super::{Command, Output, Stdio};
assert!(status.success());
assert_eq!(output_str.trim().to_string(), "hello");
- // FIXME #7224
- if !running_on_valgrind() {
- assert_eq!(stderr, Vec::new());
- }
+ assert_eq!(stderr, Vec::new());
}
#[cfg(not(target_os="android"))]
assert!(status.success());
assert_eq!(output_str.trim().to_string(), "hello");
- // FIXME #7224
- if !running_on_valgrind() {
- assert_eq!(stderr, Vec::new());
- }
+ assert_eq!(stderr, Vec::new());
}
#[cfg(all(unix, not(target_os="android")))]
#[cfg(not(target_os="android"))]
#[test]
fn test_inherit_env() {
- use std::env;
- if running_on_valgrind() { return; }
+ use env;
let result = env_cmd().output().unwrap();
let output = String::from_utf8(result.stdout).unwrap();
#[test]
fn test_inherit_env() {
use std::env;
- if running_on_valgrind() { return; }
let mut result = env_cmd().output().unwrap();
let output = String::from_utf8(result.stdout).unwrap();
const NR_GETRANDOM: libc::c_long = 318;
#[cfg(target_arch = "x86")]
const NR_GETRANDOM: libc::c_long = 355;
- #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
- const NR_GETRANDOM: libc::c_long = 384;
- #[cfg(target_arch = "powerpc")]
+    // Syscall numbers are per-architecture; see the kernel's
+    // arch/*/include/uapi/asm/unistd.h tables.
+    #[cfg(target_arch = "arm")]
+    const NR_GETRANDOM: libc::c_long = 384;
+    // powerpc uses 359, not 384 (384 is the ARM EABI number).
+    #[cfg(target_arch = "powerpc")]
+    const NR_GETRANDOM: libc::c_long = 359;
+    #[cfg(any(target_arch = "aarch64"))]
+    const NR_GETRANDOM: libc::c_long = 278;
unsafe {
syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0)
use io;
use mem;
use rand::Rng;
- use libc::{c_int, size_t};
+ use libc::{c_int, c_void, size_t};
/// A random number generator that retrieves randomness straight from
/// the operating system. Platform sources:
_dummy: (),
}
- #[repr(C)]
- struct SecRandom;
+ // Fake definition; this is actually a struct, but we don't use the
+ // contents here.
+ type SecRandom = c_void;
#[allow(non_upper_case_globals)]
const kSecRandomDefault: *const SecRandom = 0 as *const SecRandom;
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod imp {
use prelude::v1::*;
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Parsing of GCC-style Language-Specific Data Area (LSDA)
+//! For details see:
+//! http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+//! http://mentorembedded.github.io/cxx-abi/exceptions.pdf
+//! http://www.airs.com/blog/archives/460
+//! http://www.airs.com/blog/archives/464
+//!
+//! A reference implementation may be found in the GCC source tree
+//! (<root>/libgcc/unwind-c.c as of this writing)
+
+#![allow(non_upper_case_globals)]
+#![allow(unused)]
+
+use prelude::v1::*;
+use rt::dwarf::DwarfReader;
+use core::mem;
+
+pub const DW_EH_PE_omit : u8 = 0xFF;
+pub const DW_EH_PE_absptr : u8 = 0x00;
+
+pub const DW_EH_PE_uleb128 : u8 = 0x01;
+pub const DW_EH_PE_udata2 : u8 = 0x02;
+pub const DW_EH_PE_udata4 : u8 = 0x03;
+pub const DW_EH_PE_udata8 : u8 = 0x04;
+pub const DW_EH_PE_sleb128 : u8 = 0x09;
+pub const DW_EH_PE_sdata2 : u8 = 0x0A;
+pub const DW_EH_PE_sdata4 : u8 = 0x0B;
+pub const DW_EH_PE_sdata8 : u8 = 0x0C;
+
+pub const DW_EH_PE_pcrel : u8 = 0x10;
+pub const DW_EH_PE_textrel : u8 = 0x20;
+pub const DW_EH_PE_datarel : u8 = 0x30;
+pub const DW_EH_PE_funcrel : u8 = 0x40;
+pub const DW_EH_PE_aligned : u8 = 0x50;
+
+pub const DW_EH_PE_indirect : u8 = 0x80;
+
+#[derive(Copy, Clone)]
+pub struct EHContext {
+ pub ip: usize, // Current instruction pointer
+ pub func_start: usize, // Address of the current function
+ pub text_start: usize, // Address of the code section
+ pub data_start: usize, // Address of the data section
+}
+
+pub unsafe fn find_landing_pad(lsda: *const u8, context: &EHContext)
+ -> Option<usize> {
+ if lsda.is_null() {
+ return None;
+ }
+
+ let func_start = context.func_start;
+ let mut reader = DwarfReader::new(lsda);
+
+ let start_encoding = reader.read::<u8>();
+ // base address for landing pad offsets
+ let lpad_base = if start_encoding != DW_EH_PE_omit {
+ read_encoded_pointer(&mut reader, context, start_encoding)
+ } else {
+ func_start
+ };
+
+ let ttype_encoding = reader.read::<u8>();
+ if ttype_encoding != DW_EH_PE_omit {
+ // Rust doesn't analyze exception types, so we don't care about the type table
+ reader.read_uleb128();
+ }
+
+ let call_site_encoding = reader.read::<u8>();
+ let call_site_table_length = reader.read_uleb128();
+ let action_table = reader.ptr.offset(call_site_table_length as isize);
+ // Return addresses point 1 byte past the call instruction, which could
+ // be in the next IP range.
+ let ip = context.ip-1;
+
+ while reader.ptr < action_table {
+ let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding);
+ let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding);
+ let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding);
+ let cs_action = reader.read_uleb128();
+ // Callsite table is sorted by cs_start, so if we've passed the ip, we
+ // may stop searching.
+ if ip < func_start + cs_start {
+ break
+ }
+ if ip < func_start + cs_start + cs_len {
+ if cs_lpad != 0 {
+ return Some(lpad_base + cs_lpad);
+ } else {
+ return None;
+ }
+ }
+ }
+ // IP range not found: gcc's C++ personality calls terminate() here,
+ // however the rest of the languages treat this the same as cs_lpad == 0.
+    // We follow suit.
+ return None;
+}
+
+#[inline]
+fn round_up(unrounded: usize, align: usize) -> usize {
+ assert!(align.is_power_of_two());
+ (unrounded + align - 1) & !(align - 1)
+}
+
+unsafe fn read_encoded_pointer(reader: &mut DwarfReader,
+ context: &EHContext,
+ encoding: u8) -> usize {
+ assert!(encoding != DW_EH_PE_omit);
+
+ // DW_EH_PE_aligned implies it's an absolute pointer value
+ if encoding == DW_EH_PE_aligned {
+ reader.ptr = round_up(reader.ptr as usize,
+ mem::size_of::<usize>()) as *const u8;
+ return reader.read::<usize>();
+ }
+
+ let mut result = match encoding & 0x0F {
+ DW_EH_PE_absptr => reader.read::<usize>(),
+ DW_EH_PE_uleb128 => reader.read_uleb128() as usize,
+ DW_EH_PE_udata2 => reader.read::<u16>() as usize,
+ DW_EH_PE_udata4 => reader.read::<u32>() as usize,
+ DW_EH_PE_udata8 => reader.read::<u64>() as usize,
+ DW_EH_PE_sleb128 => reader.read_sleb128() as usize,
+ DW_EH_PE_sdata2 => reader.read::<i16>() as usize,
+ DW_EH_PE_sdata4 => reader.read::<i32>() as usize,
+ DW_EH_PE_sdata8 => reader.read::<i64>() as usize,
+ _ => panic!()
+ };
+
+ result += match encoding & 0x70 {
+ DW_EH_PE_absptr => 0,
+ // relative to address of the encoded value, despite the name
+ DW_EH_PE_pcrel => reader.ptr as usize,
+ DW_EH_PE_textrel => { assert!(context.text_start != 0);
+ context.text_start },
+ DW_EH_PE_datarel => { assert!(context.data_start != 0);
+ context.data_start },
+ DW_EH_PE_funcrel => { assert!(context.func_start != 0);
+ context.func_start },
+ _ => panic!()
+ };
+
+ if encoding & DW_EH_PE_indirect != 0 {
+ result = *(result as *const usize);
+ }
+
+ result
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Utilities for parsing DWARF-encoded data streams.
+//! See http://www.dwarfstd.org,
+//! DWARF-4 standard, Section 7 - "Data Representation"
+
+// This module is used only by x86_64-pc-windows-gnu for now, but we
+// are compiling it everywhere to avoid regressions.
+#![allow(unused)]
+
+pub mod eh;
+
+use prelude::v1::*;
+use core::mem;
+
+pub struct DwarfReader {
+ pub ptr : *const u8
+}
+
+#[repr(C,packed)]
+struct Unaligned<T>(T);
+
+impl DwarfReader {
+
+ pub fn new(ptr : *const u8) -> DwarfReader {
+ DwarfReader {
+ ptr : ptr
+ }
+ }
+
+ // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned
+ // on a 4-byte boundary. This may cause problems on platforms with strict
+ // alignment requirements. By wrapping data in a "packed" struct, we are
+ // telling the backend to generate "misalignment-safe" code.
+ pub unsafe fn read<T:Copy>(&mut self) -> T {
+ let Unaligned(result) = *(self.ptr as *const Unaligned<T>);
+ self.ptr = self.ptr.offset(mem::size_of::<T>() as isize);
+ result
+ }
+
+ // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable
+ // Length Data".
+ pub unsafe fn read_uleb128(&mut self) -> u64 {
+ let mut shift : usize = 0;
+ let mut result : u64 = 0;
+ let mut byte : u8;
+ loop {
+ byte = self.read::<u8>();
+ result |= ((byte & 0x7F) as u64) << shift;
+ shift += 7;
+ if byte & 0x80 == 0 {
+ break;
+ }
+ }
+ result
+ }
+
+ pub unsafe fn read_sleb128(&mut self) -> i64 {
+ let mut shift : usize = 0;
+ let mut result : u64 = 0;
+ let mut byte : u8;
+ loop {
+ byte = self.read::<u8>();
+ result |= ((byte & 0x7F) as u64) << shift;
+ shift += 7;
+ if byte & 0x80 == 0 {
+ break;
+ }
+ }
+ // sign-extend
+ if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
+ result |= (!0 as u64) << shift;
+ }
+ result as i64
+ }
+}
+
+#[test]
+fn dwarf_reader() {
+ let encoded: &[u8] = &[1,
+ 2, 3,
+ 4, 5, 6, 7,
+ 0xE5, 0x8E, 0x26,
+ 0x9B, 0xF1, 0x59,
+ 0xFF, 0xFF];
+
+ let mut reader = DwarfReader::new(encoded.as_ptr());
+
+ unsafe {
+ assert!(reader.read::<u8>() == u8::to_be(1u8));
+ assert!(reader.read::<u16>() == u16::to_be(0x0203));
+ assert!(reader.read::<u32>() == u32::to_be(0x04050607));
+
+ assert!(reader.read_uleb128() == 624485);
+ assert!(reader.read_sleb128() == -624485);
+
+ assert!(reader.read::<i8>() == i8::to_be(-1));
+ }
+}
#[cfg(target_arch = "arm")]
#[repr(C)]
+#[derive(Copy, Clone)]
pub enum _Unwind_State {
_US_VIRTUAL_UNWIND_FRAME = 0,
_US_UNWIND_FRAME_STARTING = 1,
}
#[repr(C)]
+#[derive(Copy, Clone)]
pub enum _Unwind_Reason_Code {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
#[link(name = "unwind", kind = "static")]
extern {}
-#[cfg(any(target_os = "android", target_os = "openbsd"))]
+#[cfg(any(target_os = "android", target_os = "netbsd", target_os = "openbsd"))]
#[link(name = "gcc")]
extern {}
::rt::util::dumb_print(format_args!(concat!($fmt, "\n")))
} );
($fmt:expr, $($arg:expr),*) => ( {
- ::rt::util::dumb_print(format_args!(concat!($fmt, "\n"), $($arg)*))
+ ::rt::util::dumb_print(format_args!(concat!($fmt, "\n"), $($arg),*))
} )
}
} );
($str:expr, $($arg:expr),*) => ( {
if cfg!(rtdebug) {
- rterrln!($str, $($arg)*)
+ rterrln!($str, $($arg),*)
}
})
}
use usize;
// Reexport some of our utilities which are expected by other crates.
-pub use self::util::{min_stack, running_on_valgrind};
+pub use self::util::min_stack;
pub use self::unwind::{begin_unwind, begin_unwind_fmt};
// Reexport some functionality from liballoc.
mod at_exit_imp;
mod libunwind;
+mod dwarf;
+
/// The default error code of the rust runtime if the main thread panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: isize = 101;
// own fault handlers if we hit it.
sys_common::stack::record_os_managed_stack_bounds(my_stack_bottom,
my_stack_top);
- sys::thread::guard::init();
+ let main_guard = sys::thread::guard::init();
sys::stack_overflow::init();
// Next, set up the current Thread with the guard information we just
// but we just do this to name the main thread and to give it correct
// info about the stack bounds.
let thread: Thread = NewThread::new(Some("<main>".to_string()));
- thread_info::set(sys::thread::guard::main(), thread);
+ thread_info::set(main_guard, thread);
// By default, some platforms will send a *signal* when a EPIPE error
// would otherwise be delivered. This runtime doesn't install a SIGPIPE
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(private_no_mangle_fns)]
+
use prelude::v1::*;
use any::Any;
-use libc::c_void;
use rt::libunwind as uw;
struct Exception {
}
}
-pub unsafe fn cleanup(ptr: *mut c_void) -> Box<Any + Send + 'static> {
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
let my_ep = ptr as *mut Exception;
rtdebug!("caught {}", (*my_ep).uwe.exception_class);
let cause = (*my_ep).cause.take();
// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
// - rust_eh_personality_catch, used only by rust_try(), which always catches.
//
-// Note, however, that for implementation simplicity, rust_eh_personality_catch
-// lacks code to install a landing pad, so in order to obtain exception object
-// pointer (which it needs to return upstream), rust_try() employs another trick:
-// it calls into the nested rust_try_inner(), whose landing pad does not resume
-// unwinds. Instead, it extracts the exception pointer and performs a "normal"
-// return.
-//
-// See also: rt/rust_try.ll
+// See also: rustc_trans::trans::intrinsic::trans_gnu_try
#[cfg(all(not(target_arch = "arm"),
not(all(windows, target_arch = "x86_64")),
use rt::libunwind as uw;
use libc::c_int;
- extern "C" {
+ extern {
fn __gcc_personality_v0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
-> uw::_Unwind_Reason_Code;
}
- #[lang="eh_personality"]
- #[no_mangle] // referenced from rust_try.ll
- #[allow(private_no_mangle_fns)]
+ #[lang = "eh_personality"]
+ #[no_mangle]
extern fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
}
}
- #[no_mangle] // referenced from rust_try.ll
- pub extern "C" fn rust_eh_personality_catch(
- _version: c_int,
+ #[lang = "eh_personality_catch"]
+ #[no_mangle]
+ pub extern fn rust_eh_personality_catch(
+ version: c_int,
actions: uw::_Unwind_Action,
- _exception_class: uw::_Unwind_Exception_Class,
- _ue_header: *mut uw::_Unwind_Exception,
- _context: *mut uw::_Unwind_Context
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
- uw::_URC_INSTALL_CONTEXT
+ unsafe {
+ __gcc_personality_v0(version, actions, exception_class, ue_header,
+ context)
+ }
}
}
}
use rt::libunwind as uw;
use libc::c_int;
- extern "C" {
+ extern {
fn __gcc_personality_sj0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
-> uw::_Unwind_Reason_Code;
}
- #[lang="eh_personality"]
- #[no_mangle] // referenced from rust_try.ll
- pub extern "C" fn rust_eh_personality(
+ #[lang = "eh_personality"]
+ #[no_mangle]
+ pub extern fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
}
}
- #[no_mangle] // referenced from rust_try.ll
- pub extern "C" fn rust_eh_personality_catch(
- _version: c_int,
+ #[lang = "eh_personality_catch"]
+ #[no_mangle]
+ pub extern fn rust_eh_personality_catch(
+ version: c_int,
actions: uw::_Unwind_Action,
- _exception_class: uw::_Unwind_Exception_Class,
- _ue_header: *mut uw::_Unwind_Exception,
- _context: *mut uw::_Unwind_Context
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
}
else { // cleanup phase
unsafe {
- __gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
- _context)
+ __gcc_personality_sj0(version, actions, exception_class, ue_header,
+ context)
}
}
}
use rt::libunwind as uw;
use libc::c_int;
- extern "C" {
+ extern {
fn __gcc_personality_v0(state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
- #[lang="eh_personality"]
- #[no_mangle] // referenced from rust_try.ll
- #[allow(private_no_mangle_fns)]
- extern "C" fn rust_eh_personality(
+ #[lang = "eh_personality"]
+ #[no_mangle]
+ extern fn rust_eh_personality(
state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
}
}
- #[no_mangle] // referenced from rust_try.ll
- pub extern "C" fn rust_eh_personality_catch(
+ #[lang = "eh_personality_catch"]
+ #[no_mangle]
+ pub extern fn rust_eh_personality_catch(
state: uw::_Unwind_State,
- _ue_header: *mut uw::_Unwind_Exception,
- _context: *mut uw::_Unwind_Context
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (state as c_int & uw::_US_ACTION_MASK as c_int)
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
- uw::_URC_INSTALL_CONTEXT
- }
- }
-}
-
-// Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
-//
-// This looks a bit convoluted because rather than implementing a native SEH
-// handler, GCC reuses the same personality routine as for the other
-// architectures by wrapping it with an "API translator" layer
-// (_GCC_specific_handler).
-
-#[cfg(all(windows, target_arch = "x86_64", not(test)))]
-#[doc(hidden)]
-#[allow(non_camel_case_types, non_snake_case)]
-pub mod eabi {
- pub use self::EXCEPTION_DISPOSITION::*;
- use rt::libunwind as uw;
- use libc::{c_void, c_int};
-
- #[repr(C)]
- pub struct EXCEPTION_RECORD;
- #[repr(C)]
- pub struct CONTEXT;
- #[repr(C)]
- pub struct DISPATCHER_CONTEXT;
-
- #[repr(C)]
- #[derive(Copy, Clone)]
- pub enum EXCEPTION_DISPOSITION {
- ExceptionContinueExecution,
- ExceptionContinueSearch,
- ExceptionNestedException,
- ExceptionCollidedUnwind
- }
-
- type _Unwind_Personality_Fn =
- extern "C" fn(
- version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code;
-
- extern "C" {
- fn __gcc_personality_seh0(
- exceptionRecord: *mut EXCEPTION_RECORD,
- establisherFrame: *mut c_void,
- contextRecord: *mut CONTEXT,
- dispatcherContext: *mut DISPATCHER_CONTEXT
- ) -> EXCEPTION_DISPOSITION;
-
- fn _GCC_specific_handler(
- exceptionRecord: *mut EXCEPTION_RECORD,
- establisherFrame: *mut c_void,
- contextRecord: *mut CONTEXT,
- dispatcherContext: *mut DISPATCHER_CONTEXT,
- personality: _Unwind_Personality_Fn
- ) -> EXCEPTION_DISPOSITION;
- }
-
- #[lang="eh_personality"]
- #[no_mangle] // referenced from rust_try.ll
- #[allow(private_no_mangle_fns)]
- extern "C" fn rust_eh_personality(
- exceptionRecord: *mut EXCEPTION_RECORD,
- establisherFrame: *mut c_void,
- contextRecord: *mut CONTEXT,
- dispatcherContext: *mut DISPATCHER_CONTEXT
- ) -> EXCEPTION_DISPOSITION
- {
- unsafe {
- __gcc_personality_seh0(exceptionRecord, establisherFrame,
- contextRecord, dispatcherContext)
- }
- }
-
- #[no_mangle] // referenced from rust_try.ll
- pub extern "C" fn rust_eh_personality_catch(
- exceptionRecord: *mut EXCEPTION_RECORD,
- establisherFrame: *mut c_void,
- contextRecord: *mut CONTEXT,
- dispatcherContext: *mut DISPATCHER_CONTEXT
- ) -> EXCEPTION_DISPOSITION
- {
- extern "C" fn inner(
- _version: c_int,
- actions: uw::_Unwind_Action,
- _exception_class: uw::_Unwind_Exception_Class,
- _ue_header: *mut uw::_Unwind_Exception,
- _context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
- uw::_URC_HANDLER_FOUND // catch!
- }
- else { // cleanup phase
- uw::_URC_INSTALL_CONTEXT
+ unsafe {
+ __gcc_personality_v0(state, ue_header, context)
}
}
-
- unsafe {
- _GCC_specific_handler(exceptionRecord, establisherFrame,
- contextRecord, dispatcherContext,
- inner)
- }
}
}
-
//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
//! documents linked from it.
//! These are also good reads:
-//! http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
+//! http://mentorembedded.github.io/cxx-abi/abi-eh.html
//! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
//! http://www.airs.com/blog/index.php?s=exception+frames
//!
use panicking;
use fmt;
use intrinsics;
-use libc::c_void;
use mem;
use sync::atomic::{self, Ordering};
use sys_common::mutex::Mutex;
// The actual unwinding implementation is cfg'd here, and we've got two current
// implementations. One goes through SEH on Windows and the other goes through
// libgcc via the libunwind-like API.
-#[cfg(target_env = "msvc")] #[path = "seh.rs"] #[doc(hidden)]
+
+// *-pc-windows-msvc
+#[cfg(all(windows, target_env = "msvc"))]
+#[path = "seh.rs"] #[doc(hidden)]
+pub mod imp;
+
+// x86_64-pc-windows-gnu
+#[cfg(all(windows, target_arch="x86_64", target_env="gnu"))]
+#[path = "seh64_gnu.rs"] #[doc(hidden)]
pub mod imp;
-#[cfg(not(target_env = "msvc"))] #[path = "gcc.rs"] #[doc(hidden)]
+
+// i686-pc-windows-gnu and all others
+#[cfg(any(unix, all(windows, target_arch="x86", target_env="gnu")))]
+#[path = "gcc.rs"] #[doc(hidden)]
pub mod imp;
pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: u32);
/// run.
pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
let mut f = Some(f);
- return inner_try(try_fn::<F>, &mut f as *mut _ as *mut c_void);
+ return inner_try(try_fn::<F>, &mut f as *mut _ as *mut u8);
// If an inner function were not used here, then this generic function `try`
// uses the native symbol `rust_try`, for which the code is statically
// `dllexport`, but it's easier to not have conditional `src/rt/rust_try.ll`
// files and instead just have this non-generic shim the compiler can take
// care of exposing correctly.
- unsafe fn inner_try(f: extern fn(*mut c_void), data: *mut c_void)
+ unsafe fn inner_try(f: fn(*mut u8), data: *mut u8)
-> Result<(), Box<Any + Send>> {
let prev = PANICKING.with(|s| s.get());
PANICKING.with(|s| s.set(false));
- let ep = rust_try(f, data);
+ let ep = intrinsics::try(f, data);
PANICKING.with(|s| s.set(prev));
if ep.is_null() {
Ok(())
}
}
- extern fn try_fn<F: FnOnce()>(opt_closure: *mut c_void) {
+ fn try_fn<F: FnOnce()>(opt_closure: *mut u8) {
let opt_closure = opt_closure as *mut Option<F>;
unsafe { (*opt_closure).take().unwrap()(); }
}
// When f(...) returns normally, the return value is null.
// When f(...) throws, the return value is a pointer to the caught
// exception object.
- fn rust_try(f: extern fn(*mut c_void),
- data: *mut c_void) -> *mut c_void;
+ fn rust_try(f: extern fn(*mut u8),
+ data: *mut u8) -> *mut u8;
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
+//!
+//! On Windows (currently only on MSVC), the default exception handling
+//! mechanism is Structured Exception Handling (SEH). This is quite different
+//! than Dwarf-based exception handling (e.g. what other unix platforms use) in
+//! terms of compiler internals, so LLVM is required to have a good deal of
+//! extra support for SEH. Currently this support is somewhat lacking, so what's
+//! here is the bare bones of SEH support.
+//!
+//! In a nutshell, what happens here is:
+//!
+//! 1. The `panic` function calls the standard Windows function `RaiseException`
+//! with a Rust-specific code, triggering the unwinding process.
+//! 2. All landing pads generated by the compiler (just "cleanup" landing pads)
+//! use the personality function `__C_specific_handler`, a function in the
+//! CRT, and the unwinding code in Windows will use this personality function
+//! to execute all cleanup code on the stack.
+//! 3. Eventually the "catch" code in `rust_try` (located in
+//! src/rt/rust_try_msvc_64.ll) is executed, which will ensure that the
+//! exception being caught is indeed a Rust exception, returning control back
+//! into Rust.
+//!
+//! Some specific differences from the gcc-based exception handling are:
+//!
+//! * Rust has no custom personality function, it is instead *always*
+//! __C_specific_handler, so the filtering is done in a C++-like manner
+//! instead of in the personality function itself. Note that the specific
+//! syntax for this (found in the rust_try_msvc_64.ll) is taken from an LLVM
+//! test case for SEH.
+//! * We've got some data to transmit across the unwinding boundary,
+//! specifically a `Box<Any + Send + 'static>`. In Dwarf-based unwinding this
+//! data is part of the payload of the exception, but I have not currently
+//! figured out how to do this with LLVM's bindings. Judging by some comments
+//! in the LLVM test cases this may not even be possible currently with LLVM,
+//! so this is just abandoned entirely. Instead the data is stored in a
+//! thread-local in `panic` and retrieved during `cleanup`.
+//!
+//! So given all that, the bindings here are pretty small.
+
+#![allow(bad_style)]
+
use prelude::v1::*;
use any::Any;
-use intrinsics;
-use libc::c_void;
+use libc::{c_ulong, DWORD, c_void};
+use sys_common::thread_local::StaticKey;
+
+// 0x R U S T
+const RUST_PANIC: DWORD = 0x52555354;
+static PANIC_DATA: StaticKey = StaticKey::new(None);
+
+// This function is provided by kernel32.dll
+extern "system" {
+ fn RaiseException(dwExceptionCode: DWORD,
+ dwExceptionFlags: DWORD,
+ nNumberOfArguments: DWORD,
+ lpArguments: *const c_ulong);
+}
+
+#[repr(C)]
+pub struct EXCEPTION_POINTERS {
+ ExceptionRecord: *mut EXCEPTION_RECORD,
+ ContextRecord: *mut CONTEXT,
+}
+
+enum CONTEXT {}
+
+#[repr(C)]
+struct EXCEPTION_RECORD {
+ ExceptionCode: DWORD,
+ ExceptionFlags: DWORD,
+ ExceptionRecord: *mut _EXCEPTION_RECORD,
+ ExceptionAddress: *mut c_void,
+ NumberParameters: DWORD,
+ ExceptionInformation: [*mut c_ulong; EXCEPTION_MAXIMUM_PARAMETERS],
+}
-pub unsafe fn panic(_data: Box<Any + Send + 'static>) -> ! {
- intrinsics::abort();
+enum _EXCEPTION_RECORD {}
+
+const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
+
+pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
+ // See module docs above for an explanation of why `data` is stored in a
+ // thread local instead of being passed as an argument to the
+ // `RaiseException` function (which can in theory carry along arbitrary
+ // data).
+ let exception = Box::new(data);
+ rtassert!(PANIC_DATA.get().is_null());
+ PANIC_DATA.set(Box::into_raw(exception) as *mut u8);
+
+ RaiseException(RUST_PANIC, 0, 0, 0 as *const _);
+ rtabort!("could not unwind stack");
}
-pub unsafe fn cleanup(_ptr: *mut c_void) -> Box<Any + Send + 'static> {
- intrinsics::abort();
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
+ // The `ptr` here actually corresponds to the code of the exception, and our
+ // real data is stored in our thread local.
+ rtassert!(ptr as DWORD == RUST_PANIC);
+
+ let data = PANIC_DATA.get() as *mut Box<Any + Send + 'static>;
+ PANIC_DATA.set(0 as *mut u8);
+ rtassert!(!data.is_null());
+
+ *Box::from_raw(data)
}
+// This is required by the compiler to exist (e.g. it's a lang item), but it's
+// never actually called by the compiler because __C_specific_handler is the
+// personality function that is always used. Hence this is just an aborting
+// stub.
#[lang = "eh_personality"]
-#[no_mangle]
-pub extern fn rust_eh_personality() {}
+fn rust_eh_personality() {
+ unsafe { ::intrinsics::abort() }
+}
+// This is a function referenced from `rust_try_msvc_64.ll` which is used to
+// filter the exceptions being caught by that function.
+//
+// In theory local variables can be accessed through the `rbp` parameter of this
+// function, but a comment in an LLVM test case indicates that this is not
+// implemented in LLVM, so this is just an idempotent function which doesn't
+// ferry along any other information.
+//
+// This function just takes a look at the current EXCEPTION_RECORD being thrown
+// to ensure that its code is RUST_PANIC, which was set by the call to
+// `RaiseException` above in the `panic` function.
#[no_mangle]
-pub extern fn rust_eh_personality_catch() {}
+#[lang = "msvc_try_filter"]
+pub extern fn __rust_try_filter(eh_ptrs: *mut EXCEPTION_POINTERS,
+ _rbp: *mut u8) -> i32 {
+ unsafe {
+ ((*(*eh_ptrs).ExceptionRecord).ExceptionCode == RUST_PANIC) as i32
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unwinding implementation on top of native Win64 SEH,
+//! however the unwind handler data (aka LSDA) uses GCC-compatible encoding.
+
+#![allow(bad_style)]
+#![allow(private_no_mangle_fns)]
+
+use prelude::v1::*;
+
+use any::Any;
+use self::EXCEPTION_DISPOSITION::*;
+use rt::dwarf::eh;
+use core::mem;
+use core::ptr;
+use simd;
+use libc::{c_void, c_ulonglong, DWORD, LPVOID};
+type ULONG_PTR = c_ulonglong;
+
+// Define our exception codes:
+// according to http://msdn.microsoft.com/en-us/library/het71c37(v=VS.80).aspx,
+// [31:30] = 3 (error), 2 (warning), 1 (info), 0 (success)
+// [29] = 1 (user-defined)
+// [28] = 0 (reserved)
+// we define bits:
+// [24:27] = type
+// [0:23] = magic
+const ETYPE: DWORD = 0b1110_u32 << 28;
+const MAGIC: DWORD = 0x525354; // "RST"
+
+const RUST_PANIC: DWORD = ETYPE | (1 << 24) | MAGIC;
+
+const EXCEPTION_NONCONTINUABLE: DWORD = 0x1; // Noncontinuable exception
+const EXCEPTION_UNWINDING: DWORD = 0x2; // Unwind is in progress
+const EXCEPTION_EXIT_UNWIND: DWORD = 0x4; // Exit unwind is in progress
+const EXCEPTION_STACK_INVALID: DWORD = 0x8; // Stack out of limits or unaligned
+const EXCEPTION_NESTED_CALL: DWORD = 0x10; // Nested exception handler call
+const EXCEPTION_TARGET_UNWIND: DWORD = 0x20; // Target unwind in progress
+const EXCEPTION_COLLIDED_UNWIND: DWORD = 0x40; // Collided exception handler call
+const EXCEPTION_UNWIND: DWORD = EXCEPTION_UNWINDING |
+ EXCEPTION_EXIT_UNWIND |
+ EXCEPTION_TARGET_UNWIND |
+ EXCEPTION_COLLIDED_UNWIND;
+
+#[repr(C)]
+pub struct EXCEPTION_RECORD {
+ ExceptionCode: DWORD,
+ ExceptionFlags: DWORD,
+ ExceptionRecord: *const EXCEPTION_RECORD,
+ ExceptionAddress: LPVOID,
+ NumberParameters: DWORD,
+ ExceptionInformation: [ULONG_PTR; 15],
+}
+
+pub type CONTEXT = c_void;
+pub type UNWIND_HISTORY_TABLE = c_void;
+
+#[repr(C)]
+pub struct RUNTIME_FUNCTION {
+ BeginAddress: DWORD,
+ EndAddress: DWORD,
+ UnwindData: DWORD,
+}
+
+#[repr(C)]
+pub struct DISPATCHER_CONTEXT {
+ ControlPc: LPVOID,
+ ImageBase: LPVOID,
+ FunctionEntry: *const RUNTIME_FUNCTION,
+ EstablisherFrame: LPVOID,
+ TargetIp: LPVOID,
+ ContextRecord: *const CONTEXT,
+ LanguageHandler: LPVOID,
+ HandlerData: *const u8,
+ HistoryTable: *const UNWIND_HISTORY_TABLE,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub enum EXCEPTION_DISPOSITION {
+ ExceptionContinueExecution,
+ ExceptionContinueSearch,
+ ExceptionNestedException,
+ ExceptionCollidedUnwind
+}
+
+// From kernel32.dll
+extern "system" {
+ fn RaiseException(dwExceptionCode: DWORD,
+ dwExceptionFlags: DWORD,
+ nNumberOfArguments: DWORD,
+ lpArguments: *const ULONG_PTR);
+
+ fn RtlUnwindEx(TargetFrame: LPVOID,
+ TargetIp: LPVOID,
+ ExceptionRecord: *const EXCEPTION_RECORD,
+ ReturnValue: LPVOID,
+ OriginalContext: *const CONTEXT,
+ HistoryTable: *const UNWIND_HISTORY_TABLE);
+}
+
+#[repr(C)]
+struct PanicData {
+ data: Box<Any + Send + 'static>
+}
+
+pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
+ let panic_ctx = Box::new(PanicData { data: data });
+ let params = [Box::into_raw(panic_ctx) as ULONG_PTR];
+ rtdebug!("panic: ctx={:X}", params[0]);
+ RaiseException(RUST_PANIC,
+ EXCEPTION_NONCONTINUABLE,
+ params.len() as DWORD,
+ &params as *const ULONG_PTR);
+ rtabort!("could not unwind stack");
+}
+
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
+ rtdebug!("cleanup: ctx={:X}", ptr as usize);
+ let panic_ctx = Box::from_raw(ptr as *mut PanicData);
+ return panic_ctx.data;
+}
+
+// SEH doesn't support resuming unwinds after calling a landing pad like
+// libunwind does. For this reason, MSVC compiler outlines landing pads into
+// separate functions that can be called directly from the personality function
+// but are nevertheless able to find and modify stack frame of the "parent"
+// function.
+//
+// Since this cannot be done with libdwarf-style landing pads,
+// rust_eh_personality instead catches RUST_PANICs, runs the landing pad, then
+// reraises the exception.
+//
+// Note that it makes certain assumptions about the exception:
+//
+// 1. That RUST_PANIC is non-continuable, so no lower stack frame may choose to
+// resume execution.
+// 2. That the first parameter of the exception is a pointer to an extra data
+// area (PanicData).
+// Since these assumptions do not generally hold true for foreign exceptions
+// (system faults, C++ exceptions, etc), we make no attempt to invoke our
+// landing pads (and, thus, destructors!) for anything other than RUST_PANICs.
+// This is considered acceptable, because the behavior of throwing exceptions
+// through a C ABI boundary is undefined.
+
+#[lang = "eh_personality_catch"]
+#[cfg(not(test))]
+unsafe extern fn rust_eh_personality_catch(
+ exceptionRecord: *mut EXCEPTION_RECORD,
+ establisherFrame: LPVOID,
+ contextRecord: *mut CONTEXT,
+ dispatcherContext: *mut DISPATCHER_CONTEXT
+) -> EXCEPTION_DISPOSITION
+{
+ rust_eh_personality(exceptionRecord, establisherFrame,
+ contextRecord, dispatcherContext)
+}
+
+#[lang = "eh_personality"]
+#[cfg(not(test))]
+unsafe extern fn rust_eh_personality(
+ exceptionRecord: *mut EXCEPTION_RECORD,
+ establisherFrame: LPVOID,
+ contextRecord: *mut CONTEXT,
+ dispatcherContext: *mut DISPATCHER_CONTEXT
+) -> EXCEPTION_DISPOSITION
+{
+ let er = &*exceptionRecord;
+ let dc = &*dispatcherContext;
+ rtdebug!("rust_eh_personality: code={:X}, flags={:X}, frame={:X}, ip={:X}",
+ er.ExceptionCode, er.ExceptionFlags,
+ establisherFrame as usize, dc.ControlPc as usize);
+
+ if er.ExceptionFlags & EXCEPTION_UNWIND == 0 { // we are in the dispatch phase
+ if er.ExceptionCode == RUST_PANIC {
+ if let Some(lpad) = find_landing_pad(dc) {
+ rtdebug!("unwinding to landing pad {:X}", lpad);
+
+ RtlUnwindEx(establisherFrame,
+ lpad as LPVOID,
+ exceptionRecord,
+ er.ExceptionInformation[0] as LPVOID, // pointer to PanicData
+ contextRecord,
+ dc.HistoryTable);
+ rtabort!("could not unwind");
+ }
+ }
+ }
+ ExceptionContinueSearch
+}
+
+// The `resume` instruction, found at the end of the landing pads, and whose job
+// is to resume stack unwinding, is typically lowered by LLVM into a call to
+// `_Unwind_Resume` routine. To avoid confusion with the same symbol exported
+// from libgcc, we redirect it to `rust_eh_unwind_resume`.
+// Since resolution of this symbol is done by the linker, `rust_eh_unwind_resume`
+// must be marked `pub` + `#[no_mangle]`. (Can we make it a lang item?)
+
+#[lang = "eh_unwind_resume"]
+#[cfg(not(test))]
+unsafe extern fn rust_eh_unwind_resume(panic_ctx: LPVOID) {
+ rtdebug!("rust_eh_unwind_resume: ctx={:X}", panic_ctx as usize);
+ let params = [panic_ctx as ULONG_PTR];
+ RaiseException(RUST_PANIC,
+ EXCEPTION_NONCONTINUABLE,
+ params.len() as DWORD,
+ &params as *const ULONG_PTR);
+ rtabort!("could not resume unwind");
+}
+
+unsafe fn find_landing_pad(dc: &DISPATCHER_CONTEXT) -> Option<usize> {
+ let eh_ctx = eh::EHContext {
+ ip: dc.ControlPc as usize,
+ func_start: dc.ImageBase as usize + (*dc.FunctionEntry).BeginAddress as usize,
+ text_start: dc.ImageBase as usize,
+ data_start: 0
+ };
+ eh::find_landing_pad(dc.HandlerData, &eh_ctx)
+}
use env;
use fmt;
use intrinsics;
-use libc::uintptr_t;
use sync::atomic::{self, Ordering};
use sys::stdio::Stderr;
-/// Dynamically inquire about whether we're running under V.
-/// You should usually not use this unless your test definitely
-/// can't run correctly un-altered. Valgrind is there to help
-/// you notice weirdness in normal, un-doctored code paths!
-pub fn running_on_valgrind() -> bool {
- extern {
- fn rust_running_on_valgrind() -> uintptr_t;
- }
- unsafe { rust_running_on_valgrind() != 0 }
-}
-
-/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
-/// wired into it; this is a hard limit and requires rebuilding valgrind if you
-/// want to go beyond it. Normally this is not a problem, but in some tests, we
-/// produce a lot of threads casually. Making lots of threads alone might not
-/// be a problem _either_, except on OSX, the segments produced for new threads
-/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
-/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
-/// perfect storm of creating "too many mappings" for valgrind to handle when
-/// running certain stress tests in the runtime.
-pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
- (cfg!(target_os="macos")) && running_on_valgrind()
-}
-
pub fn min_stack() -> usize {
static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
match MIN.load(Ordering::SeqCst) {
//! the standard library This varies per-platform, but these libraries are
//! necessary for running libstd.
-// All platforms need to link to rustrt
-#[cfg(not(test))]
+// A few small shims in C that haven't been translated to Rust yet
+#[cfg(all(not(test), not(windows)))]
#[link(name = "rust_builtin", kind = "static")]
extern {}
#[cfg(any(target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
#[link(name = "pthread")]
extern {}
/// # Examples
///
/// ```
-/// # #![feature(static_condvar)]
+/// #![feature(static_condvar)]
+///
/// use std::sync::{StaticCondvar, CONDVAR_INIT};
///
/// static CVAR: StaticCondvar = CONDVAR_INIT;
//! # Examples
//!
//! ```
-//! # #![feature(future)]
+//! #![feature(future)]
+//!
//! use std::sync::Future;
//!
//! // a fake, for now
/// If the corresponding `Sender` has disconnected, or it disconnects while
/// this call is blocking, this call will wake up and return `Err` to
/// indicate that no more messages can ever be received on this channel.
+ /// However, since channels are buffered, messages sent before the disconnect
+ /// will still be properly received.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::mpsc;
+ /// use std::thread;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ /// let handle = thread::spawn(move || {
+ /// send.send(1u8).unwrap();
+ /// });
+ ///
+ /// handle.join().unwrap();
+ ///
+ /// assert_eq!(Ok(1), recv.recv());
+ /// ```
+ ///
+ /// Buffering behavior:
+ ///
+ /// ```
+ /// use std::sync::mpsc;
+ /// use std::thread;
+ /// use std::sync::mpsc::RecvError;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ /// let handle = thread::spawn(move || {
+ /// send.send(1u8).unwrap();
+ /// send.send(2).unwrap();
+ /// send.send(3).unwrap();
+ /// drop(send);
+ /// });
+ ///
+ /// // wait for the thread to join so we ensure the sender is dropped
+ /// handle.join().unwrap();
+ ///
+ /// assert_eq!(Ok(1), recv.recv());
+ /// assert_eq!(Ok(2), recv.recv());
+ /// assert_eq!(Ok(3), recv.recv());
+ /// assert_eq!(Err(RecvError), recv.recv());
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn recv(&self) -> Result<T, RecvError> {
loop {
mod tests {
use prelude::v1::*;
- use std::env;
+ use env;
use super::*;
use thread;
mod sync_tests {
use prelude::v1::*;
- use std::env;
+ use env;
use thread;
use super::*;
//! # Examples
//!
//! ```rust
-//! # #![feature(mpsc_select)]
+//! #![feature(mpsc_select)]
+//!
//! use std::sync::mpsc::channel;
//!
//! let (tx1, rx1) = channel();
/// # Examples
///
/// ```
- /// # #![feature(mpsc_select)]
+ /// #![feature(mpsc_select)]
+ ///
/// use std::sync::mpsc::Select;
///
/// let select = Select::new();
/// # Examples
///
/// ```
-/// # #![feature(static_mutex)]
+/// #![feature(static_mutex)]
+///
/// use std::sync::{StaticMutex, MUTEX_INIT};
///
/// static LOCK: StaticMutex = MUTEX_INIT;
/// # Examples
///
/// ```
-/// # #![feature(static_rwlock)]
+/// #![feature(static_rwlock)]
+///
/// use std::sync::{StaticRwLock, RW_LOCK_INIT};
///
/// static LOCK: StaticRwLock = RW_LOCK_INIT;
/// # Examples
///
/// ```
-/// # #![feature(semaphore)]
+/// #![feature(semaphore)]
+///
/// use std::sync::Semaphore;
///
/// // Create a semaphore that represents 5 resources
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+use prelude::v1::*;
+use io;
+use io::ErrorKind;
+use io::Read;
+use slice::from_raw_parts_mut;
+
+// Provides read_to_end functionality over an uninitialized buffer.
+// This function is unsafe because it calls the underlying
+// read function with a slice into uninitialized memory. The default
+// implementation of read_to_end for readers will zero out new memory in
+// the buf before passing it to read, but avoiding this zero can often
+// lead to a fairly significant performance win.
+//
+// Implementations using this method have to adhere to two guarantees:
+// * The implementation of read never reads the buffer provided.
+// * The implementation of read correctly reports how many bytes were written.
+pub unsafe fn read_to_end_uninitialized(r: &mut Read, buf: &mut Vec<u8>) -> io::Result<usize> {
+
+ let start_len = buf.len();
+ buf.reserve(16);
+
+ // Always try to read into the empty space of the vector (from the length to the capacity).
+ // If the vector ever fills up then we reserve an extra byte which should trigger the normal
+ // reallocation routines for the vector, which will likely double the size.
+ //
+ // This function is similar to the read_to_end function in std::io, but the logic about
+ // reservations and slicing is different enough that this is duplicated here.
+ loop {
+ if buf.len() == buf.capacity() {
+ buf.reserve(1);
+ }
+
+ let buf_slice = from_raw_parts_mut(buf.as_mut_ptr().offset(buf.len() as isize),
+ buf.capacity() - buf.len());
+
+ match r.read(buf_slice) {
+ Ok(0) => { return Ok(buf.len() - start_len); }
+ Ok(n) => { let len = buf.len() + n; buf.set_len(len); },
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => { }
+ Err(e) => { return Err(e); }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use prelude::v1::*;
+ use io::prelude::*;
+ use super::*;
+ use io;
+ use io::{ErrorKind, Take, Repeat, repeat};
+ use test;
+ use slice::from_raw_parts;
+
+ struct ErrorRepeat {
+ lr: Take<Repeat>
+ }
+
+ fn error_repeat(byte: u8, limit: u64) -> ErrorRepeat {
+ ErrorRepeat { lr: repeat(byte).take(limit) }
+ }
+
+ impl Read for ErrorRepeat {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let ret = self.lr.read(buf);
+ if let Ok(0) = ret {
+ return Err(io::Error::new(ErrorKind::Other, ""))
+ }
+ ret
+ }
+ }
+
+ fn init_vec_data() -> Vec<u8> {
+ let mut vec = vec![10u8; 200];
+ unsafe { vec.set_len(0); }
+ vec
+ }
+
+ fn assert_all_eq(buf: &[u8], value: u8) {
+ for n in buf {
+ assert_eq!(*n, value);
+ }
+ }
+
+ fn validate(buf: &Vec<u8>, good_read_len: usize) {
+ assert_all_eq(buf, 1u8);
+ let cap = buf.capacity();
+ let end_slice = unsafe { from_raw_parts(buf.as_ptr().offset(good_read_len as isize),
+ cap - good_read_len) };
+ assert_all_eq(end_slice, 10u8);
+ }
+
+ #[test]
+ fn read_to_end_uninit_error() {
+ let mut er = error_repeat(1,100);
+ let mut vec = init_vec_data();
+ if let Err(_) = unsafe { read_to_end_uninitialized(&mut er, &mut vec) } {
+ validate(&vec, 100);
+ } else {
+ assert!(false);
+ }
+ }
+
+ #[test]
+ fn read_to_end_uninit_zero_len_vec() {
+ let mut er = repeat(1).take(100);
+ let mut vec = Vec::new();
+ let n = unsafe{ read_to_end_uninitialized(&mut er, &mut vec).unwrap() };
+ assert_all_eq(&vec, 1u8);
+ assert_eq!(vec.len(), n);
+ }
+
+ #[test]
+ fn read_to_end_uninit_good() {
+ let mut er = repeat(1).take(100);
+ let mut vec = init_vec_data();
+ let n = unsafe{ read_to_end_uninitialized(&mut er, &mut vec).unwrap() };
+ validate(&vec, 100);
+ assert_eq!(vec.len(), n);
+ }
+
+ #[bench]
+ fn bench_uninitialized(b: &mut test::Bencher) {
+ b.iter(|| {
+ let mut lr = repeat(1).take(10000000);
+ let mut vec = Vec::with_capacity(1024);
+ unsafe { read_to_end_uninitialized(&mut lr, &mut vec) };
+ });
+ }
+}
pub mod condvar;
pub mod mutex;
pub mod net;
+pub mod io;
pub mod poison;
pub mod remutex;
pub mod rwlock;
pub fn socket(&self) -> &Socket { &self.inner }
+ pub fn into_socket(self) -> Socket { self.inner }
+
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
setsockopt(&self.inner, libc::IPPROTO_TCP, libc::TCP_NODELAY,
nodelay as c_int)
pub fn socket(&self) -> &Socket { &self.inner }
+ pub fn into_socket(self) -> Socket { self.inner }
+
pub fn socket_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
libc::getsockname(*self.inner.as_inner(), buf, len)
pub fn socket(&self) -> &Socket { &self.inner }
+ pub fn into_socket(self) -> Socket { self.inner }
+
pub fn socket_addr(&self) -> io::Result<SocketAddr> {
sockname(|buf, len| unsafe {
libc::getsockname(*self.inner.as_inner(), buf, len)
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
- #[cfg(all(target_arch = "x86",
- any(target_os = "linux", target_os = "freebsd")))]
+ #[cfg(all(target_arch = "x86", target_os = "linux"))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: usize) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
// aarch64 - FIXME(AARCH64): missing...
// powerpc - FIXME(POWERPC): missing...
// arm-ios - iOS segmented stack is disabled for now, see related notes
- // openbsd - segmented stack is disabled
+ // openbsd/bitrig/netbsd - no segmented stacks.
+ // x86-freebsd - no segmented stacks.
#[cfg(any(target_arch = "aarch64",
target_arch = "powerpc",
all(target_arch = "arm", target_os = "ios"),
+ all(target_arch = "x86", target_os = "freebsd"),
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
unsafe fn target_record_sp_limit(_: usize) {
}
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
- #[cfg(all(target_arch = "x86",
- any(target_os = "linux", target_os = "freebsd")))]
+ #[cfg(all(target_arch = "x86", target_os = "linux"))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> usize {
let limit;
// aarch64 - FIXME(AARCH64): missing...
// powerpc - FIXME(POWERPC): missing...
- // arm-ios - iOS doesn't support segmented stacks yet.
- // openbsd - OpenBSD doesn't support segmented stacks.
+ // arm-ios - no segmented stacks.
+ // openbsd/bitrig/netbsd - no segmented stacks.
+ // x86-freebsd - no segmented stacks.
//
// This function might be called by runtime though
// so it is unsafe to unreachable, let's return a fixed constant.
#[cfg(any(target_arch = "aarch64",
target_arch = "powerpc",
all(target_arch = "arm", target_os = "ios"),
+ all(target_arch = "x86", target_os = "freebsd"),
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> usize {
use thread::LocalKeyState;
struct ThreadInfo {
- stack_guard: usize,
+ stack_guard: Option<usize>,
thread: Thread,
}
THREAD_INFO.with(move |c| {
if c.borrow().is_none() {
*c.borrow_mut() = Some(ThreadInfo {
- stack_guard: 0,
+ stack_guard: None,
thread: NewThread::new(None),
})
}
}
pub fn stack_guard() -> Option<usize> {
- ThreadInfo::with(|info| info.stack_guard)
+ ThreadInfo::with(|info| info.stack_guard).and_then(|o| o)
}
-pub fn set(stack_guard: usize, thread: Thread) {
+pub fn set(stack_guard: Option<usize>, thread: Thread) {
THREAD_INFO.with(|c| assert!(c.borrow().is_none()));
THREAD_INFO.with(move |c| *c.borrow_mut() = Some(ThreadInfo{
stack_guard: stack_guard,
use ascii::*;
use borrow::Cow;
+use char;
use cmp;
use fmt;
use hash::{Hash, Hasher};
use iter::FromIterator;
use mem;
use ops;
+use rustc_unicode::str::{Utf16Item, utf16_items};
use slice;
use str;
use string::String;
use sys_common::AsInner;
-use rustc_unicode::str::{Utf16Item, utf16_items};
use vec::Vec;
const UTF8_REPLACEMENT_CHARACTER: &'static [u8] = b"\xEF\xBF\xBD";
pub fn to_char(&self) -> Option<char> {
match self.value {
0xD800 ... 0xDFFF => None,
- _ => Some(unsafe { mem::transmute(self.value) })
+ _ => Some(unsafe { char::from_u32_unchecked(self.value) })
}
}
// Attempt to not use an intermediate buffer by just pushing bytes
// directly onto this string.
let slice = slice::from_raw_parts_mut(
- self.bytes.as_mut_ptr().offset(cur_len as isize),
- 4
+ self.bytes.as_mut_ptr().offset(cur_len as isize), 4
);
- let used = encode_utf8_raw(code_point.value, mem::transmute(slice))
- .unwrap_or(0);
+ let used = encode_utf8_raw(code_point.value, slice).unwrap();
self.bytes.set_len(cur_len + used);
}
}
#[inline]
pub fn as_slice(&self) -> &Wtf8 {
- unsafe { mem::transmute(&*self.bytes) }
+ unsafe { Wtf8::from_bytes_unchecked(&self.bytes) }
}
/// Reserves capacity for at least `additional` more bytes to be inserted
/// and surrogates as `\u` followed by four hexadecimal digits.
/// Example: `"a\u{D800}"` for a slice with code points [U+0061, U+D800]
impl fmt::Debug for Wtf8 {
- fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ fn write_str_escaped(f: &mut fmt::Formatter, s: &str) -> fmt::Result {
+ use fmt::Write;
+ for c in s.chars().flat_map(|c| c.escape_default()) {
+ try!(f.write_char(c))
+ }
+ Ok(())
+ }
+
try!(formatter.write_str("\""));
let mut pos = 0;
loop {
match self.next_surrogate(pos) {
None => break,
Some((surrogate_pos, surrogate)) => {
- try!(formatter.write_str(unsafe {
- // the data in this slice is valid UTF-8, transmute to &str
- mem::transmute(&self.bytes[pos .. surrogate_pos])
- }));
+ try!(write_str_escaped(
+ formatter,
+ unsafe { str::from_utf8_unchecked(
+ &self.bytes[pos .. surrogate_pos]
+ )},
+ ));
try!(write!(formatter, "\\u{{{:X}}}", surrogate));
pos = surrogate_pos + 3;
}
}
}
- try!(formatter.write_str(unsafe {
- // the data in this slice is valid UTF-8, transmute to &str
- mem::transmute(&self.bytes[pos..])
- }));
+ try!(write_str_escaped(
+ formatter,
+ unsafe { str::from_utf8_unchecked(&self.bytes[pos..]) },
+ ));
formatter.write_str("\"")
}
}
/// Since WTF-8 is a superset of UTF-8, this always succeeds.
#[inline]
pub fn from_str(value: &str) -> &Wtf8 {
- unsafe { mem::transmute(value.as_bytes()) }
+ unsafe { Wtf8::from_bytes_unchecked(value.as_bytes()) }
+ }
+
+ /// Creates a WTF-8 slice from a WTF-8 byte slice.
+ ///
+ /// Since the byte slice is not checked for valid WTF-8, this function is
+ /// marked unsafe.
+ #[inline]
+ unsafe fn from_bytes_unchecked(value: &[u8]) -> &Wtf8 {
+ mem::transmute(value)
}
/// Returns the length, in WTF-8 bytes.
#[inline]
fn decode_surrogate_pair(lead: u16, trail: u16) -> char {
let code_point = 0x10000 + ((((lead - 0xD800) as u32) << 10) | (trail - 0xDC00) as u32);
- unsafe { mem::transmute(code_point) }
+ unsafe { char::from_u32_unchecked(code_point) }
}
/// Copied from core::str::StrPrelude::is_char_boundary
#[inline]
pub unsafe fn slice_unchecked(s: &Wtf8, begin: usize, end: usize) -> &Wtf8 {
// memory layout of an &[u8] and &Wtf8 are the same
- mem::transmute(slice::from_raw_parts(
+ Wtf8::from_bytes_unchecked(slice::from_raw_parts(
s.bytes.as_ptr().offset(begin as isize),
end - begin
))
use prelude::v1::*;
use borrow::Cow;
use super::*;
- use mem::transmute;
#[test]
fn code_point_from_u32() {
string.push_wtf8(Wtf8::from_str(" 💩"));
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
- fn w(value: &[u8]) -> &Wtf8 { unsafe { transmute(value) } }
+ fn w(v: &[u8]) -> &Wtf8 { unsafe { Wtf8::from_bytes_unchecked(v) } }
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xA0\xBD")); // lead
#[test]
fn wtf8buf_show() {
- let mut string = Wtf8Buf::from_str("aé 💩");
+ let mut string = Wtf8Buf::from_str("a\té 💩\r");
string.push(CodePoint::from_u32(0xD800).unwrap());
- assert_eq!(format!("{:?}", string), r#""aé 💩\u{D800}""#);
+ assert_eq!(format!("{:?}", string), r#""a\t\u{e9} \u{1f4a9}\r\u{D800}""#);
}
#[test]
}
#[test]
- fn wtf8_show() {
- let mut string = Wtf8Buf::from_str("aé 💩");
- string.push(CodePoint::from_u32(0xD800).unwrap());
- assert_eq!(format!("{:?}", string), r#""aé 💩\u{D800}""#);
+ fn wtf8buf_show_str() {
+ let text = "a\té 💩\r";
+ let mut string = Wtf8Buf::from_str(text);
+ assert_eq!(format!("{:?}", text), format!("{:?}", string));
}
#[test]
#[cfg(all(target_os = "ios", target_arch = "arm"))]
#[inline(never)]
pub fn write(w: &mut Write) -> io::Result<()> {
- use result;
-
extern {
fn backtrace(buf: *mut *mut libc::c_void,
sz: libc::c_int) -> libc::c_int;
let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as usize};
// skipping the first one as it is write itself
- let iter = (1..cnt).map(|i| {
- print(w, i as isize, buf[i], buf[i])
- });
- result::fold(iter, (), |_, _| ())
+ for i in 1..cnt {
+ try!(print(w, i as isize, buf[i], buf[i]))
+ }
+ Ok(())
}
#[cfg(not(all(target_os = "ios", target_arch = "arm")))]
let selfname = if cfg!(target_os = "freebsd") ||
cfg!(target_os = "dragonfly") ||
cfg!(target_os = "bitrig") ||
+ cfg!(target_os = "netbsd") ||
cfg!(target_os = "openbsd") {
env::current_exe().ok()
} else {
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub const FIOCLEX: libc::c_ulong = 0x20006601;
target_os = "dragonfly"))]
pub const _SC_GETPW_R_SIZE_MAX: libc::c_int = 71;
#[cfg(any(target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub const _SC_GETPW_R_SIZE_MAX: libc::c_int = 101;
#[cfg(target_os = "android")]
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub struct passwd {
pub pw_name: *mut libc::c_char,
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod signal_os {
use libc;
pub struct sigset_t {
bits: [u32; 4],
}
- #[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
+ #[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
pub type sigset_t = libc::c_uint;
// This structure has more fields, but we're not all that interested in
pub _status: libc::c_int,
pub si_addr: *mut libc::c_void
}
- #[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
+ #[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
#[repr(C)]
pub struct siginfo {
pub si_signo: libc::c_int,
}
#[cfg(any(target_os = "macos", target_os = "ios",
- target_os = "bitrig", target_os = "openbsd"))]
+ target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
#[repr(C)]
pub struct sigaction {
pub sa_sigaction: sighandler_t,
let r = ffi::gettimeofday(&mut sys_now, ptr::null_mut());
debug_assert_eq!(r, 0);
- let nsec = dur.extra_nanos() as libc::c_long +
+ let nsec = dur.subsec_nanos() as libc::c_long +
(sys_now.tv_usec * 1000) as libc::c_long;
let extra = (nsec / 1_000_000_000) as libc::time_t;
let nsec = nsec % 1_000_000_000;
- let seconds = dur.secs() as libc::time_t;
+ let seconds = dur.as_secs() as libc::time_t;
let timeout = sys_now.tv_sec.checked_add(extra).and_then(|s| {
s.checked_add(seconds)
use fs::{self, Permissions, OpenOptions};
use io;
+use libc;
use os::raw::c_long;
use os::unix::raw;
use path::Path;
}
}
+/// Add special unix types (block/char device, fifo and socket)
+#[unstable(feature = "file_type_ext", reason = "recently added API")]
+pub trait FileTypeExt {
+ /// Returns whether this file type is a block device.
+ fn is_block_device(&self) -> bool;
+ /// Returns whether this file type is a char device.
+ fn is_char_device(&self) -> bool;
+ /// Returns whether this file type is a fifo.
+ fn is_fifo(&self) -> bool;
+ /// Returns whether this file type is a socket.
+ fn is_socket(&self) -> bool;
+}
+
+#[unstable(feature = "file_type_ext", reason = "recently added API")]
+impl FileTypeExt for fs::FileType {
+ fn is_block_device(&self) -> bool { self.as_inner().is(libc::S_IFBLK) }
+ fn is_char_device(&self) -> bool { self.as_inner().is(libc::S_IFCHR) }
+ fn is_fifo(&self) -> bool { self.as_inner().is(libc::S_IFIFO) }
+ fn is_socket(&self) -> bool { self.as_inner().is(libc::S_IFSOCK) }
+}
+
/// Unix-specific extension methods for `fs::DirEntry`
#[stable(feature = "dir_entry_ext", since = "1.1.0")]
pub trait DirEntryExt {
use net;
use os::raw;
use sys;
-use sys_common::{self, AsInner, FromInner};
+use sys_common::{self, AsInner, FromInner, IntoInner};
/// Raw file descriptors.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn from_raw_fd(fd: RawFd) -> Self;
}
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw file descriptor.
+#[unstable(feature = "into_raw_os", reason = "recently added API")]
+pub trait IntoRawFd {
+ /// Consumes this object, returning the raw underlying file descriptor.
+ ///
+ /// This function **transfers ownership** of the underlying file descriptor
+ /// to the caller. Callers are then the unique owners of the file descriptor
+ /// and must close the descriptor once it's no longer needed.
+ fn into_raw_fd(self) -> RawFd;
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRawFd for fs::File {
fn as_raw_fd(&self) -> RawFd {
fs::File::from_inner(sys::fs::File::from_inner(fd))
}
}
+impl IntoRawFd for fs::File {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_fd().into_raw()
+ }
+}
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRawFd for net::TcpStream {
net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(socket))
}
}
+
+impl IntoRawFd for net::TcpStream {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_socket().into_inner()
+ }
+}
+impl IntoRawFd for net::TcpListener {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_socket().into_inner()
+ }
+}
+impl IntoRawFd for net::UdpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_socket().into_inner()
+ }
+}
#[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
- pub use super::fs::{PermissionsExt, OpenOptionsExt, MetadataExt};
+ pub use super::fs::{PermissionsExt, OpenOptionsExt, MetadataExt, FileTypeExt};
#[doc(no_inline)]
pub use super::fs::{DirEntryExt};
#[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
#![stable(feature = "rust1", since = "1.0.0")]
use os::unix::raw::{uid_t, gid_t};
-use os::unix::io::{FromRawFd, RawFd, AsRawFd};
+use os::unix::io::{FromRawFd, RawFd, AsRawFd, IntoRawFd};
use prelude::v1::*;
use process;
use sys;
-use sys_common::{AsInnerMut, AsInner, FromInner};
+use sys_common::{AsInnerMut, AsInner, FromInner, IntoInner};
/// Unix-specific extensions to the `std::process::Command` builder
#[stable(feature = "rust1", since = "1.0.0")]
self.as_inner().fd().raw()
}
}
+
+impl IntoRawFd for process::ChildStdin {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_fd().into_raw()
+ }
+}
+
+impl IntoRawFd for process::ChildStdout {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_fd().into_raw()
+ }
+}
+
+impl IntoRawFd for process::ChildStderr {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_fd().into_raw()
+ }
+}
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
-use io::{self, Error, SeekFrom};
+use io::{self, Error, ErrorKind, SeekFrom};
use libc::{self, c_int, size_t, off_t, c_char, mode_t};
use mem;
use path::{Path, PathBuf};
pub fn is_file(&self) -> bool { self.is(libc::S_IFREG) }
pub fn is_symlink(&self) -> bool { self.is(libc::S_IFLNK) }
- fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
+ pub fn is(&self, mode: mode_t) -> bool { self.mode & libc::S_IFMT == mode }
}
impl FromInner<raw::mode_t> for FilePermissions {
}
pub fn fd(&self) -> &FileDesc { &self.0 }
+
+ pub fn into_fd(self) -> FileDesc { self.0 }
}
impl DirBuilder {
readlink(&p).ok()
}
- #[cfg(not(target_os = "linux"))]
+ #[cfg(target_os = "macos")]
+ fn get_path(fd: c_int) -> Option<PathBuf> {
+ let mut buf = vec![0;libc::PATH_MAX as usize];
+ let n = unsafe { libc::fcntl(fd, libc::F_GETPATH, buf.as_ptr()) };
+ if n == -1 {
+ return None;
+ }
+ let l = buf.iter().position(|&c| c == 0).unwrap();
+ buf.truncate(l as usize);
+ Some(PathBuf::from(OsString::from_vec(buf)))
+ }
+
+ #[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
- #[cfg(target_os = "linux")]
+ #[cfg(any(target_os = "linux", target_os = "macos"))]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
}
}
- #[cfg(not(target_os = "linux"))]
+ #[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
buf.truncate(p);
Ok(PathBuf::from(OsString::from_vec(buf)))
}
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ use fs::{File, PathExt, set_permissions};
+ if !from.is_file() {
+ return Err(Error::new(ErrorKind::InvalidInput,
+ "the source path is not an existing file"))
+ }
+
+ let mut reader = try!(File::open(from));
+ let mut writer = try!(File::create(to));
+ let perm = try!(reader.metadata()).permissions();
+
+ let ret = try!(io::copy(&mut reader, &mut writer));
+ try!(set_permissions(to, perm));
+ Ok(ret)
+}
#[cfg(target_os = "linux")] pub use os::linux as platform;
#[cfg(target_os = "macos")] pub use os::macos as platform;
#[cfg(target_os = "nacl")] pub use os::nacl as platform;
+#[cfg(target_os = "netbsd")] pub use os::netbsd as platform;
#[cfg(target_os = "openbsd")] pub use os::openbsd as platform;
pub mod backtrace;
use sys::c;
use net::SocketAddr;
use sys::fd::FileDesc;
-use sys_common::{AsInner, FromInner};
+use sys_common::{AsInner, FromInner, IntoInner};
use sys_common::net::{getsockopt, setsockopt};
use time::Duration;
pub fn set_timeout(&self, dur: Option<Duration>, kind: libc::c_int) -> io::Result<()> {
let timeout = match dur {
Some(dur) => {
- if dur.secs() == 0 && dur.extra_nanos() == 0 {
+ if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"cannot set a 0 duration timeout"));
}
- let secs = if dur.secs() > libc::time_t::max_value() as u64 {
+ let secs = if dur.as_secs() > libc::time_t::max_value() as u64 {
libc::time_t::max_value()
} else {
- dur.secs() as libc::time_t
+ dur.as_secs() as libc::time_t
};
let mut timeout = libc::timeval {
tv_sec: secs,
- tv_usec: (dur.extra_nanos() / 1000) as libc::suseconds_t,
+ tv_usec: (dur.subsec_nanos() / 1000) as libc::suseconds_t,
};
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
impl FromInner<c_int> for Socket {
fn from_inner(fd: c_int) -> Socket { Socket(FileDesc::new(fd)) }
}
+
+impl IntoInner<c_int> for Socket {
+ fn into_inner(self) -> c_int { self.0.into_raw() }
+}
use iter;
use libc::{self, c_int, c_char, c_void};
use mem;
-use ptr;
use path::{self, PathBuf};
+use ptr;
use slice;
use str;
use sys::c;
use sys::fd;
use vec;
-const BUF_BYTES: usize = 2048;
+const GETCWD_BUF_BYTES: usize = 2048;
const TMPBUF_SZ: usize = 128;
-fn bytes2path(b: &[u8]) -> PathBuf {
- PathBuf::from(<OsStr as OsStrExt>::from_bytes(b))
-}
-
-fn os2path(os: OsString) -> PathBuf {
- bytes2path(os.as_bytes())
-}
-
/// Returns the platform-specific value of errno
pub fn errno() -> i32 {
#[cfg(any(target_os = "macos",
__error()
}
- #[cfg(target_os = "bitrig")]
- fn errno_location() -> *const c_int {
- extern {
- fn __errno() -> *const c_int;
- }
- unsafe {
- __errno()
- }
- }
-
#[cfg(target_os = "dragonfly")]
unsafe fn errno_location() -> *const c_int {
extern { fn __dfly_error() -> *const c_int; }
__dfly_error()
}
- #[cfg(target_os = "openbsd")]
+ #[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
unsafe fn errno_location() -> *const c_int {
extern { fn __errno() -> *const c_int; }
__errno()
}
pub fn getcwd() -> io::Result<PathBuf> {
- let mut buf = [0 as c_char; BUF_BYTES];
- unsafe {
- if libc::getcwd(buf.as_mut_ptr(), buf.len() as libc::size_t).is_null() {
- Err(io::Error::last_os_error())
- } else {
- Ok(bytes2path(CStr::from_ptr(buf.as_ptr()).to_bytes()))
+ let mut buf = Vec::new();
+ let mut n = GETCWD_BUF_BYTES;
+ loop {
+ unsafe {
+ buf.reserve(n);
+ let ptr = buf.as_mut_ptr() as *mut libc::c_char;
+ if !libc::getcwd(ptr, buf.capacity() as libc::size_t).is_null() {
+ let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len();
+ buf.set_len(len);
+ buf.shrink_to_fit();
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ } else {
+ let error = io::Error::last_os_error();
+ if error.raw_os_error() != Some(libc::ERANGE) {
+ return Err(error);
+ }
+ }
+ n *= 2;
}
}
}
}
pub fn split_paths<'a>(unparsed: &'a OsStr) -> SplitPaths<'a> {
+ fn bytes_to_path(b: &[u8]) -> PathBuf {
+ PathBuf::from(<OsStr as OsStrExt>::from_bytes(b))
+ }
fn is_colon(b: &u8) -> bool { *b == b':' }
let unparsed = unparsed.as_bytes();
SplitPaths {
iter: unparsed.split(is_colon as fn(&u8) -> bool)
- .map(bytes2path as fn(&'a [u8]) -> PathBuf)
+ .map(bytes_to_path as fn(&'a [u8]) -> PathBuf)
}
}
::fs::read_link("/proc/curproc/file")
}
-#[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
+#[cfg(any(target_os = "bitrig", target_os = "netbsd", target_os = "openbsd"))]
pub fn current_exe() -> io::Result<PathBuf> {
use sync::StaticMutex;
static LOCK: StaticMutex = StaticMutex::new();
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub fn args() -> Args {
use rt;
}
pub fn temp_dir() -> PathBuf {
- getenv("TMPDIR".as_ref()).map(os2path).unwrap_or_else(|| {
+ getenv("TMPDIR".as_ref()).map(PathBuf::from).unwrap_or_else(|| {
if cfg!(target_os = "android") {
PathBuf::from("/data/local/tmp")
} else {
pub fn home_dir() -> Option<PathBuf> {
return getenv("HOME".as_ref()).or_else(|| unsafe {
fallback()
- }).map(os2path);
+ }).map(PathBuf::from);
#[cfg(any(target_os = "android",
target_os = "ios"))]
pub fn raw(&self) -> libc::c_int { self.0.raw() }
pub fn fd(&self) -> &FileDesc { &self.0 }
+ pub fn into_fd(self) -> FileDesc { self.0 }
}
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod imp {
use sys_common::stack;
#[cfg(not(any(target_os = "linux",
target_os = "macos",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd")))]
mod imp {
use libc;
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod os {
use libc;
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
pub fn set_name(name: &str) {
extern {
pub fn sleep(dur: Duration) {
let mut ts = libc::timespec {
- tv_sec: dur.secs() as libc::time_t,
- tv_nsec: dur.extra_nanos() as libc::c_long,
+ tv_sec: dur.as_secs() as libc::time_t,
+ tv_nsec: dur.subsec_nanos() as libc::c_long,
};
// If we're awoken with a signal then the return value will be -1 and
#[cfg(all(not(target_os = "linux"),
not(target_os = "macos"),
not(target_os = "bitrig"),
+ not(target_os = "netbsd"),
not(target_os = "openbsd")))]
pub mod guard {
- pub unsafe fn current() -> usize { 0 }
- pub unsafe fn main() -> usize { 0 }
- pub unsafe fn init() {}
+ use prelude::v1::*;
+
+ pub unsafe fn current() -> Option<usize> { None }
+ pub unsafe fn init() -> Option<usize> { None }
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
#[allow(unused_imports)]
pub mod guard {
+ use prelude::v1::*;
+
use libc::{self, pthread_t};
use libc::funcs::posix88::mman::mmap;
use libc::consts::os::posix88::{PROT_NONE,
use super::{pthread_self, pthread_attr_destroy};
use sys::os;
- // These are initialized in init() and only read from after
- static mut GUARD_PAGE: usize = 0;
-
#[cfg(any(target_os = "macos",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
- unsafe fn get_stack_start() -> *mut libc::c_void {
- current() as *mut libc::c_void
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ current().map(|s| s as *mut libc::c_void)
}
#[cfg(any(target_os = "linux", target_os = "android"))]
- unsafe fn get_stack_start() -> *mut libc::c_void {
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ use super::pthread_attr_init;
+
+ let mut ret = None;
let mut attr: libc::pthread_attr_t = mem::zeroed();
- assert_eq!(pthread_getattr_np(pthread_self(), &mut attr), 0);
- let mut stackaddr = ptr::null_mut();
- let mut stacksize = 0;
- assert_eq!(pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
+ assert_eq!(pthread_attr_init(&mut attr), 0);
+ if pthread_getattr_np(pthread_self(), &mut attr) == 0 {
+ let mut stackaddr = ptr::null_mut();
+ let mut stacksize = 0;
+ assert_eq!(pthread_attr_getstack(&attr, &mut stackaddr,
+ &mut stacksize), 0);
+ ret = Some(stackaddr);
+ }
assert_eq!(pthread_attr_destroy(&mut attr), 0);
- stackaddr
+ ret
}
- pub unsafe fn init() {
+ pub unsafe fn init() -> Option<usize> {
let psize = os::page_size();
- let mut stackaddr = get_stack_start();
+ let mut stackaddr = match get_stack_start() {
+ Some(addr) => addr,
+ None => return None,
+ };
// Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The
let offset = if cfg!(target_os = "linux") {2} else {1};
- GUARD_PAGE = stackaddr as usize + offset * psize;
- }
-
- pub unsafe fn main() -> usize {
- GUARD_PAGE
+ Some(stackaddr as usize + offset * psize)
}
#[cfg(target_os = "macos")]
- pub unsafe fn current() -> usize {
+ pub unsafe fn current() -> Option<usize> {
extern {
fn pthread_get_stackaddr_np(thread: pthread_t) -> *mut libc::c_void;
fn pthread_get_stacksize_np(thread: pthread_t) -> libc::size_t;
}
- (pthread_get_stackaddr_np(pthread_self()) as libc::size_t -
- pthread_get_stacksize_np(pthread_self())) as usize
+ Some((pthread_get_stackaddr_np(pthread_self()) as libc::size_t -
+ pthread_get_stacksize_np(pthread_self())) as usize)
}
- #[cfg(any(target_os = "openbsd", target_os = "bitrig"))]
- pub unsafe fn current() -> usize {
+ #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "bitrig"))]
+ pub unsafe fn current() -> Option<usize> {
#[repr(C)]
struct stack_t {
ss_sp: *mut libc::c_void,
assert_eq!(pthread_stackseg_np(pthread_self(), &mut current_stack), 0);
let extra = if cfg!(target_os = "bitrig") {3} else {1} * os::page_size();
- if pthread_main_np() == 1 {
+ Some(if pthread_main_np() == 1 {
// main thread
current_stack.ss_sp as usize - current_stack.ss_size as usize + extra
} else {
// new thread
current_stack.ss_sp as usize - current_stack.ss_size as usize
- }
+ })
}
#[cfg(any(target_os = "linux", target_os = "android"))]
- pub unsafe fn current() -> usize {
+ pub unsafe fn current() -> Option<usize> {
+ use super::pthread_attr_init;
+
+ let mut ret = None;
let mut attr: libc::pthread_attr_t = mem::zeroed();
- assert_eq!(pthread_getattr_np(pthread_self(), &mut attr), 0);
- let mut guardsize = 0;
- assert_eq!(pthread_attr_getguardsize(&attr, &mut guardsize), 0);
- if guardsize == 0 {
- panic!("there is no guard page");
+ assert_eq!(pthread_attr_init(&mut attr), 0);
+ if pthread_getattr_np(pthread_self(), &mut attr) == 0 {
+ let mut guardsize = 0;
+ assert_eq!(pthread_attr_getguardsize(&attr, &mut guardsize), 0);
+ if guardsize == 0 {
+ panic!("there is no guard page");
+ }
+ let mut stackaddr = ptr::null_mut();
+ let mut size = 0;
+ assert_eq!(pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0);
+
+ ret = Some(stackaddr as usize + guardsize as usize);
}
- let mut stackaddr = ptr::null_mut();
- let mut size = 0;
- assert_eq!(pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0);
assert_eq!(pthread_attr_destroy(&mut attr), 0);
-
- stackaddr as usize + guardsize as usize
+ return ret
}
#[cfg(any(target_os = "linux", target_os = "android"))]
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd"))]
type pthread_key_t = ::libc::c_int;
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd")))]
type pthread_key_t = ::libc::c_uint;
// OpenBSD provide it via libc
#[cfg(not(any(target_os = "android",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_env = "musl")))]
#[link(name = "rt")]
#![allow(bad_style, dead_code, overflowing_literals)]
use libc;
+use libc::{c_uint, c_ulong};
+use libc::{DWORD, BOOL, BOOLEAN, ERROR_CALL_NOT_IMPLEMENTED, LPVOID, HANDLE};
+use libc::{LPCWSTR, LONG};
pub use self::GET_FILEEX_INFO_LEVELS::*;
pub use self::FILE_INFO_BY_HANDLE_CLASS::*;
pub const ERROR_NO_MORE_FILES: libc::DWORD = 18;
pub const TOKEN_READ: libc::DWORD = 0x20008;
pub const FILE_FLAG_OPEN_REPARSE_POINT: libc::DWORD = 0x00200000;
+pub const FILE_FLAG_BACKUP_SEMANTICS: libc::DWORD = 0x02000000;
pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: usize = 16 * 1024;
pub const FSCTL_GET_REPARSE_POINT: libc::DWORD = 0x900a8;
pub const IO_REPARSE_TAG_SYMLINK: libc::DWORD = 0xa000000c;
+pub const IO_REPARSE_TAG_MOUNT_POINT: libc::DWORD = 0xa0000003;
+pub const FSCTL_SET_REPARSE_POINT: libc::DWORD = 0x900a4;
+pub const FSCTL_DELETE_REPARSE_POINT: libc::DWORD = 0x900ac;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: libc::DWORD = 0x1;
pub const STD_OUTPUT_HANDLE: libc::DWORD = -11i32 as libc::DWORD;
pub const STD_ERROR_HANDLE: libc::DWORD = -12i32 as libc::DWORD;
+pub const HANDLE_FLAG_INHERIT: libc::DWORD = 0x00000001;
+
+pub const PROGRESS_CONTINUE: libc::DWORD = 0;
+pub const PROGRESS_CANCEL: libc::DWORD = 1;
+pub const PROGRESS_STOP: libc::DWORD = 2;
+pub const PROGRESS_QUIET: libc::DWORD = 3;
+
+pub const TOKEN_ADJUST_PRIVILEGES: libc::DWORD = 0x0020;
+pub const SE_PRIVILEGE_ENABLED: libc::DWORD = 2;
+
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct WSADATA {
pub PathBuffer: libc::WCHAR,
}
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PSRWLOCK = *mut SRWLOCK;
+pub type ULONG = c_ulong;
+pub type ULONG_PTR = c_ulong;
+pub type LPBOOL = *mut BOOL;
+
+pub type LPPROGRESS_ROUTINE = ::option::Option<unsafe extern "system" fn(
+ TotalFileSize: libc::LARGE_INTEGER,
+ TotalBytesTransferred: libc::LARGE_INTEGER,
+ StreamSize: libc::LARGE_INTEGER,
+ StreamBytesTransferred: libc::LARGE_INTEGER,
+ dwStreamNumber: DWORD,
+ dwCallbackReason: DWORD,
+ hSourceFile: HANDLE,
+ hDestinationFile: HANDLE,
+ lpData: LPVOID,
+) -> DWORD>;
+
+#[repr(C)]
+pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
+#[repr(C)]
+pub struct SRWLOCK { pub ptr: LPVOID }
+#[repr(C)]
+pub struct CRITICAL_SECTION {
+ CriticalSectionDebug: LPVOID,
+ LockCount: LONG,
+ RecursionCount: LONG,
+ OwningThread: HANDLE,
+ LockSemaphore: HANDLE,
+ SpinCount: ULONG_PTR
+}
+
+pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
+ ptr: 0 as *mut _,
+};
+pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
+
+#[repr(C)]
+pub struct LUID {
+ pub LowPart: libc::DWORD,
+ pub HighPart: libc::c_long,
+}
+
+pub type PLUID = *mut LUID;
+
+#[repr(C)]
+pub struct TOKEN_PRIVILEGES {
+ pub PrivilegeCount: libc::DWORD,
+ pub Privileges: [LUID_AND_ATTRIBUTES; 1],
+}
+
+pub type PTOKEN_PRIVILEGES = *mut TOKEN_PRIVILEGES;
+
+#[repr(C)]
+pub struct LUID_AND_ATTRIBUTES {
+ pub Luid: LUID,
+ pub Attributes: libc::DWORD,
+}
+
+#[repr(C)]
+pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
+ pub ReparseTag: libc::DWORD,
+ pub ReparseDataLength: libc::DWORD,
+ pub Reserved: libc::WORD,
+ pub ReparseTargetLength: libc::WORD,
+ pub ReparseTargetMaximumLength: libc::WORD,
+ pub Reserved1: libc::WORD,
+ pub ReparseTarget: libc::WCHAR,
+}
+
+
#[link(name = "ws2_32")]
+#[link(name = "userenv")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
-}
-
-pub mod compat {
- use prelude::v1::*;
-
- use ffi::CString;
- use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
- use sync::atomic::{AtomicUsize, Ordering};
- extern "system" {
- fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
- fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
- }
-
- fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str,
- fallback: usize) -> usize {
- let mut module: Vec<u16> = module.utf16_units().collect();
- module.push(0);
- let symbol = CString::new(symbol).unwrap();
- let func = unsafe {
- let handle = GetModuleHandleW(module.as_ptr());
- GetProcAddress(handle, symbol.as_ptr()) as usize
- };
- let value = if func == 0 {fallback} else {func};
- ptr.store(value, Ordering::SeqCst);
- value
- }
-
- /// Macro for creating a compatibility fallback for a Windows function
- ///
- /// # Examples
- /// ```
- /// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
- /// // Fallback implementation
- /// })
- /// ```
- ///
- /// Note that arguments unused by the fallback implementation should not be
- /// called `_` as they are used to be passed to the real function if
- /// available.
- macro_rules! compat_fn {
- ($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
- -> $rettype:ty { $fallback:expr }) => (
- #[inline(always)]
- pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
- use sync::atomic::{AtomicUsize, Ordering};
- use mem;
-
- static PTR: AtomicUsize = AtomicUsize::new(0);
-
- fn load() -> usize {
- ::sys::c::compat::store_func(&PTR,
- stringify!($module),
- stringify!($symbol),
- fallback as usize)
- }
-
- extern "system" fn fallback($($argname: $argtype),*)
- -> $rettype { $fallback }
-
- let addr = match PTR.load(Ordering::SeqCst) {
- 0 => load(),
- n => n,
- };
- let f: extern "system" fn($($argtype),*) -> $rettype =
- mem::transmute(addr);
- f($($argname),*)
- }
- )
- }
-
- /// Compatibility layer for functions in `kernel32.dll`
- ///
- /// Latest versions of Windows this is needed for:
- ///
- /// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
- /// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
- pub mod kernel32 {
- use libc::c_uint;
- use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
- use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
- use sys::c::SetLastError;
-
- compat_fn! {
- kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
- _lpTargetFileName: LPCWSTR,
- _dwFlags: DWORD) -> BOOLEAN {
- unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 }
- }
- }
-
- compat_fn! {
- kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
- _lpszFilePath: LPCWSTR,
- _cchFilePath: DWORD,
- _dwFlags: DWORD) -> DWORD {
- unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 }
- }
- }
-
- compat_fn! {
- kernel32::SetThreadErrorMode(_dwNewMode: DWORD, _lpOldMode: *mut DWORD) -> c_uint {
- unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 }
- }
- }
- }
-}
+ pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+ pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+ pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOLEAN;
+ pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+ pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
-extern "system" {
// FIXME - pInputControl should be PCONSOLE_READCONSOLE_CONTROL
pub fn ReadConsoleW(hConsoleInput: libc::HANDLE,
lpBuffer: libc::LPVOID,
lpCreationTime: *const libc::FILETIME,
lpLastAccessTime: *const libc::FILETIME,
lpLastWriteTime: *const libc::FILETIME) -> libc::BOOL;
- pub fn SetFileInformationByHandle(hFile: libc::HANDLE,
- FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
- lpFileInformation: libc::LPVOID,
- dwBufferSize: libc::DWORD) -> libc::BOOL;
pub fn GetTempPathW(nBufferLength: libc::DWORD,
lpBuffer: libc::LPCWSTR) -> libc::DWORD;
pub fn OpenProcessToken(ProcessHandle: libc::HANDLE,
pub fn SwitchToThread() -> libc::BOOL;
pub fn Sleep(dwMilliseconds: libc::DWORD);
pub fn GetProcessId(handle: libc::HANDLE) -> libc::DWORD;
-}
-
-#[link(name = "userenv")]
-extern "system" {
pub fn GetUserProfileDirectoryW(hToken: libc::HANDLE,
lpProfileDir: libc::LPCWSTR,
lpcchSize: *mut libc::DWORD) -> libc::BOOL;
+ pub fn SetHandleInformation(hObject: libc::HANDLE,
+ dwMask: libc::DWORD,
+ dwFlags: libc::DWORD) -> libc::BOOL;
+ pub fn CopyFileExW(lpExistingFileName: libc::LPCWSTR,
+ lpNewFileName: libc::LPCWSTR,
+ lpProgressRoutine: LPPROGRESS_ROUTINE,
+ lpData: libc::LPVOID,
+ pbCancel: LPBOOL,
+ dwCopyFlags: libc::DWORD) -> libc::BOOL;
+ pub fn LookupPrivilegeValueW(lpSystemName: libc::LPCWSTR,
+ lpName: libc::LPCWSTR,
+ lpLuid: PLUID) -> libc::BOOL;
+ pub fn AdjustTokenPrivileges(TokenHandle: libc::HANDLE,
+ DisableAllPrivileges: libc::BOOL,
+ NewState: PTOKEN_PRIVILEGES,
+ BufferLength: libc::DWORD,
+ PreviousState: PTOKEN_PRIVILEGES,
+ ReturnLength: *mut libc::DWORD) -> libc::BOOL;
+}
+
+// Functions that aren't available on Windows XP, but we still use them and just
+// provide some form of a fallback implementation.
+compat_fn! {
+ kernel32:
+
+ pub fn CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
+ _lpTargetFileName: LPCWSTR,
+ _dwFlags: DWORD) -> BOOLEAN {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
+ }
+ pub fn GetFinalPathNameByHandleW(_hFile: HANDLE,
+ _lpszFilePath: LPCWSTR,
+ _cchFilePath: DWORD,
+ _dwFlags: DWORD) -> DWORD {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
+ }
+ pub fn SetThreadErrorMode(_dwNewMode: DWORD,
+ _lpOldMode: *mut DWORD) -> c_uint {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
+ }
+ pub fn SetThreadStackGuarantee(_size: *mut c_ulong) -> BOOL {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
+ }
+ pub fn SetFileInformationByHandle(_hFile: HANDLE,
+ _FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
+ _lpFileInformation: LPVOID,
+ _dwBufferSize: DWORD) -> BOOL {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0
+ }
+ pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
+ SRWLock: PSRWLOCK,
+ dwMilliseconds: DWORD,
+ Flags: ULONG) -> BOOL {
+ panic!("condition variables not available")
+ }
+ pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE)
+ -> () {
+ panic!("condition variables not available")
+ }
+ pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE)
+ -> () {
+ panic!("condition variables not available")
+ }
+ pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> () {
+ panic!("rwlocks not available")
+ }
+ pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK) -> () {
+ panic!("rwlocks not available")
+ }
+ pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK) -> () {
+ panic!("rwlocks not available")
+ }
+ pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK) -> () {
+ panic!("rwlocks not available")
+ }
+ pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN {
+ panic!("rwlocks not available")
+ }
+ pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN {
+ panic!("rwlocks not available")
+ }
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A "compatibility layer" for spanning XP and Windows 7
+//!
+//! The standard library currently binds many functions that are not available
+//! on Windows XP, but we would also like to support building executables that
+//! run on XP. To do this we specify all non-XP APIs as having a fallback
+//! implementation to do something reasonable.
+//!
+//! This dynamic runtime detection of whether a function is available is
+//! implemented with `GetModuleHandle` and `GetProcAddress` paired with a
+//! static-per-function which caches the result of the first check. In this
+//! manner we pay a semi-large one-time cost up front for detecting whether a
+//! function is available but afterwards it's just a load and a jump.
+
+use prelude::v1::*;
+
+use ffi::CString;
+use libc::{LPVOID, LPCWSTR, HMODULE, LPCSTR};
+use sync::atomic::{AtomicUsize, Ordering};
+
+extern "system" {
+ fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
+ fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
+}
+
+pub fn lookup(module: &str, symbol: &str) -> Option<usize> {
+ let mut module: Vec<u16> = module.utf16_units().collect();
+ module.push(0);
+ let symbol = CString::new(symbol).unwrap();
+ unsafe {
+ let handle = GetModuleHandleW(module.as_ptr());
+ match GetProcAddress(handle, symbol.as_ptr()) as usize {
+ 0 => None,
+ n => Some(n),
+ }
+ }
+}
+
+pub fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str,
+ fallback: usize) -> usize {
+ let value = lookup(module, symbol).unwrap_or(fallback);
+ ptr.store(value, Ordering::SeqCst);
+ value
+}
+
+macro_rules! compat_fn {
+ ($module:ident: $(
+ pub fn $symbol:ident($($argname:ident: $argtype:ty),*)
+ -> $rettype:ty {
+ $($body:expr);*
+ }
+ )*) => ($(
+ #[allow(unused_variables)]
+ pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
+ use sync::atomic::{AtomicUsize, Ordering};
+ use mem;
+ type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
+
+ static PTR: AtomicUsize = AtomicUsize::new(0);
+
+ fn load() -> usize {
+ ::sys::compat::store_func(&PTR,
+ stringify!($module),
+ stringify!($symbol),
+ fallback as usize)
+ }
+ unsafe extern "system" fn fallback($($argname: $argtype),*)
+ -> $rettype {
+ $($body);*
+ }
+
+ let addr = match PTR.load(Ordering::SeqCst) {
+ 0 => load(),
+ n => n,
+ };
+ mem::transmute::<usize, F>(addr)($($argname),*)
+ }
+ )*)
+}
use cell::UnsafeCell;
use libc::{self, DWORD};
-use sys::os;
+use sys::c;
use sys::mutex::{self, Mutex};
-use sys::sync as ffi;
+use sys::os;
use time::Duration;
-pub struct Condvar { inner: UnsafeCell<ffi::CONDITION_VARIABLE> }
+pub struct Condvar { inner: UnsafeCell<c::CONDITION_VARIABLE> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
impl Condvar {
pub const fn new() -> Condvar {
- Condvar { inner: UnsafeCell::new(ffi::CONDITION_VARIABLE_INIT) }
+ Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = ffi::SleepConditionVariableSRW(self.inner.get(),
- mutex::raw(mutex),
- libc::INFINITE,
- 0);
+ let r = c::SleepConditionVariableSRW(self.inner.get(),
+ mutex::raw(mutex),
+ libc::INFINITE,
+ 0);
debug_assert!(r != 0);
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let r = ffi::SleepConditionVariableSRW(self.inner.get(),
- mutex::raw(mutex),
- super::dur2timeout(dur),
- 0);
+ let r = c::SleepConditionVariableSRW(self.inner.get(),
+ mutex::raw(mutex),
+ super::dur2timeout(dur),
+ 0);
if r == 0 {
const ERROR_TIMEOUT: DWORD = 0x5B4;
debug_assert_eq!(os::errno() as usize, ERROR_TIMEOUT as usize);
#[inline]
pub unsafe fn notify_one(&self) {
- ffi::WakeConditionVariable(self.inner.get())
+ c::WakeConditionVariable(self.inner.get())
}
#[inline]
pub unsafe fn notify_all(&self) {
- ffi::WakeAllConditionVariable(self.inner.get())
+ c::WakeAllConditionVariable(self.inner.get())
}
pub unsafe fn destroy(&self) {
use fs;
use os::windows::raw;
use net;
-use sys_common::{self, AsInner, FromInner};
+use sys_common::{self, AsInner, FromInner, IntoInner};
use sys;
/// Raw HANDLEs.
unsafe fn from_raw_handle(handle: RawHandle) -> Self;
}
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw `HANDLE`.
+#[unstable(feature = "into_raw_os", reason = "recently added API")]
+pub trait IntoRawHandle {
+ /// Consumes this object, returning the raw underlying handle.
+ ///
+ /// This function **transfers ownership** of the underlying handle to the
+ /// caller. Callers are then the unique owners of the handle and must close
+ /// it once it's no longer needed.
+ fn into_raw_handle(self) -> RawHandle;
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRawHandle for fs::File {
fn as_raw_handle(&self) -> RawHandle {
}
}
+impl IntoRawHandle for fs::File {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw() as *mut _
+ }
+}
+
/// Extract raw sockets.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait AsRawSocket {
unsafe fn from_raw_socket(sock: RawSocket) -> Self;
}
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw `SOCKET`.
+#[unstable(feature = "into_raw_os", reason = "recently added API")]
+pub trait IntoRawSocket {
+ /// Consumes this object, returning the raw underlying socket.
+ ///
+ /// This function **transfers ownership** of the underlying socket to the
+ /// caller. Callers are then the unique owners of the socket and must close
+ /// it once it's no longer needed.
+ fn into_raw_socket(self) -> RawSocket;
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRawSocket for net::TcpStream {
fn as_raw_socket(&self) -> RawSocket {
net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(sock))
}
}
+
+impl IntoRawSocket for net::TcpStream {
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner()
+ }
+}
+
+impl IntoRawSocket for net::TcpListener {
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner()
+ }
+}
+
+impl IntoRawSocket for net::UdpSocket {
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner()
+ }
+}
#![stable(feature = "process_extensions", since = "1.2.0")]
-use os::windows::io::{FromRawHandle, RawHandle, AsRawHandle};
+use os::windows::io::{FromRawHandle, RawHandle, AsRawHandle, IntoRawHandle};
use process;
use sys;
-use sys_common::{AsInner, FromInner};
+use sys_common::{AsInner, FromInner, IntoInner};
#[stable(feature = "process_extensions", since = "1.2.0")]
impl FromRawHandle for process::Stdio {
}
}
+impl IntoRawHandle for process::Child {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw() as *mut _
+ }
+}
+
#[stable(feature = "process_extensions", since = "1.2.0")]
impl AsRawHandle for process::ChildStdin {
fn as_raw_handle(&self) -> RawHandle {
self.as_inner().handle().raw() as *mut _
}
}
+
+impl IntoRawHandle for process::ChildStdin {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw() as *mut _
+ }
+}
+
+impl IntoRawHandle for process::ChildStdout {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw() as *mut _
+ }
+}
+
+impl IntoRawHandle for process::ChildStderr {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw() as *mut _
+ }
+}
pub struct FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA,
- is_symlink: bool,
+ reparse_tag: libc::DWORD,
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum FileType {
- Dir, File, Symlink, ReparsePoint
+ Dir, File, Symlink, ReparsePoint, MountPoint,
}
pub struct ReadDir {
pub fn file_type(&self) -> io::Result<FileType> {
Ok(FileType::new(self.data.dwFileAttributes,
- self.data.dwReserved0 == c::IO_REPARSE_TAG_SYMLINK))
+ /* reparse_tag = */ self.data.dwReserved0))
}
pub fn metadata(&self) -> io::Result<FileAttr> {
nFileSizeHigh: self.data.nFileSizeHigh,
nFileSizeLow: self.data.nFileSizeLow,
},
- is_symlink: self.data.dwReserved0 == c::IO_REPARSE_TAG_SYMLINK,
+ reparse_tag: self.data.dwReserved0,
})
}
}
}
impl File {
- fn open_reparse_point(path: &Path) -> io::Result<File> {
+ fn open_reparse_point(path: &Path, write: bool) -> io::Result<File> {
let mut opts = OpenOptions::new();
- opts.read(true);
- opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT);
+ opts.read(!write);
+ opts.write(write);
+ opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT |
+ c::FILE_FLAG_BACKUP_SEMANTICS);
File::open(path, &opts)
}
nFileSizeHigh: info.nFileSizeHigh,
nFileSizeLow: info.nFileSizeLow,
},
- is_symlink: false,
+ reparse_tag: 0,
};
if attr.is_reparse_point() {
- attr.is_symlink = self.is_symlink();
+ let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ if let Ok((_, buf)) = self.reparse_point(&mut b) {
+ attr.reparse_tag = buf.ReparseTag;
+ }
}
Ok(attr)
}
pub fn handle(&self) -> &Handle { &self.handle }
- fn is_symlink(&self) -> bool {
- self.readlink().is_ok()
- }
-
- fn readlink(&self) -> io::Result<PathBuf> {
- let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- let mut bytes = 0;
+ pub fn into_handle(self) -> Handle { self.handle }
+ fn reparse_point<'a>(&self,
+ space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE])
+ -> io::Result<(libc::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
unsafe {
+ let mut bytes = 0;
try!(cvt({
c::DeviceIoControl(self.handle.raw(),
c::FSCTL_GET_REPARSE_POINT,
&mut bytes,
0 as *mut _)
}));
- let buf: *const c::REPARSE_DATA_BUFFER = space.as_ptr() as *const _;
- if (*buf).ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
- return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
- }
+ Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
+ }
+ }
+
+ fn readlink(&self) -> io::Result<PathBuf> {
+ let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let (_bytes, buf) = try!(self.reparse_point(&mut space));
+ if buf.ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
+ return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
+ }
+
+ unsafe {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
- &(*buf).rest as *const _ as *const _;
+ &buf.rest as *const _ as *const _;
let path_buffer = &(*info).PathBuffer as *const _ as *const u16;
let subst_off = (*info).SubstituteNameOffset / 2;
let subst_ptr = path_buffer.offset(subst_off as isize);
Ok(PathBuf::from(OsString::from_wide(subst)))
}
}
-
- pub fn into_handle(self) -> Handle { self.handle }
}
impl FromInner<libc::HANDLE> for File {
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- // FIXME(#24570): add more info here (e.g. path, mode)
- f.debug_struct("File")
- .field("handle", &self.handle.raw())
- .finish()
+ // FIXME(#24570): add more info here (e.g. mode)
+ let mut b = f.debug_struct("File");
+ b.field("handle", &self.handle.raw());
+ if let Ok(path) = get_path(&self) {
+ b.field("path", &path);
+ }
+ b.finish()
}
}
pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
pub fn file_type(&self) -> FileType {
- FileType::new(self.data.dwFileAttributes, self.is_symlink)
+ FileType::new(self.data.dwFileAttributes, self.reparse_tag)
}
pub fn created(&self) -> u64 { self.to_u64(&self.data.ftCreationTime) }
}
impl FileType {
- fn new(attrs: libc::DWORD, is_symlink: bool) -> FileType {
+ fn new(attrs: libc::DWORD, reparse_tag: libc::DWORD) -> FileType {
if attrs & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
- if is_symlink {
- FileType::Symlink
- } else {
- FileType::ReparsePoint
+ match reparse_tag {
+ c::IO_REPARSE_TAG_SYMLINK => FileType::Symlink,
+ c::IO_REPARSE_TAG_MOUNT_POINT => FileType::MountPoint,
+ _ => FileType::ReparsePoint,
}
} else if attrs & c::FILE_ATTRIBUTE_DIRECTORY != 0 {
FileType::Dir
pub fn is_dir(&self) -> bool { *self == FileType::Dir }
pub fn is_file(&self) -> bool { *self == FileType::File }
- pub fn is_symlink(&self) -> bool { *self == FileType::Symlink }
+ pub fn is_symlink(&self) -> bool {
+ *self == FileType::Symlink || *self == FileType::MountPoint
+ }
}
impl DirBuilder {
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
- let file = try!(File::open_reparse_point(p));
+ let file = try!(File::open_reparse_point(p, false));
file.readlink()
}
}
pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
- use sys::c::compat::kernel32::CreateSymbolicLinkW;
let src = to_utf16(src);
let dst = to_utf16(dst);
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
try!(cvt(unsafe {
- CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
+ c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
}));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let attr = try!(lstat(p));
- if attr.data.dwFileAttributes & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
- let opts = OpenOptions::new();
+
+ // If this is a reparse point, then we need to reopen the file to get the
+ // actual destination. We also pass the FILE_FLAG_BACKUP_SEMANTICS flag to
+ // ensure that we can open directories (this path may be a directory
+ // junction). Once the file is opened we ask the opened handle what its
+ // metadata information is.
+ if attr.is_reparse_point() {
+ let mut opts = OpenOptions::new();
+ opts.flags_and_attributes(c::FILE_FLAG_BACKUP_SEMANTICS);
let file = try!(File::open(p, &opts));
file.file_attr()
} else {
c::GetFileExInfoStandard,
&mut attr.data as *mut _ as *mut _)));
if attr.is_reparse_point() {
- attr.is_symlink = File::open_reparse_point(p).map(|f| {
- f.is_symlink()
- }).unwrap_or(false);
+ attr.reparse_tag = File::open_reparse_point(p, false).and_then(|f| {
+ let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ f.reparse_point(&mut b).map(|(_, b)| b.ReparseTag)
+ }).unwrap_or(0);
}
Ok(attr)
}
Ok(())
}
-pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
- use sys::c::compat::kernel32::GetFinalPathNameByHandleW;
-
- let mut opts = OpenOptions::new();
- opts.read(true);
- let f = try!(File::open(p, &opts));
+fn get_path(f: &File) -> io::Result<PathBuf> {
super::fill_utf16_buf(|buf, sz| unsafe {
- GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
- libc::VOLUME_NAME_DOS)
+ c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
+ libc::VOLUME_NAME_DOS)
}, |buf| {
PathBuf::from(OsString::from_wide(buf))
})
}
+
+pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
+ let mut opts = OpenOptions::new();
+ opts.read(true);
+ let f = try!(File::open(p, &opts));
+ get_path(&f)
+}
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ unsafe extern "system" fn callback(
+ _TotalFileSize: libc::LARGE_INTEGER,
+ TotalBytesTransferred: libc::LARGE_INTEGER,
+ _StreamSize: libc::LARGE_INTEGER,
+ _StreamBytesTransferred: libc::LARGE_INTEGER,
+ _dwStreamNumber: libc::DWORD,
+ _dwCallbackReason: libc::DWORD,
+ _hSourceFile: HANDLE,
+ _hDestinationFile: HANDLE,
+ lpData: libc::LPVOID,
+ ) -> libc::DWORD {
+ *(lpData as *mut i64) = TotalBytesTransferred;
+ c::PROGRESS_CONTINUE
+ }
+ let pfrom = to_utf16(from);
+ let pto = to_utf16(to);
+ let mut size = 0i64;
+ try!(cvt(unsafe {
+ c::CopyFileExW(pfrom.as_ptr(), pto.as_ptr(), Some(callback),
+ &mut size as *mut _ as *mut _, ptr::null_mut(), 0)
+ }));
+ Ok(size as u64)
+}
+
+#[test]
+fn directory_junctions_are_directories() {
+ use ffi::OsStr;
+ use env;
+ use rand::{self, StdRng, Rng};
+
+ macro_rules! t {
+ ($e:expr) => (match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{} failed with: {}", stringify!($e), e),
+ })
+ }
+
+ let d = DirBuilder::new();
+ let p = env::temp_dir();
+ let mut r = rand::thread_rng();
+ let ret = p.join(&format!("rust-{}", r.next_u32()));
+ let foo = ret.join("foo");
+ let bar = ret.join("bar");
+ t!(d.mkdir(&ret));
+ t!(d.mkdir(&foo));
+ t!(d.mkdir(&bar));
+
+ t!(create_junction(&bar, &foo));
+ let metadata = stat(&bar);
+ t!(delete_junction(&bar));
+
+ t!(rmdir(&foo));
+ t!(rmdir(&bar));
+ t!(rmdir(&ret));
+
+ let metadata = t!(metadata);
+ assert!(metadata.file_type().is_dir());
+
+ // Creating a directory junction on windows involves dealing with reparse
+ // points and the DeviceIoControl function, and this code is a skeleton of
+ // what can be found here:
+ //
+ // http://www.flexhex.com/docs/articles/hard-links.phtml
+ fn create_junction(src: &Path, dst: &Path) -> io::Result<()> {
+ let f = try!(opendir(src, true));
+ let h = f.handle().raw();
+
+ unsafe {
+ let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let mut db = data.as_mut_ptr()
+ as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
+ let mut buf = &mut (*db).ReparseTarget as *mut _;
+ let mut i = 0;
+ let v = br"\??\";
+ let v = v.iter().map(|x| *x as u16);
+ for c in v.chain(dst.as_os_str().encode_wide()) {
+ *buf.offset(i) = c;
+ i += 1;
+ }
+ *buf.offset(i) = 0;
+ i += 1;
+ (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
+ (*db).ReparseTargetMaximumLength = (i * 2) as libc::WORD;
+ (*db).ReparseTargetLength = ((i - 1) * 2) as libc::WORD;
+ (*db).ReparseDataLength =
+ (*db).ReparseTargetLength as libc::DWORD + 12;
+
+ let mut ret = 0;
+ cvt(c::DeviceIoControl(h as *mut _,
+ c::FSCTL_SET_REPARSE_POINT,
+ data.as_ptr() as *mut _,
+ (*db).ReparseDataLength + 8,
+ 0 as *mut _, 0,
+ &mut ret,
+ 0 as *mut _)).map(|_| ())
+ }
+ }
+
+ fn opendir(p: &Path, write: bool) -> io::Result<File> {
+ unsafe {
+ let mut token = 0 as *mut _;
+ let mut tp: c::TOKEN_PRIVILEGES = mem::zeroed();
+ try!(cvt(c::OpenProcessToken(c::GetCurrentProcess(),
+ c::TOKEN_ADJUST_PRIVILEGES,
+ &mut token)));
+ let name: &OsStr = if write {
+ "SeRestorePrivilege".as_ref()
+ } else {
+ "SeBackupPrivilege".as_ref()
+ };
+ let name = name.encode_wide().chain(Some(0)).collect::<Vec<_>>();
+ try!(cvt(c::LookupPrivilegeValueW(0 as *const _,
+ name.as_ptr(),
+ &mut tp.Privileges[0].Luid)));
+ tp.PrivilegeCount = 1;
+ tp.Privileges[0].Attributes = c::SE_PRIVILEGE_ENABLED;
+ let size = mem::size_of::<c::TOKEN_PRIVILEGES>() as libc::DWORD;
+ try!(cvt(c::AdjustTokenPrivileges(token, libc::FALSE, &mut tp, size,
+ 0 as *mut _, 0 as *mut _)));
+ try!(cvt(libc::CloseHandle(token)));
+
+ File::open_reparse_point(p, write)
+ }
+ }
+
+ fn delete_junction(p: &Path) -> io::Result<()> {
+ unsafe {
+ let f = try!(opendir(p, true));
+ let h = f.handle().raw();
+ let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let mut db = data.as_mut_ptr()
+ as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
+ (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
+ let mut bytes = 0;
+ cvt(c::DeviceIoControl(h as *mut _,
+ c::FSCTL_DELETE_REPARSE_POINT,
+ data.as_ptr() as *mut _,
+ (*db).ReparseDataLength + 8,
+ 0 as *mut _, 0,
+ &mut bytes,
+ 0 as *mut _)).map(|_| ())
+ }
+ }
+}
use path::PathBuf;
use time::Duration;
+#[macro_use] pub mod compat;
+
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod process;
pub mod rwlock;
pub mod stack_overflow;
-pub mod sync;
pub mod thread;
pub mod thread_local;
pub mod time;
// * Nanosecond precision is rounded up
// * Greater than u32::MAX milliseconds (50 days) is rounded up to INFINITE
// (never time out).
- dur.secs().checked_mul(1000).and_then(|ms| {
- ms.checked_add((dur.extra_nanos() as u64) / 1_000_000)
+ dur.as_secs().checked_mul(1000).and_then(|ms| {
+ ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000)
}).and_then(|ms| {
- ms.checked_add(if dur.extra_nanos() % 1_000_000 > 0 {1} else {0})
+ ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 {1} else {0})
}).map(|ms| {
if ms > <libc::DWORD>::max_value() as u64 {
libc::INFINITE
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! System Mutexes
+//!
+//! The Windows implementation of mutexes is a little odd and it may not be
+//! immediately obvious what's going on. The primary oddness is that SRWLock is
+//! used instead of CriticalSection, and this is done because:
+//!
+//! 1. SRWLock is several times faster than CriticalSection according to
+//! benchmarks performed on both Windows 8 and Windows 7.
+//!
+//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
+//! Unix implementation deadlocks so consistency is preferred. See #19962 for
+//! more details.
+//!
+//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
+//! is that there are no guarantees of fairness.
+//!
+//! The downside of this approach, however, is that SRWLock is not available on
+//! Windows XP, so we continue to have a fallback implementation where
+//! CriticalSection is used and we keep track of who's holding the mutex to
+//! detect recursive locks.
+
use prelude::v1::*;
use cell::UnsafeCell;
-use sys::sync as ffi;
use mem;
+use sync::atomic::{AtomicUsize, Ordering};
+use sys::c;
+use sys::compat;
-pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
+pub struct Mutex {
+ lock: AtomicUsize,
+ held: UnsafeCell<bool>,
+}
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
-#[inline]
-pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
- m.inner.get()
+#[derive(Clone, Copy)]
+enum Kind {
+ SRWLock = 1,
+ CriticalSection = 2,
}
-// So you might be asking why we're using SRWLock instead of CriticalSection?
-//
-// 1. SRWLock is several times faster than CriticalSection according to
-// benchmarks performed on both Windows 8 and Windows 7.
-//
-// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix
-// implementation deadlocks so consistency is preferred. See #19962 for more
-// details.
-//
-// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
-// is there there are no guarantees of fairness.
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
+ debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
+ &m.lock as *const _ as *mut _
+}
impl Mutex {
pub const fn new() -> Mutex {
- Mutex { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
+ Mutex {
+ lock: AtomicUsize::new(0),
+ held: UnsafeCell::new(false),
+ }
}
- #[inline]
pub unsafe fn lock(&self) {
- ffi::AcquireSRWLockExclusive(self.inner.get())
+ match kind() {
+ Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
+ Kind::CriticalSection => {
+ let re = self.remutex();
+ (*re).lock();
+ if !self.flag_locked() {
+ (*re).unlock();
+ panic!("cannot recursively lock a mutex");
+ }
+ }
+ }
}
- #[inline]
pub unsafe fn try_lock(&self) -> bool {
- ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ match kind() {
+ Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
+ Kind::CriticalSection => {
+ let re = self.remutex();
+ if !(*re).try_lock() {
+ false
+ } else if self.flag_locked() {
+ true
+ } else {
+ (*re).unlock();
+ false
+ }
+ }
+ }
}
- #[inline]
pub unsafe fn unlock(&self) {
- ffi::ReleaseSRWLockExclusive(self.inner.get())
+ *self.held.get() = false;
+ match kind() {
+ Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
+ Kind::CriticalSection => (*self.remutex()).unlock(),
+ }
}
- #[inline]
pub unsafe fn destroy(&self) {
- // ...
+ match kind() {
+ Kind::SRWLock => {}
+ Kind::CriticalSection => {
+ match self.lock.load(Ordering::SeqCst) {
+ 0 => {}
+ n => { Box::from_raw(n as *mut ReentrantMutex).destroy(); }
+ }
+ }
+ }
+ }
+
+ unsafe fn remutex(&self) -> *mut ReentrantMutex {
+ match self.lock.load(Ordering::SeqCst) {
+ 0 => {}
+ n => return n as *mut _,
+ }
+ let mut re = Box::new(ReentrantMutex::uninitialized());
+ re.init();
+ let re = Box::into_raw(re);
+ match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
+ 0 => re,
+ n => { Box::from_raw(re).destroy(); n as *mut _ }
+ }
+ }
+
+ unsafe fn flag_locked(&self) -> bool {
+ if *self.held.get() {
+ false
+ } else {
+ *self.held.get() = true;
+ true
+ }
+
}
}
-pub struct ReentrantMutex { inner: UnsafeCell<ffi::CRITICAL_SECTION> }
+fn kind() -> Kind {
+ static KIND: AtomicUsize = AtomicUsize::new(0);
+
+ let val = KIND.load(Ordering::SeqCst);
+ if val == Kind::SRWLock as usize {
+ return Kind::SRWLock
+ } else if val == Kind::CriticalSection as usize {
+ return Kind::CriticalSection
+ }
+
+ let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
+ None => Kind::CriticalSection,
+ Some(..) => Kind::SRWLock,
+ };
+ KIND.store(ret as usize, Ordering::SeqCst);
+ return ret;
+}
+
+pub struct ReentrantMutex { inner: UnsafeCell<c::CRITICAL_SECTION> }
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
}
pub unsafe fn init(&mut self) {
- ffi::InitializeCriticalSection(self.inner.get());
+ c::InitializeCriticalSection(self.inner.get());
}
pub unsafe fn lock(&self) {
- ffi::EnterCriticalSection(self.inner.get());
+ c::EnterCriticalSection(self.inner.get());
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
- ffi::TryEnterCriticalSection(self.inner.get()) != 0
+ c::TryEnterCriticalSection(self.inner.get()) != 0
}
pub unsafe fn unlock(&self) {
- ffi::LeaveCriticalSection(self.inner.get());
+ c::LeaveCriticalSection(self.inner.get());
}
pub unsafe fn destroy(&self) {
- ffi::DeleteCriticalSection(self.inner.get());
+ c::DeleteCriticalSection(self.inner.get());
}
}
use sync::Once;
use sys;
use sys::c;
-use sys_common::{AsInner, FromInner};
+use sys_common::{AsInner, FromInner, IntoInner};
use sys_common::net::{setsockopt, getsockopt};
use time::Duration;
SocketAddr::V4(..) => libc::AF_INET,
SocketAddr::V6(..) => libc::AF_INET6,
};
- let socket = unsafe {
- c::WSASocketW(fam, ty, 0, 0 as *mut _, 0,
- c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT)
- };
- match socket {
- INVALID_SOCKET => Err(last_error()),
- n => Ok(Socket(n)),
- }
+ let socket = try!(unsafe {
+ match c::WSASocketW(fam, ty, 0, 0 as *mut _, 0,
+ c::WSA_FLAG_OVERLAPPED) {
+ INVALID_SOCKET => Err(last_error()),
+ n => Ok(Socket(n)),
+ }
+ });
+ try!(socket.set_no_inherit());
+ Ok(socket)
}
pub fn accept(&self, storage: *mut libc::sockaddr,
len: *mut libc::socklen_t) -> io::Result<Socket> {
- match unsafe { libc::accept(self.0, storage, len) } {
- INVALID_SOCKET => Err(last_error()),
- n => Ok(Socket(n)),
- }
+ let socket = try!(unsafe {
+ match libc::accept(self.0, storage, len) {
+ INVALID_SOCKET => Err(last_error()),
+ n => Ok(Socket(n)),
+ }
+ });
+ try!(socket.set_no_inherit());
+ Ok(socket)
}
pub fn duplicate(&self) -> io::Result<Socket> {
- unsafe {
+ let socket = try!(unsafe {
let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
try!(cvt(c::WSADuplicateSocketW(self.0,
c::GetCurrentProcessId(),
info.iSocketType,
info.iProtocol,
&mut info, 0,
- c::WSA_FLAG_OVERLAPPED |
- c::WSA_FLAG_NO_HANDLE_INHERIT) {
+ c::WSA_FLAG_OVERLAPPED) {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
- }
+ });
+ try!(socket.set_no_inherit());
+ Ok(socket)
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
Ok(Some(Duration::new(secs as u64, nsec as u32)))
}
}
+
+ fn set_no_inherit(&self) -> io::Result<()> {
+ sys::cvt(unsafe {
+ c::SetHandleInformation(self.0 as libc::HANDLE,
+ c::HANDLE_FLAG_INHERIT, 0)
+ }).map(|_| ())
+ }
}
impl Drop for Socket {
impl FromInner<libc::SOCKET> for Socket {
fn from_inner(sock: libc::SOCKET) -> Socket { Socket(sock) }
}
+
+impl IntoInner<libc::SOCKET> for Socket {
+ fn into_inner(self) -> libc::SOCKET {
+ let ret = self.0;
+ mem::forget(self);
+ ret
+ }
+}
impl Drop for Args {
fn drop(&mut self) {
+ // self.cur can be null if CommandLineToArgvW previously failed,
+ // but LocalFree ignores NULL pointers
unsafe { c::LocalFree(self.cur as *mut c_void); }
}
}
let lpCmdLine = c::GetCommandLineW();
let szArgList = c::CommandLineToArgvW(lpCmdLine, &mut nArgs);
+ // szArgList can be NULL if CommandLineToArgvW failed,
+ // but in that case nArgs is 0 so we won't actually
+ // try to read a null pointer
Args { cur: szArgList, range: 0..(nArgs as isize) }
}
}
impl AnonPipe {
pub fn handle(&self) -> &Handle { &self.inner }
+ pub fn into_handle(self) -> Handle { self.inner }
pub fn raw(&self) -> libc::HANDLE { self.inner.raw() }
}
pub fn handle(&self) -> &Handle { &self.handle }
+
+ pub fn into_handle(self) -> Handle { self.handle }
}
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
use prelude::v1::*;
use cell::UnsafeCell;
-use sys::sync as ffi;
+use sys::c;
-pub struct RWLock { inner: UnsafeCell<ffi::SRWLOCK> }
+pub struct RWLock { inner: UnsafeCell<c::SRWLOCK> }
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
pub const fn new() -> RWLock {
- RWLock { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
+ RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
}
#[inline]
pub unsafe fn read(&self) {
- ffi::AcquireSRWLockShared(self.inner.get())
+ c::AcquireSRWLockShared(self.inner.get())
}
#[inline]
pub unsafe fn try_read(&self) -> bool {
- ffi::TryAcquireSRWLockShared(self.inner.get()) != 0
+ c::TryAcquireSRWLockShared(self.inner.get()) != 0
}
#[inline]
pub unsafe fn write(&self) {
- ffi::AcquireSRWLockExclusive(self.inner.get())
+ c::AcquireSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn try_write(&self) -> bool {
- ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
}
#[inline]
pub unsafe fn read_unlock(&self) {
- ffi::ReleaseSRWLockShared(self.inner.get())
+ c::ReleaseSRWLockShared(self.inner.get())
}
#[inline]
pub unsafe fn write_unlock(&self) {
- ffi::ReleaseSRWLockExclusive(self.inner.get())
+ c::ReleaseSRWLockExclusive(self.inner.get())
}
#[inline]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rt::util::report_overflow;
use core::prelude::*;
-use ptr;
-use mem;
+
+use libc::types::os::arch::extra::{LPVOID, DWORD, LONG};
use libc;
-use libc::types::os::arch::extra::{LPVOID, DWORD, LONG, BOOL};
+use mem;
+use ptr;
+use rt::util::report_overflow;
+use sys::c;
use sys_common::stack;
pub struct Handler {
}
pub unsafe fn make_handler() -> Handler {
- if SetThreadStackGuarantee(&mut 0x5000) == 0 {
- panic!("failed to reserve stack space for exception handling");
+ // This API isn't available on XP, so don't panic in that case and just pray
+ // it works out ok.
+ if c::SetThreadStackGuarantee(&mut 0x5000) == 0 {
+ if libc::GetLastError() as u32 != libc::ERROR_CALL_NOT_IMPLEMENTED as u32 {
+ panic!("failed to reserve stack space for exception handling");
+ }
}
Handler { _data: 0 as *mut libc::c_void }
}
+#[repr(C)]
pub struct EXCEPTION_RECORD {
pub ExceptionCode: DWORD,
pub ExceptionFlags: DWORD,
pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS]
}
+#[repr(C)]
pub struct EXCEPTION_POINTERS {
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ContextRecord: LPVOID
fn AddVectoredExceptionHandler(FirstHandler: ULONG,
VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
-> LPVOID;
- fn SetThreadStackGuarantee(StackSizeInBytes: *mut ULONG) -> BOOL;
}
use io::prelude::*;
use io::{self, Cursor};
-use iter::repeat;
use libc;
use ptr;
use str;
let mut utf8 = self.utf8.lock().unwrap();
// Read more if the buffer is empty
if utf8.position() as usize == utf8.get_ref().len() {
- let mut utf16: Vec<u16> = repeat(0u16).take(0x1000).collect();
+ let mut utf16 = vec![0u16; 0x1000];
let mut num = 0;
try!(cvt(unsafe {
c::ReadConsoleW(handle,
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::{BOOL, DWORD, LPVOID, LONG, HANDLE, c_ulong};
-use libc::types::os::arch::extra::BOOLEAN;
-
-pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
-pub type PSRWLOCK = *mut SRWLOCK;
-pub type ULONG = c_ulong;
-pub type ULONG_PTR = c_ulong;
-
-#[repr(C)]
-pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
-#[repr(C)]
-pub struct SRWLOCK { pub ptr: LPVOID }
-#[repr(C)]
-pub struct CRITICAL_SECTION {
- CriticalSectionDebug: LPVOID,
- LockCount: LONG,
- RecursionCount: LONG,
- OwningThread: HANDLE,
- LockSemaphore: HANDLE,
- SpinCount: ULONG_PTR
-}
-
-pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
- ptr: 0 as *mut _,
-};
-pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
-
-extern "system" {
- // condition variables
- pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
- SRWLock: PSRWLOCK,
- dwMilliseconds: DWORD,
- Flags: ULONG) -> BOOL;
- pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
- pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
-
- // slim rwlocks
- pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
- pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
- pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
- pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
- pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
- pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
-
- pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
- pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
- pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOLEAN;
- pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
- pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
-}
}
pub mod guard {
- pub unsafe fn main() -> usize { 0 }
- pub unsafe fn current() -> usize { 0 }
- pub unsafe fn init() {}
+ use prelude::v1::*;
+
+ pub unsafe fn current() -> Option<usize> { None }
+ pub unsafe fn init() -> Option<usize> { None }
}
//
// # The article mentions crazy stuff about "/INCLUDE"?
//
-// It sure does! This seems to work for now, so maybe we'll just run into
-// that if we start linking with msvc?
+// It sure does! We include it below for MSVC targets, but it looks like for GNU
+// targets we don't require it.
#[link_section = ".CRT$XLB"]
#[linkage = "external"]
LPVOID) =
on_tls_callback;
+#[cfg(all(target_env = "msvc", target_pointer_width = "64"))]
+#[link_args = "/INCLUDE:_tls_used"]
+extern {}
+#[cfg(all(target_env = "msvc", target_pointer_width = "32"))]
+#[link_args = "/INCLUDE:__tls_used"]
+extern {}
+
#[allow(warnings)]
unsafe extern "system" fn on_tls_callback(h: LPVOID,
dwReason: DWORD,
#[cfg(not(no_elf_tls))]
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
- static $name: ::std::thread::LocalKey<$t> =
+ static $name: $crate::thread::LocalKey<$t> =
__thread_local_inner!($t, $init,
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]);
);
(pub static $name:ident: $t:ty = $init:expr) => (
- pub static $name: ::std::thread::LocalKey<$t> =
+ pub static $name: $crate::thread::LocalKey<$t> =
__thread_local_inner!($t, $init,
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
#[cfg(no_elf_tls)]
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
- static $name: ::std::thread::LocalKey<$t> =
+ static $name: $crate::thread::LocalKey<$t> =
__thread_local_inner!($t, $init, #[]);
);
(pub static $name:ident: $t:ty = $init:expr) => (
- pub static $name: ::std::thread::LocalKey<$t> =
+ pub static $name: $crate::thread::LocalKey<$t> =
__thread_local_inner!($t, $init, #[]);
);
}
macro_rules! __thread_local_inner {
($t:ty, $init:expr, #[$($attr:meta),*]) => {{
$(#[$attr])*
- static __KEY: ::std::thread::__LocalKeyInner<$t> =
- ::std::thread::__LocalKeyInner::new();
+ static __KEY: $crate::thread::__LocalKeyInner<$t> =
+ $crate::thread::__LocalKeyInner::new();
fn __init() -> $t { $init }
- fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
- ::std::thread::LocalKey::new(__getit, __init)
+ fn __getit() -> &'static $crate::thread::__LocalKeyInner<$t> { &__KEY }
+ $crate::thread::LocalKey::new(__getit, __init)
}}
}
use cell::{Cell, UnsafeCell};
use intrinsics;
+ use ptr;
pub struct Key<T> {
inner: UnsafeCell<Option<T>>,
#[cfg(target_os = "linux")]
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
use mem;
- use ptr;
use libc;
use sys_common::thread_local as os;
#[linkage = "extern_weak"]
static __dso_handle: *mut u8;
#[linkage = "extern_weak"]
- static __cxa_thread_atexit_impl: *const ();
+ static __cxa_thread_atexit_impl: *const libc::c_void;
}
if !__cxa_thread_atexit_impl.is_null() {
type F = unsafe extern fn(dtor: unsafe extern fn(*mut u8),
arg: *mut u8,
dso_handle: *mut u8) -> libc::c_int;
- mem::transmute::<*const (), F>(__cxa_thread_atexit_impl)
+ mem::transmute::<*const libc::c_void, F>(__cxa_thread_atexit_impl)
(dtor, t, &__dso_handle as *const _ as *mut _);
return
}
// destructor as running for this thread so calls to `get` will return
// `None`.
(*ptr).dtor_running.set(true);
- intrinsics::drop_in_place((*ptr).inner.get());
+
+ // The OSX implementation of TLS apparently had an odd aspect to it
+ // where the pointer we have may be overwritten while this destructor
+ // is running. Specifically if a TLS destructor re-accesses TLS it may
+ // trigger a re-initialization of all TLS variables, paving over at
+ // least some destroyed ones with initial values.
+ //
+ // This means that if we drop a TLS value in place on OSX that we could
+ // revert the value to its original state halfway through the
+ // destructor, which would be bad!
+ //
+ // Hence, we use `ptr::read` on OSX (to move to a "safe" location)
+ // instead of drop_in_place.
+ if cfg!(target_os = "macos") {
+ ptr::read((*ptr).inner.get());
+ } else {
+ intrinsics::drop_in_place((*ptr).inner.get());
+ }
}
}
//! value produced by the child thread, or `Err` of the value given to
//! a call to `panic!` if the child panicked.
//!
-//! ## Scoped threads
-//!
-//! The `spawn` method does not allow the child and parent threads to
-//! share any stack data, since that is not safe in general. However,
-//! `scoped` makes it possible to share the parent's stack by forcing
-//! a join before any relevant stack frames are popped:
-//!
-//! ```rust
-//! # #![feature(scoped)]
-//! use std::thread;
-//!
-//! let guard = thread::scoped(move || {
-//! // some work here
-//! });
-//!
-//! // do some other work in the meantime
-//! let output = guard.join();
-//! ```
-//!
-//! The `scoped` function doesn't return a `Thread` directly; instead,
-//! it returns a *join guard*. The join guard is an RAII-style guard
-//! that will automatically join the child thread (block until it
-//! terminates) when it is dropped. You can join the child thread in
-//! advance by calling the `join` method on the guard, which will also
-//! return the result produced by the thread. A handle to the thread
-//! itself is available via the `thread` method of the join guard.
-//!
//! ## Configuring threads
//!
//! A new thread can be configured before it is spawned via the `Builder` type,
/// upon being dropped. Because the child thread may refer to data on the
/// current thread's stack (hence the "scoped" name), it cannot be detached;
/// it *must* be joined before the relevant stack frame is popped. See the
- /// module documentation for additional details.
+ /// documentation on `thread::scoped` for additional details.
///
/// # Errors
///
/// Spawns a new *scoped* thread, returning a `JoinGuard` for it.
///
-/// The join guard can be used to explicitly join the child thread (via
-/// `join`), returning `Result<T>`, or it will implicitly join the child
-/// upon being dropped. Because the child thread may refer to data on the
-/// current thread's stack (hence the "scoped" name), it cannot be detached;
-/// it *must* be joined before the relevant stack frame is popped. See the
-/// module documentation for additional details.
+/// The `spawn` method does not allow the child and parent threads to
+/// share any stack data, since that is not safe in general. However,
+/// `scoped` makes it possible to share the parent's stack by forcing
+/// a join before any relevant stack frames are popped:
+///
+/// ```rust
+/// #![feature(scoped)]
+///
+/// use std::thread;
+///
+/// let guard = thread::scoped(move || {
+/// // some work here
+/// });
+///
+/// // do some other work in the meantime
+/// let output = guard.join();
+/// ```
+///
+/// The `scoped` function doesn't return a `Thread` directly; instead, it
+/// returns a *join guard*. The join guard can be used to explicitly join
+/// the child thread (via `join`), returning `Result<T>`, or it will
+/// implicitly join the child upon being dropped. Because the child thread
+/// may refer to data on the current thread's stack (hence the "scoped"
+/// name), it cannot be detached; it *must* be joined before the relevant
+/// stack frame is popped.
///
/// # Panics
///
/// Invokes a closure, capturing the cause of panic if one occurs.
///
-/// This function will return `Ok(())` if the closure does not panic, and will
-/// return `Err(cause)` if the closure panics. The `cause` returned is the
-/// object with which panic was originally invoked.
+/// This function will return `Ok` with the closure's result if the closure
+/// does not panic, and will return `Err(cause)` if the closure panics. The
+/// `cause` returned is the object with which panic was originally invoked.
///
/// It is currently undefined behavior to unwind from Rust code into foreign
/// code, so this function is particularly useful when Rust is called from
/// # Examples
///
/// ```
-/// # #![feature(catch_panic)]
+/// #![feature(catch_panic)]
+///
/// use std::thread;
///
/// let result = thread::catch_panic(|| {
imp::Thread::sleep(dur)
}
-/// Blocks unless or until the current thread's token is made available (may wake spuriously).
+/// Blocks unless or until the current thread's token is made available.
///
-/// See the module doc for more detail.
+/// Every thread is equipped with some basic low-level blocking support, via
+/// the `park()` function and the [`unpark()`][unpark] method. These can be
+/// used as a more CPU-efficient implementation of a spinlock.
+///
+/// [unpark]: struct.Thread.html#method.unpark
+///
+/// The API is typically used by acquiring a handle to the current thread,
+/// placing that handle in a shared data structure so that other threads can
+/// find it, and then parking (in a loop with a check for the token actually
+/// being acquired).
+///
+/// A call to `park` does not guarantee that the thread will remain parked
+/// forever, and callers should be prepared for this possibility.
+///
+/// See the [module documentation][thread] for more detail.
+///
+/// [thread]: index.html
//
// The implementation currently uses the trivial strategy of a Mutex+Condvar
// with wakeup flag, which does not actually allow spurious wakeups. In the
//! # Examples
//!
//! ```
-//! # #![feature(scoped_tls)]
+//! #![feature(scoped_tls)]
+//!
//! scoped_thread_local!(static FOO: u32);
//!
//! // Initially each scoped slot is empty.
#[allow_internal_unstable]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
- static $name: ::std::thread::ScopedKey<$t> =
+ static $name: $crate::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
);
(pub static $name:ident: $t:ty) => (
- pub static $name: ::std::thread::ScopedKey<$t> =
+ pub static $name: $crate::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
);
}
#[cfg(no_elf_tls)]
macro_rules! __scoped_thread_local_inner {
($t:ty) => {{
- static _KEY: ::std::thread::__ScopedKeyInner<$t> =
- ::std::thread::__ScopedKeyInner::new();
- fn _getit() -> &'static ::std::thread::__ScopedKeyInner<$t> { &_KEY }
- ::std::thread::ScopedKey::new(_getit)
+ static _KEY: $crate::thread::__ScopedKeyInner<$t> =
+ $crate::thread::__ScopedKeyInner::new();
+ fn _getit() -> &'static $crate::thread::__ScopedKeyInner<$t> { &_KEY }
+ $crate::thread::ScopedKey::new(_getit)
}}
}
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
+ target_os = "netbsd",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
- static _KEY: ::std::thread::__ScopedKeyInner<$t> =
- ::std::thread::__ScopedKeyInner::new();
- fn _getit() -> &'static ::std::thread::__ScopedKeyInner<$t> { &_KEY }
- ::std::thread::ScopedKey::new(_getit)
+ static _KEY: $crate::thread::__ScopedKeyInner<$t> =
+ $crate::thread::__ScopedKeyInner::new();
+ fn _getit() -> &'static $crate::thread::__ScopedKeyInner<$t> { &_KEY }
+ $crate::thread::ScopedKey::new(_getit)
}}
}
/// # Examples
///
/// ```
- /// # #![feature(scoped_tls)]
+ /// #![feature(scoped_tls)]
+ ///
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.set(&100, || {
/// # Examples
///
/// ```no_run
- /// # #![feature(scoped_tls)]
+ /// #![feature(scoped_tls)]
+ ///
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.with(|slot| {
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
+ target_os = "netbsd",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls)))]
#[doc(hidden)]
mod imp {
- use std::cell::Cell;
+ use cell::Cell;
pub struct KeyInner<T> { inner: Cell<*mut T> }
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
+ target_os = "netbsd",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls))]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Temporal quantification
-
-#![unstable(feature = "duration", reason = "recently added API per RFC 1040")]
-
use prelude::v1::*;
-use fmt;
use ops::{Add, Sub, Mul, Div};
use sys::time::SteadyTime;
/// # Examples
///
/// ```
-/// #![feature(duration)]
/// use std::time::Duration;
///
/// let five_seconds = Duration::new(5, 0);
/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5);
///
-/// assert_eq!(five_seconds_and_five_nanos.secs(), 5);
-/// assert_eq!(five_seconds_and_five_nanos.extra_nanos(), 5);
+/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5);
+/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5);
///
/// let ten_millis = Duration::from_millis(10);
/// ```
+#[stable(feature = "duration", since = "1.3.0")]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Duration {
secs: u64,
///
/// If the nanoseconds is greater than 1 billion (the number of nanoseconds
/// in a second), then it will carry over into the seconds provided.
+ #[stable(feature = "duration", since = "1.3.0")]
pub fn new(secs: u64, nanos: u32) -> Duration {
let secs = secs + (nanos / NANOS_PER_SEC) as u64;
let nanos = nanos % NANOS_PER_SEC;
}
/// Creates a new `Duration` from the specified number of seconds.
+ #[stable(feature = "duration", since = "1.3.0")]
pub fn from_secs(secs: u64) -> Duration {
Duration { secs: secs, nanos: 0 }
}
/// Creates a new `Duration` from the specified number of milliseconds.
+ #[stable(feature = "duration", since = "1.3.0")]
pub fn from_millis(millis: u64) -> Duration {
let secs = millis / MILLIS_PER_SEC;
let nanos = ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI;
///
/// The extra precision represented by this duration is ignored (e.g. extra
/// nanoseconds are not represented in the returned value).
- pub fn secs(&self) -> u64 { self.secs }
+ #[stable(feature = "duration", since = "1.3.0")]
+ pub fn as_secs(&self) -> u64 { self.secs }
+
+ #[deprecated(reason = "renamed to `as_secs`", since = "1.3.0")]
+ #[unstable(feature = "duration_deprecated")]
+ /// Returns the number of whole seconds represented by this duration.
+ ///
+ /// The extra precision represented by this duration is ignored (e.g. extra
+ /// nanoseconds are not represented in the returned value).
+ pub fn secs(&self) -> u64 { self.as_secs() }
+
+ /// Returns the nanosecond precision represented by this duration.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by nanoseconds. The returned number always represents a
+ /// fractional portion of a second (e.g. it is less than one billion).
+ #[stable(feature = "duration", since = "1.3.0")]
+ pub fn subsec_nanos(&self) -> u32 { self.nanos }
+ #[deprecated(reason = "renamed to `subsec_nanos`", since = "1.3.0")]
+ #[unstable(feature = "duration_deprecated")]
/// Returns the nanosecond precision represented by this duration.
///
/// This method does **not** return the length of the duration when
/// represented by nanoseconds. The returned number always represents a
/// fractional portion of a second (e.g. it is less than one billion).
- pub fn extra_nanos(&self) -> u32 { self.nanos }
+ pub fn extra_nanos(&self) -> u32 { self.subsec_nanos() }
}
impl Add for Duration {
}
}
-impl fmt::Display for Duration {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match (self.secs, self.nanos) {
- (s, 0) => write!(f, "{}s", s),
- (0, n) if n % NANOS_PER_MILLI == 0 => write!(f, "{}ms",
- n / NANOS_PER_MILLI),
- (0, n) if n % 1_000 == 0 => write!(f, "{}µs", n / 1_000),
- (0, n) => write!(f, "{}ns", n),
- (s, n) => write!(f, "{}.{}s", s,
- format!("{:09}", n).trim_right_matches('0'))
- }
- }
-}
-
#[cfg(test)]
mod tests {
use prelude::v1::*;
#[test]
fn secs() {
- assert_eq!(Duration::new(0, 0).secs(), 0);
- assert_eq!(Duration::from_secs(1).secs(), 1);
- assert_eq!(Duration::from_millis(999).secs(), 0);
- assert_eq!(Duration::from_millis(1001).secs(), 1);
+ assert_eq!(Duration::new(0, 0).as_secs(), 0);
+ assert_eq!(Duration::from_secs(1).as_secs(), 1);
+ assert_eq!(Duration::from_millis(999).as_secs(), 0);
+ assert_eq!(Duration::from_millis(1001).as_secs(), 1);
}
#[test]
fn nanos() {
- assert_eq!(Duration::new(0, 0).extra_nanos(), 0);
- assert_eq!(Duration::new(0, 5).extra_nanos(), 5);
- assert_eq!(Duration::new(0, 1_000_000_001).extra_nanos(), 1);
- assert_eq!(Duration::from_secs(1).extra_nanos(), 0);
- assert_eq!(Duration::from_millis(999).extra_nanos(), 999 * 1_000_000);
- assert_eq!(Duration::from_millis(1001).extra_nanos(), 1 * 1_000_000);
+ assert_eq!(Duration::new(0, 0).subsec_nanos(), 0);
+ assert_eq!(Duration::new(0, 5).subsec_nanos(), 5);
+ assert_eq!(Duration::new(0, 1_000_000_001).subsec_nanos(), 1);
+ assert_eq!(Duration::from_secs(1).subsec_nanos(), 0);
+ assert_eq!(Duration::from_millis(999).subsec_nanos(), 999 * 1_000_000);
+ assert_eq!(Duration::from_millis(1001).subsec_nanos(), 1 * 1_000_000);
}
#[test]
assert_eq!(Duration::new(99, 999_999_000) / 100,
Duration::new(0, 999_999_990));
}
-
- #[test]
- fn display() {
- assert_eq!(Duration::new(0, 2).to_string(), "2ns");
- assert_eq!(Duration::new(0, 2_000_000).to_string(), "2ms");
- assert_eq!(Duration::new(2, 0).to_string(), "2s");
- assert_eq!(Duration::new(2, 2).to_string(), "2.000000002s");
- assert_eq!(Duration::new(2, 2_000_000).to_string(),
- "2.002s");
- assert_eq!(Duration::new(0, 2_000_002).to_string(),
- "2000002ns");
- assert_eq!(Duration::new(2, 2_000_002).to_string(),
- "2.002000002s");
- }
}
//! Temporal quantification.
-#![unstable(feature = "time")]
+#![stable(feature = "time", since = "1.3.0")]
pub use self::duration::Duration;
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Operations on tuples
-//!
-//! To access the _N_-th element of a tuple one can use `N` itself
-//! as a field of the tuple.
-//!
-//! Indexing starts from zero, so `0` returns first value, `1`
-//! returns second value, and so on. In general, a tuple with _S_
-//! elements provides aforementioned fields from `0` to `S-1`.
-//!
-//! If every type inside a tuple implements one of the following
-//! traits, then a tuple itself also implements it.
-//!
-//! * `Clone`
-//! * `PartialEq`
-//! * `Eq`
-//! * `PartialOrd`
-//! * `Ord`
-//! * `Default`
-//!
-//! # Examples
-//!
-//! Accessing elements of a tuple at specified indices:
-//!
-//! ```
-//! let x = ("colorless", "green", "ideas", "sleep", "furiously");
-//! assert_eq!(x.3, "sleep");
-//!
-//! let v = (3, 3);
-//! let u = (1, -5);
-//! assert_eq!(v.0 * u.0 + v.1 * u.1, -12);
-//! ```
-//!
-//! Using traits implemented for tuples:
-//!
-//! ```
-//! let a = (1, 2);
-//! let b = (3, 4);
-//! assert!(a != b);
-//!
-//! let c = b.clone();
-//! assert!(b == c);
-//!
-//! let d : (u32, f32) = Default::default();
-//! assert_eq!(d, (0, 0.0f32));
-//! ```
-
-#![doc(primitive = "tuple")]
-#![stable(feature = "rust1", since = "1.0.0")]
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![doc(primitive = "unit")]
-#![stable(feature = "rust1", since = "1.0.0")]
-
-//! The `()` type, sometimes called "unit" or "nil".
-//!
-//! The `()` type has exactly one value `()`, and is used when there
-//! is no other meaningful value that could be returned. `()` is most
-//! commonly seen implicitly: functions without a `-> ...` implicitly
-//! have return type `()`, that is, these are equivalent:
-//!
-//! ```rust
-//! fn long() -> () {}
-//!
-//! fn short() {}
-//! ```
-//!
-//! The semicolon `;` can be used to discard the result of an
-//! expression at the end of a block, making the expression (and thus
-//! the block) evaluate to `()`. For example,
-//!
-//! ```rust
-//! fn returns_i64() -> i64 {
-//! 1i64
-//! }
-//! fn returns_unit() {
-//! 1i64;
-//! }
-//!
-//! let is_i64 = {
-//! returns_i64()
-//! };
-//! let is_unit = {
-//! returns_i64();
-//! };
-//! ```
OsiOS,
OsDragonfly,
OsBitrig,
+ OsNetbsd,
OsOpenbsd,
}
OsFreebsd => "freebsd".fmt(f),
OsDragonfly => "dragonfly".fmt(f),
OsBitrig => "bitrig".fmt(f),
+ OsNetbsd => "netbsd".fmt(f),
OsOpenbsd => "openbsd".fmt(f),
}
}
pub use self::KleeneOp::*;
pub use self::Lit_::*;
pub use self::LitIntType::*;
-pub use self::LocalSource::*;
pub use self::Mac_::*;
pub use self::MacStmtStyle::*;
pub use self::MetaItem_::*;
use parse::token::{InternedString, str_to_ident};
use parse::token;
use parse::lexer;
+use parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
use print::pprust;
use ptr::P;
impl Ident {
/// Construct an identifier with the given name and an empty context:
pub fn new(name: Name) -> Ident { Ident {name: name, ctxt: EMPTY_CTXT}}
-
- pub fn as_str<'a>(&'a self) -> &'a str {
- self.name.as_str()
- }
}
impl fmt::Debug for Ident {
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Name(nm) = *self;
- write!(f, "{:?}({})", token::get_name(*self), nm)
+ write!(f, "{}({})", self, nm)
}
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- fmt::Display::fmt(&token::get_name(*self), f)
+ fmt::Display::fmt(&self.as_str(), f)
}
}
// one example and its non-hygienic counterpart would be:
// syntax::parse::token::Token::mtwt_eq
// syntax::ext::tt::macro_parser::token_name_eq
- panic!("not allowed to compare these idents: {}, {}. \
+ panic!("not allowed to compare these idents: {:?}, {:?}. \
Probably related to issue \\#6993", self, other);
}
}
- fn ne(&self, other: &Ident) -> bool {
- ! self.eq(other)
- }
}
/// A SyntaxContext represents a chain of macro-expandings
RustcEncodable, RustcDecodable, Clone, Copy)]
pub struct Name(pub u32);
+impl<T: AsRef<str>> PartialEq<T> for Name {
+ fn eq(&self, other: &T) -> bool {
+ self.as_str() == other.as_ref()
+ }
+}
+
impl Name {
- pub fn as_str<'a>(&'a self) -> &'a str {
- unsafe {
- // FIXME #12938: can't use copy_lifetime since &str isn't a &T
- ::std::mem::transmute::<&str,&str>(&token::get_name(*self))
- }
+ pub fn as_str(&self) -> token::InternedString {
+ token::InternedString::new_from_name(*self)
}
pub fn usize(&self) -> usize {
impl Encodable for Ident {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
- s.emit_str(&token::get_ident(*self))
+ s.emit_str(&self.name.as_str())
}
}
MacStmtWithoutBraces,
}
-/// Where a local declaration came from: either a true `let ... =
-/// ...;`, or one desugared from the pattern of a for loop.
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
-pub enum LocalSource {
- LocalLet,
- LocalFor,
-}
-
// FIXME (pending discussion of #1697, #2178...): local should really be
// a refinement on pat.
/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
pub init: Option<P<Expr>>,
pub id: NodeId,
pub span: Span,
- pub source: LocalSource,
}
pub type Decl = Spanned<Decl_>;
pub enum BlockCheckMode {
DefaultBlock,
UnsafeBlock(UnsafeSource),
+ PushUnsafeBlock(UnsafeSource),
+ PopUnsafeBlock(UnsafeSource),
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
impl TokenTree {
pub fn len(&self) -> usize {
match *self {
- TtToken(_, token::DocComment(_)) => 2,
+ TtToken(_, token::DocComment(name)) => {
+ match doc_comment_style(&name.as_str()) {
+ AttrOuter => 2,
+ AttrInner => 3
+ }
+ }
TtToken(_, token::SpecialVarNt(..)) => 2,
TtToken(_, token::MatchNt(..)) => 3,
TtDelimited(_, ref delimed) => {
(&TtToken(sp, token::DocComment(_)), 0) => {
TtToken(sp, token::Pound)
}
- (&TtToken(sp, token::DocComment(name)), 1) => {
+ (&TtToken(sp, token::DocComment(name)), 1)
+ if doc_comment_style(&name.as_str()) == AttrInner => {
+ TtToken(sp, token::Not)
+ }
+ (&TtToken(sp, token::DocComment(name)), _) => {
+ let stripped = strip_doc_comment_decoration(&name.as_str());
TtDelimited(sp, Rc::new(Delimited {
delim: token::Bracket,
open_span: sp,
tts: vec![TtToken(sp, token::Ident(token::str_to_ident("doc"),
token::Plain)),
TtToken(sp, token::Eq),
- TtToken(sp, token::Literal(token::Str_(name), None))],
+ TtToken(sp, token::Literal(
+ token::StrRaw(token::intern(&stripped), 0), None))],
close_span: sp,
}))
}
pub fn path_name_i(idents: &[Ident]) -> String {
// FIXME: Bad copies (#2543 -- same for everything else that says "bad")
- idents.iter().map(|i| {
- token::get_ident(*i).to_string()
- }).collect::<Vec<String>>().connect("::")
+ idents.iter().map(|i| i.to_string()).collect::<Vec<String>>().join("::")
}
pub fn local_def(id: NodeId) -> DefId {
// Functions dealing with attributes and meta items
+// BitSet
+#![allow(deprecated)]
+
pub use self::StabilityLevel::*;
pub use self::ReprAttr::*;
pub use self::IntType::*;
// The reason for the current stability level. If deprecated, the
// reason for deprecation.
pub reason: Option<InternedString>,
+ // The relevant rust-lang issue
+ pub issue: Option<u32>
}
/// The available stability levels.
-> (Option<Stability>, Vec<&'a AM>) {
let mut stab: Option<Stability> = None;
- let mut deprecated: Option<(InternedString, Option<InternedString>)> = None;
+ let mut deprecated: Option<(Option<InternedString>, Option<InternedString>)> = None;
let mut used_attrs: Vec<&'a AM> = vec![];
'outer: for attr in attrs {
used_attrs.push(attr);
- let (feature, since, reason) = match attr.meta_item_list() {
+ let (feature, since, reason, issue) = match attr.meta_item_list() {
Some(metas) => {
let mut feature = None;
let mut since = None;
let mut reason = None;
+ let mut issue = None;
for meta in metas {
- if meta.name() == "feature" {
- match meta.value_str() {
- Some(v) => feature = Some(v),
- None => {
- diagnostic.span_err(meta.span, "incorrect meta item");
- continue 'outer;
+ match &*meta.name() {
+ "feature" => {
+ match meta.value_str() {
+ Some(v) => feature = Some(v),
+ None => {
+ diagnostic.span_err(meta.span, "incorrect meta item");
+ continue 'outer;
+ }
}
}
- }
- if &meta.name()[..] == "since" {
- match meta.value_str() {
- Some(v) => since = Some(v),
- None => {
- diagnostic.span_err(meta.span, "incorrect meta item");
- continue 'outer;
+ "since" => {
+ match meta.value_str() {
+ Some(v) => since = Some(v),
+ None => {
+ diagnostic.span_err(meta.span, "incorrect meta item");
+ continue 'outer;
+ }
}
}
- }
- if &meta.name()[..] == "reason" {
- match meta.value_str() {
- Some(v) => reason = Some(v),
- None => {
- diagnostic.span_err(meta.span, "incorrect meta item");
- continue 'outer;
+ "reason" => {
+ match meta.value_str() {
+ Some(v) => reason = Some(v),
+ None => {
+ diagnostic.span_err(meta.span, "incorrect meta item");
+ continue 'outer;
+ }
+ }
+ }
+ "issue" => {
+ match meta.value_str().and_then(|s| s.parse().ok()) {
+ Some(v) => issue = Some(v),
+ None => {
+ diagnostic.span_err(meta.span, "incorrect meta item");
+ continue 'outer;
+ }
}
}
+ _ => {}
}
}
- (feature, since, reason)
+ (feature, since, reason, issue)
}
None => {
diagnostic.span_err(attr.span(), "incorrect stability attribute type");
feature: feature.unwrap_or(intern_and_get_ident("bogus")),
since: since,
deprecated_since: None,
- reason: reason
+ reason: reason,
+ issue: issue,
});
} else { // "deprecated"
if deprecated.is_some() {
diagnostic.span_err(item_sp, "multiple deprecated attributes");
}
- deprecated = Some((since.unwrap_or(intern_and_get_ident("bogus")), reason));
+ deprecated = Some((since, reason));
}
}
match stab {
Some(ref mut s) => {
let (since, reason) = deprecated.unwrap();
- s.deprecated_since = Some(since);
+ s.deprecated_since = since;
s.reason = reason;
}
None => {
either stable or unstable attribute");
}
}
+ } else if stab.as_ref().map_or(false, |s| s.level == Unstable && s.issue.is_none()) {
+ // non-deprecated unstable items need to point to issues.
+ // FIXME: uncomment this error
+ // diagnostic.span_err(item_sp,
+ // "non-deprecated unstable items need to point \
+ // to an issue with `issue = \"NNN\"`");
}
(stab, used_attrs)
/// are *absolute* positions from the beginning of the codemap, not positions
/// relative to FileMaps. Methods on the CodeMap can be used to relate spans back
/// to the original source.
+/// You must be careful if the span crosses more than one file - you will not be
+/// able to use many of the functions on spans in codemap and you cannot assume
+/// that the length of the span = hi - lo; there may be space in the BytePos
+/// range between files.
#[derive(Clone, Copy, Hash)]
pub struct Span {
pub lo: BytePos,
pub bytes: usize,
}
-/// A single source in the CodeMap
+/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
+ // We can't use `lines.get(line_number+1)` because we might
+ // be parsing when we call this function and thus the current
+ // line is the last one we have line info for.
let slice = &src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
Ok(self.new_filemap(path.to_str().unwrap().to_string(), src))
}
+ fn next_start_pos(&self) -> usize {
+ let files = self.files.borrow();
+ match files.last() {
+ None => 0,
+ // Add one so there is some space between files. This lets us distinguish
+ // positions in the codemap, even in the presence of zero-length files.
+ Some(last) => last.end_pos.to_usize() + 1,
+ }
+ }
+
+ /// Creates a new filemap without setting its line information. If you don't
+ /// intend to set the line information yourself, you should use new_filemap_and_lines.
pub fn new_filemap(&self, filename: FileName, mut src: String) -> Rc<FileMap> {
+ let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
- let start_pos = match files.last() {
- None => 0,
- Some(last) => last.end_pos.to_usize(),
- };
// Remove utf-8 BOM if any.
if src.starts_with("\u{feff}") {
src.drain(..3);
}
- // Append '\n' in case it's not already there.
- // This is a workaround to prevent CodeMap.lookup_filemap_idx from
- // accidentally overflowing into the next filemap in case the last byte
- // of span is also the last byte of filemap, which leads to incorrect
- // results from CodeMap.span_to_*.
- if !src.is_empty() && !src.ends_with("\n") {
- src.push('\n');
- }
-
let end_pos = start_pos + src.len();
let filemap = Rc::new(FileMap {
filemap
}
+ /// Creates a new filemap and sets its line information.
+ pub fn new_filemap_and_lines(&self, filename: &str, src: &str) -> Rc<FileMap> {
+ let fm = self.new_filemap(filename.to_string(), src.to_owned());
+ let mut byte_pos: u32 = 0;
+ for line in src.lines() {
+ // register the start of this line
+ fm.next_line(BytePos(byte_pos));
+
+ // update byte_pos to include this line and the \n at the end
+ byte_pos += line.len() as u32 + 1;
+ }
+ fm
+ }
+
+
/// Allocates a new FileMap representing a source file from an external
/// crate. The source code of such an "imported filemap" is not available,
/// but we still know enough to generate accurate debuginfo location
mut file_local_lines: Vec<BytePos>,
mut file_local_multibyte_chars: Vec<MultiByteChar>)
-> Rc<FileMap> {
+ let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
- let start_pos = match files.last() {
- None => 0,
- Some(last) => last.end_pos.to_usize(),
- };
let end_pos = Pos::from_usize(start_pos + source_len);
let start_pos = Pos::from_usize(start_pos);
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
- let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
- let line = a + 1; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
- let linebpos = (*f.lines.borrow())[a];
- let linechpos = self.bytepos_to_file_charpos(linebpos);
- debug!("byte pos {:?} is on the line at byte pos {:?}",
- pos, linebpos);
- debug!("char pos {:?} is on the line at char pos {:?}",
- chpos, linechpos);
- debug!("byte is on line: {}", line);
- assert!(chpos >= linechpos);
- Loc {
- file: f,
- line: line,
- col: chpos - linechpos
+ match self.lookup_line(pos) {
+ Ok(FileMapAndLine { fm: f, line: a }) => {
+ let line = a + 1; // Line numbers start at 1
+ let linebpos = (*f.lines.borrow())[a];
+ let linechpos = self.bytepos_to_file_charpos(linebpos);
+ debug!("byte pos {:?} is on the line at byte pos {:?}",
+ pos, linebpos);
+ debug!("char pos {:?} is on the line at char pos {:?}",
+ chpos, linechpos);
+ debug!("byte is on line: {}", line);
+ assert!(chpos >= linechpos);
+ Loc {
+ file: f,
+ line: line,
+ col: chpos - linechpos,
+ }
+ }
+ Err(f) => {
+ Loc {
+ file: f,
+ line: 0,
+ col: chpos,
+ }
+ }
}
}
- fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
+ // If the relevant filemap is empty, we don't return a line number.
+ fn lookup_line(&self, pos: BytePos) -> Result<FileMapAndLine, Rc<FileMap>> {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
+
+ let len = f.lines.borrow().len();
+ if len == 0 {
+ return Err(f);
+ }
+
let mut a = 0;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1 {
let m = (a + b) / 2;
- if (*lines)[m] > pos { b = m; } else { a = m; }
+ if (*lines)[m] > pos {
+ b = m;
+ } else {
+ a = m;
+ }
}
+ assert!(a <= lines.len());
}
- FileMapAndLine {fm: f, line: a}
+ Ok(FileMapAndLine { fm: f, line: a })
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
FileMapAndBytePos {fm: fm, pos: offset}
}
- /// Converts an absolute BytePos to a CharPos relative to the filemap and above.
+ /// Converts an absolute BytePos to a CharPos relative to the filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
+ // Return the index of the filemap (in self.files) which contains pos.
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
- let len = files.len();
+ let count = files.len();
+
+ // Binary search for the filemap.
let mut a = 0;
- let mut b = len;
+ let mut b = count;
while b - a > 1 {
let m = (a + b) / 2;
if files[m].start_pos > pos {
a = m;
}
}
- // There can be filemaps with length 0. These have the same start_pos as
- // the previous filemap, but are not the filemaps we want (because they
- // are length 0, they cannot contain what we are looking for). So,
- // rewind until we find a useful filemap.
- loop {
- let lines = files[a].lines.borrow();
- let lines = lines;
- if !lines.is_empty() {
- break;
- }
- if a == 0 {
- panic!("position {} does not resolve to a source location",
- pos.to_usize());
- }
- a -= 1;
- }
- if a >= len {
- panic!("position {} does not resolve to a source location",
- pos.to_usize())
- }
+
+ assert!(a < count, "position {} does not resolve to a source location", pos.to_usize());
return a;
}
mac_span.lo <= span.lo && span.hi <= mac_span.hi
});
+ debug!("span_allows_unstable: span: {:?} call_site: {:?} callee: {:?}",
+ (span.lo, span.hi),
+ (info.call_site.lo, info.call_site.hi),
+ info.callee.span.map(|x| (x.lo, x.hi)));
debug!("span_allows_unstable: from this expansion? {}, allows unstable? {}",
span_comes_from_this_expansion,
info.callee.allow_internal_unstable);
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
+ // Test we can get lines with partial line info.
assert_eq!(fm.get_line(0), Some("first line."));
- // TESTING BROKEN BEHAVIOR:
+ // TESTING BROKEN BEHAVIOR: line break declared before actual line break.
fm.next_line(BytePos(10));
assert_eq!(fm.get_line(1), Some("."));
+ fm.next_line(BytePos(12));
+ assert_eq!(fm.get_line(2), Some("second line"));
}
#[test]
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(12));
- fm2.next_line(BytePos(24));
- fm3.next_line(BytePos(24));
- fm3.next_line(BytePos(34));
+ fm2.next_line(fm2.start_pos);
+ fm3.next_line(fm3.start_pos);
+ fm3.next_line(fm3.start_pos + BytePos(12));
cm
}
// Test lookup_byte_offset
let cm = init_code_map();
- let fmabp1 = cm.lookup_byte_offset(BytePos(22));
+ let fmabp1 = cm.lookup_byte_offset(BytePos(23));
assert_eq!(fmabp1.fm.name, "blork.rs");
- assert_eq!(fmabp1.pos, BytePos(22));
+ assert_eq!(fmabp1.pos, BytePos(23));
+
+ let fmabp1 = cm.lookup_byte_offset(BytePos(24));
+ assert_eq!(fmabp1.fm.name, "empty.rs");
+ assert_eq!(fmabp1.pos, BytePos(0));
- let fmabp2 = cm.lookup_byte_offset(BytePos(24));
+ let fmabp2 = cm.lookup_byte_offset(BytePos(25));
assert_eq!(fmabp2.fm.name, "blork2.rs");
assert_eq!(fmabp2.pos, BytePos(0));
}
let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
- let cp2 = cm.bytepos_to_file_charpos(BytePos(24));
+ let cp2 = cm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
- let loc2 = cm.lookup_char_pos(BytePos(24));
+ let loc2 = cm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, "blork2.rs");
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
"first line€€.\n€ second line".to_string());
fm1.next_line(BytePos(0));
- fm1.next_line(BytePos(22));
- fm2.next_line(BytePos(40));
- fm2.next_line(BytePos(58));
+ fm1.next_line(BytePos(28));
+ fm2.next_line(fm2.start_pos);
+ fm2.next_line(fm2.start_pos + BytePos(20));
fm1.record_multibyte_char(BytePos(3), 3);
fm1.record_multibyte_char(BytePos(9), 3);
fm1.record_multibyte_char(BytePos(12), 3);
fm1.record_multibyte_char(BytePos(15), 3);
fm1.record_multibyte_char(BytePos(18), 3);
- fm2.record_multibyte_char(BytePos(50), 3);
- fm2.record_multibyte_char(BytePos(53), 3);
- fm2.record_multibyte_char(BytePos(58), 3);
+ fm2.record_multibyte_char(fm2.start_pos + BytePos(10), 3);
+ fm2.record_multibyte_char(fm2.start_pos + BytePos(13), 3);
+ fm2.record_multibyte_char(fm2.start_pos + BytePos(18), 3);
cm
}
Span { lo: BytePos(left_index), hi: BytePos(right_index + 1), expn_id: NO_EXPANSION }
}
- fn new_filemap_and_lines(cm: &CodeMap, filename: &str, input: &str) -> Rc<FileMap> {
- let fm = cm.new_filemap(filename.to_string(), input.to_string());
- let mut byte_pos: u32 = 0;
- for line in input.lines() {
- // register the start of this line
- fm.next_line(BytePos(byte_pos));
-
- // update byte_pos to include this line and the \n at the end
- byte_pos += line.len() as u32 + 1;
- }
- fm
- }
-
/// Test span_to_snippet and span_to_lines for a span coverting 3
/// lines in the middle of a file.
#[test]
let cm = CodeMap::new();
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ^~\n~~~\n~~~~~ \n \n";
- new_filemap_and_lines(&cm, "blork.rs", inputtext);
+ cm.new_filemap_and_lines("blork.rs", inputtext);
let span = span_from_selection(inputtext, selection);
// check that we are extracting the text we thought we were extracting
}
pub fn fatal(&self, msg: &str) -> ! {
self.emit.borrow_mut().emit(None, msg, None, Fatal);
+
+ // Suppress the fatal error message from the panic below as we've
+ // already terminated in our own "legitimate" fashion.
+ io::set_panic(Box::new(io::sink()));
panic!(FatalError);
}
pub fn err(&self, msg: &str) {
}
}
-fn print_maybe_styled(w: &mut EmitterWriter,
- msg: &str,
- color: term::attr::Attr) -> io::Result<()> {
- match w.dst {
- Terminal(ref mut t) => {
- try!(t.attr(color));
- // If `msg` ends in a newline, we need to reset the color before
- // the newline. We're making the assumption that we end up writing
- // to a `LineBufferedWriter`, which means that emitting the reset
- // after the newline ends up buffering the reset until we print
- // another line or exit. Buffering the reset is a problem if we're
- // sharing the terminal with any other programs (e.g. other rustc
- // instances via `make -jN`).
- //
- // Note that if `msg` contains any internal newlines, this will
- // result in the `LineBufferedWriter` flushing twice instead of
- // once, which still leaves the opportunity for interleaved output
- // to be miscolored. We assume this is rare enough that we don't
- // have to worry about it.
- if msg.ends_with("\n") {
- try!(t.write_all(msg[..msg.len()-1].as_bytes()));
- try!(t.reset());
- try!(t.write_all(b"\n"));
- } else {
- try!(t.write_all(msg.as_bytes()));
- try!(t.reset());
- }
- Ok(())
- }
- Raw(ref mut w) => w.write_all(msg.as_bytes()),
- }
-}
-
-fn print_diagnostic(dst: &mut EmitterWriter, topic: &str, lvl: Level,
- msg: &str, code: Option<&str>) -> io::Result<()> {
- if !topic.is_empty() {
- try!(write!(&mut dst.dst, "{} ", topic));
- }
-
- try!(print_maybe_styled(dst,
- &format!("{}: ", lvl.to_string()),
- term::attr::ForegroundColor(lvl.color())));
- try!(print_maybe_styled(dst,
- &format!("{}", msg),
- term::attr::Bold));
-
- match code {
- Some(code) => {
- let style = term::attr::ForegroundColor(term::color::BRIGHT_MAGENTA);
- try!(print_maybe_styled(dst, &format!(" [{}]", code.clone()), style));
- }
- None => ()
- }
- try!(write!(&mut dst.dst, "\n"));
- Ok(())
-}
-
pub struct EmitterWriter {
dst: Destination,
registry: Option<diagnostics::registry::Registry>
Raw(Box<Write + Send>),
}
+/// Do not use this for messages that end in `\n` – use `println_maybe_styled` instead. See
+/// `EmitterWriter::print_maybe_styled` for details.
+macro_rules! print_maybe_styled {
+ ($writer: expr, $style: expr, $($arg: tt)*) => {
+ $writer.print_maybe_styled(format_args!($($arg)*), $style, false)
+ }
+}
+
+macro_rules! println_maybe_styled {
+ ($writer: expr, $style: expr, $($arg: tt)*) => {
+ $writer.print_maybe_styled(format_args!($($arg)*), $style, true)
+ }
+}
+
impl EmitterWriter {
pub fn stderr(color_config: ColorConfig,
registry: Option<diagnostics::registry::Registry>) -> EmitterWriter {
registry: Option<diagnostics::registry::Registry>) -> EmitterWriter {
EmitterWriter { dst: Raw(dst), registry: registry }
}
+
+ fn print_maybe_styled(&mut self,
+ args: fmt::Arguments,
+ color: term::attr::Attr,
+ print_newline_at_end: bool) -> io::Result<()> {
+ match self.dst {
+ Terminal(ref mut t) => {
+ try!(t.attr(color));
+ // If `msg` ends in a newline, we need to reset the color before
+ // the newline. We're making the assumption that we end up writing
+ // to a `LineBufferedWriter`, which means that emitting the reset
+ // after the newline ends up buffering the reset until we print
+ // another line or exit. Buffering the reset is a problem if we're
+ // sharing the terminal with any other programs (e.g. other rustc
+ // instances via `make -jN`).
+ //
+ // Note that if `msg` contains any internal newlines, this will
+ // result in the `LineBufferedWriter` flushing twice instead of
+ // once, which still leaves the opportunity for interleaved output
+ // to be miscolored. We assume this is rare enough that we don't
+ // have to worry about it.
+ try!(t.write_fmt(args));
+ try!(t.reset());
+ if print_newline_at_end {
+ t.write_all(b"\n")
+ } else {
+ Ok(())
+ }
+ }
+ Raw(ref mut w) => {
+ try!(w.write_fmt(args));
+ if print_newline_at_end {
+ w.write_all(b"\n")
+ } else {
+ Ok(())
+ }
+ }
+ }
+ }
+
+ fn print_diagnostic(&mut self, topic: &str, lvl: Level,
+ msg: &str, code: Option<&str>) -> io::Result<()> {
+ if !topic.is_empty() {
+ try!(write!(&mut self.dst, "{} ", topic));
+ }
+
+ try!(print_maybe_styled!(self, term::attr::ForegroundColor(lvl.color()),
+ "{}: ", lvl.to_string()));
+ try!(print_maybe_styled!(self, term::attr::Bold, "{}", msg));
+
+ match code {
+ Some(code) => {
+ let style = term::attr::ForegroundColor(term::color::BRIGHT_MAGENTA);
+ try!(print_maybe_styled!(self, style, " [{}]", code.clone()));
+ }
+ None => ()
+ }
+ try!(write!(&mut self.dst, "\n"));
+ Ok(())
+ }
+
+ fn emit_(&mut self, cm: &codemap::CodeMap, rsp: RenderSpan,
+ msg: &str, code: Option<&str>, lvl: Level) -> io::Result<()> {
+ let sp = rsp.span();
+
+ // We cannot check equality directly with COMMAND_LINE_SP
+ // since PartialEq is manually implemented to ignore the ExpnId
+ let ss = if sp.expn_id == COMMAND_LINE_EXPN {
+ "<command line option>".to_string()
+ } else if let EndSpan(_) = rsp {
+ let span_end = Span { lo: sp.hi, hi: sp.hi, expn_id: sp.expn_id};
+ cm.span_to_string(span_end)
+ } else {
+ cm.span_to_string(sp)
+ };
+
+ try!(self.print_diagnostic(&ss[..], lvl, msg, code));
+
+ match rsp {
+ FullSpan(_) => {
+ try!(self.highlight_lines(cm, sp, lvl, cm.span_to_lines(sp)));
+ try!(self.print_macro_backtrace(cm, sp));
+ }
+ EndSpan(_) => {
+ try!(self.end_highlight_lines(cm, sp, lvl, cm.span_to_lines(sp)));
+ try!(self.print_macro_backtrace(cm, sp));
+ }
+ Suggestion(_, ref suggestion) => {
+ try!(self.highlight_suggestion(cm, sp, suggestion));
+ try!(self.print_macro_backtrace(cm, sp));
+ }
+ FileLine(..) => {
+ // no source text in this case!
+ }
+ }
+
+ match code {
+ Some(code) =>
+ match self.registry.as_ref().and_then(|registry| registry.find_description(code)) {
+ Some(_) => {
+ try!(self.print_diagnostic(&ss[..], Help,
+ &format!("run `rustc --explain {}` to see a \
+ detailed explanation", code), None));
+ }
+ None => ()
+ },
+ None => (),
+ }
+ Ok(())
+ }
+
+ fn highlight_suggestion(&mut self,
+ cm: &codemap::CodeMap,
+ sp: Span,
+ suggestion: &str)
+ -> io::Result<()>
+ {
+ let lines = cm.span_to_lines(sp).unwrap();
+ assert!(!lines.lines.is_empty());
+
+ // To build up the result, we want to take the snippet from the first
+ // line that precedes the span, prepend that with the suggestion, and
+ // then append the snippet from the last line that trails the span.
+ let fm = &lines.file;
+
+ let first_line = &lines.lines[0];
+ let prefix = fm.get_line(first_line.line_index)
+ .map(|l| &l[..first_line.start_col.0])
+ .unwrap_or("");
+
+ let last_line = lines.lines.last().unwrap();
+ let suffix = fm.get_line(last_line.line_index)
+ .map(|l| &l[last_line.end_col.0..])
+ .unwrap_or("");
+
+ let complete = format!("{}{}{}", prefix, suggestion, suffix);
+
+ // print the suggestion without any line numbers, but leave
+ // space for them. This helps with lining up with previous
+ // snippets from the actual error being reported.
+ let fm = &*lines.file;
+ let mut lines = complete.lines();
+ for (line, line_index) in lines.by_ref().take(MAX_LINES).zip(first_line.line_index..) {
+ let elided_line_num = format!("{}", line_index+1);
+ try!(write!(&mut self.dst, "{0}:{1:2$} {3}\n",
+ fm.name, "", elided_line_num.len(), line));
+ }
+
+ // if we elided some lines, add an ellipsis
+ if lines.next().is_some() {
+ let elided_line_num = format!("{}", first_line.line_index + MAX_LINES + 1);
+ try!(write!(&mut self.dst, "{0:1$} {0:2$} ...\n",
+ "", fm.name.len(), elided_line_num.len()));
+ }
+
+ Ok(())
+ }
+
+ fn highlight_lines(&mut self,
+ cm: &codemap::CodeMap,
+ sp: Span,
+ lvl: Level,
+ lines: codemap::FileLinesResult)
+ -> io::Result<()>
+ {
+ let lines = match lines {
+ Ok(lines) => lines,
+ Err(_) => {
+ try!(write!(&mut self.dst, "(internal compiler error: unprintable span)\n"));
+ return Ok(());
+ }
+ };
+
+ let fm = &*lines.file;
+
+ let line_strings: Option<Vec<&str>> =
+ lines.lines.iter()
+ .map(|info| fm.get_line(info.line_index))
+ .collect();
+
+ let line_strings = match line_strings {
+ None => { return Ok(()); }
+ Some(line_strings) => line_strings
+ };
+
+ // Display only the first MAX_LINES lines.
+ let all_lines = lines.lines.len();
+ let display_lines = cmp::min(all_lines, MAX_LINES);
+ let display_line_infos = &lines.lines[..display_lines];
+ let display_line_strings = &line_strings[..display_lines];
+
+ // Calculate the widest number to format evenly and fix #11715
+ assert!(display_line_infos.len() > 0);
+ let mut max_line_num = display_line_infos[display_line_infos.len() - 1].line_index + 1;
+ let mut digits = 0;
+ while max_line_num > 0 {
+ max_line_num /= 10;
+ digits += 1;
+ }
+
+ // Print the offending lines
+ for (line_info, line) in display_line_infos.iter().zip(display_line_strings) {
+ try!(write!(&mut self.dst, "{}:{:>width$} {}\n",
+ fm.name,
+ line_info.line_index + 1,
+ line,
+ width=digits));
+ }
+
+ // If we elided something, put an ellipsis.
+ if display_lines < all_lines {
+ let last_line_index = display_line_infos.last().unwrap().line_index;
+ let s = format!("{}:{} ", fm.name, last_line_index + 1);
+ try!(write!(&mut self.dst, "{0:1$}...\n", "", s.len()));
+ }
+
+ // FIXME (#3260)
+ // If there's one line at fault we can easily point to the problem
+ if lines.lines.len() == 1 {
+ let lo = cm.lookup_char_pos(sp.lo);
+ let mut digits = 0;
+ let mut num = (lines.lines[0].line_index + 1) / 10;
+
+ // how many digits must be indented past?
+ while num > 0 { num /= 10; digits += 1; }
+
+ let mut s = String::new();
+ // Skip is the number of characters we need to skip because they are
+ // part of the 'filename:line ' part of the previous line.
+ let skip = fm.name.chars().count() + digits + 3;
+ for _ in 0..skip {
+ s.push(' ');
+ }
+ if let Some(orig) = fm.get_line(lines.lines[0].line_index) {
+ let mut col = skip;
+ let mut lastc = ' ';
+ let mut iter = orig.chars().enumerate();
+ for (pos, ch) in iter.by_ref() {
+ lastc = ch;
+ if pos >= lo.col.to_usize() { break; }
+ // Whenever a tab occurs on the previous line, we insert one on
+ // the error-point-squiggly-line as well (instead of a space).
+ // That way the squiggly line will usually appear in the correct
+ // position.
+ match ch {
+ '\t' => {
+ col += 8 - col%8;
+ s.push('\t');
+ },
+ _ => {
+ col += 1;
+ s.push(' ');
+ },
+ }
+ }
+
+ try!(write!(&mut self.dst, "{}", s));
+ let mut s = String::from("^");
+ let count = match lastc {
+ // Most terminals have a tab stop every eight columns by default
+ '\t' => 8 - col%8,
+ _ => 1,
+ };
+ col += count;
+ s.extend(::std::iter::repeat('~').take(count));
+
+ let hi = cm.lookup_char_pos(sp.hi);
+ if hi.col != lo.col {
+ for (pos, ch) in iter {
+ if pos >= hi.col.to_usize() { break; }
+ let count = match ch {
+ '\t' => 8 - col%8,
+ _ => 1,
+ };
+ col += count;
+ s.extend(::std::iter::repeat('~').take(count));
+ }
+ }
+
+ if s.len() > 1 {
+ // One extra squiggly is replaced by a "^"
+ s.pop();
+ }
+
+ try!(println_maybe_styled!(self, term::attr::ForegroundColor(lvl.color()),
+ "{}", s));
+ }
+ }
+ Ok(())
+ }
+
+ /// Here are the differences between this and the normal `highlight_lines`:
+ /// `end_highlight_lines` will always put arrow on the last byte of the
+ /// span (instead of the first byte). Also, when the span is too long (more
+ /// than 6 lines), `end_highlight_lines` will print the first line, then
+ /// dot dot dot, then last line, whereas `highlight_lines` prints the first
+ /// six lines.
+ #[allow(deprecated)]
+ fn end_highlight_lines(&mut self,
+ cm: &codemap::CodeMap,
+ sp: Span,
+ lvl: Level,
+ lines: codemap::FileLinesResult)
+ -> io::Result<()> {
+ let lines = match lines {
+ Ok(lines) => lines,
+ Err(_) => {
+ try!(write!(&mut self.dst, "(internal compiler error: unprintable span)\n"));
+ return Ok(());
+ }
+ };
+
+ let fm = &*lines.file;
+
+ let lines = &lines.lines[..];
+ if lines.len() > MAX_LINES {
+ if let Some(line) = fm.get_line(lines[0].line_index) {
+ try!(write!(&mut self.dst, "{}:{} {}\n", fm.name,
+ lines[0].line_index + 1, line));
+ }
+ try!(write!(&mut self.dst, "...\n"));
+ let last_line_index = lines[lines.len() - 1].line_index;
+ if let Some(last_line) = fm.get_line(last_line_index) {
+ try!(write!(&mut self.dst, "{}:{} {}\n", fm.name,
+ last_line_index + 1, last_line));
+ }
+ } else {
+ for line_info in lines {
+ if let Some(line) = fm.get_line(line_info.line_index) {
+ try!(write!(&mut self.dst, "{}:{} {}\n", fm.name,
+ line_info.line_index + 1, line));
+ }
+ }
+ }
+ let last_line_start = format!("{}:{} ", fm.name, lines[lines.len()-1].line_index + 1);
+ let hi = cm.lookup_char_pos(sp.hi);
+ let skip = last_line_start.chars().count();
+ let mut s = String::new();
+ for _ in 0..skip {
+ s.push(' ');
+ }
+ if let Some(orig) = fm.get_line(lines[0].line_index) {
+ let iter = orig.chars().enumerate();
+ for (pos, ch) in iter {
+ // Span seems to use a half-open interval, so subtract 1
+ if pos >= hi.col.to_usize() - 1 { break; }
+ // Whenever a tab occurs on the previous line, we insert one on
+ // the error-point-squiggly-line as well (instead of a space).
+ // That way the squiggly line will usually appear in the correct
+ // position.
+ match ch {
+ '\t' => s.push('\t'),
+ _ => s.push(' '),
+ }
+ }
+ }
+ s.push('^');
+ println_maybe_styled!(self, term::attr::ForegroundColor(lvl.color()),
+ "{}", s)
+ }
+
+ fn print_macro_backtrace(&mut self,
+ cm: &codemap::CodeMap,
+ sp: Span)
+ -> io::Result<()> {
+ let cs = try!(cm.with_expn_info(sp.expn_id, |expn_info| -> io::Result<_> {
+ match expn_info {
+ Some(ei) => {
+ let ss = ei.callee.span.map_or(String::new(),
+ |span| cm.span_to_string(span));
+ let (pre, post) = match ei.callee.format {
+ codemap::MacroAttribute => ("#[", "]"),
+ codemap::MacroBang => ("", "!"),
+ codemap::CompilerExpansion => ("", ""),
+ };
+ try!(self.print_diagnostic(&ss, Note,
+ &format!("in expansion of {}{}{}",
+ pre,
+ ei.callee.name,
+ post),
+ None));
+ let ss = cm.span_to_string(ei.call_site);
+ try!(self.print_diagnostic(&ss, Note, "expansion site", None));
+ Ok(Some(ei.call_site))
+ }
+ None => Ok(None)
+ }
+ }));
+ cs.map_or(Ok(()), |call_site| self.print_macro_backtrace(cm, call_site))
+ }
}
#[cfg(unix)]
cmsp: Option<(&codemap::CodeMap, Span)>,
msg: &str, code: Option<&str>, lvl: Level) {
let error = match cmsp {
- Some((cm, COMMAND_LINE_SP)) => emit(self, cm,
+ Some((cm, COMMAND_LINE_SP)) => self.emit_(cm,
FileLine(COMMAND_LINE_SP),
msg, code, lvl),
- Some((cm, sp)) => emit(self, cm, FullSpan(sp), msg, code, lvl),
- None => print_diagnostic(self, "", lvl, msg, code),
+ Some((cm, sp)) => self.emit_(cm, FullSpan(sp), msg, code, lvl),
+ None => self.print_diagnostic("", lvl, msg, code),
};
match error {
fn custom_emit(&mut self, cm: &codemap::CodeMap,
sp: RenderSpan, msg: &str, lvl: Level) {
- match emit(self, cm, sp, msg, None, lvl) {
+ match self.emit_(cm, sp, msg, None, lvl) {
Ok(()) => {}
Err(e) => panic!("failed to print diagnostics: {:?}", e),
}
}
}
-fn emit(dst: &mut EmitterWriter, cm: &codemap::CodeMap, rsp: RenderSpan,
- msg: &str, code: Option<&str>, lvl: Level) -> io::Result<()> {
- let sp = rsp.span();
-
- // We cannot check equality directly with COMMAND_LINE_SP
- // since PartialEq is manually implemented to ignore the ExpnId
- let ss = if sp.expn_id == COMMAND_LINE_EXPN {
- "<command line option>".to_string()
- } else if let EndSpan(_) = rsp {
- let span_end = Span { lo: sp.hi, hi: sp.hi, expn_id: sp.expn_id};
- cm.span_to_string(span_end)
- } else {
- cm.span_to_string(sp)
- };
-
- try!(print_diagnostic(dst, &ss[..], lvl, msg, code));
-
- match rsp {
- FullSpan(_) => {
- try!(highlight_lines(dst, cm, sp, lvl, cm.span_to_lines(sp)));
- try!(print_macro_backtrace(dst, cm, sp));
- }
- EndSpan(_) => {
- try!(end_highlight_lines(dst, cm, sp, lvl, cm.span_to_lines(sp)));
- try!(print_macro_backtrace(dst, cm, sp));
- }
- Suggestion(_, ref suggestion) => {
- try!(highlight_suggestion(dst, cm, sp, suggestion));
- try!(print_macro_backtrace(dst, cm, sp));
- }
- FileLine(..) => {
- // no source text in this case!
- }
- }
-
- match code {
- Some(code) =>
- match dst.registry.as_ref().and_then(|registry| registry.find_description(code)) {
- Some(_) => {
- try!(print_diagnostic(dst, &ss[..], Help,
- &format!("run `rustc --explain {}` to see a detailed \
- explanation", code), None));
- }
- None => ()
- },
- None => (),
- }
- Ok(())
-}
-
-fn highlight_suggestion(err: &mut EmitterWriter,
- cm: &codemap::CodeMap,
- sp: Span,
- suggestion: &str)
- -> io::Result<()>
-{
- let lines = cm.span_to_lines(sp).unwrap();
- assert!(!lines.lines.is_empty());
-
- // To build up the result, we want to take the snippet from the first
- // line that precedes the span, prepend that with the suggestion, and
- // then append the snippet from the last line that trails the span.
- let fm = &lines.file;
-
- let first_line = &lines.lines[0];
- let prefix = fm.get_line(first_line.line_index)
- .map(|l| &l[..first_line.start_col.0])
- .unwrap_or("");
-
- let last_line = lines.lines.last().unwrap();
- let suffix = fm.get_line(last_line.line_index)
- .map(|l| &l[last_line.end_col.0..])
- .unwrap_or("");
-
- let complete = format!("{}{}{}", prefix, suggestion, suffix);
-
- // print the suggestion without any line numbers, but leave
- // space for them. This helps with lining up with previous
- // snippets from the actual error being reported.
- let fm = &*lines.file;
- let mut lines = complete.lines();
- for (line, line_index) in lines.by_ref().take(MAX_LINES).zip(first_line.line_index..) {
- let elided_line_num = format!("{}", line_index+1);
- try!(write!(&mut err.dst, "{0}:{1:2$} {3}\n",
- fm.name, "", elided_line_num.len(), line));
- }
-
- // if we elided some lines, add an ellipsis
- if lines.next().is_some() {
- let elided_line_num = format!("{}", first_line.line_index + MAX_LINES + 1);
- try!(write!(&mut err.dst, "{0:1$} {0:2$} ...\n",
- "", fm.name.len(), elided_line_num.len()));
- }
-
- Ok(())
-}
-
-fn highlight_lines(err: &mut EmitterWriter,
- cm: &codemap::CodeMap,
- sp: Span,
- lvl: Level,
- lines: codemap::FileLinesResult)
- -> io::Result<()>
-{
- let lines = match lines {
- Ok(lines) => lines,
- Err(_) => {
- try!(write!(&mut err.dst, "(internal compiler error: unprintable span)\n"));
- return Ok(());
- }
- };
-
- let fm = &*lines.file;
-
- let line_strings: Option<Vec<&str>> =
- lines.lines.iter()
- .map(|info| fm.get_line(info.line_index))
- .collect();
-
- let line_strings = match line_strings {
- None => { return Ok(()); }
- Some(line_strings) => line_strings
- };
-
- // Display only the first MAX_LINES lines.
- let all_lines = lines.lines.len();
- let display_lines = cmp::min(all_lines, MAX_LINES);
- let display_line_infos = &lines.lines[..display_lines];
- let display_line_strings = &line_strings[..display_lines];
-
- // Print the offending lines
- for (line_info, line) in display_line_infos.iter().zip(display_line_strings) {
- try!(write!(&mut err.dst, "{}:{} {}\n",
- fm.name,
- line_info.line_index + 1,
- line));
- }
-
- // If we elided something, put an ellipsis.
- if display_lines < all_lines {
- let last_line_index = display_line_infos.last().unwrap().line_index;
- let s = format!("{}:{} ", fm.name, last_line_index + 1);
- try!(write!(&mut err.dst, "{0:1$}...\n", "", s.len()));
- }
-
- // FIXME (#3260)
- // If there's one line at fault we can easily point to the problem
- if lines.lines.len() == 1 {
- let lo = cm.lookup_char_pos(sp.lo);
- let mut digits = 0;
- let mut num = (lines.lines[0].line_index + 1) / 10;
-
- // how many digits must be indent past?
- while num > 0 { num /= 10; digits += 1; }
-
- let mut s = String::new();
- // Skip is the number of characters we need to skip because they are
- // part of the 'filename:line ' part of the previous line.
- let skip = fm.name.chars().count() + digits + 3;
- for _ in 0..skip {
- s.push(' ');
- }
- if let Some(orig) = fm.get_line(lines.lines[0].line_index) {
- let mut col = skip;
- let mut lastc = ' ';
- let mut iter = orig.chars().enumerate();
- for (pos, ch) in iter.by_ref() {
- lastc = ch;
- if pos >= lo.col.to_usize() { break; }
- // Whenever a tab occurs on the previous line, we insert one on
- // the error-point-squiggly-line as well (instead of a space).
- // That way the squiggly line will usually appear in the correct
- // position.
- match ch {
- '\t' => {
- col += 8 - col%8;
- s.push('\t');
- },
- _ => {
- col += 1;
- s.push(' ');
- },
- }
- }
-
- try!(write!(&mut err.dst, "{}", s));
- let mut s = String::from("^");
- let count = match lastc {
- // Most terminals have a tab stop every eight columns by default
- '\t' => 8 - col%8,
- _ => 1,
- };
- col += count;
- s.extend(::std::iter::repeat('~').take(count));
-
- let hi = cm.lookup_char_pos(sp.hi);
- if hi.col != lo.col {
- for (pos, ch) in iter {
- if pos >= hi.col.to_usize() { break; }
- let count = match ch {
- '\t' => 8 - col%8,
- _ => 1,
- };
- col += count;
- s.extend(::std::iter::repeat('~').take(count));
- }
- }
-
- if s.len() > 1 {
- // One extra squiggly is replaced by a "^"
- s.pop();
- }
-
- try!(print_maybe_styled(err,
- &format!("{}\n", s),
- term::attr::ForegroundColor(lvl.color())));
- }
- }
- Ok(())
-}
-
-/// Here are the differences between this and the normal `highlight_lines`:
-/// `end_highlight_lines` will always put arrow on the last byte of the
-/// span (instead of the first byte). Also, when the span is too long (more
-/// than 6 lines), `end_highlight_lines` will print the first line, then
-/// dot dot dot, then last line, whereas `highlight_lines` prints the first
-/// six lines.
-#[allow(deprecated)]
-fn end_highlight_lines(w: &mut EmitterWriter,
- cm: &codemap::CodeMap,
- sp: Span,
- lvl: Level,
- lines: codemap::FileLinesResult)
- -> io::Result<()> {
- let lines = match lines {
- Ok(lines) => lines,
- Err(_) => {
- try!(write!(&mut w.dst, "(internal compiler error: unprintable span)\n"));
- return Ok(());
- }
- };
-
- let fm = &*lines.file;
-
- let lines = &lines.lines[..];
- if lines.len() > MAX_LINES {
- if let Some(line) = fm.get_line(lines[0].line_index) {
- try!(write!(&mut w.dst, "{}:{} {}\n", fm.name,
- lines[0].line_index + 1, line));
- }
- try!(write!(&mut w.dst, "...\n"));
- let last_line_index = lines[lines.len() - 1].line_index;
- if let Some(last_line) = fm.get_line(last_line_index) {
- try!(write!(&mut w.dst, "{}:{} {}\n", fm.name,
- last_line_index + 1, last_line));
- }
- } else {
- for line_info in lines {
- if let Some(line) = fm.get_line(line_info.line_index) {
- try!(write!(&mut w.dst, "{}:{} {}\n", fm.name,
- line_info.line_index + 1, line));
- }
- }
- }
- let last_line_start = format!("{}:{} ", fm.name, lines[lines.len()-1].line_index + 1);
- let hi = cm.lookup_char_pos(sp.hi);
- let skip = last_line_start.chars().count();
- let mut s = String::new();
- for _ in 0..skip {
- s.push(' ');
- }
- if let Some(orig) = fm.get_line(lines[0].line_index) {
- let iter = orig.chars().enumerate();
- for (pos, ch) in iter {
- // Span seems to use half-opened interval, so subtract 1
- if pos >= hi.col.to_usize() - 1 { break; }
- // Whenever a tab occurs on the previous line, we insert one on
- // the error-point-squiggly-line as well (instead of a space).
- // That way the squiggly line will usually appear in the correct
- // position.
- match ch {
- '\t' => s.push('\t'),
- _ => s.push(' '),
- }
- }
- }
- s.push('^');
- s.push('\n');
- print_maybe_styled(w,
- &s[..],
- term::attr::ForegroundColor(lvl.color()))
-}
-
-fn print_macro_backtrace(w: &mut EmitterWriter,
- cm: &codemap::CodeMap,
- sp: Span)
- -> io::Result<()> {
- let cs = try!(cm.with_expn_info(sp.expn_id, |expn_info| -> io::Result<_> {
- match expn_info {
- Some(ei) => {
- let ss = ei.callee.span.map_or(String::new(),
- |span| cm.span_to_string(span));
- let (pre, post) = match ei.callee.format {
- codemap::MacroAttribute => ("#[", "]"),
- codemap::MacroBang => ("", "!"),
- codemap::CompilerExpansion => ("", ""),
- };
- try!(print_diagnostic(w, &ss, Note,
- &format!("in expansion of {}{}{}",
- pre,
- ei.callee.name,
- post),
- None));
- let ss = cm.span_to_string(ei.call_site);
- try!(print_diagnostic(w, &ss, Note, "expansion site", None));
- Ok(Some(ei.call_site))
- }
- None => Ok(None)
- }
- }));
- cs.map_or(Ok(()), |call_site| print_macro_backtrace(w, cm, call_site))
-}
-
pub fn expect<T, M>(diag: &SpanHandler, opt: Option<T>, msg: M) -> T where
M: FnOnce() -> String,
{
None => diag.handler().bug(&msg()),
}
}
+
+#[cfg(test)]
+mod test {
+ use super::{EmitterWriter, Level};
+ use codemap::{mk_sp, CodeMap, BytePos};
+ use std::sync::{Arc, Mutex};
+ use std::io::{self, Write};
+ use std::str::from_utf8;
+
+ // Diagnostics don't align properly in spans where the line number increases by one digit
+ #[test]
+ fn test_hilight_suggestion_issue_11715() {
+ struct Sink(Arc<Mutex<Vec<u8>>>);
+ impl Write for Sink {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ Write::write(&mut *self.0.lock().unwrap(), data)
+ }
+ fn flush(&mut self) -> io::Result<()> { Ok(()) }
+ }
+ let data = Arc::new(Mutex::new(Vec::new()));
+ let mut ew = EmitterWriter::new(Box::new(Sink(data.clone())), None);
+ let cm = CodeMap::new();
+ let content = "abcdefg
+ koksi
+ line3
+ line4
+ cinq
+ line6
+ line7
+ line8
+ line9
+ line10
+ e-lä-vän
+ tolv
+ dreizehn
+ ";
+ let file = cm.new_filemap_and_lines("dummy.txt", content);
+ let start = file.lines.borrow()[7];
+ let end = file.lines.borrow()[11];
+ let sp = mk_sp(start, end);
+ let lvl = Level::Error;
+ println!("span_to_lines");
+ let lines = cm.span_to_lines(sp);
+ println!("highlight_lines");
+ ew.highlight_lines(&cm, sp, lvl, lines).unwrap();
+ println!("done");
+ let vec = data.lock().unwrap().clone();
+ let vec: &[u8] = &vec;
+ let str = from_utf8(vec).unwrap();
+ println!("{}", str);
+ assert_eq!(str, "dummy.txt: 8 line8\n\
+ dummy.txt: 9 line9\n\
+ dummy.txt:10 line10\n\
+ dummy.txt:11 e-lä-vän\n\
+ dummy.txt:12 tolv\n");
+ }
+}
macro_rules! register_diagnostics {
($($code:tt),*) => (
$(register_diagnostic! { $code })*
+ );
+ ($($code:tt),*,) => (
+ $(register_diagnostic! { $code })*
)
}
macro_rules! register_long_diagnostics {
($($code:tt: $description:tt),*) => (
$(register_diagnostic! { $code, $description })*
+ );
+ ($($code:tt: $description:tt),*,) => (
+ $(register_diagnostic! { $code, $description })*
)
}
// Previously used errors.
Some(&mut ErrorInfo { description: _, use_site: Some(previous_span) }) => {
ecx.span_warn(span, &format!(
- "diagnostic code {} already used", &token::get_ident(code)
+ "diagnostic code {} already used", code
));
ecx.span_note(previous_span, "previous invocation");
}
// Unregistered errors.
None => {
ecx.span_err(span, &format!(
- "used diagnostic code {} not registered", &token::get_ident(code)
+ "used diagnostic code {} not registered", code
));
}
}
if !msg.starts_with("\n") || !msg.ends_with("\n") {
ecx.span_err(span, &format!(
"description for error code {} doesn't start and end with a newline",
- token::get_ident(*code)
+ code
));
}
ecx.span_err(span, &format!(
"description for error code {} contains a line longer than {} characters.\n\
if you're inserting a long URL use the footnote style to bypass this check.",
- token::get_ident(*code), MAX_DESCRIPTION_WIDTH
+ code, MAX_DESCRIPTION_WIDTH
));
}
});
};
if diagnostics.insert(code.name, info).is_some() {
ecx.span_err(span, &format!(
- "diagnostic code {} already registered", &token::get_ident(*code)
+ "diagnostic code {} already registered", code
));
}
});
- let sym = Ident::new(token::gensym(&(
- "__register_diagnostic_".to_string() + &token::get_ident(*code)
+ let sym = Ident::new(token::gensym(&format!(
+ "__register_diagnostic_{}", code
)));
MacEager::items(SmallVector::many(vec![
ecx.item_mod(
&ast::TtToken(_, token::Ident(ref crate_name, _)),
// DIAGNOSTICS ident.
&ast::TtToken(_, token::Ident(ref name, _))
- ) => (crate_name.as_str(), name),
+ ) => (*&crate_name, name),
_ => unreachable!()
};
.ok().expect("unable to determine target arch from $CFG_COMPILER_HOST_TRIPLE");
with_registered_diagnostics(|diagnostics| {
- if let Err(e) = output_metadata(ecx, &target_triple, crate_name, &diagnostics) {
+ if let Err(e) = output_metadata(ecx,
+ &target_triple,
+ &crate_name.name.as_str(),
+ &diagnostics) {
ecx.span_bug(span, &format!(
"error writing metadata for triple `{}` and crate `{}`, error: {}, cause: {:?}",
target_triple, crate_name, e.description(), e.cause()
diagnostics.iter().filter_map(|(code, info)| {
info.description.map(|description| {
ecx.expr_tuple(span, vec![
- ecx.expr_str(span, token::get_name(*code)),
- ecx.expr_str(span, token::get_name(description))
+ ecx.expr_str(span, code.as_str()),
+ ecx.expr_str(span, description.as_str())
])
})
}).collect();
syntax_expanders.insert(intern("cfg"),
builtin_normal_expander(
ext::cfg::expand_cfg));
+ syntax_expanders.insert(intern("push_unsafe"),
+ builtin_normal_expander(
+ ext::pushpop_safe::expand_push_unsafe));
+ syntax_expanders.insert(intern("pop_unsafe"),
+ builtin_normal_expander(
+ ext::pushpop_safe::expand_pop_unsafe));
syntax_expanders.insert(intern("trace_macros"),
builtin_normal_expander(
ext::trace_macros::expand_trace_macros));
init: Some(ex),
id: ast::DUMMY_NODE_ID,
span: sp,
- source: ast::LocalLet,
});
let decl = respan(sp, ast::DeclLocal(local));
P(respan(sp, ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID)))
init: Some(ex),
id: ast::DUMMY_NODE_ID,
span: sp,
- source: ast::LocalLet,
});
let decl = respan(sp, ast::DeclLocal(local));
P(respan(sp, ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID)))
}
fn expr_field_access(&self, sp: Span, expr: P<ast::Expr>, ident: ast::Ident) -> P<ast::Expr> {
- let field_name = token::get_ident(ident);
let field_span = Span {
- lo: sp.lo - Pos::from_usize(field_name.len()),
+ lo: sp.lo - Pos::from_usize(ident.name.as_str().len()),
hi: sp.hi,
expn_id: sp.expn_id,
};
} else {
match *e {
ast::TtToken(_, token::Ident(ident, _)) => {
- res_str.push_str(&token::get_ident(ident))
+ res_str.push_str(&ident.name.as_str())
},
_ => {
cx.span_err(sp, "concat_idents! requires ident args.");
}
}
}
- let res = str_to_ident(&res_str[..]);
+ let res = str_to_ident(&res_str);
let e = P(ast::Expr {
id: ast::DUMMY_NODE_ID,
decoder,
cx.ident_of("read_struct"),
vec!(
- cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
+ cx.expr_str(trait_span, substr.type_ident.name.as_str()),
cx.expr_usize(trait_span, nfields),
cx.lambda_expr_1(trait_span, result, blkarg)
))
let mut variants = Vec::new();
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
- for (i, &(name, v_span, ref parts)) in fields.iter().enumerate() {
- variants.push(cx.expr_str(v_span, token::get_ident(name)));
+ for (i, &(ident, v_span, ref parts)) in fields.iter().enumerate() {
+ variants.push(cx.expr_str(v_span, ident.name.as_str()));
- let path = cx.path(trait_span, vec![substr.type_ident, name]);
+ let path = cx.path(trait_span, vec![substr.type_ident, ident]);
let decoded = decode_static_fields(cx,
v_span,
path,
decoder,
cx.ident_of("read_enum"),
vec!(
- cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
+ cx.expr_str(trait_span, substr.type_ident.name.as_str()),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
- let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
- let arg = getarg(cx, span, token::get_ident(name), i);
- cx.field_imm(span, name, arg)
+ let fields = fields.iter().enumerate().map(|(i, &(ident, span))| {
+ let arg = getarg(cx, span, ident.name.as_str(), i);
+ cx.field_imm(span, ident, arg)
}).collect();
cx.expr_struct(trait_span, outer_pat_path, fields)
}
..
}) in fields.iter().enumerate() {
let name = match name {
- Some(id) => token::get_ident(id),
+ Some(id) => id.name.as_str(),
None => {
token::intern_and_get_ident(&format!("_field{}", i))
}
encoder,
cx.ident_of("emit_struct"),
vec!(
- cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
+ cx.expr_str(trait_span, substr.type_ident.name.as_str()),
cx.expr_usize(trait_span, fields.len()),
blk
))
}
let blk = cx.lambda_stmts_1(trait_span, stmts, blkarg);
- let name = cx.expr_str(trait_span, token::get_ident(variant.node.name));
+ let name = cx.expr_str(trait_span, variant.node.name.name.as_str());
let call = cx.expr_method_call(trait_span, blkencoder,
cx.ident_of("emit_enum_variant"),
vec!(name,
encoder,
cx.ident_of("emit_enum"),
vec!(
- cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
+ cx.expr_str(trait_span, substr.type_ident.name.as_str()),
blk
));
cx.expr_block(cx.block(trait_span, vec!(me), Some(ret)))
use self::StructType::*;
use std::cell::RefCell;
+use std::collections::HashSet;
use std::vec;
use abi::Abi;
.map(|ty_param| ty_param.ident.name)
.collect();
+ let mut processed_field_types = HashSet::new();
for field_ty in field_tys {
let tys = find_type_parameters(&*field_ty, &ty_param_names);
for ty in tys {
+ // if we have already handled this type, skip it
+ if let ast::TyPath(_, ref p) = ty.node {
+ if p.segments.len() == 1
+ && ty_param_names.contains(&p.segments[0].identifier.name)
+ || processed_field_types.contains(&p.segments) {
+ continue;
+ };
+ processed_field_types.insert(p.segments.clone());
+ }
let mut bounds: Vec<_> = self.additional_bounds.iter().map(|p| {
cx.typarambound(p.to_path(cx, self.span, type_ident, generics))
}).collect();
// build fmt.debug_struct(<name>).field(<fieldname>, &<fieldval>)....build()
// or fmt.debug_tuple(<name>).field(&<fieldval>)....build()
// based on the "shape".
- let name = match *substr.fields {
+ let ident = match *substr.fields {
Struct(_) => substr.type_ident,
EnumMatching(_, v, _) => v.node.name,
EnumNonMatchingCollapsed(..) | StaticStruct(..) | StaticEnum(..) => {
// We want to make sure we have the expn_id set so that we can use unstable methods
let span = Span { expn_id: cx.backtrace(), .. span };
- let name = cx.expr_lit(span, ast::Lit_::LitStr(token::get_ident(name),
+ let name = cx.expr_lit(span, ast::Lit_::LitStr(ident.name.as_str(),
ast::StrStyle::CookedStr));
let mut expr = substr.nonself_args[0].clone();
for field in fields {
let name = cx.expr_lit(field.span, ast::Lit_::LitStr(
- token::get_ident(field.name.clone().unwrap()),
+ field.name.unwrap().name.as_str(),
ast::StrStyle::CookedStr));
// Use double indirection to make sure this works for unsized types
use visit::Visitor;
use std_inject;
+// Given suffix ["b","c","d"], returns path `::std::b::c::d` when
+// `fld.cx.use_std`, and `::core::b::c::d` otherwise.
+// NOTE(review): the std-vs-core choice is delegated to
+// `ident_of_std("core")`; "core" is the name used when libstd is absent.
+fn mk_core_path(fld: &mut MacroExpander,
+ span: Span,
+ suffix: &[&'static str]) -> ast::Path {
+ let mut idents = vec![fld.cx.ident_of_std("core")];
+ // Append each suffix segment, then build a global (`::`-rooted) path.
+ for s in suffix.iter() { idents.push(fld.cx.ident_of(*s)); }
+ fld.cx.path_global(span, idents)
+}
+
pub fn expand_expr(e: P<ast::Expr>, fld: &mut MacroExpander) -> P<ast::Expr> {
fn push_compiler_expansion(fld: &mut MacroExpander, span: Span, expansion_desc: &str) {
fld.cx.bt_push(ExpnInfo {
callee: NameAndSpan {
name: expansion_desc.to_string(),
format: CompilerExpansion,
+
+ // This does *not* mean code generated after
+ // `push_compiler_expansion` is automatically exempt
+ // from stability lints; must also tag such code with
+ // an appropriate span from `fld.cx.backtrace()`.
allow_internal_unstable: true,
+
span: None,
},
});
}
- e.and_then(|ast::Expr {id, node, span}| match node {
+ // Sets the expn_id so that we can use unstable methods.
+ fn allow_unstable(fld: &mut MacroExpander, span: Span) -> Span {
+ Span { expn_id: fld.cx.backtrace(), ..span }
+ }
+
+ let expr_span = e.span;
+ return e.and_then(|ast::Expr {id, node, span}| match node {
+
// expr_mac should really be expr_ext or something; it's the
// entry-point for all syntax extensions.
ast::ExprMac(mac) => {
};
// Keep going, outside-in.
- //
let fully_expanded = fld.fold_expr(expanded_expr);
+ let span = fld.new_span(span);
fld.cx.bt_pop();
fully_expanded.map(|e| ast::Expr {
id: ast::DUMMY_NODE_ID,
node: e.node,
- span: fld.new_span(span),
+ span: span,
})
}
+ // Desugar ExprBox: `in (PLACE) EXPR`
+ ast::ExprBox(Some(placer), value_expr) => {
+ // to:
+ //
+ // let p = PLACE;
+ // let mut place = Placer::make_place(p);
+ // let raw_place = Place::pointer(&mut place);
+ // push_unsafe!({
+ // std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR ));
+ // InPlace::finalize(place)
+ // })
+
+ // Ensure feature-gate is enabled
+ feature_gate::check_for_placement_in(
+ fld.cx.ecfg.features,
+ &fld.cx.parse_sess.span_diagnostic,
+ expr_span);
+
+ push_compiler_expansion(fld, expr_span, "placement-in expansion");
+
+ let value_span = value_expr.span;
+ let placer_span = placer.span;
+
+ let placer_expr = fld.fold_expr(placer);
+ let value_expr = fld.fold_expr(value_expr);
+
+ let placer_ident = token::gensym_ident("placer");
+ let agent_ident = token::gensym_ident("place");
+ let p_ptr_ident = token::gensym_ident("p_ptr");
+
+ let placer = fld.cx.expr_ident(span, placer_ident);
+ let agent = fld.cx.expr_ident(span, agent_ident);
+ let p_ptr = fld.cx.expr_ident(span, p_ptr_ident);
+
+ let make_place = ["ops", "Placer", "make_place"];
+ let place_pointer = ["ops", "Place", "pointer"];
+ let move_val_init = ["intrinsics", "move_val_init"];
+ let inplace_finalize = ["ops", "InPlace", "finalize"];
+
+ let make_call = |fld: &mut MacroExpander, p, args| {
+ // We feed in the `expr_span` because codemap's span_allows_unstable
+ // allows the call_site span to inherit the `allow_internal_unstable`
+ // setting.
+ let span_unstable = allow_unstable(fld, expr_span);
+ let path = mk_core_path(fld, span_unstable, p);
+ let path = fld.cx.expr_path(path);
+ let expr_span_unstable = allow_unstable(fld, span);
+ fld.cx.expr_call(expr_span_unstable, path, args)
+ };
+
+ let stmt_let = |fld: &mut MacroExpander, bind, expr| {
+ fld.cx.stmt_let(placer_span, false, bind, expr)
+ };
+ let stmt_let_mut = |fld: &mut MacroExpander, bind, expr| {
+ fld.cx.stmt_let(placer_span, true, bind, expr)
+ };
+
+ // let placer = <placer_expr> ;
+ let s1 = stmt_let(fld, placer_ident, placer_expr);
+
+ // let mut place = Placer::make_place(placer);
+ let s2 = {
+ let call = make_call(fld, &make_place, vec![placer]);
+ stmt_let_mut(fld, agent_ident, call)
+ };
+
+ // let p_ptr = Place::pointer(&mut place);
+ let s3 = {
+ let args = vec![fld.cx.expr_mut_addr_of(placer_span, agent.clone())];
+ let call = make_call(fld, &place_pointer, args);
+ stmt_let(fld, p_ptr_ident, call)
+ };
+
+ // pop_unsafe!(EXPR)
+ let pop_unsafe_expr = pop_unsafe_expr(fld.cx, value_expr, value_span);
+
+ // push_unsafe!({
+ // std::intrinsics::move_val_init(p_ptr, pop_unsafe!(<value_expr>));
+ // InPlace::finalize(place)
+ // })
+ let expr = {
+ let call_move_val_init = StmtSemi(make_call(
+ fld, &move_val_init, vec![p_ptr, pop_unsafe_expr]), ast::DUMMY_NODE_ID);
+ let call_move_val_init = codemap::respan(value_span, call_move_val_init);
+
+ let call = make_call(fld, &inplace_finalize, vec![agent]);
+ Some(push_unsafe_expr(fld.cx, vec![P(call_move_val_init)], call, span))
+ };
+
+ let block = fld.cx.block_all(span, vec![s1, s2, s3], expr);
+ let result = fld.cx.expr_block(block);
+ fld.cx.bt_pop();
+ result
+ }
+
+ // Issue #22181:
+ // Eventually a desugaring for `box EXPR`
+ // (similar to the desugaring above for `in PLACE BLOCK`)
+ // should go here, desugaring
+ //
+ // to:
+ //
+ // let mut place = BoxPlace::make_place();
+ // let raw_place = Place::pointer(&mut place);
+ // let value = $value;
+ // unsafe {
+ // ::std::ptr::write(raw_place, value);
+ // Boxed::finalize(place)
+ // }
+ //
+ // But for now there are type-inference issues doing that.
+
ast::ExprWhile(cond, body, opt_ident) => {
let cond = fld.fold_expr(cond);
let (body, opt_ident) = expand_loop_block(body, opt_ident, fld);
span: span
}, fld))
}
- })
+ });
+
+ // Builds a compiler-generated `PushUnsafeBlock` block expression
+ // containing `stmts` followed by the trailing `expr`. Code inside it
+ // is treated as being in an `unsafe` context.
+ fn push_unsafe_expr(cx: &mut ExtCtxt, stmts: Vec<P<ast::Stmt>>,
+ expr: P<ast::Expr>, span: Span)
+ -> P<ast::Expr> {
+ let rules = ast::PushUnsafeBlock(ast::CompilerGenerated);
+ cx.expr_block(P(ast::Block {
+ rules: rules, span: span, id: ast::DUMMY_NODE_ID,
+ stmts: stmts, expr: Some(expr),
+ }))
+ }
+
+ // Builds a compiler-generated `PopUnsafeBlock` block expression whose
+ // only content is the trailing `expr` (no statements); it restores the
+ // checked (non-`unsafe`) context for that expression.
+ fn pop_unsafe_expr(cx: &mut ExtCtxt, expr: P<ast::Expr>, span: Span)
+ -> P<ast::Expr> {
+ let rules = ast::PopUnsafeBlock(ast::CompilerGenerated);
+ cx.expr_block(P(ast::Block {
+ rules: rules, span: span, id: ast::DUMMY_NODE_ID,
+ stmts: vec![], expr: Some(expr),
+ }))
+ }
}
/// Expand a (not-ident-style) macro invocation. Returns the result
/// of expansion and the mark which must be applied to the result.
/// Our current interface doesn't allow us to apply the mark to the
/// result until after calling make_expr, make_items, etc.
-fn expand_mac_invoc<T, F, G>(mac: ast::Mac, span: codemap::Span,
+fn expand_mac_invoc<T, F, G>(mac: ast::Mac,
+ span: codemap::Span,
parse_thunk: F,
mark_thunk: G,
fld: &mut MacroExpander)
// let compilation continue
return None;
}
- let extname = pth.segments[0].identifier;
- let extnamestr = token::get_ident(extname);
- match fld.cx.syntax_env.find(&extname.name) {
+ let extname = pth.segments[0].identifier.name;
+ match fld.cx.syntax_env.find(&extname) {
None => {
fld.cx.span_err(
pth.span,
&format!("macro undefined: '{}!'",
- &extnamestr));
+ &extname));
// let compilation continue
None
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
- name: extnamestr.to_string(),
+ name: extname.to_string(),
format: MacroBang,
span: exp_span,
allow_internal_unstable: allow_internal_unstable,
fld.cx.span_err(
pth.span,
&format!("non-expression macro in expression position: {}",
- &extnamestr[..]
+ extname
));
return None;
}
fld.cx.span_err(
pth.span,
&format!("'{}' is not a tt-style macro",
- &extnamestr));
+ extname));
None
}
}
node: MacInvocTT(ref pth, ref tts, _),
..
}) => {
- (pth.segments[0].identifier, pth.span, (*tts).clone())
+ (pth.segments[0].identifier.name, pth.span, (*tts).clone())
}
_ => fld.cx.span_bug(it.span, "invalid item macro invocation")
};
- let extnamestr = token::get_ident(extname);
let fm = fresh_mark();
let items = {
- let expanded = match fld.cx.syntax_env.find(&extname.name) {
+ let expanded = match fld.cx.syntax_env.find(&extname) {
None => {
fld.cx.span_err(path_span,
&format!("macro undefined: '{}!'",
- extnamestr));
+ extname));
// let compilation continue
return SmallVector::zero();
}
fld.cx
.span_err(path_span,
&format!("macro {}! expects no ident argument, given '{}'",
- extnamestr,
- token::get_ident(it.ident)));
+ extname,
+ it.ident));
return SmallVector::zero();
}
fld.cx.bt_push(ExpnInfo {
call_site: it.span,
callee: NameAndSpan {
- name: extnamestr.to_string(),
+ name: extname.to_string(),
format: MacroBang,
span: span,
allow_internal_unstable: allow_internal_unstable,
if it.ident.name == parse::token::special_idents::invalid.name {
fld.cx.span_err(path_span,
&format!("macro {}! expects an ident argument",
- &extnamestr));
+ extname));
return SmallVector::zero();
}
fld.cx.bt_push(ExpnInfo {
call_site: it.span,
callee: NameAndSpan {
- name: extnamestr.to_string(),
+ name: extname.to_string(),
format: MacroBang,
span: span,
allow_internal_unstable: allow_internal_unstable,
fld.cx.bt_push(ExpnInfo {
call_site: it.span,
callee: NameAndSpan {
- name: extnamestr.to_string(),
+ name: extname.to_string(),
format: MacroBang,
span: None,
// `macro_rules!` doesn't directly allow
_ => {
fld.cx.span_err(it.span,
&format!("{}! is not legal in item position",
- &extnamestr));
+ extname));
return SmallVector::zero();
}
}
None => {
fld.cx.span_err(path_span,
&format!("non-item macro in item position: {}",
- &extnamestr));
+ extname));
return SmallVector::zero();
}
};
StmtDecl(decl, node_id) => decl.and_then(|Spanned {node: decl, span}| match decl {
DeclLocal(local) => {
// take it apart:
- let rewritten_local = local.map(|Local {id, pat, ty, init, source, span}| {
+ let rewritten_local = local.map(|Local {id, pat, ty, init, span}| {
// expand the ty since TyFixedLengthVec contains an Expr
// and thus may have a macro use
let expanded_ty = ty.map(|t| fld.fold_ty(t));
pat: rewritten_pat,
// also, don't forget to expand the init:
init: init.map(|e| fld.fold_expr(e)),
- source: source,
span: span
}
});
fld.cx.span_err(pth.span, "expected macro name without module separators");
return DummyResult::raw_pat(span);
}
- let extname = pth.segments[0].identifier;
- let extnamestr = token::get_ident(extname);
- let marked_after = match fld.cx.syntax_env.find(&extname.name) {
+ let extname = pth.segments[0].identifier.name;
+ let marked_after = match fld.cx.syntax_env.find(&extname) {
None => {
fld.cx.span_err(pth.span,
&format!("macro undefined: '{}!'",
- extnamestr));
+ extname));
// let compilation continue
return DummyResult::raw_pat(span);
}
fld.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
- name: extnamestr.to_string(),
+ name: extname.to_string(),
format: MacroBang,
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
pth.span,
&format!(
"non-pattern macro in pattern position: {}",
- &extnamestr
+ extname
)
);
return DummyResult::raw_pat(span);
_ => {
fld.cx.span_err(span,
&format!("{}! is not legal in pattern position",
- &extnamestr));
+ extname));
return DummyResult::raw_pat(span);
}
}
fn enable_trace_macros = allow_trace_macros,
fn enable_allow_internal_unstable = allow_internal_unstable,
fn enable_custom_derive = allow_custom_derive,
+ fn enable_pushpop_unsafe = allow_pushpop_unsafe,
}
}
= varref.segments.iter().map(|s| s.identifier)
.collect();
println!("varref #{}: {:?}, resolves to {}",idx, varref_idents, varref_name);
- let string = token::get_ident(final_varref_ident);
- println!("varref's first segment's string: \"{}\"", &string[..]);
+ println!("varref's first segment's string: \"{}\"", final_varref_ident);
println!("binding #{}: {}, resolves to {}",
binding_idx, bindings[binding_idx], binding_name);
mtwt::with_sctable(|x| mtwt::display_sctable(x));
// find the xx binding
let bindings = crate_bindings(&cr);
let cxbinds: Vec<&ast::Ident> =
- bindings.iter().filter(|b| {
- let ident = token::get_ident(**b);
- let string = &ident[..];
- "xx" == string
- }).collect();
+ bindings.iter().filter(|b| b.name == "xx").collect();
let cxbinds: &[&ast::Ident] = &cxbinds[..];
let cxbind = match (cxbinds.len(), cxbinds.get(0)) {
(1, Some(b)) => *b,
// the xx binding should bind all of the xx varrefs:
for (idx,v) in varrefs.iter().filter(|p| {
p.segments.len() == 1
- && "xx" == &*token::get_ident(p.segments[0].identifier)
+ && p.segments[0].identifier.name == "xx"
}).enumerate() {
if mtwt::resolve(v.segments[0].identifier) != resolved_binding {
println!("uh oh, xx binding didn't match xx varref:");
use ptr::P;
use std::collections::HashMap;
-use std::iter::repeat;
#[derive(PartialEq)]
enum ArgumentType {
return None;
}
};
- let interned_name = token::get_ident(ident);
- let name = &interned_name[..];
+ let name: &str = &ident.name.as_str();
panictry!(p.expect(&token::Eq));
let e = p.parse_expr();
/// to
fn into_expr(mut self) -> P<ast::Expr> {
let mut locals = Vec::new();
- let mut names: Vec<_> = repeat(None).take(self.name_positions.len()).collect();
+ let mut names = vec![None; self.name_positions.len()];
let mut pats = Vec::new();
let mut heads = Vec::new();
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*
+ * The compiler code necessary to support the `push_unsafe!` and
+ * `pop_unsafe!` macros.
+ *
+ * This is a hack to allow a kind of "safety hygiene", where a macro
+ * can generate code with an interior expression that inherits the
+ * safety of some outer context.
+ *
+ * For example, in:
+ *
+ * ```rust
+ * fn foo() { push_unsafe!( { EXPR_1; pop_unsafe!( EXPR_2 ) } ) }
+ * ```
+ *
+ * the `EXPR_1` is considered to be in an `unsafe` context,
+ * but `EXPR_2` is considered to be in a "safe" (i.e. checked) context.
+ *
+ * For comparison, in:
+ *
+ * ```rust
+ * fn foo() { unsafe { push_unsafe!( { EXPR_1; pop_unsafe!( EXPR_2 ) } ) } }
+ * ```
+ *
+ * both `EXPR_1` and `EXPR_2` are considered to be in `unsafe`
+ * contexts.
+ *
+ */
+
+use ast;
+use codemap::Span;
+use ext::base::*;
+use ext::base;
+use ext::build::AstBuilder;
+use feature_gate;
+use ptr::P;
+
+// Which direction the generated block moves the `unsafe` context.
+enum PushPop { Push, Pop }
+
+// Expander for `push_unsafe!(EXPR)`: wraps EXPR in a block that enters
+// an `unsafe` context (see the module comment above).
+pub fn expand_push_unsafe<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
+ -> Box<base::MacResult+'cx> {
+ expand_pushpop_unsafe(cx, sp, tts, PushPop::Push)
+}
+
+// Expander for `pop_unsafe!(EXPR)`: wraps EXPR in a block that restores
+// the checked (non-`unsafe`) context (see the module comment above).
+pub fn expand_pop_unsafe<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
+ -> Box<base::MacResult+'cx> {
+ expand_pushpop_unsafe(cx, sp, tts, PushPop::Pop)
+}
+
+// Shared implementation for `push_unsafe!` / `pop_unsafe!`: checks the
+// feature gate, extracts exactly one expression argument, and wraps it
+// in a compiler-generated block with the requested check mode.
+fn expand_pushpop_unsafe<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree],
+ pp: PushPop) -> Box<base::MacResult+'cx> {
+ // Both macros are feature-gated; reject the invocation up front if
+ // the `pushpop_unsafe` feature is not enabled.
+ feature_gate::check_for_pushpop_syntax(
+ cx.ecfg.features, &cx.parse_sess.span_diagnostic, sp);
+
+ let mut exprs = match get_exprs_from_tts(cx, sp, tts) {
+ Some(exprs) => exprs.into_iter(),
+ None => return DummyResult::expr(sp),
+ };
+
+ // Exactly one expression argument is accepted.
+ let expr = match (exprs.next(), exprs.next()) {
+ (Some(expr), None) => expr,
+ _ => {
+ let msg = match pp {
+ PushPop::Push => "push_unsafe! takes 1 argument",
+ PushPop::Pop => "pop_unsafe! takes 1 argument",
+ };
+ cx.span_err(sp, msg);
+ return DummyResult::expr(sp);
+ }
+ };
+
+ // The block's check mode pushes or pops the `unsafe` context; it is
+ // marked compiler-generated so later passes can distinguish it from
+ // user-written `unsafe` blocks.
+ let source = ast::UnsafeSource::CompilerGenerated;
+ let check_mode = match pp {
+ PushPop::Push => ast::BlockCheckMode::PushUnsafeBlock(source),
+ PushPop::Pop => ast::BlockCheckMode::PopUnsafeBlock(source),
+ };
+
+ MacEager::expr(cx.expr_block(P(ast::Block {
+ stmts: vec![],
+ expr: Some(expr),
+ id: ast::DUMMY_NODE_ID,
+ rules: check_mode,
+ span: sp
+ })))
+}
// Lift an ident to the expr that evaluates to that ident.
fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
- let e_str = cx.expr_str(sp, token::get_ident(ident));
+ let e_str = cx.expr_str(sp, ident.name.as_str());
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("ident_of"),
// Lift a name to the expr that evaluates to that name
fn mk_name(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
- let e_str = cx.expr_str(sp, token::get_ident(ident));
+ let e_str = cx.expr_str(sp, ident.name.as_str());
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
id_ext("name_of"),
base::check_zero_tts(cx, sp, tts, "module_path!");
let string = cx.mod_path()
.iter()
- .map(|x| token::get_ident(*x).to_string())
+ .map(|x| x.to_string())
.collect::<Vec<String>>()
- .connect("::");
+ .join("::");
base::MacEager::expr(cx.expr_str(
sp,
token::intern_and_get_ident(&string[..])))
// dependency information
let filename = format!("{}", file.display());
let interned = token::intern_and_get_ident(&src[..]);
- cx.codemap().new_filemap(filename, src);
+ cx.codemap().new_filemap_and_lines(&filename, &src);
base::MacEager::expr(cx.expr_str(sp, interned))
}
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
let filename = format!("{}", file.display());
- cx.codemap().new_filemap(filename, "".to_string());
+ cx.codemap().new_filemap_and_lines(&filename, "");
base::MacEager::expr(cx.expr_lit(sp, ast::LitBinary(Rc::new(bytes))))
}
*idx += 1;
}
Occupied(..) => {
- let string = token::get_ident(bind_name);
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
- &string)))
+ bind_name)))
}
}
}
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
- (format!("{} ('{}')",
- token::get_ident(name),
- token::get_ident(bind))).to_string()
+ format!("{} ('{}')", name, bind)
}
_ => panic!()
- } }).collect::<Vec<String>>().connect(" or ");
+ } }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
- TtToken(span, MatchNt(_, name, _, _)) => {
- let name_string = token::get_ident(name);
+ TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
- parse_nt(&mut rust_parser, span, &name_string))));
+ parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
let span = parser.span;
parser.span_err(span, &msg[..]);
- let name = token::get_ident(self.macro_ident);
let msg = format!("caused by the macro expansion here; the usage \
of `{}` is likely invalid in this context",
- name);
+ self.macro_ident);
parser.span_note(self.site_span, &msg[..]);
}
}
-> Box<MacResult+'cx> {
if cx.trace_macros() {
println!("{}! {{ {} }}",
- token::get_ident(name),
+ name,
print::pprust::tts_to_string(arg));
}
tt @ &TtSequence(..) => {
check_matcher(cx, Some(tt).into_iter(), &Eof);
},
- _ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find \
- a TtDelimited or TtSequence)")
+ _ => cx.span_err(sp, "Invalid macro matcher; matchers must be contained \
+ in balanced delimiters or a repetition indicator")
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find a \
MatchedNonterminal)")
TtToken(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
// ii. If T is a simple NT, look ahead to the next token T' in
// M. If T' is in the set FOLLOW(NT), continue. Else; reject.
- if can_be_followed_by_any(frag_spec.as_str()) {
+ if can_be_followed_by_any(&frag_spec.name.as_str()) {
continue
} else {
let next_token = match tokens.peek() {
// possibility that the sequence occurred
// zero times (in which case we need to
// look at the token that follows the
- // sequence, which may itself a sequence,
+ // sequence, which may itself be a sequence,
// and so on).
cx.span_err(sp,
&format!("`${0}:{1}` is followed by a \
sequence repetition, which is not \
allowed for `{1}` fragments",
- name.as_str(), frag_spec.as_str())
+ name, frag_spec)
);
Eof
},
let tok = if let TtToken(_, ref tok) = *token { tok } else { unreachable!() };
// If T' is in the set FOLLOW(NT), continue. Else, reject.
- match (&next_token, is_in_follow(cx, &next_token, frag_spec.as_str())) {
+ match (&next_token, is_in_follow(cx, &next_token, &frag_spec.name.as_str())) {
(_, Err(msg)) => {
cx.span_err(sp, &msg);
continue
(next, Ok(false)) => {
cx.span_err(sp, &format!("`${0}:{1}` is followed by `{2}`, which \
is not allowed for `{1}` fragments",
- name.as_str(), frag_spec.as_str(),
+ name, frag_spec,
token_to_string(next)));
continue
},
"pat" => {
match *tok {
FatArrow | Comma | Eq => Ok(true),
- Ident(i, _) if i.as_str() == "if" || i.as_str() == "in" => Ok(true),
+ Ident(i, _) if i.name == "if" || i.name == "in" => Ok(true),
_ => Ok(false)
}
},
"path" | "ty" => {
match *tok {
- Comma | FatArrow | Colon | Eq | Gt => Ok(true),
- Ident(i, _) if i.as_str() == "as" => Ok(true),
+ Comma | FatArrow | Colon | Eq | Gt | Semi => Ok(true),
+ Ident(i, _) if i.name == "as" => Ok(true),
_ => Ok(false)
}
},
LisContradiction(_) => other,
LisConstraint(r_len, _) if l_len == r_len => self.clone(),
LisConstraint(r_len, r_id) => {
- let l_n = token::get_ident(l_id.clone());
- let r_n = token::get_ident(r_id);
LisContradiction(format!("inconsistent lockstep iteration: \
- '{:?}' has {} items, but '{:?}' has {}",
- l_n, l_len, r_n, r_len).to_string())
+ '{}' has {} items, but '{}' has {}",
+ l_id, l_len, r_id, r_len))
}
},
}
MatchedSeq(..) => {
panic!(r.sp_diag.span_fatal(
r.cur_span, /* blame the macro writer */
- &format!("variable '{:?}' is still repeating at this depth",
- token::get_ident(ident))));
+ &format!("variable '{}' is still repeating at this depth",
+ ident)));
}
}
}
("visible_private_types", "1.0.0", Active),
("slicing_syntax", "1.0.0", Accepted),
("box_syntax", "1.0.0", Active),
+ ("placement_in_syntax", "1.0.0", Active),
+ ("pushpop_unsafe", "1.2.0", Active),
("on_unimplemented", "1.0.0", Active),
("simd_ffi", "1.0.0", Active),
("allocator", "1.0.0", Active),
// Allows the definition of `const fn` functions.
("const_fn", "1.2.0", Active),
+ // Allows using #[prelude_import] on glob `use` items.
+ ("prelude_import", "1.2.0", Active),
+
+ // Allows the definition recursive static items.
+ ("static_recursion", "1.3.0", Active),
+
+ // Allows default type parameters to influence type inference.
+ ("default_type_parameter_fallback", "1.3.0", Active),
+
// Allows associated type defaults
("associated_type_defaults", "1.2.0", Active),
];
and may be removed in the future")),
// used in resolve
- ("prelude_import", Whitelisted),
+ ("prelude_import", Gated("prelude_import",
+ "`#[prelude_import]` is for use by rustc only")),
// FIXME: #14407 these are only looked at on-demand so we can't
// guarantee they'll have already been checked
pub allow_trace_macros: bool,
pub allow_internal_unstable: bool,
pub allow_custom_derive: bool,
+ pub allow_placement_in: bool,
+ pub allow_box: bool,
+ pub allow_pushpop_unsafe: bool,
pub simd_ffi: bool,
pub unmarked_api: bool,
pub negate_unsigned: bool,
/// #![feature] attrs for non-language (library) features
pub declared_lib_features: Vec<(InternedString, Span)>,
pub const_fn: bool,
+ pub static_recursion: bool,
+ pub default_type_parameter_fallback: bool,
}
impl Features {
allow_trace_macros: false,
allow_internal_unstable: false,
allow_custom_derive: false,
+ allow_placement_in: false,
+ allow_box: false,
+ allow_pushpop_unsafe: false,
simd_ffi: false,
unmarked_api: false,
negate_unsigned: false,
declared_stable_lang_features: Vec::new(),
declared_lib_features: Vec::new(),
const_fn: false,
+ static_recursion: false,
+ default_type_parameter_fallback: false,
}
}
}
+// User-facing messages for the three feature-gate errors below.
+const EXPLAIN_BOX_SYNTAX: &'static str =
+ "box expression syntax is experimental; you can call `Box::new` instead.";
+
+const EXPLAIN_PLACEMENT_IN: &'static str =
+ "placement-in expression syntax is experimental and subject to change.";
+
+const EXPLAIN_PUSHPOP_UNSAFE: &'static str =
+ "push/pop_unsafe macros are experimental and subject to change.";
+
+// Errors at `span` unless the feature set has `allow_box` enabled.
+// A `None` feature set also triggers the error.
+pub fn check_for_box_syntax(f: Option<&Features>, diag: &SpanHandler, span: Span) {
+ if let Some(&Features { allow_box: true, .. }) = f {
+ return;
+ }
+ emit_feature_err(diag, "box_syntax", span, EXPLAIN_BOX_SYNTAX);
+}
+
+// Errors at `span` unless `allow_placement_in` is enabled (or if the
+// feature set is `None`).
+pub fn check_for_placement_in(f: Option<&Features>, diag: &SpanHandler, span: Span) {
+ if let Some(&Features { allow_placement_in: true, .. }) = f {
+ return;
+ }
+ emit_feature_err(diag, "placement_in_syntax", span, EXPLAIN_PLACEMENT_IN);
+}
+
+// Errors at `span` unless `allow_pushpop_unsafe` is enabled (or if the
+// feature set is `None`).
+pub fn check_for_pushpop_syntax(f: Option<&Features>, diag: &SpanHandler, span: Span) {
+ if let Some(&Features { allow_pushpop_unsafe: true, .. }) = f {
+ return;
+ }
+ emit_feature_err(diag, "pushpop_unsafe", span, EXPLAIN_PUSHPOP_UNSAFE);
+}
+
struct Context<'a> {
features: Vec<&'static str>,
span_handler: &'a SpanHandler,
}
impl<'a> Context<'a> {
+ // Records `feature` as enabled, with a debug log so feature
+ // activation can be traced.
+ fn enable_feature(&mut self, feature: &'static str) {
+ debug!("enabling feature: {}", feature);
+ self.features.push(feature);
+ }
+
fn gate_feature(&self, feature: &str, span: Span, explain: &str) {
let has_feature = self.has_feature(feature);
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", feature, span, has_feature);
fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
self.context.check_attribute(attr, true);
}
+
+ fn visit_expr(&mut self, e: &ast::Expr) {
+ // Issue 22181: overloaded-`box` and placement-`in` are
+ // implemented via a desugaring expansion, so their feature
+ // gates go into MacroVisitor since that works pre-expansion.
+ //
+ // Issue 22234: we also check during expansion as well.
+ // But we keep these checks as a pre-expansion check to catch
+ // uses in e.g. conditionalized code.
+
+ if let ast::ExprBox(None, _) = e.node {
+ self.context.gate_feature("box_syntax", e.span, EXPLAIN_BOX_SYNTAX);
+ }
+
+ if let ast::ExprBox(Some(_), _) = e.node {
+ self.context.gate_feature("placement_in_syntax", e.span, EXPLAIN_PLACEMENT_IN);
+ }
+
+ visit::walk_expr(self, e);
+ }
}
struct PostExpansionVisitor<'a> {
}
fn visit_name(&mut self, sp: Span, name: ast::Name) {
- if !token::get_name(name).is_ascii() {
+ if !name.as_str().is_ascii() {
self.gate_feature("non_ascii_idents", sp,
"non-ascii idents are not fully supported.");
}
match KNOWN_FEATURES.iter()
.find(|& &(n, _, _)| name == n) {
Some(&(name, _, Active)) => {
- cx.features.push(name);
+ cx.enable_feature(name);
}
Some(&(_, _, Removed)) => {
span_handler.span_err(mi.span, "feature has been removed");
allow_trace_macros: cx.has_feature("trace_macros"),
allow_internal_unstable: cx.has_feature("allow_internal_unstable"),
allow_custom_derive: cx.has_feature("custom_derive"),
+ allow_placement_in: cx.has_feature("placement_in_syntax"),
+ allow_box: cx.has_feature("box_syntax"),
+ allow_pushpop_unsafe: cx.has_feature("pushpop_unsafe"),
simd_ffi: cx.has_feature("simd_ffi"),
unmarked_api: cx.has_feature("unmarked_api"),
negate_unsigned: cx.has_feature("negate_unsigned"),
declared_stable_lang_features: accepted_features,
declared_lib_features: unknown_features,
const_fn: cx.has_feature("const_fn"),
+ static_recursion: cx.has_feature("static_recursion"),
+ default_type_parameter_fallback: cx.has_feature("default_type_parameter_fallback"),
}
}
}
pub fn noop_fold_local<T: Folder>(l: P<Local>, fld: &mut T) -> P<Local> {
- l.map(|Local {id, pat, ty, init, source, span}| Local {
+ l.map(|Local {id, pat, ty, init, span}| Local {
id: fld.new_id(id),
ty: ty.map(|t| fld.fold_ty(t)),
pat: fld.fold_pat(pat),
init: init.map(|e| fld.fold_expr(e)),
- source: source,
span: fld.new_span(span)
})
}
#![feature(libc)]
#![feature(ref_slice)]
#![feature(rustc_private)]
+#![feature(set_stdio)]
#![feature(staged_api)]
#![feature(str_char)]
#![feature(str_escape)]
pub mod log_syntax;
pub mod mtwt;
pub mod quote;
+ pub mod pushpop_safe;
pub mod source_util;
pub mod trace_macros;
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
- return lines.connect("\n");
+ return lines.join("\n");
}
panic!("not a doc-comment: {}", comment);
self.span_diagnostic.span_err(sp, m)
}
+ /// Suggest some help with a given span.
+ pub fn help_span(&self, sp: Span, m: &str) {
+ self.span_diagnostic.span_help(sp, m)
+ }
+
/// Report a fatal error spanning [`from_pos`, `to_pos`).
fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> ! {
self.fatal_span(codemap::mk_sp(from_pos, to_pos), m)
self.err_span(codemap::mk_sp(from_pos, to_pos), m)
}
+ /// Suggest some help spanning [`from_pos`, `to_pos`).
+ fn help_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
+ self.help_span(codemap::mk_sp(from_pos, to_pos), m)
+ }
+
/// Report a lexical error spanning [`from_pos`, `to_pos`), appending an
/// escaped character to the error message
fn fatal_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) -> ! {
None => {
if self.is_eof() {
self.peek_tok = token::Eof;
+ self.peek_span = codemap::mk_sp(self.filemap.end_pos, self.filemap.end_pos);
} else {
let start_bytepos = self.last_pos;
self.peek_tok = self.next_token_inner();
/// Lex a LIT_INTEGER or a LIT_FLOAT
fn scan_number(&mut self, c: char) -> token::Lit {
- let mut num_digits;
+ let num_digits;
let mut base = 10;
let start_bpos = self.last_pos;
accum_int *= 16;
accum_int += c.to_digit(16).unwrap_or_else(|| {
self.err_span_char(self.last_pos, self.pos,
- "illegal character in numeric character escape", c);
+ "invalid character in numeric character escape", c);
valid = false;
0
Some(_) => valid,
None => {
let last_bpos = self.last_pos;
- self.err_span_(start_bpos, last_bpos, "illegal numeric character escape");
+ self.err_span_(start_bpos, last_bpos, "invalid numeric character escape");
false
}
}
return match e {
'n' | 'r' | 't' | '\\' | '\'' | '"' | '0' => true,
'x' => self.scan_byte_escape(delim, !ascii_only),
- 'u' if self.curr_is('{') => {
- let valid = self.scan_unicode_escape(delim);
- if valid && ascii_only {
- self.err_span_(
- escaped_pos,
- self.last_pos,
+ 'u' => {
+ let valid = if self.curr_is('{') {
+ self.scan_unicode_escape(delim) && !ascii_only
+ } else {
+ self.err_span_(start, self.last_pos,
+ "incorrect unicode escape sequence");
+ self.help_span_(start, self.last_pos,
+ "format of unicode escape sequences is `\\u{…}`");
+ false
+ };
+ if ascii_only {
+ self.err_span_(start, self.last_pos,
"unicode escape sequences cannot be used as a byte or in \
a byte string"
);
- false
- } else {
- valid
}
+ valid
+
}
'\n' if delim == '"' => {
self.consume_whitespace();
if ascii_only { "unknown byte escape" }
else { "unknown character escape" },
c);
- let sp = codemap::mk_sp(escaped_pos, last_pos);
if e == '\r' {
- self.span_diagnostic.span_help(
- sp,
+ self.help_span_(escaped_pos, last_pos,
"this is an isolated carriage return; consider checking \
your editor and version control settings")
}
if (e == '{' || e == '}') && !ascii_only {
- self.span_diagnostic.span_help(
- sp,
+ self.help_span_(escaped_pos, last_pos,
"if used in a formatting string, \
curly braces are escaped with `{{` and `}}`")
}
"unterminated unicode escape (needed a `}`)");
} else {
self.err_span_char(self.last_pos, self.pos,
- "illegal character in unicode escape", c);
+ "invalid character in unicode escape", c);
}
valid = false;
0
valid = false;
}
- self.bump(); // past the ending }
-
if valid && (char::from_u32(accum_int).is_none() || count == 0) {
- self.err_span_(start_bpos, self.last_pos, "illegal unicode character escape");
+ self.err_span_(start_bpos, self.last_pos, "invalid unicode character escape");
valid = false;
}
-
+ self.bump(); // past the ending }
valid
}
let last_bpos = self.last_pos;
let curr_char = self.curr.unwrap();
self.fatal_span_char(start_bpos, last_bpos,
- "only `#` is allowed in raw string delimitation; \
- found illegal character",
+ "found invalid character; \
+ only `#` is allowed in raw string delimitation",
curr_char);
}
self.bump();
let last_pos = self.last_pos;
let ch = self.curr.unwrap();
self.fatal_span_char(start_bpos, last_pos,
- "only `#` is allowed in raw string delimitation; \
- found illegal character",
+ "found invalid character; \
+ only `#` is allowed in raw string delimitation",
ch);
}
self.bump();
//! The main parser interface
use ast;
-use codemap::{Span, CodeMap, FileMap};
+use codemap::{self, Span, CodeMap, FileMap};
use diagnostic::{SpanHandler, Handler, Auto, FatalError};
use parse::attr::ParserAttr;
use parse::parser::Parser;
+use parse::token::InternedString;
use ptr::P;
use str::char_at;
pub fn filemap_to_parser<'a>(sess: &'a ParseSess,
filemap: Rc<FileMap>,
cfg: ast::CrateConfig) -> Parser<'a> {
- tts_to_parser(sess, filemap_to_tts(sess, filemap), cfg)
+ let end_pos = filemap.end_pos;
+ let mut parser = tts_to_parser(sess, filemap_to_tts(sess, filemap), cfg);
+
+ if parser.token == token::Eof && parser.span == codemap::DUMMY_SP {
+ parser.span = codemap::mk_sp(end_pos, end_pos);
+ }
+
+ parser
}
// must preserve old name for now, because quote! from the *existing*
fn filtered_float_lit(data: token::InternedString, suffix: Option<&str>,
sd: &SpanHandler, sp: Span) -> ast::Lit_ {
debug!("filtered_float_lit: {}, {:?}", data, suffix);
- match suffix {
+ match suffix.as_ref().map(|s| &**s) {
Some("f32") => ast::LitFloat(data, ast::TyF32),
Some("f64") => ast::LitFloat(data, ast::TyF64),
Some(suf) => {
if suf.len() >= 2 && looks_like_width_suffix(&['f'], suf) {
// if it looks like a width, lets try to be helpful.
- sd.span_err(sp, &*format!("illegal width `{}` for float literal, \
- valid widths are 32 and 64", &suf[1..]));
+ sd.span_err(sp, &*format!("invalid width `{}` for float literal", &suf[1..]));
+ sd.fileline_help(sp, "valid widths are 32 and 64");
} else {
- sd.span_err(sp, &*format!("illegal suffix `{}` for float literal, \
- valid suffixes are `f32` and `f64`", suf));
+ sd.span_err(sp, &*format!("invalid suffix `{}` for float literal", suf));
+ sd.fileline_help(sp, "valid suffixes are `f32` and `f64`");
}
ast::LitFloatUnsuffixed(data)
None => ast::LitFloatUnsuffixed(data)
}
}
-pub fn float_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) -> ast::Lit_ {
+pub fn float_lit(s: &str, suffix: Option<InternedString>,
+ sd: &SpanHandler, sp: Span) -> ast::Lit_ {
debug!("float_lit: {:?}, {:?}", s, suffix);
// FIXME #2252: bounds checking float literals is deferred until trans
let s = s.chars().filter(|&c| c != '_').collect::<String>();
- let data = token::intern_and_get_ident(&*s);
- filtered_float_lit(data, suffix, sd, sp)
+ let data = token::intern_and_get_ident(&s);
+ filtered_float_lit(data, suffix.as_ref().map(|s| &**s), sd, sp)
}
/// Parse a string representing a byte literal into its final form. Similar to `char_lit`
Rc::new(res)
}
-pub fn integer_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) -> ast::Lit_ {
+pub fn integer_lit(s: &str,
+ suffix: Option<InternedString>,
+ sd: &SpanHandler,
+ sp: Span)
+ -> ast::Lit_ {
// s can only be ascii, byte indexing is fine
let s2 = s.chars().filter(|&c| c != '_').collect::<String>();
}
// 1f64 and 2f32 etc. are valid float literals.
- match suffix {
- Some(suf) if looks_like_width_suffix(&['f'], suf) => {
+ if let Some(ref suf) = suffix {
+ if looks_like_width_suffix(&['f'], suf) {
match base {
16 => sd.span_err(sp, "hexadecimal float literal is not supported"),
8 => sd.span_err(sp, "octal float literal is not supported"),
_ => ()
}
let ident = token::intern_and_get_ident(&*s);
- return filtered_float_lit(ident, suffix, sd, sp)
+ return filtered_float_lit(ident, Some(&**suf), sd, sp)
}
- _ => {}
}
if base != 10 {
s = &s[2..];
}
- if let Some(suf) = suffix {
+ if let Some(ref suf) = suffix {
if suf.is_empty() { sd.span_bug(sp, "found empty literal suffix in Some")}
- ty = match suf {
+ ty = match &**suf {
"isize" => ast::SignedIntLit(ast::TyIs, ast::Plus),
"i8" => ast::SignedIntLit(ast::TyI8, ast::Plus),
"i16" => ast::SignedIntLit(ast::TyI16, ast::Plus),
// i<digits> and u<digits> look like widths, so lets
// give an error message along those lines
if looks_like_width_suffix(&['i', 'u'], suf) {
- sd.span_err(sp, &*format!("illegal width `{}` for integer literal; \
- valid widths are 8, 16, 32 and 64",
+ sd.span_err(sp, &*format!("invalid width `{}` for integer literal",
&suf[1..]));
+ sd.fileline_help(sp, "valid widths are 8, 16, 32 and 64");
} else {
- sd.span_err(sp, &*format!("illegal suffix `{}` for numeric literal", suf));
+ sd.span_err(sp, &*format!("invalid suffix `{}` for numeric literal", suf));
sd.fileline_help(sp, "the suffix must be one of the integral types \
(`u32`, `isize`, etc)");
}
Some(&ast::TtToken(_, token::Ident(name_zip, token::Plain))),
Some(&ast::TtDelimited(_, ref macro_delimed)),
)
- if name_macro_rules.as_str() == "macro_rules"
- && name_zip.as_str() == "zip" => {
+ if name_macro_rules.name == "macro_rules"
+ && name_zip.name == "zip" => {
let tts = ¯o_delimed.tts[..];
match (tts.len(), tts.get(0), tts.get(1), tts.get(2)) {
(
(
2,
Some(&ast::TtToken(_, token::Dollar)),
- Some(&ast::TtToken(_, token::Ident(name, token::Plain))),
+ Some(&ast::TtToken(_, token::Ident(ident, token::Plain))),
)
if first_delimed.delim == token::Paren
- && name.as_str() == "a" => {},
+ && ident.name == "a" => {},
_ => panic!("value 3: {:?}", **first_delimed),
}
let tts = &second_delimed.tts[..];
(
2,
Some(&ast::TtToken(_, token::Dollar)),
- Some(&ast::TtToken(_, token::Ident(name, token::Plain))),
+ Some(&ast::TtToken(_, token::Ident(ident, token::Plain))),
)
if second_delimed.delim == token::Paren
- && name.as_str() == "a" => {},
+ && ident.name == "a" => {},
_ => panic!("value 4: {:?}", **second_delimed),
}
},
//!
//! Obsolete syntax that becomes too hard to parse can be removed.
-use ast::{Expr, ExprTup};
use codemap::Span;
use parse::parser;
-use parse::token;
-use ptr::P;
/// The specific types of unsupported syntax
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub trait ParserObsoleteMethods {
/// Reports an obsolete syntax non-fatal error.
fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax);
- /// Reports an obsolete syntax non-fatal error, and returns
- /// a placeholder expression
- fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> P<Expr>;
fn report(&mut self,
sp: Span,
kind: ObsoleteSyntax,
kind_str: &str,
desc: &str,
error: bool);
- fn is_obsolete_ident(&mut self, ident: &str) -> bool;
- fn eat_obsolete_ident(&mut self, ident: &str) -> bool;
}
impl<'a> ParserObsoleteMethods for parser::Parser<'a> {
self.report(sp, kind, kind_str, desc, error);
}
- /// Reports an obsolete syntax non-fatal error, and returns
- /// a placeholder expression
- fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> P<Expr> {
- self.obsolete(sp, kind);
- self.mk_expr(sp.lo, sp.hi, ExprTup(vec![]))
- }
-
fn report(&mut self,
sp: Span,
kind: ObsoleteSyntax,
self.obsolete_set.insert(kind);
}
}
-
- fn is_obsolete_ident(&mut self, ident: &str) -> bool {
- match self.token {
- token::Ident(sid, _) => {
- token::get_ident(sid) == ident
- }
- _ => false
- }
- }
-
- fn eat_obsolete_ident(&mut self, ident: &str) -> bool {
- if self.is_obsolete_ident(ident) {
- panictry!(self.bump());
- true
- } else {
- false
- }
- }
}
use ast::{ItemExternCrate, ItemUse};
use ast::{LifetimeDef, Lit, Lit_};
use ast::{LitBool, LitChar, LitByte, LitBinary};
-use ast::{LitStr, LitInt, Local, LocalLet};
+use ast::{LitStr, LitInt, Local};
use ast::{MacStmtWithBraces, MacStmtWithSemicolon, MacStmtWithoutBraces};
use ast::{MutImmutable, MutMutable, Mac_, MacInvocTT, MatchSource};
use ast::{MutTy, BiMul, Mutability};
use ast::{Visibility, WhereClause};
use ast;
use ast_util::{self, AS_PREC, ident_to_path, operator_prec};
-use codemap::{self, Span, BytePos, Spanned, spanned, mk_sp};
+use codemap::{self, Span, BytePos, Spanned, spanned, mk_sp, CodeMap};
use diagnostic;
use ext::tt::macro_parser;
use parse;
match *self {
TokenType::Token(ref t) => format!("`{}`", Parser::token_to_string(t)),
TokenType::Operator => "an operator".to_string(),
- TokenType::Keyword(kw) => format!("`{}`", token::get_name(kw.to_name())),
+ TokenType::Keyword(kw) => format!("`{}`", kw.to_name()),
}
}
}
t.is_plain_ident() || *t == token::Underscore
}
/// Information about the path to a module.
pub struct ModulePath {
    /// The module's name as written in source.
    pub name: String,
    /// Whether at least one candidate file for the module exists on disk.
    pub path_exists: bool,
    /// The resolved file, or an error describing why resolution failed.
    pub result: Result<ModulePathSuccess, ModulePathError>,
}

/// A successfully resolved module file.
pub struct ModulePathSuccess {
    /// Path of the file that holds the module's contents.
    pub path: ::std::path::PathBuf,
    /// Whether this module owns its directory (i.e. was found at `mod.rs`).
    pub owns_directory: bool,
}

/// Diagnostic text for a module file that could not be resolved.
pub struct ModulePathError {
    /// The main error message.
    pub err_msg: String,
    /// An accompanying help message.
    pub help_msg: String,
}
+
+
impl<'a> Parser<'a> {
pub fn new(sess: &'a ParseSess,
cfg: ast::CrateConfig,
if text.is_empty() {
self.span_bug(sp, "found empty literal suffix in Some")
}
- self.span_err(sp, &*format!("{} with a suffix is illegal", kind));
+ self.span_err(sp, &*format!("{} with a suffix is invalid", kind));
}
}
}
}
pub fn id_to_interned_str(&mut self, id: Ident) -> InternedString {
- token::get_ident(id)
+ id.name.as_str()
}
/// Is the current token one of the keywords that signals a bare function
}
token::Literal(lit, suf) => {
let (suffix_illegal, out) = match lit {
- token::Byte(i) => (true, LitByte(parse::byte_lit(i.as_str()).0)),
- token::Char(i) => (true, LitChar(parse::char_lit(i.as_str()).0)),
+ token::Byte(i) => (true, LitByte(parse::byte_lit(&i.as_str()).0)),
+ token::Char(i) => (true, LitChar(parse::char_lit(&i.as_str()).0)),
// there are some valid suffixes for integer and
// float literals, so all the handling is done
// internally.
token::Integer(s) => {
- (false, parse::integer_lit(s.as_str(),
+ (false, parse::integer_lit(&s.as_str(),
suf.as_ref().map(|s| s.as_str()),
&self.sess.span_diagnostic,
self.last_span))
}
token::Float(s) => {
- (false, parse::float_lit(s.as_str(),
+ (false, parse::float_lit(&s.as_str(),
suf.as_ref().map(|s| s.as_str()),
&self.sess.span_diagnostic,
self.last_span))
token::Str_(s) => {
(true,
- LitStr(token::intern_and_get_ident(&parse::str_lit(s.as_str())),
+ LitStr(token::intern_and_get_ident(&parse::str_lit(&s.as_str())),
ast::CookedStr))
}
token::StrRaw(s, n) => {
(true,
LitStr(
- token::intern_and_get_ident(&parse::raw_str_lit(s.as_str())),
+ token::intern_and_get_ident(&parse::raw_str_lit(&s.as_str())),
ast::RawStr(n)))
}
token::Binary(i) =>
- (true, LitBinary(parse::binary_lit(i.as_str()))),
+ (true, LitBinary(parse::binary_lit(&i.as_str()))),
token::BinaryRaw(i, _) =>
(true,
- LitBinary(Rc::new(i.as_str().as_bytes().iter().cloned().collect()))),
+ LitBinary(Rc::new(i.to_string().into_bytes()))),
};
if suffix_illegal {
// Assumes that the leading `<` has been parsed already.
pub fn parse_qualified_path(&mut self, mode: PathParsingMode)
-> PResult<(QSelf, ast::Path)> {
+ let span = self.last_span;
let self_type = try!(self.parse_ty_sum());
let mut path = if try!(self.eat_keyword(keywords::As)) {
try!(self.parse_path(LifetimeAndTypesWithoutColons))
} else {
ast::Path {
- span: self.span,
+ span: span,
global: false,
segments: vec![]
}
};
path.segments.extend(segments);
- if path.segments.len() == 1 {
- path.span.lo = self.last_span.lo;
- }
path.span.hi = self.last_span.hi;
Ok((qself, path))
return self.parse_if_expr();
}
if try!(self.eat_keyword(keywords::For) ){
- return self.parse_for_expr(None);
+ let lo = self.last_span.lo;
+ return self.parse_for_expr(None, lo);
}
if try!(self.eat_keyword(keywords::While) ){
- return self.parse_while_expr(None);
+ let lo = self.last_span.lo;
+ return self.parse_while_expr(None, lo);
}
if self.token.is_lifetime() {
let lifetime = self.get_lifetime();
+ let lo = self.span.lo;
try!(self.bump());
try!(self.expect(&token::Colon));
if try!(self.eat_keyword(keywords::While) ){
- return self.parse_while_expr(Some(lifetime))
+ return self.parse_while_expr(Some(lifetime), lo)
}
if try!(self.eat_keyword(keywords::For) ){
- return self.parse_for_expr(Some(lifetime))
+ return self.parse_for_expr(Some(lifetime), lo)
}
if try!(self.eat_keyword(keywords::Loop) ){
- return self.parse_loop_expr(Some(lifetime))
+ return self.parse_loop_expr(Some(lifetime), lo)
}
return Err(self.fatal("expected `while`, `for`, or `loop` after a label"))
}
if try!(self.eat_keyword(keywords::Loop) ){
- return self.parse_loop_expr(None);
+ let lo = self.last_span.lo;
+ return self.parse_loop_expr(None, lo);
}
if try!(self.eat_keyword(keywords::Continue) ){
let lo = self.span.lo;
match self.token {
token::SubstNt(name, _) =>
return Err(self.fatal(&format!("unknown macro variable `{}`",
- token::get_ident(name)))),
+ name))),
_ => {}
}
}
ex = ExprAddrOf(m, e);
}
token::Ident(_, _) => {
- if !self.check_keyword(keywords::Box) {
+ if !self.check_keyword(keywords::Box) && !self.check_keyword(keywords::In) {
return self.parse_dot_or_call_expr();
}
let lo = self.span.lo;
- let box_hi = self.span.hi;
+ let keyword_hi = self.span.hi;
+ let is_in = self.token.is_keyword(keywords::In);
try!(self.bump());
- // Check for a place: `box(PLACE) EXPR`.
- if try!(self.eat(&token::OpenDelim(token::Paren)) ){
- // Support `box() EXPR` as the default.
- if !try!(self.eat(&token::CloseDelim(token::Paren)) ){
+ if is_in {
+ let place = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
+ let blk = try!(self.parse_block());
+ hi = blk.span.hi;
+ let blk_expr = self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk));
+ ex = ExprBox(Some(place), blk_expr);
+ return Ok(self.mk_expr(lo, hi, ex));
+ }
+
+ // FIXME (#22181) Remove `box (PLACE) EXPR` support
+ // entirely after next release (enabling `(box (EXPR))`),
+ // since it will be replaced by `in PLACE { EXPR }`, ...
+ //
+ // ... but for now: check for a place: `box(PLACE) EXPR`.
+
+ if try!(self.eat(&token::OpenDelim(token::Paren))) {
+ let box_span = mk_sp(lo, self.last_span.hi);
+ self.span_warn(box_span,
+ "deprecated syntax; use the `in` keyword now \
+ (e.g. change `box (<expr>) <expr>` to \
+ `in <expr> { <expr> }`)");
+
+ // Continue supporting `box () EXPR` (temporarily)
+ if !try!(self.eat(&token::CloseDelim(token::Paren))) {
let place = try!(self.parse_expr_nopanic());
try!(self.expect(&token::CloseDelim(token::Paren)));
// Give a suggestion to use `box()` when a parenthesised expression is used
self.span_err(span,
&format!("expected expression, found `{}`",
this_token_to_string));
- let box_span = mk_sp(lo, box_hi);
+
+ // Spanning just keyword avoids constructing
+ // printout of arg expression (which starts
+ // with parenthesis, as established above).
+
+ let box_span = mk_sp(lo, keyword_hi);
self.span_suggestion(box_span,
- "try using `box()` instead:",
- "box()".to_string());
+ "try using `box ()` instead:",
+ format!("box ()"));
self.abort_if_errors();
}
let subexpression = try!(self.parse_prefix_expr());
// Otherwise, we use the unique pointer default.
let subexpression = try!(self.parse_prefix_expr());
hi = subexpression.span.hi;
+
// FIXME (pnkfelix): After working out kinks with box
// desugaring, should be `ExprBox(None, subexpression)`
// instead.
// (much lower than other prefix expressions) to be consistent
// with the postfix-form 'expr..'
let lo = self.span.lo;
+ let mut hi = self.span.hi;
try!(self.bump());
let opt_end = if self.is_at_start_of_range_notation_rhs() {
let end = try!(self.parse_binops());
+ hi = end.span.hi;
Some(end)
} else {
None
};
- let hi = self.span.hi;
let ex = self.mk_range(None, opt_end);
Ok(self.mk_expr(lo, hi, ex))
}
}
// A range expression, either `expr..expr` or `expr..`.
token::DotDot => {
+ let lo = lhs.span.lo;
+ let mut hi = self.span.hi;
try!(self.bump());
let opt_end = if self.is_at_start_of_range_notation_rhs() {
let end = try!(self.parse_binops());
+ hi = end.span.hi;
Some(end)
} else {
None
};
-
- let lo = lhs.span.lo;
- let hi = self.span.hi;
let range = self.mk_range(Some(lhs), opt_end);
return Ok(self.mk_expr(lo, hi, range));
}
}
/// Parse a 'for' .. 'in' expression ('for' token already eaten)
- pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>) -> PResult<P<Expr>> {
+ pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>,
+ span_lo: BytePos) -> PResult<P<Expr>> {
// Parse: `for <src_pat> in <src_expr> <src_loop_block>`
- let lo = self.last_span.lo;
let pat = try!(self.parse_pat_nopanic());
try!(self.expect_keyword(keywords::In));
let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
let loop_block = try!(self.parse_block());
let hi = self.last_span.hi;
- Ok(self.mk_expr(lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident)))
+ Ok(self.mk_expr(span_lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident)))
}
/// Parse a 'while' or 'while let' expression ('while' token already eaten)
- pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>) -> PResult<P<Expr>> {
+ pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>,
+ span_lo: BytePos) -> PResult<P<Expr>> {
if self.token.is_keyword(keywords::Let) {
- return self.parse_while_let_expr(opt_ident);
+ return self.parse_while_let_expr(opt_ident, span_lo);
}
- let lo = self.last_span.lo;
let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
let body = try!(self.parse_block());
let hi = body.span.hi;
- return Ok(self.mk_expr(lo, hi, ExprWhile(cond, body, opt_ident)));
+ return Ok(self.mk_expr(span_lo, hi, ExprWhile(cond, body, opt_ident)));
}
/// Parse a 'while let' expression ('while' token already eaten)
- pub fn parse_while_let_expr(&mut self, opt_ident: Option<ast::Ident>) -> PResult<P<Expr>> {
- let lo = self.last_span.lo;
+ pub fn parse_while_let_expr(&mut self, opt_ident: Option<ast::Ident>,
+ span_lo: BytePos) -> PResult<P<Expr>> {
try!(self.expect_keyword(keywords::Let));
let pat = try!(self.parse_pat_nopanic());
try!(self.expect(&token::Eq));
let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
let body = try!(self.parse_block());
let hi = body.span.hi;
- return Ok(self.mk_expr(lo, hi, ExprWhileLet(pat, expr, body, opt_ident)));
+ return Ok(self.mk_expr(span_lo, hi, ExprWhileLet(pat, expr, body, opt_ident)));
}
- pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>) -> PResult<P<Expr>> {
- let lo = self.last_span.lo;
+ pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>,
+ span_lo: BytePos) -> PResult<P<Expr>> {
let body = try!(self.parse_block());
let hi = body.span.hi;
- Ok(self.mk_expr(lo, hi, ExprLoop(body, opt_ident)))
+ Ok(self.mk_expr(span_lo, hi, ExprLoop(body, opt_ident)))
}
fn parse_match_expr(&mut self) -> PResult<P<Expr>> {
init: init,
id: ast::DUMMY_NODE_ID,
span: mk_sp(lo, self.last_span.hi),
- source: LocalLet,
}))
}
None
};
- if try!(self.eat(&token::DotDot) ){
+ if opt_trait.is_some() && try!(self.eat(&token::DotDot) ){
if generics.is_parameterized() {
self.span_err(impl_span, "default trait implementations are not \
allowed to have generics");
if fields.is_empty() {
return Err(self.fatal(&format!("unit-like struct definition should be \
written as `struct {};`",
- token::get_ident(class_name.clone()))));
+ class_name)));
}
try!(self.bump());
if fields.is_empty() {
return Err(self.fatal(&format!("unit-like struct definition should be \
written as `struct {};`",
- token::get_ident(class_name.clone()))));
+ class_name)));
}
generics.where_clause = try!(self.parse_where_clause());
return Err(self.fatal(&format!("expected item, found `{}`", token_str)));
}
+ let hi = if self.span == codemap::DUMMY_SP {
+ inner_lo
+ } else {
+ self.span.lo
+ };
+
Ok(ast::Mod {
- inner: mk_sp(inner_lo, self.span.lo),
+ inner: mk_sp(inner_lo, hi),
items: items
})
}
fn push_mod_path(&mut self, id: Ident, attrs: &[Attribute]) {
let default_path = self.id_to_interned_str(id);
- let file_path = match ::attr::first_attr_value_str_by_name(attrs,
- "path") {
+ let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") {
Some(d) => d,
None => default_path,
};
self.mod_path_stack.pop().unwrap();
}
- /// Read a module from a source file.
- fn eval_src_mod(&mut self,
- id: ast::Ident,
- outer_attrs: &[ast::Attribute],
- id_sp: Span)
- -> PResult<(ast::Item_, Vec<ast::Attribute> )> {
+ pub fn submod_path_from_attr(attrs: &[ast::Attribute], dir_path: &Path) -> Option<PathBuf> {
+ ::attr::first_attr_value_str_by_name(attrs, "path").map(|d| dir_path.join(&*d))
+ }
+
+ /// Returns either a path to a module, or .
+ pub fn default_submod_path(id: ast::Ident, dir_path: &Path, codemap: &CodeMap) -> ModulePath
+ {
+ let mod_name = id.to_string();
+ let default_path_str = format!("{}.rs", mod_name);
+ let secondary_path_str = format!("{}/mod.rs", mod_name);
+ let default_path = dir_path.join(&default_path_str);
+ let secondary_path = dir_path.join(&secondary_path_str);
+ let default_exists = codemap.file_exists(&default_path);
+ let secondary_exists = codemap.file_exists(&secondary_path);
+
+ let result = match (default_exists, secondary_exists) {
+ (true, false) => Ok(ModulePathSuccess { path: default_path, owns_directory: false }),
+ (false, true) => Ok(ModulePathSuccess { path: secondary_path, owns_directory: true }),
+ (false, false) => Err(ModulePathError {
+ err_msg: format!("file not found for module `{}`", mod_name),
+ help_msg: format!("name the file either {} or {} inside the directory {:?}",
+ default_path_str,
+ secondary_path_str,
+ dir_path.display()),
+ }),
+ (true, true) => Err(ModulePathError {
+ err_msg: format!("file for module `{}` found at both {} and {}",
+ mod_name,
+ default_path_str,
+ secondary_path_str),
+ help_msg: "delete or rename one of them to remove the ambiguity".to_owned(),
+ }),
+ };
+
+ ModulePath {
+ name: mod_name,
+ path_exists: default_exists || secondary_exists,
+ result: result,
+ }
+ }
+
+ fn submod_path(&mut self,
+ id: ast::Ident,
+ outer_attrs: &[ast::Attribute],
+ id_sp: Span) -> PResult<ModulePathSuccess> {
let mut prefix = PathBuf::from(&self.sess.codemap().span_to_filename(self.span));
prefix.pop();
let mut dir_path = prefix;
for part in &self.mod_path_stack {
dir_path.push(&**part);
}
- let mod_string = token::get_ident(id);
- let (file_path, owns_directory) = match ::attr::first_attr_value_str_by_name(
- outer_attrs, "path") {
- Some(d) => (dir_path.join(&*d), true),
- None => {
- let mod_name = mod_string.to_string();
- let default_path_str = format!("{}.rs", mod_name);
- let secondary_path_str = format!("{}/mod.rs", mod_name);
- let default_path = dir_path.join(&default_path_str[..]);
- let secondary_path = dir_path.join(&secondary_path_str[..]);
- let default_exists = self.sess.codemap().file_exists(&default_path);
- let secondary_exists = self.sess.codemap().file_exists(&secondary_path);
-
- if !self.owns_directory {
- self.span_err(id_sp,
- "cannot declare a new module at this location");
- let this_module = match self.mod_path_stack.last() {
- Some(name) => name.to_string(),
- None => self.root_module_name.as_ref().unwrap().clone(),
- };
- self.span_note(id_sp,
- &format!("maybe move this module `{0}` \
- to its own directory via \
- `{0}/mod.rs`",
- this_module));
- if default_exists || secondary_exists {
- self.span_note(id_sp,
- &format!("... or maybe `use` the module \
- `{}` instead of possibly \
- redeclaring it",
- mod_name));
- }
- self.abort_if_errors();
- }
- match (default_exists, secondary_exists) {
- (true, false) => (default_path, false),
- (false, true) => (secondary_path, true),
- (false, false) => {
- return Err(self.span_fatal_help(id_sp,
- &format!("file not found for module `{}`",
- mod_name),
- &format!("name the file either {} or {} inside \
- the directory {:?}",
- default_path_str,
- secondary_path_str,
- dir_path.display())));
- }
- (true, true) => {
- return Err(self.span_fatal_help(
- id_sp,
- &format!("file for module `{}` found at both {} \
- and {}",
- mod_name,
- default_path_str,
- secondary_path_str),
- "delete or rename one of them to remove the ambiguity"));
- }
- }
+ if let Some(p) = Parser::submod_path_from_attr(outer_attrs, &dir_path) {
+ return Ok(ModulePathSuccess { path: p, owns_directory: true });
+ }
+
+ let paths = Parser::default_submod_path(id, &dir_path, self.sess.codemap());
+
+ if !self.owns_directory {
+ self.span_err(id_sp, "cannot declare a new module at this location");
+ let this_module = match self.mod_path_stack.last() {
+ Some(name) => name.to_string(),
+ None => self.root_module_name.as_ref().unwrap().clone(),
+ };
+ self.span_note(id_sp,
+ &format!("maybe move this module `{0}` to its own directory \
+ via `{0}/mod.rs`",
+ this_module));
+ if paths.path_exists {
+ self.span_note(id_sp,
+ &format!("... or maybe `use` the module `{}` instead \
+ of possibly redeclaring it",
+ paths.name));
}
- };
+ self.abort_if_errors();
+ }
+
+ match paths.result {
+ Ok(succ) => Ok(succ),
+ Err(err) => Err(self.span_fatal_help(id_sp, &err.err_msg, &err.help_msg)),
+ }
+ }
+
+ /// Read a module from a source file.
+ fn eval_src_mod(&mut self,
+ id: ast::Ident,
+ outer_attrs: &[ast::Attribute],
+ id_sp: Span)
+ -> PResult<(ast::Item_, Vec<ast::Attribute> )> {
+ let ModulePathSuccess { path, owns_directory } = try!(self.submod_path(id,
+ outer_attrs,
+ id_sp));
- self.eval_src_mod_from_path(file_path, owns_directory,
- mod_string.to_string(), id_sp)
+ self.eval_src_mod_from_path(path,
+ owns_directory,
+ id.to_string(),
+ id_sp)
}
fn eval_src_mod_from_path(&mut self,
included_mod_stack.push(path.clone());
drop(included_mod_stack);
- let mut p0 =
- new_sub_parser_from_file(self.sess,
- self.cfg.clone(),
- &path,
- owns_directory,
- Some(name),
- id_sp);
+ let mut p0 = new_sub_parser_from_file(self.sess,
+ self.cfg.clone(),
+ &path,
+ owns_directory,
+ Some(name),
+ id_sp);
let mod_inner_lo = p0.span.lo;
let mod_attrs = p0.parse_inner_attributes();
let m0 = try!(p0.parse_mod_items(&token::Eof, mod_inner_lo));
self.span_err(start_span,
&format!("unit-like struct variant should be written \
without braces, as `{},`",
- token::get_ident(ident)));
+ ident));
}
kind = StructVariantKind(struct_def);
} else if self.check(&token::OpenDelim(token::Paren)) {
let sp = self.span;
self.expect_no_suffix(sp, "ABI spec", suf);
try!(self.bump());
- let the_string = s.as_str();
- match abi::lookup(the_string) {
+ match abi::lookup(&s.as_str()) {
Some(abi) => Ok(Some(abi)),
None => {
let last_span = self.last_span;
self.span_err(
last_span,
- &format!("illegal ABI: expected one of [{}], \
+ &format!("invalid ABI: expected one of [{}], \
found `{}`",
- abi::all_names().connect(", "),
- the_string));
+ abi::all_names().join(", "),
+ s));
Ok(None)
}
}
string: string,
}
}
+
+ #[inline]
+ pub fn new_from_name(name: ast::Name) -> InternedString {
+ let interner = get_ident_interner();
+ InternedString::new_from_rc_str(interner.get(name))
+ }
}
impl Deref for InternedString {
}
}
-impl<'a> PartialEq<InternedString > for &'a str {
+impl<'a> PartialEq<InternedString> for &'a str {
#[inline(always)]
fn eq(&self, other: &InternedString) -> bool {
PartialEq::eq(*self, &other.string[..])
impl Decodable for InternedString {
fn decode<D: Decoder>(d: &mut D) -> Result<InternedString, D::Error> {
- Ok(get_name(get_ident_interner().intern(&try!(d.read_str())[..])))
+ Ok(intern(try!(d.read_str()).as_ref()).as_str())
}
}
}
}
-/// Returns the string contents of a name, using the thread-local interner.
-#[inline]
-pub fn get_name(name: ast::Name) -> InternedString {
- let interner = get_ident_interner();
- InternedString::new_from_rc_str(interner.get(name))
-}
-
-/// Returns the string contents of an identifier, using the thread-local
-/// interner.
-#[inline]
-pub fn get_ident(ident: ast::Ident) -> InternedString {
- get_name(ident.name)
-}
-
/// Interns and returns the string contents of an identifier, using the
/// thread-local interner.
#[inline]
pub fn intern_and_get_ident(s: &str) -> InternedString {
- get_name(intern(s))
+ intern(s).as_str()
}
/// Maps a string to its interned representation.
use std::io;
use std::string;
-use std::iter::repeat;
#[derive(Clone, Copy, PartialEq)]
pub enum Breaks {
// fall behind.
let n: usize = 3 * linewidth;
debug!("mk_printer {}", linewidth);
- let token: Vec<Token> = repeat(Token::Eof).take(n).collect();
- let size: Vec<isize> = repeat(0).take(n).collect();
- let scan_stack: Vec<usize> = repeat(0).take(n).collect();
+ let token = vec![Token::Eof; n];
+ let size = vec![0_isize; n];
+ let scan_stack = vec![0_usize; n];
Printer {
out: out,
buf_len: n,
// of the feature gate, so we fake them up here.
let no_std_meta = attr::mk_word_item(InternedString::new("no_std"));
+ let prelude_import_meta = attr::mk_word_item(InternedString::new("prelude_import"));
// #![feature(no_std)]
let fake_attr = attr::mk_attr_inner(attr::mk_attr_id(),
attr::mk_list_item(InternedString::new("feature"),
- vec![no_std_meta.clone()]));
+ vec![no_std_meta.clone(),
+ prelude_import_meta]));
try!(s.print_attribute(&fake_attr));
// #![no_std]
/* Literals */
token::Literal(lit, suf) => {
let mut out = match lit {
- token::Byte(b) => format!("b'{}'", b.as_str()),
- token::Char(c) => format!("'{}'", c.as_str()),
- token::Float(c) => c.as_str().to_string(),
- token::Integer(c) => c.as_str().to_string(),
- token::Str_(s) => format!("\"{}\"", s.as_str()),
+ token::Byte(b) => format!("b'{}'", b),
+ token::Char(c) => format!("'{}'", c),
+ token::Float(c) => c.to_string(),
+ token::Integer(c) => c.to_string(),
+ token::Str_(s) => format!("\"{}\"", s),
token::StrRaw(s, n) => format!("r{delim}\"{string}\"{delim}",
delim=repeat("#", n),
- string=s.as_str()),
- token::Binary(v) => format!("b\"{}\"", v.as_str()),
+ string=s),
+ token::Binary(v) => format!("b\"{}\"", v),
token::BinaryRaw(s, n) => format!("br{delim}\"{string}\"{delim}",
delim=repeat("#", n),
- string=s.as_str()),
+ string=s),
};
if let Some(s) = suf {
- out.push_str(s.as_str())
+ out.push_str(&s.as_str())
}
out
}
/* Name components */
- token::Ident(s, _) => token::get_ident(s).to_string(),
- token::Lifetime(s) => format!("{}", token::get_ident(s)),
+ token::Ident(s, _) => s.to_string(),
+ token::Lifetime(s) => s.to_string(),
token::Underscore => "_".to_string(),
/* Other */
- token::DocComment(s) => s.as_str().to_string(),
+ token::DocComment(s) => s.to_string(),
token::SubstNt(s, _) => format!("${}", s),
token::MatchNt(s, t, _, _) => format!("${}:{}", s, t),
token::Eof => "<eof>".to_string(),
token::Whitespace => " ".to_string(),
token::Comment => "/* */".to_string(),
- token::Shebang(s) => format!("/* shebang: {}*/", s.as_str()),
+ token::Shebang(s) => format!("/* shebang: {}*/", s),
token::SpecialVarNt(var) => format!("${}", var.as_str()),
try!(self.head(&visibility_qualified(item.vis,
"extern crate")));
if let Some(p) = *optional_path {
- let val = token::get_name(p);
+ let val = p.as_str();
if val.contains("-") {
try!(self.print_string(&val, ast::CookedStr));
} else {
attrs: &[ast::Attribute],
close_box: bool) -> io::Result<()> {
match blk.rules {
- ast::UnsafeBlock(..) => try!(self.word_space("unsafe")),
- ast::DefaultBlock => ()
+ ast::UnsafeBlock(..) | ast::PushUnsafeBlock(..) => try!(self.word_space("unsafe")),
+ ast::DefaultBlock | ast::PopUnsafeBlock(..) => ()
}
try!(self.maybe_print_comment(blk.span.lo));
try!(self.ann.pre(self, NodeBlock(blk)));
}
pub fn print_ident(&mut self, ident: ast::Ident) -> io::Result<()> {
- try!(word(&mut self.s, &token::get_ident(ident)));
+ try!(word(&mut self.s, &ident.name.as_str()));
self.ann.post(self, NodeIdent(&ident))
}
}
pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> {
- try!(word(&mut self.s, &token::get_name(name)));
+ try!(word(&mut self.s, &name.as_str()));
self.ann.post(self, NodeName(&name))
}
use ast;
use attr;
-use codemap::DUMMY_SP;
+use codemap::{DUMMY_SP, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use codemap;
use fold::Folder;
use fold;
use parse::token::InternedString;
use parse::token::special_idents;
-use parse::token;
+use parse::{token, ParseSess};
use ptr::P;
use util::small_vector::SmallVector;
+/// Craft a span that will be ignored by the stability lint's
+/// call to codemap's is_internal check.
+/// The expanded code uses the unstable `#[prelude_import]` attribute.
+fn ignored_span(sess: &ParseSess, sp: Span) -> Span {
+ let info = ExpnInfo {
+ call_site: DUMMY_SP,
+ callee: NameAndSpan {
+ name: "std_inject".to_string(),
+ format: MacroAttribute,
+ span: None,
+ allow_internal_unstable: true,
+ }
+ };
+ let expn_id = sess.codemap().record_expansion(info);
+ let mut sp = sp;
+ sp.expn_id = expn_id;
+ return sp;
+}
+
pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option<String>)
-> ast::Crate {
if use_std(&krate) {
}
}
-pub fn maybe_inject_prelude(krate: ast::Crate) -> ast::Crate {
+pub fn maybe_inject_prelude(sess: &ParseSess, krate: ast::Crate) -> ast::Crate {
if use_std(&krate) {
- inject_prelude(krate)
+ let mut fold = PreludeInjector {
+ span: ignored_span(sess, DUMMY_SP)
+ };
+ fold.fold_crate(krate)
} else {
krate
}
fold.fold_crate(krate)
}
-struct PreludeInjector;
-
+struct PreludeInjector {
+ span: Span
+}
impl fold::Folder for PreludeInjector {
fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
fn fold_mod(&mut self, mut mod_: ast::Mod) -> ast::Mod {
let prelude_path = ast::Path {
- span: DUMMY_SP,
+ span: self.span,
global: false,
segments: vec![
ast::PathSegment {
ident: special_idents::invalid,
node: ast::ItemUse(vp),
attrs: vec![ast::Attribute {
- span: DUMMY_SP,
+ span: self.span,
node: ast::Attribute_ {
id: attr::mk_attr_id(),
style: ast::AttrOuter,
value: P(ast::MetaItem {
- span: DUMMY_SP,
- node: ast::MetaWord(token::get_name(
- special_idents::prelude_import.name)),
+ span: self.span,
+ node: ast::MetaWord(special_idents::prelude_import.name.as_str()),
}),
is_sugared_doc: false,
},
}],
vis: ast::Inherited,
- span: DUMMY_SP,
+ span: self.span,
}));
fold::noop_fold_mod(mod_, self)
}
}
-
-fn inject_prelude(krate: ast::Crate) -> ast::Crate {
- let mut fold = PreludeInjector;
- fold.fold_crate(krate)
-}
walk_struct_def(self, s)
}
fn visit_struct_field(&mut self, s: &'v StructField) { walk_struct_field(self, s) }
+ fn visit_enum_def(&mut self, enum_definition: &'v EnumDef,
+ generics: &'v Generics) {
+ walk_enum_def(self, enum_definition, generics)
+ }
+
fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics) { walk_variant(self, v, g) }
/// Visits an optional reference to a lifetime. The `span` is the span of some surrounding
}
ItemEnum(ref enum_definition, ref type_parameters) => {
visitor.visit_generics(type_parameters);
- walk_enum_def(visitor, enum_definition, type_parameters)
+ visitor.visit_enum_def(enum_definition, type_parameters)
}
ItemDefaultImpl(_, ref trait_ref) => {
visitor.visit_trait_ref(trait_ref)
#![deny(missing_docs)]
#![feature(box_syntax)]
-#![feature(owned_ascii_ext)]
#![feature(path_ext)]
#![feature(rustc_private)]
#![feature(staged_api)]
/// Terminal color definitions
+#[allow(missing_docs)]
pub mod color {
/// Number for a terminal color
pub type Color = u16;
use self::States::*;
use self::FormatState::*;
use self::FormatOp::*;
-use std::ascii::OwnedAsciiExt;
+use std::ascii::AsciiExt;
use std::mem::replace;
use std::iter::repeat;
}
}
FormatHEX => {
- s = s.into_ascii_uppercase();
+ s = s.to_ascii_uppercase();
if flags.alternate {
let s_ = replace(&mut s, vec!(b'0', b'X'));
s.extend(s_);
// These are the orders ncurses uses in its compiled format (as of 5.9). Not sure if portable.
+#[allow(missing_docs)]
pub static boolfnames: &'static[&'static str] = &["auto_left_margin", "auto_right_margin",
"no_esc_ctlc", "ceol_standout_glitch", "eat_newline_glitch", "erase_overstrike", "generic_type",
"hard_copy", "has_meta_key", "has_status_line", "insert_null_glitch", "memory_above",
"no_correctly_working_cr", "gnu_has_meta_key", "linefeed_is_newline", "has_hardware_tabs",
"return_does_clr_eol"];
+#[allow(missing_docs)]
pub static boolnames: &'static[&'static str] = &["bw", "am", "xsb", "xhp", "xenl", "eo",
"gn", "hc", "km", "hs", "in", "db", "da", "mir", "msgr", "os", "eslok", "xt", "hz", "ul", "xon",
"nxon", "mc5i", "chts", "nrrmc", "npc", "ndscr", "ccc", "bce", "hls", "xhpa", "crxm", "daisy",
"xvpa", "sam", "cpix", "lpix", "OTbs", "OTns", "OTnc", "OTMT", "OTNL", "OTpt", "OTxr"];
+#[allow(missing_docs)]
pub static numfnames: &'static[&'static str] = &[ "columns", "init_tabs", "lines",
"lines_of_memory", "magic_cookie_glitch", "padding_baud_rate", "virtual_terminal",
"width_status_line", "num_labels", "label_height", "label_width", "max_attributes",
"bit_image_entwining", "bit_image_type", "magic_cookie_glitch_ul", "carriage_return_delay",
"new_line_delay", "backspace_delay", "horizontal_tab_delay", "number_of_function_keys"];
+#[allow(missing_docs)]
pub static numnames: &'static[&'static str] = &[ "cols", "it", "lines", "lm", "xmc", "pb",
"vt", "wsl", "nlab", "lh", "lw", "ma", "wnum", "colors", "pairs", "ncv", "bufsz", "spinv",
"spinh", "maddr", "mjump", "mcs", "mls", "npins", "orc", "orl", "orhi", "orvi", "cps", "widcs",
"btns", "bitwin", "bitype", "UTug", "OTdC", "OTdN", "OTdB", "OTdT", "OTkn"];
+#[allow(missing_docs)]
pub static stringfnames: &'static[&'static str] = &[ "back_tab", "bell", "carriage_return",
"change_scroll_region", "clear_all_tabs", "clear_screen", "clr_eol", "clr_eos",
"column_address", "command_character", "cursor_address", "cursor_down", "cursor_home",
"acs_lrcorner", "acs_ltee", "acs_rtee", "acs_btee", "acs_ttee", "acs_hline", "acs_vline",
"acs_plus", "memory_lock", "memory_unlock", "box_chars_1"];
+#[allow(missing_docs)]
pub static stringnames: &'static[&'static str] = &[ "cbt", "_", "cr", "csr", "tbc", "clear",
"_", "_", "hpa", "cmdch", "cup", "cud1", "home", "civis", "cub1", "mrcup", "cnorm", "cuf1",
"ll", "cuu1", "cvvis", "dch1", "dl1", "dsl", "hd", "smacs", "blink", "bold", "smcup", "smdc",
#![feature(asm)]
#![feature(box_syntax)]
-#![feature(duration)]
#![feature(duration_span)]
#![feature(fnbox)]
#![feature(iter_cmp)]
#![feature(libc)]
-#![feature(rt)]
#![feature(rustc_private)]
#![feature(set_stdio)]
-#![feature(slice_extras)]
#![feature(staged_api)]
extern crate getopts;
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
- let args_ = args.tail();
+ let args_ = &args[1..];
let matches =
match getopts::getopts(args_, &optgroups()) {
Ok(m) => m,
#[allow(deprecated)]
fn get_concurrency() -> usize {
- match env::var("RUST_TEST_THREADS") {
+ return match env::var("RUST_TEST_THREADS") {
Ok(s) => {
let opt_n: Option<usize> = s.parse().ok();
match opt_n {
_ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
}
}
- Err(..) => {
- if std::rt::util::limit_thread_creation_due_to_osx_and_valgrind() {
- 1
- } else {
- extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
- unsafe { rust_get_num_cpus() as usize }
- }
+ Err(..) => num_cpus(),
+ };
+
+ #[cfg(windows)]
+ fn num_cpus() -> usize {
+ unsafe {
+ let mut sysinfo = std::mem::zeroed();
+ libc::GetSystemInfo(&mut sysinfo);
+ sysinfo.dwNumberOfProcessors as usize
}
}
+
+ #[cfg(unix)]
+ fn num_cpus() -> usize {
+ extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
+ unsafe { rust_get_num_cpus() as usize }
+ }
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
.map(|(k,v)| format!("{}: {} (+/- {})", *k,
v.value, v.noise))
.collect();
- v.connect(", ")
+ v.join(", ")
}
}
}
pub fn ns_elapsed(&mut self) -> u64 {
- self.dur.secs() * 1_000_000_000 + (self.dur.extra_nanos() as u64)
+ self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
}
pub fn ns_per_iter(&mut self) -> u64 {
#endif
#if !defined(__APPLE__)
-.type MORESTACK,%function
+func MORESTACK
#endif
// FIXME(AARCH64): this might not be perfectly right but works for now
bl STACK_EXHAUSTED
// the above function ensures that it never returns
.cfi_endproc
+
+#if !defined(__APPLE__)
+endfunc MORESTACK
+#endif
+++ /dev/null
-// ISO C9x compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. The name of the author may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_INTTYPES_H_ // [
-#define _MSC_INTTYPES_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include "stdint.h"
-
-// 7.8 Format conversion of integer types
-
-typedef struct {
- intmax_t quot;
- intmax_t rem;
-} imaxdiv_t;
-
-// 7.8.1 Macros for format specifiers
-
-#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
-
-// The fprintf macros for signed integers are:
-#define PRId8 "d"
-#define PRIi8 "i"
-#define PRIdLEAST8 "d"
-#define PRIiLEAST8 "i"
-#define PRIdFAST8 "d"
-#define PRIiFAST8 "i"
-
-#define PRId16 "hd"
-#define PRIi16 "hi"
-#define PRIdLEAST16 "hd"
-#define PRIiLEAST16 "hi"
-#define PRIdFAST16 "hd"
-#define PRIiFAST16 "hi"
-
-#define PRId32 "I32d"
-#define PRIi32 "I32i"
-#define PRIdLEAST32 "I32d"
-#define PRIiLEAST32 "I32i"
-#define PRIdFAST32 "I32d"
-#define PRIiFAST32 "I32i"
-
-#define PRId64 "I64d"
-#define PRIi64 "I64i"
-#define PRIdLEAST64 "I64d"
-#define PRIiLEAST64 "I64i"
-#define PRIdFAST64 "I64d"
-#define PRIiFAST64 "I64i"
-
-#define PRIdMAX "I64d"
-#define PRIiMAX "I64i"
-
-#define PRIdPTR "Id"
-#define PRIiPTR "Ii"
-
-// The fprintf macros for unsigned integers are:
-#define PRIo8 "o"
-#define PRIu8 "u"
-#define PRIx8 "x"
-#define PRIX8 "X"
-#define PRIoLEAST8 "o"
-#define PRIuLEAST8 "u"
-#define PRIxLEAST8 "x"
-#define PRIXLEAST8 "X"
-#define PRIoFAST8 "o"
-#define PRIuFAST8 "u"
-#define PRIxFAST8 "x"
-#define PRIXFAST8 "X"
-
-#define PRIo16 "ho"
-#define PRIu16 "hu"
-#define PRIx16 "hx"
-#define PRIX16 "hX"
-#define PRIoLEAST16 "ho"
-#define PRIuLEAST16 "hu"
-#define PRIxLEAST16 "hx"
-#define PRIXLEAST16 "hX"
-#define PRIoFAST16 "ho"
-#define PRIuFAST16 "hu"
-#define PRIxFAST16 "hx"
-#define PRIXFAST16 "hX"
-
-#define PRIo32 "I32o"
-#define PRIu32 "I32u"
-#define PRIx32 "I32x"
-#define PRIX32 "I32X"
-#define PRIoLEAST32 "I32o"
-#define PRIuLEAST32 "I32u"
-#define PRIxLEAST32 "I32x"
-#define PRIXLEAST32 "I32X"
-#define PRIoFAST32 "I32o"
-#define PRIuFAST32 "I32u"
-#define PRIxFAST32 "I32x"
-#define PRIXFAST32 "I32X"
-
-#define PRIo64 "I64o"
-#define PRIu64 "I64u"
-#define PRIx64 "I64x"
-#define PRIX64 "I64X"
-#define PRIoLEAST64 "I64o"
-#define PRIuLEAST64 "I64u"
-#define PRIxLEAST64 "I64x"
-#define PRIXLEAST64 "I64X"
-#define PRIoFAST64 "I64o"
-#define PRIuFAST64 "I64u"
-#define PRIxFAST64 "I64x"
-#define PRIXFAST64 "I64X"
-
-#define PRIoMAX "I64o"
-#define PRIuMAX "I64u"
-#define PRIxMAX "I64x"
-#define PRIXMAX "I64X"
-
-#define PRIoPTR "Io"
-#define PRIuPTR "Iu"
-#define PRIxPTR "Ix"
-#define PRIXPTR "IX"
-
-// The fscanf macros for signed integers are:
-#define SCNd8 "d"
-#define SCNi8 "i"
-#define SCNdLEAST8 "d"
-#define SCNiLEAST8 "i"
-#define SCNdFAST8 "d"
-#define SCNiFAST8 "i"
-
-#define SCNd16 "hd"
-#define SCNi16 "hi"
-#define SCNdLEAST16 "hd"
-#define SCNiLEAST16 "hi"
-#define SCNdFAST16 "hd"
-#define SCNiFAST16 "hi"
-
-#define SCNd32 "ld"
-#define SCNi32 "li"
-#define SCNdLEAST32 "ld"
-#define SCNiLEAST32 "li"
-#define SCNdFAST32 "ld"
-#define SCNiFAST32 "li"
-
-#define SCNd64 "I64d"
-#define SCNi64 "I64i"
-#define SCNdLEAST64 "I64d"
-#define SCNiLEAST64 "I64i"
-#define SCNdFAST64 "I64d"
-#define SCNiFAST64 "I64i"
-
-#define SCNdMAX "I64d"
-#define SCNiMAX "I64i"
-
-#ifdef _WIN64 // [
-# define SCNdPTR "I64d"
-# define SCNiPTR "I64i"
-#else // _WIN64 ][
-# define SCNdPTR "ld"
-# define SCNiPTR "li"
-#endif // _WIN64 ]
-
-// The fscanf macros for unsigned integers are:
-#define SCNo8 "o"
-#define SCNu8 "u"
-#define SCNx8 "x"
-#define SCNX8 "X"
-#define SCNoLEAST8 "o"
-#define SCNuLEAST8 "u"
-#define SCNxLEAST8 "x"
-#define SCNXLEAST8 "X"
-#define SCNoFAST8 "o"
-#define SCNuFAST8 "u"
-#define SCNxFAST8 "x"
-#define SCNXFAST8 "X"
-
-#define SCNo16 "ho"
-#define SCNu16 "hu"
-#define SCNx16 "hx"
-#define SCNX16 "hX"
-#define SCNoLEAST16 "ho"
-#define SCNuLEAST16 "hu"
-#define SCNxLEAST16 "hx"
-#define SCNXLEAST16 "hX"
-#define SCNoFAST16 "ho"
-#define SCNuFAST16 "hu"
-#define SCNxFAST16 "hx"
-#define SCNXFAST16 "hX"
-
-#define SCNo32 "lo"
-#define SCNu32 "lu"
-#define SCNx32 "lx"
-#define SCNX32 "lX"
-#define SCNoLEAST32 "lo"
-#define SCNuLEAST32 "lu"
-#define SCNxLEAST32 "lx"
-#define SCNXLEAST32 "lX"
-#define SCNoFAST32 "lo"
-#define SCNuFAST32 "lu"
-#define SCNxFAST32 "lx"
-#define SCNXFAST32 "lX"
-
-#define SCNo64 "I64o"
-#define SCNu64 "I64u"
-#define SCNx64 "I64x"
-#define SCNX64 "I64X"
-#define SCNoLEAST64 "I64o"
-#define SCNuLEAST64 "I64u"
-#define SCNxLEAST64 "I64x"
-#define SCNXLEAST64 "I64X"
-#define SCNoFAST64 "I64o"
-#define SCNuFAST64 "I64u"
-#define SCNxFAST64 "I64x"
-#define SCNXFAST64 "I64X"
-
-#define SCNoMAX "I64o"
-#define SCNuMAX "I64u"
-#define SCNxMAX "I64x"
-#define SCNXMAX "I64X"
-
-#ifdef _WIN64 // [
-# define SCNoPTR "I64o"
-# define SCNuPTR "I64u"
-# define SCNxPTR "I64x"
-# define SCNXPTR "I64X"
-#else // _WIN64 ][
-# define SCNoPTR "lo"
-# define SCNuPTR "lu"
-# define SCNxPTR "lx"
-# define SCNXPTR "lX"
-#endif // _WIN64 ]
-
-#endif // __STDC_FORMAT_MACROS ]
-
-// 7.8.2 Functions for greatest-width integer types
-
-// 7.8.2.1 The imaxabs function
-#define imaxabs _abs64
-
-// 7.8.2.2 The imaxdiv function
-
-// This is modified version of div() function from Microsoft's div.c found
-// in %MSVC.NET%\crt\src\div.c
-#ifdef STATIC_IMAXDIV // [
-static
-#else // STATIC_IMAXDIV ][
-_inline
-#endif // STATIC_IMAXDIV ]
-imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
-{
- imaxdiv_t result;
-
- result.quot = numer / denom;
- result.rem = numer % denom;
-
- if (numer < 0 && result.rem > 0) {
- // did division wrong; must fix up
- ++result.quot;
- result.rem -= denom;
- }
-
- return result;
-}
-
-// 7.8.2.3 The strtoimax and strtoumax functions
-#define strtoimax _strtoi64
-#define strtoumax _strtoui64
-
-// 7.8.2.4 The wcstoimax and wcstoumax functions
-#define wcstoimax _wcstoi64
-#define wcstoumax _wcstoui64
-
-
-#endif // _MSC_INTTYPES_H_ ]
+++ /dev/null
-// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2008 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. The name of the author may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_STDINT_H_ // [
-#define _MSC_STDINT_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include <limits.h>
-
-// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
-// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
-// or compiler give many errors like this:
-// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
-#ifdef __cplusplus
-extern "C" {
-#endif
-# include <wchar.h>
-#ifdef __cplusplus
-}
-#endif
-
-// Define _W64 macros to mark types changing their size, like intptr_t.
-#ifndef _W64
-# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
-# define _W64 __w64
-# else
-# define _W64
-# endif
-#endif
-
-
-// 7.18.1 Integer types
-
-// 7.18.1.1 Exact-width integer types
-
-// Visual Studio 6 and Embedded Visual C++ 4 doesn't
-// realize that, e.g. char has the same size as __int8
-// so we give up on __intX for them.
-#if (_MSC_VER < 1300)
- typedef signed char int8_t;
- typedef signed short int16_t;
- typedef signed int int32_t;
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
-#else
- typedef signed __int8 int8_t;
- typedef signed __int16 int16_t;
- typedef signed __int32 int32_t;
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int16 uint16_t;
- typedef unsigned __int32 uint32_t;
-#endif
-typedef signed __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-
-
-// 7.18.1.2 Minimum-width integer types
-typedef int8_t int_least8_t;
-typedef int16_t int_least16_t;
-typedef int32_t int_least32_t;
-typedef int64_t int_least64_t;
-typedef uint8_t uint_least8_t;
-typedef uint16_t uint_least16_t;
-typedef uint32_t uint_least32_t;
-typedef uint64_t uint_least64_t;
-
-// 7.18.1.3 Fastest minimum-width integer types
-typedef int8_t int_fast8_t;
-typedef int16_t int_fast16_t;
-typedef int32_t int_fast32_t;
-typedef int64_t int_fast64_t;
-typedef uint8_t uint_fast8_t;
-typedef uint16_t uint_fast16_t;
-typedef uint32_t uint_fast32_t;
-typedef uint64_t uint_fast64_t;
-
-// 7.18.1.4 Integer types capable of holding object pointers
-#ifdef _WIN64 // [
- typedef signed __int64 intptr_t;
- typedef unsigned __int64 uintptr_t;
-#else // _WIN64 ][
- typedef _W64 signed int intptr_t;
- typedef _W64 unsigned int uintptr_t;
-#endif // _WIN64 ]
-
-// 7.18.1.5 Greatest-width integer types
-typedef int64_t intmax_t;
-typedef uint64_t uintmax_t;
-
-
-// 7.18.2 Limits of specified-width integer types
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
-
-// 7.18.2.1 Limits of exact-width integer types
-#define INT8_MIN ((int8_t)_I8_MIN)
-#define INT8_MAX _I8_MAX
-#define INT16_MIN ((int16_t)_I16_MIN)
-#define INT16_MAX _I16_MAX
-#define INT32_MIN ((int32_t)_I32_MIN)
-#define INT32_MAX _I32_MAX
-#define INT64_MIN ((int64_t)_I64_MIN)
-#define INT64_MAX _I64_MAX
-#define UINT8_MAX _UI8_MAX
-#define UINT16_MAX _UI16_MAX
-#define UINT32_MAX _UI32_MAX
-#define UINT64_MAX _UI64_MAX
-
-// 7.18.2.2 Limits of minimum-width integer types
-#define INT_LEAST8_MIN INT8_MIN
-#define INT_LEAST8_MAX INT8_MAX
-#define INT_LEAST16_MIN INT16_MIN
-#define INT_LEAST16_MAX INT16_MAX
-#define INT_LEAST32_MIN INT32_MIN
-#define INT_LEAST32_MAX INT32_MAX
-#define INT_LEAST64_MIN INT64_MIN
-#define INT_LEAST64_MAX INT64_MAX
-#define UINT_LEAST8_MAX UINT8_MAX
-#define UINT_LEAST16_MAX UINT16_MAX
-#define UINT_LEAST32_MAX UINT32_MAX
-#define UINT_LEAST64_MAX UINT64_MAX
-
-// 7.18.2.3 Limits of fastest minimum-width integer types
-#define INT_FAST8_MIN INT8_MIN
-#define INT_FAST8_MAX INT8_MAX
-#define INT_FAST16_MIN INT16_MIN
-#define INT_FAST16_MAX INT16_MAX
-#define INT_FAST32_MIN INT32_MIN
-#define INT_FAST32_MAX INT32_MAX
-#define INT_FAST64_MIN INT64_MIN
-#define INT_FAST64_MAX INT64_MAX
-#define UINT_FAST8_MAX UINT8_MAX
-#define UINT_FAST16_MAX UINT16_MAX
-#define UINT_FAST32_MAX UINT32_MAX
-#define UINT_FAST64_MAX UINT64_MAX
-
-// 7.18.2.4 Limits of integer types capable of holding object pointers
-#ifdef _WIN64 // [
-# define INTPTR_MIN INT64_MIN
-# define INTPTR_MAX INT64_MAX
-# define UINTPTR_MAX UINT64_MAX
-#else // _WIN64 ][
-# define INTPTR_MIN INT32_MIN
-# define INTPTR_MAX INT32_MAX
-# define UINTPTR_MAX UINT32_MAX
-#endif // _WIN64 ]
-
-// 7.18.2.5 Limits of greatest-width integer types
-#define INTMAX_MIN INT64_MIN
-#define INTMAX_MAX INT64_MAX
-#define UINTMAX_MAX UINT64_MAX
-
-// 7.18.3 Limits of other integer types
-
-#ifdef _WIN64 // [
-# define PTRDIFF_MIN _I64_MIN
-# define PTRDIFF_MAX _I64_MAX
-#else // _WIN64 ][
-# define PTRDIFF_MIN _I32_MIN
-# define PTRDIFF_MAX _I32_MAX
-#endif // _WIN64 ]
-
-#define SIG_ATOMIC_MIN INT_MIN
-#define SIG_ATOMIC_MAX INT_MAX
-
-#ifndef SIZE_MAX // [
-# ifdef _WIN64 // [
-# define SIZE_MAX _UI64_MAX
-# else // _WIN64 ][
-# define SIZE_MAX _UI32_MAX
-# endif // _WIN64 ]
-#endif // SIZE_MAX ]
-
-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
-#ifndef WCHAR_MIN // [
-# define WCHAR_MIN 0
-#endif // WCHAR_MIN ]
-#ifndef WCHAR_MAX // [
-# define WCHAR_MAX _UI16_MAX
-#endif // WCHAR_MAX ]
-
-#define WINT_MIN 0
-#define WINT_MAX _UI16_MAX
-
-#endif // __STDC_LIMIT_MACROS ]
-
-
-// 7.18.4 Limits of other integer types
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val) val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val) val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-#define INTMAX_C INT64_C
-#define UINTMAX_C UINT64_C
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-
-#endif // _MSC_STDINT_H_ ]
+++ /dev/null
-// This piece of magic brought to you by:
-// http://www.nedproductions.biz/blog/
-// implementing-typeof-in-microsofts-c-compiler
-
-#ifndef MSVC_TYPEOF_H
-#define MSVC_TYPEOF_H
-
-#if defined(_MSC_VER) && _MSC_VER>=1400
-namespace msvc_typeof_impl {
- /* This is a fusion of Igor Chesnokov's method (http://rsdn.ru/forum/src/1094305.aspx)
- and Steven Watanabe's method (http://lists.boost.org/Archives/boost/2006/12/115006.php)
-
- How it works:
- C++ allows template type inference for templated function parameters but nothing else.
- What we do is to pass the expression sent to typeof() into the templated function vartypeID()
- as its parameter, thus extracting its type. The big problem traditionally now is how to get
- that type out of the vartypeID() instance, and here's how we do it:
- 1. unique_type_id() returns a monotonically increasing integer for every unique type
- passed to it during this compilation unit. It also specialises an instance of
- msvc_extract_type<unique_type_id, type>::id2type_impl<true>.
- 2. vartypeID() returns a sized<unique_type_id> for the type where
- sizeof(sized<unique_type_id>)==unique_type_id. We vector through sized as a means
- of returning the unique_type_id at compile time rather than runtime.
- 3. msvc_extract_type<unique_type_id> then extracts the type by using a bug in MSVC to
- reselect the specialised child type (id2type_impl<true>) from within the specialisation
- of itself originally performed by the above instance of unique_type_id. This bug works
- because when MSVC calculated the signature of the specialised
- msvc_extract_type<unique_type_id, type>::id2type_impl<true>, it does not include the
- value of type in the signature of id2type_impl<true>. Therefore when we reselect
- msvc_extract_type<unique_type_id>::id2type_impl<true> it erroneously returns the one
- already in its list of instantiated types rather than correctly generating a newly
- specialised msvc_extract_type<unique_type_id, msvc_extract_type_default_param>::id2type_impl<true>
-
- This bug allows the impossible and gives us a working typeof() in MSVC. Hopefully Microsoft
- won't fix this bug until they implement a native typeof.
- */
-
- struct msvc_extract_type_default_param {};
- template<int ID, typename T = msvc_extract_type_default_param> struct msvc_extract_type;
-
- template<int ID> struct msvc_extract_type<ID, msvc_extract_type_default_param>
- {
- template<bool> struct id2type_impl;
-
- typedef id2type_impl<true> id2type;
- };
-
- template<int ID, typename T> struct msvc_extract_type : msvc_extract_type<ID, msvc_extract_type_default_param>
- {
- template<> struct id2type_impl<true> //VC8.0 specific bugfeature
- {
- typedef T type;
- };
- template<bool> struct id2type_impl;
-
- typedef id2type_impl<true> id2type;
- };
-
-
- template<int N> class CCounter;
-
- // TUnused is required to force compiler to recompile CCountOf class
- template<typename TUnused, int NTested = 0> struct CCountOf
- {
- enum
- {
- __if_exists(CCounter<NTested>) { count = CCountOf<TUnused, NTested + 1>::count }
- __if_not_exists(CCounter<NTested>) { count = NTested }
- };
- };
-
- template<class TTypeReg, class TUnused, int NValue> struct CProvideCounterValue { enum { value = NValue }; };
-
- // type_id
- #define unique_type_id(type) \
- (CProvideCounterValue< \
- /*register TYPE--ID*/ typename msvc_extract_type<CCountOf<type >::count, type>::id2type, \
- /*increment compile-time Counter*/ CCounter<CCountOf<type >::count>, \
- /*pass value of Counter*/CCountOf<type >::count \
- >::value)
-
- // Lets type_id() be > than 0
- class __Increment_type_id { enum { value = unique_type_id(__Increment_type_id) }; };
-
- // vartypeID() returns a type with sizeof(type_id)
- template<int NSize> class sized { char m_pad[NSize]; };
- template<typename T> typename sized<unique_type_id(T)> vartypeID(T&);
- template<typename T> typename sized<unique_type_id(const T)> vartypeID(const T&);
- template<typename T> typename sized<unique_type_id(volatile T)> vartypeID(volatile T&);
- template<typename T> typename sized<unique_type_id(const volatile T)> vartypeID(const volatile T&);
-}
-
-#define typeof(expression) msvc_typeof_impl::msvc_extract_type<sizeof(msvc_typeof_impl::vartypeID(expression))>::id2type::type
-#endif
-
-#endif
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#if !defined(_WIN32)
+
#include <stdint.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
-#if !defined(_WIN32)
#include <dirent.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
-#else
-#include <windows.h>
-#include <wincrypt.h>
-#include <stdio.h>
-#include <tchar.h>
-#endif
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#endif
-/* Foreign builtins. */
-//include valgrind.h after stdint.h so that uintptr_t is defined for msys2 w64
-#ifndef _WIN32
-#include "valgrind/valgrind.h"
-#endif
-
-#if defined(_MSC_VER)
-# define RUST_BUILTIN_API __declspec(dllexport)
-#else
-# define RUST_BUILTIN_API
-#endif
-
-#ifndef _WIN32
char*
rust_list_dir_val(struct dirent* entry_ptr) {
return entry_ptr->d_name;
rust_dirent_t_size() {
return sizeof(struct dirent);
}
-#endif
-#if defined(_WIN32)
-int
-get_num_cpus() {
- SYSTEM_INFO sysinfo;
- GetSystemInfo(&sysinfo);
-
- return (int) sysinfo.dwNumberOfProcessors;
-}
-#elif defined(__BSD__)
+#if defined(__BSD__)
int
get_num_cpus() {
/* swiped from http://stackoverflow.com/questions/150355/
}
#endif
-RUST_BUILTIN_API
uintptr_t
rust_get_num_cpus() {
return get_num_cpus();
}
-uintptr_t
-rust_running_on_valgrind() {
-#ifdef _WIN32
- return 0;
-#else
- return RUNNING_ON_VALGRIND;
-#endif
-}
-
#if defined(__DragonFly__)
#include <errno.h>
// In DragonFly __error() is an inline function and as such
#endif
+#endif // !defined(_WIN32)
+
//
// Local Variables:
// mode: C++
+++ /dev/null
-; Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-; file at the top-level directory of this distribution and at
-; http://rust-lang.org/COPYRIGHT.
-;
-; Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-; http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-; <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-; option. This file may not be copied, modified, or distributed
-; except according to those terms.
-
-; Rust's try-catch
-; When f(...) returns normally, the return value is null.
-; When f(...) throws, the return value is a pointer to the caught exception object.
-
-; See also: libstd/rt/unwind.rs
-
-define i8* @rust_try(void (i8*)* %f, i8* %env) {
-
- %1 = invoke i8* @rust_try_inner(void (i8*)* %f, i8* %env)
- to label %normal
- unwind label %catch
-
-normal:
- ret i8* %1
-
-catch:
- landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @rust_eh_personality_catch to i8*)
- catch i8* null
- ; rust_try_inner's landing pad does not resume unwinds, so execution will never reach here
- ret i8* null
-}
-
-define internal i8* @rust_try_inner(void (i8*)* %f, i8* %env) {
-
- invoke void %f(i8* %env)
- to label %normal
- unwind label %catch
-
-normal:
- ret i8* null
-
-catch:
- %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @rust_eh_personality to i8*)
- catch i8* null
- ; extract and return pointer to the exception object
- %2 = extractvalue { i8*, i32 } %1, 0
- ret i8* %2
-}
-
-declare i32 @rust_eh_personality(...)
-declare i32 @rust_eh_personality_catch(...)
+++ /dev/null
-/*
- ----------------------------------------------------------------
-
- Notice that the following BSD-style license applies to this one
- file (memcheck.h) only. The rest of Valgrind is licensed under the
- terms of the GNU General Public License, version 2, unless
- otherwise indicated. See the COPYING file in the source
- distribution for details.
-
- ----------------------------------------------------------------
-
- This file is part of MemCheck, a heavyweight Valgrind tool for
- detecting memory errors.
-
- Copyright (C) 2000-2013 Julian Seward. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
-
- 3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
-
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ----------------------------------------------------------------
-
- Notice that the above BSD-style license applies to this one file
- (memcheck.h) only. The entire rest of Valgrind is licensed under
- the terms of the GNU General Public License, version 2. See the
- COPYING file in the source distribution for details.
-
- ----------------------------------------------------------------
-*/
-
-
-#ifndef __MEMCHECK_H
-#define __MEMCHECK_H
-
-
-/* This file is for inclusion into client (your!) code.
-
- You can use these macros to manipulate and query memory permissions
- inside your own programs.
-
- See comment near the top of valgrind.h on how to use them.
-*/
-
-#include "valgrind.h"
-
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
- This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum {
- VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
- VG_USERREQ__MAKE_MEM_UNDEFINED,
- VG_USERREQ__MAKE_MEM_DEFINED,
- VG_USERREQ__DISCARD,
- VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
- VG_USERREQ__CHECK_MEM_IS_DEFINED,
- VG_USERREQ__DO_LEAK_CHECK,
- VG_USERREQ__COUNT_LEAKS,
-
- VG_USERREQ__GET_VBITS,
- VG_USERREQ__SET_VBITS,
-
- VG_USERREQ__CREATE_BLOCK,
-
- VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
-
- /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
- VG_USERREQ__COUNT_LEAK_BLOCKS,
-
- /* This is just for memcheck's internal use - don't use it */
- _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
- = VG_USERREQ_TOOL_BASE('M','C') + 256
- } Vg_MemCheckClientRequest;
-
-
-
-/* Client-code macros to manipulate the state of memory. */
-
-/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
-#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__MAKE_MEM_NOACCESS, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Similarly, mark memory at _qzz_addr as addressable but undefined
- for _qzz_len bytes. */
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__MAKE_MEM_UNDEFINED, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Similarly, mark memory at _qzz_addr as addressable and defined
- for _qzz_len bytes. */
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__MAKE_MEM_DEFINED, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
- not altered: bytes which are addressable are marked as defined,
- but those which are not addressable are left unchanged. */
-#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Create a block-description handle. The description is an ascii
- string which is included in any messages pertaining to addresses
- within the specified memory range. Has no other effect on the
- properties of the memory range. */
-#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CREATE_BLOCK, \
- (_qzz_addr), (_qzz_len), (_qzz_desc), \
- 0, 0)
-
-/* Discard a block-description-handle. Returns 1 for an
- invalid handle, 0 for a valid handle. */
-#define VALGRIND_DISCARD(_qzz_blkindex) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__DISCARD, \
- 0, (_qzz_blkindex), 0, 0, 0)
-
-
-/* Client-code macros to check the state of memory. */
-
-/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
- If suitable addressibility is not established, Valgrind prints an
- error message and returns the address of the first offending byte.
- Otherwise it returns zero. */
-#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Check that memory at _qzz_addr is addressable and defined for
- _qzz_len bytes. If suitable addressibility and definedness are not
- established, Valgrind prints an error message and returns the
- address of the first offending byte. Otherwise it returns zero. */
-#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__CHECK_MEM_IS_DEFINED, \
- (_qzz_addr), (_qzz_len), 0, 0, 0)
-
-/* Use this macro to force the definedness and addressibility of an
- lvalue to be checked. If suitable addressibility and definedness
- are not established, Valgrind prints an error message and returns
- the address of the first offending byte. Otherwise it returns
- zero. */
-#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
- VALGRIND_CHECK_MEM_IS_DEFINED( \
- (volatile unsigned char *)&(__lvalue), \
- (unsigned long)(sizeof (__lvalue)))
-
-
-/* Do a full memory leak check (like --leak-check=full) mid-execution. */
-#define VALGRIND_DO_LEAK_CHECK \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
- 0, 0, 0, 0, 0)
-
-/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
- which there was an increase in leaked bytes or leaked nr of blocks
- since the previous leak search. */
-#define VALGRIND_DO_ADDED_LEAK_CHECK \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
- 0, 1, 0, 0, 0)
-
-/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
- increased or decreased leaked bytes/blocks since previous leak
- search. */
-#define VALGRIND_DO_CHANGED_LEAK_CHECK \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
- 0, 2, 0, 0, 0)
-
-/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
-#define VALGRIND_DO_QUICK_LEAK_CHECK \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
- 1, 0, 0, 0, 0)
-
-/* Return number of leaked, dubious, reachable and suppressed bytes found by
- all previous leak checks. They must be lvalues. */
-#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
- /* For safety on 64-bit platforms we assign the results to private
- unsigned long variables, then assign these to the lvalues the user
- specified, which works no matter what type 'leaked', 'dubious', etc
- are. We also initialise '_qzz_leaked', etc because
- VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
- defined. */ \
- { \
- unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
- unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
- VALGRIND_DO_CLIENT_REQUEST_STMT( \
- VG_USERREQ__COUNT_LEAKS, \
- &_qzz_leaked, &_qzz_dubious, \
- &_qzz_reachable, &_qzz_suppressed, 0); \
- leaked = _qzz_leaked; \
- dubious = _qzz_dubious; \
- reachable = _qzz_reachable; \
- suppressed = _qzz_suppressed; \
- }
-
-/* Return number of leaked, dubious, reachable and suppressed bytes found by
- all previous leak checks. They must be lvalues. */
-#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
- /* For safety on 64-bit platforms we assign the results to private
- unsigned long variables, then assign these to the lvalues the user
- specified, which works no matter what type 'leaked', 'dubious', etc
- are. We also initialise '_qzz_leaked', etc because
- VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
- defined. */ \
- { \
- unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
- unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
- VALGRIND_DO_CLIENT_REQUEST_STMT( \
- VG_USERREQ__COUNT_LEAK_BLOCKS, \
- &_qzz_leaked, &_qzz_dubious, \
- &_qzz_reachable, &_qzz_suppressed, 0); \
- leaked = _qzz_leaked; \
- dubious = _qzz_dubious; \
- reachable = _qzz_reachable; \
- suppressed = _qzz_suppressed; \
- }
-
-
-/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
- into the provided zzvbits array. Return values:
- 0 if not running on valgrind
- 1 success
- 2 [previously indicated unaligned arrays; these are now allowed]
- 3 if any parts of zzsrc/zzvbits are not addressable.
- The metadata is not copied in cases 0, 2 or 3 so it should be
- impossible to segfault your system by using this call.
-*/
-#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__GET_VBITS, \
- (const char*)(zza), \
- (char*)(zzvbits), \
- (zznbytes), 0, 0)
-
-/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
- from the provided zzvbits array. Return values:
- 0 if not running on valgrind
- 1 success
- 2 [previously indicated unaligned arrays; these are now allowed]
- 3 if any parts of zza/zzvbits are not addressable.
- The metadata is not copied in cases 0, 2 or 3 so it should be
- impossible to segfault your system by using this call.
-*/
-#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__SET_VBITS, \
- (const char*)(zza), \
- (const char*)(zzvbits), \
- (zznbytes), 0, 0 )
-
-#endif
+++ /dev/null
-/* -*- c -*-
- ----------------------------------------------------------------
-
- Notice that the following BSD-style license applies to this one
- file (valgrind.h) only. The rest of Valgrind is licensed under the
- terms of the GNU General Public License, version 2, unless
- otherwise indicated. See the COPYING file in the source
- distribution for details.
-
- ----------------------------------------------------------------
-
- This file is part of Valgrind, a dynamic binary instrumentation
- framework.
-
- Copyright (C) 2000-2013 Julian Seward. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
-
- 3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
-
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- ----------------------------------------------------------------
-
- Notice that the above BSD-style license applies to this one file
- (valgrind.h) only. The entire rest of Valgrind is licensed under
- the terms of the GNU General Public License, version 2. See the
- COPYING file in the source distribution for details.
-
- ----------------------------------------------------------------
-*/
-
-
-/* This file is for inclusion into client (your!) code.
-
- You can use these macros to manipulate and query Valgrind's
- execution inside your own programs.
-
- The resulting executables will still run without Valgrind, just a
- little bit more slowly than they otherwise would, but otherwise
- unchanged. When not running on valgrind, each client request
- consumes very few (eg. 7) instructions, so the resulting performance
- loss is negligible unless you plan to execute client requests
- millions of times per second. Nevertheless, if that is still a
- problem, you can compile with the NVALGRIND symbol defined (gcc
- -DNVALGRIND) so that client requests are not even compiled in. */
-
-#ifndef __VALGRIND_H
-#define __VALGRIND_H
-
-
-/* ------------------------------------------------------------------ */
-/* VERSION NUMBER OF VALGRIND */
-/* ------------------------------------------------------------------ */
-
-/* Specify Valgrind's version number, so that user code can
- conditionally compile based on our version number. Note that these
- were introduced at version 3.6 and so do not exist in version 3.5
- or earlier. The recommended way to use them to check for "version
- X.Y or later" is (eg)
-
-#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
- && (__VALGRIND_MAJOR__ > 3 \
- || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
-*/
-#define __VALGRIND_MAJOR__ 3
-#define __VALGRIND_MINOR__ 8
-
-
-#include <stdarg.h>
-
-/* Nb: this file might be included in a file compiled with -ansi. So
- we can't use C++ style "//" comments nor the "asm" keyword (instead
- use "__asm__"). */
-
-/* Derive some tags indicating what the target platform is. Note
- that in this file we're using the compiler's CPP symbols for
- identifying architectures, which are different to the ones we use
- within the rest of Valgrind. Note, __powerpc__ is active for both
- 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is).
-
- Misc note: how to find out what's predefined in gcc by default:
- gcc -Wp,-dM somefile.c
-*/
-#undef PLAT_x86_darwin
-#undef PLAT_amd64_darwin
-#undef PLAT_x86_win32
-#undef PLAT_amd64_win64
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_arm_linux
-#undef PLAT_s390x_linux
-#undef PLAT_mips32_linux
-#undef PLAT_mips64_linux
-
-
-#if defined(__APPLE__) && defined(__i386__)
-# define PLAT_x86_darwin 1
-#elif defined(__APPLE__) && defined(__x86_64__)
-# define PLAT_amd64_darwin 1
-#elif defined(__MINGW64__) || (defined(_WIN64) && defined(_M_X64))
-# define PLAT_amd64_win64 1
-#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
- || (defined(_WIN32) && defined(_M_IX86))
-# define PLAT_x86_win32 1
-#elif defined(__linux__) && defined(__i386__)
-# define PLAT_x86_linux 1
-#elif defined(__linux__) && defined(__x86_64__)
-# define PLAT_amd64_linux 1
-#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
-# define PLAT_ppc32_linux 1
-#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
-# define PLAT_ppc64_linux 1
-#elif defined(__linux__) && defined(__arm__)
-# define PLAT_arm_linux 1
-#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
-# define PLAT_s390x_linux 1
-#elif defined(__linux__) && defined(__mips__)
-#if (__mips==64)
-# define PLAT_mips64_linux 1
-#else
-# define PLAT_mips32_linux 1
-#endif
-#else
-/* If we're not compiling for our target platform, don't generate
- any inline asms. */
-# if !defined(NVALGRIND)
-# define NVALGRIND 1
-# endif
-#endif
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
-/* in here of use to end-users -- skip to the next section. */
-/* ------------------------------------------------------------------ */
-
-/*
- * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
- * request. Accepts both pointers and integers as arguments.
- *
- * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
- * client request that does not return a value.
-
- * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
- * client request and whose value equals the client request result. Accepts
- * both pointers and integers as arguments. Note that such calls are not
- * necessarily pure functions -- they may have side effects.
- */
-
-#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
- _zzq_request, _zzq_arg1, _zzq_arg2, \
- _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
- (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
- (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
-
-#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
- _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
- (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
-
-#if defined(NVALGRIND)
-
-/* Define NVALGRIND to completely remove the Valgrind magic sequence
- from the compiled code (analogous to NDEBUG's effects on
- assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- (_zzq_default)
-
-#else /* ! NVALGRIND */
-
-/* The following defines the magic code sequences which the JITter
- spots and handles magically. Don't look too closely at them as
- they will rot your brain.
-
- The assembly code sequences for all architectures is in this one
- file. This is because this file must be stand-alone, and we don't
- want to have multiple files.
-
- For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
- value gets put in the return slot, so that everything works when
- this is executed not under Valgrind. Args are passed in a memory
- block, and so there's no intrinsic limit to the number that could
- be passed, but it's currently five.
-
- The macro args are:
- _zzq_rlval result lvalue
- _zzq_default default value (result returned when running on real CPU)
- _zzq_request request code
- _zzq_arg1..5 request params
-
- The other two macros are used to support function wrapping, and are
- a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
- guest's NRADDR pseudo-register and whatever other information is
- needed to safely run the call original from the wrapper: on
- ppc64-linux, the R2 value at the divert point is also needed. This
- information is abstracted into a user-visible type, OrigFn.
-
- VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
- guest, but guarantees that the branch instruction will not be
- redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
- branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
- complete inline asm, since it needs to be combined with more magic
- inline asm stuff to be useful.
-*/
-
-/* ------------------------- x86-{linux,darwin} ---------------- */
-
-#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
- || (defined(PLAT_x86_win32) && defined(__GNUC__))
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "roll $3, %%edi ; roll $13, %%edi\n\t" \
- "roll $29, %%edi ; roll $19, %%edi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EDX = client_request ( %EAX ) */ \
- "xchgl %%ebx,%%ebx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- "xchgl %%ecx,%%ecx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_EAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%EAX */ \
- "xchgl %%edx,%%edx\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "xchgl %%edi,%%edi\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
-
-#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
-
-/* ------------------------- x86-Win32 ------------------------- */
-
-#if defined(PLAT_x86_win32) && !defined(__GNUC__)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#if defined(_MSC_VER)
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- __asm rol edi, 3 __asm rol edi, 13 \
- __asm rol edi, 29 __asm rol edi, 19
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
- (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
- (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
- (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
-
-static __inline uintptr_t
-valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
- uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
- uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
- uintptr_t _zzq_arg5)
-{
- volatile uintptr_t _zzq_args[6];
- volatile unsigned int _zzq_result;
- _zzq_args[0] = (uintptr_t)(_zzq_request);
- _zzq_args[1] = (uintptr_t)(_zzq_arg1);
- _zzq_args[2] = (uintptr_t)(_zzq_arg2);
- _zzq_args[3] = (uintptr_t)(_zzq_arg3);
- _zzq_args[4] = (uintptr_t)(_zzq_arg4);
- _zzq_args[5] = (uintptr_t)(_zzq_arg5);
- __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
- __SPECIAL_INSTRUCTION_PREAMBLE
- /* %EDX = client_request ( %EAX ) */
- __asm xchg ebx,ebx
- __asm mov _zzq_result, edx
- }
- return _zzq_result;
-}
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- __asm xchg ecx,ecx \
- __asm mov __addr, eax \
- } \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_EAX ERROR
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
- __asm xchg edi,edi \
- } \
- } while (0)
-
-#else
-#error Unsupported compiler.
-#endif
-
-#endif /* PLAT_x86_win32 */
-
-/* -------------------- amd64-{linux,darwin,win64} ------------- */
-
-#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
- || defined(PLAT_amd64_win64)
-
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
- "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RDX = client_request ( %RAX ) */ \
- "xchgq %%rbx,%%rbx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RAX = guest_NRADDR */ \
- "xchgq %%rcx,%%rcx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_RAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%RAX */ \
- "xchgq %%rdx,%%rdx\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "xchgq %%rdi,%%rdi\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
-
-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned int _zzq_args[6]; \
- unsigned int _zzq_result; \
- unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or 5,5,5\n\t" \
- ); \
- } while (0)
-
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned long long int _zzq_args[6]; \
- unsigned long long int _zzq_result; \
- unsigned long long int* _zzq_ptr; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned long long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or 5,5,5\n\t" \
- ); \
- } while (0)
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------- arm-linux ------------------------- */
-
-#if defined(PLAT_arm_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
- "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile("mov r3, %1\n\t" /*default*/ \
- "mov r4, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = client_request ( R4 ) */ \
- "orr r10, r10, r10\n\t" \
- "mov %0, r3" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "cc","memory", "r3", "r4"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = guest_NRADDR */ \
- "orr r11, r11, r11\n\t" \
- "mov %0, r3" \
- : "=r" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R4 */ \
- "orr r12, r12, r12\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "orr r9, r9, r9\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
-
-#endif /* PLAT_arm_linux */
-
-/* ------------------------ s390x-linux ------------------------ */
-
-#if defined(PLAT_s390x_linux)
-
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- }
- OrigFn;
-
-/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
- * code. This detection is implemented in platform specific toIR.c
- * (e.g. VEX/priv/guest_s390_decoder.c).
- */
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "lr 15,15\n\t" \
- "lr 1,1\n\t" \
- "lr 2,2\n\t" \
- "lr 3,3\n\t"
-
-#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
-#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
-#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
-#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
- __asm__ volatile(/* r2 = args */ \
- "lgr 2,%1\n\t" \
- /* r3 = default */ \
- "lgr 3,%2\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CLIENT_REQUEST_CODE \
- /* results = r3 */ \
- "lgr %0, 3\n\t" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "2", "3", "memory" \
- ); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- __GET_NR_CONTEXT_CODE \
- "lgr %0, 3\n\t" \
- : "=a" (__addr) \
- : \
- : "cc", "3", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_R1 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CALL_NO_REDIR_CODE
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- __VEX_INJECT_IR_CODE); \
- } while (0)
-
-#endif /* PLAT_s390x_linux */
-
-/* ------------------------- mips32-linux ---------------- */
-
-#if defined(PLAT_mips32_linux)
-
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
-
-/* .word 0x342
- * .word 0x742
- * .word 0xC2
- * .word 0x4C2*/
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "srl $0, $0, 13\n\t" \
- "srl $0, $0, 29\n\t" \
- "srl $0, $0, 3\n\t" \
- "srl $0, $0, 19\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile("move $11, %1\n\t" /*default*/ \
- "move $12, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* T3 = client_request ( T4 ) */ \
- "or $13, $13, $13\n\t" \
- "move %0, $11\n\t" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "$11", "$12"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %t9 = guest_NRADDR */ \
- "or $14, $14, $14\n\t" \
- "move %0, $11" /*result*/ \
- : "=r" (__addr) \
- : \
- : "$11" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_T9 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%t9 */ \
- "or $15, $15, $15\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or $11, $11, $11\n\t" \
- ); \
- } while (0)
-
-
-#endif /* PLAT_mips32_linux */
-
-/* ------------------------- mips64-linux ---------------- */
-
-#if defined(PLAT_mips64_linux)
-
-typedef
- struct {
- unsigned long long nraddr; /* where's the code? */
- }
- OrigFn;
-
-/* dsll $0,$0, 3
- * dsll $0,$0, 13
- * dsll $0,$0, 29
- * dsll $0,$0, 19*/
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
- "dsll $0,$0,29 ; dsll $0,$0,19\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
- __asm__ volatile("move $11, %1\n\t" /*default*/ \
- "move $12, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 = client_request ( $12 ) */ \
- "or $13, $13, $13\n\t" \
- "move %0, $11\n\t" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "$11", "$12"); \
- _zzq_result; \
- })
-
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 = guest_NRADDR */ \
- "or $14, $14, $14\n\t" \
- "move %0, $11" /*result*/ \
- : "=r" (__addr) \
- : \
- : "$11"); \
- _zzq_orig->nraddr = __addr; \
- }
-
-#define VALGRIND_CALL_NOREDIR_T9 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir $25 */ \
- "or $15, $15, $15\n\t"
-
-#define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or $11, $11, $11\n\t" \
- ); \
- } while (0)
-
-#endif /* PLAT_mips64_linux */
-
-/* Insert assembly code for other platforms here... */
-
-#endif /* NVALGRIND */
-
-
-/* ------------------------------------------------------------------ */
-/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
-/* ugly. It's the least-worst tradeoff I can think of. */
-/* ------------------------------------------------------------------ */
-
-/* This section defines magic (a.k.a appalling-hack) macros for doing
- guaranteed-no-redirection macros, so as to get from function
- wrappers to the functions they are wrapping. The whole point is to
- construct standard call sequences, but to do the call itself with a
- special no-redirect call pseudo-instruction that the JIT
- understands and handles specially. This section is long and
- repetitious, and I can't see a way to make it shorter.
-
- The naming scheme is as follows:
-
- CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
-
- 'W' stands for "word" and 'v' for "void". Hence there are
- different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
- and for each, the possibility of returning a word-typed result, or
- no result.
-*/
-
-/* Use these to write the name of your wrapper. NOTE: duplicates
- VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
- the default behaviour equivalance class tag "0000" into the name.
- See pub_tool_redir.h for details -- normally you don't need to
- think about this, though. */
-
-/* Use an extra level of macroisation so as to ensure the soname/fnname
- args are fully macro-expanded before pasting them together. */
-#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
-
-#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
-
-#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
-
-/* Use this macro from within a wrapper function to collect the
- context (address and possibly other info) of the original function.
- Once you have that you can then use it in one of the CALL_FN_
- macros. The type of the argument _lval is OrigFn. */
-#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
-
-/* Also provide end-user facilities for function replacement, rather
- than wrapping. A replacement function differs from a wrapper in
- that it has no way to get hold of the original function being
- called, and hence no way to call onwards to it. In a replacement
- function, VALGRIND_GET_ORIG_FN always returns zero. */
-
-#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
- VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
-
-#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
- VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
-
-/* Derivatives of the main macros below, for calling functions
- returning void. */
-
-#define CALL_FN_v_v(fnptr) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_v(_junk,fnptr); } while (0)
-
-#define CALL_FN_v_W(fnptr, arg1) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
-
-#define CALL_FN_v_WW(fnptr, arg1,arg2) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
-
-#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-
-#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
-
-#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
-
-#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
-
-#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
-
-/* ------------------------- x86-{linux,darwin} ---------------- */
-
-#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
-
-/* These regs are trashed by the hidden call. No need to mention eax
- as gcc can already see that, plus causes gcc to bomb. */
-#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
-
-/* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
-
-#define VALGRIND_ALIGN_STACK \
- "movl %%esp,%%edi\n\t" \
- "andl $0xfffffff0,%%esp\n\t"
-#define VALGRIND_RESTORE_STACK \
- "movl %%edi,%%esp\n\t"
-
-/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 48(%%eax)\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_x86_linux || PLAT_x86_darwin */
-
-/* ------------------------ amd64-{linux,darwin} --------------- */
-
-#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
-
-/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
- "rdi", "r8", "r9", "r10", "r11"
-
-/* This is all pretty complex. It's so as to make stack unwinding
- work reliably. See bug 243270. The basic problem is the sub and
- add of 128 of %rsp in all of the following macros. If gcc believes
- the CFA is in %rsp, then unwinding may fail, because what's at the
- CFA is not what gcc "expected" when it constructs the CFIs for the
- places where the macros are instantiated.
-
- But we can't just add a CFI annotation to increase the CFA offset
- by 128, to match the sub of 128 from %rsp, because we don't know
- whether gcc has chosen %rsp as the CFA at that point, or whether it
- has chosen some other register (eg, %rbp). In the latter case,
- adding a CFI annotation to change the CFA offset is simply wrong.
-
- So the solution is to get hold of the CFA using
- __builtin_dwarf_cfa(), put it in a known register, and add a
- CFI annotation to say what the register is. We choose %rbp for
- this (perhaps perversely), because:
-
- (1) %rbp is already subject to unwinding. If a new register was
- chosen then the unwinder would have to unwind it in all stack
- traces, which is expensive, and
-
- (2) %rbp is already subject to precise exception updates in the
- JIT. If a new register was chosen, we'd have to have precise
- exceptions for it too, which reduces performance of the
- generated code.
-
- However .. one extra complication. We can't just whack the result
- of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
- list of trashed registers at the end of the inline assembly
- fragments; gcc won't allow %rbp to appear in that list. Hence
- instead we need to stash %rbp in %r15 for the duration of the asm,
- and say that %r15 is trashed instead. gcc seems happy to go with
- that.
-
- Oh .. and this all needs to be conditionalised so that it is
- unchanged from before this commit, when compiled with older gccs
- that don't support __builtin_dwarf_cfa. Furthermore, since
- this header file is freestanding, it has to be independent of
- config.h, and so the following conditionalisation cannot depend on
- configure time checks.
-
- Although it's not clear from
- 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
- this expression excludes Darwin.
- .cfi directives in Darwin assembly appear to be completely
- different and I haven't investigated how they work.
-
- For even more entertainment value, note we have to use the
- completely undocumented __builtin_dwarf_cfa(), which appears to
- really compute the CFA, whereas __builtin_frame_address(0) claims
- to but actually doesn't. See
- https://bugs.kde.org/show_bug.cgi?id=243270#c47
-*/
-#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
-# define __FRAME_POINTER \
- ,"r"(__builtin_dwarf_cfa())
-# define VALGRIND_CFI_PROLOGUE \
- "movq %%rbp, %%r15\n\t" \
- "movq %2, %%rbp\n\t" \
- ".cfi_remember_state\n\t" \
- ".cfi_def_cfa rbp, 0\n\t"
-# define VALGRIND_CFI_EPILOGUE \
- "movq %%r15, %%rbp\n\t" \
- ".cfi_restore_state\n\t"
-#else
-# define __FRAME_POINTER
-# define VALGRIND_CFI_PROLOGUE
-# define VALGRIND_CFI_EPILOGUE
-#endif
-
-/* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
-
-#define VALGRIND_ALIGN_STACK \
- "movq %%rsp,%%r14\n\t" \
- "andq $0xfffffffffffffff0,%%rsp\n\t"
-#define VALGRIND_RESTORE_STACK \
- "movq %%r14,%%rsp\n\t"
-
-/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
- long) == 8. */
-
-/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
- macros. In order not to trash the stack redzone, we need to drop
- %rsp by 128 before the hidden call, and restore afterwards. The
- nastyness is that it is only by luck that the stack still appears
- to be unwindable during the hidden call - since then the behaviour
- of any routine using this macro does not match what the CFI data
- says. Sigh.
-
- Why is this important? Imagine that a wrapper has a stack
- allocated local, and passes to the hidden call, a pointer to it.
- Because gcc does not know about the hidden call, it may allocate
- that local in the redzone. Unfortunately the hidden call may then
- trash it before it comes to use it. So we must step clear of the
- redzone, for the duration of the hidden call, to make it safe.
-
- Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux); but for those, the stack is
- self describing (none of this CFI nonsense) so at least messing
- with the stack pointer doesn't give a danger of non-unwindable
- stack. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-/* This is useful for finding out about the on-stack stuff:
-
- extern int f9 ( int,int,int,int,int,int,int,int,int );
- extern int f10 ( int,int,int,int,int,int,int,int,int,int );
- extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
- extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
-
- int g9 ( void ) {
- return f9(11,22,33,44,55,66,77,88,99);
- }
- int g10 ( void ) {
- return f10(11,22,33,44,55,66,77,88,99,110);
- }
- int g11 ( void ) {
- return f11(11,22,33,44,55,66,77,88,99,110,121);
- }
- int g12 ( void ) {
- return f12(11,22,33,44,55,66,77,88,99,110,121,132);
- }
-*/
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
-
-#define VALGRIND_ALIGN_STACK \
- "mr 28,1\n\t" \
- "rlwinm 1,1,0,0,27\n\t"
-#define VALGRIND_RESTORE_STACK \
- "mr 1,28\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-linux,
- sizeof(unsigned long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "addi 1,1,-32\n\t" \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,20(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,16(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- VALGRIND_RESTORE_STACK \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc32_linux */
-
-/* ------------------------ ppc64-linux ------------------------ */
-
-#if defined(PLAT_ppc64_linux)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
-
-#define VALGRIND_ALIGN_STACK \
- "mr 28,1\n\t" \
- "rldicr 1,1,0,59\n\t"
-#define VALGRIND_RESTORE_STACK \
- "mr 1,28\n\t"
-
-/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
- long) == 8. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 11,%1\n\t" \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_ppc64_linux */
-
-/* ------------------------- arm-linux ------------------------- */
-
-#if defined(PLAT_arm_linux)
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
-
-/* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
-
-/* This is a bit tricky. We store the original stack pointer in r10
- as it is callee-saves. gcc doesn't allow the use of r11 for some
- reason. Also, we can't directly "bic" the stack pointer in thumb
- mode since r13 isn't an allowed register number in that context.
- So use r4 as a temporary, since that is about to get trashed
- anyway, just after each use of this macro. Side effect is we need
- to be very careful about any future changes, since
- VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
-#define VALGRIND_ALIGN_STACK \
- "mov r10, sp\n\t" \
- "mov r4, sp\n\t" \
- "bic r4, r4, #7\n\t" \
- "mov sp, r4\n\t"
-#define VALGRIND_RESTORE_STACK \
- "mov sp, r10\n\t"
-
-/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #4 \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "push {r0} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "push {r0, r1} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #4 \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "push {r0, r1, r2} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "push {r0, r1, r2, r3} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #4 \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #40] \n\t" \
- "push {r0} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #4 \n\t" \
- "ldr r0, [%1, #40] \n\t" \
- "ldr r1, [%1, #44] \n\t" \
- "push {r0, r1} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr r0, [%1, #40] \n\t" \
- "ldr r1, [%1, #44] \n\t" \
- "ldr r2, [%1, #48] \n\t" \
- "push {r0, r1, r2} \n\t" \
- "ldr r0, [%1, #20] \n\t" \
- "ldr r1, [%1, #24] \n\t" \
- "ldr r2, [%1, #28] \n\t" \
- "ldr r3, [%1, #32] \n\t" \
- "ldr r4, [%1, #36] \n\t" \
- "push {r0, r1, r2, r3, r4} \n\t" \
- "ldr r0, [%1, #4] \n\t" \
- "ldr r1, [%1, #8] \n\t" \
- "ldr r2, [%1, #12] \n\t" \
- "ldr r3, [%1, #16] \n\t" \
- "ldr r4, [%1] \n\t" /* target->r4 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- VALGRIND_RESTORE_STACK \
- "mov %0, r0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_arm_linux */
-
-/* ------------------------- s390x-linux ------------------------- */
-
-#if defined(PLAT_s390x_linux)
-
-/* Similar workaround as amd64 (see above), but we use r11 as frame
- pointer and save the old r11 in r7. r11 might be used for
- argvec, therefore we copy argvec in r1 since r1 is clobbered
- after the call anyway. */
-#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
-# define __FRAME_POINTER \
- ,"d"(__builtin_dwarf_cfa())
-# define VALGRIND_CFI_PROLOGUE \
- ".cfi_remember_state\n\t" \
- "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
- "lgr 7,11\n\t" \
- "lgr 11,%2\n\t" \
- ".cfi_def_cfa r11, 0\n\t"
-# define VALGRIND_CFI_EPILOGUE \
- "lgr 11, 7\n\t" \
- ".cfi_restore_state\n\t"
-#else
-# define __FRAME_POINTER
-# define VALGRIND_CFI_PROLOGUE \
- "lgr 1,%1\n\t"
-# define VALGRIND_CFI_EPILOGUE
-#endif
-
-/* Nb: On s390 the stack pointer is properly aligned *at all times*
- according to the s390 GCC maintainer. (The ABI specification is not
- precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
- VALGRIND_RESTORE_STACK are not defined here. */
-
-/* These regs are trashed by the hidden call. Note that we overwrite
- r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
- function a proper return address. All others are ABI defined call
- clobbers. */
-#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
- "f0","f1","f2","f3","f4","f5","f6","f7"
-
-/* Nb: Although r11 is modified in the asm snippets below (inside
- VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
- two reasons:
- (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
- modified
- (2) GCC will complain that r11 cannot appear inside a clobber section,
- when compiled with -O -fno-omit-frame-pointer
- */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 1, 0(1)\n\t" /* target->r1 */ \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-/* The call abi has the arguments in r2-r6 and stack */
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-168\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,168\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-176\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,176\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-184\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,184\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-192\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,192\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-200\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,200\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-208\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,208\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-216\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "mvc 208(8,15), 96(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,216\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-
-#endif /* PLAT_s390x_linux */
-
-/* ------------------------- mips32-linux ----------------------- */
-
-#if defined(PLAT_mips32_linux)
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
-"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-"$25", "$31"
-
-/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16\n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" /* arg1*/ \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 24\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 24 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 32\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "nop\n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 32 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 32\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 32 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 40\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 40 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 40\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 40 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 48\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 40(%1) \n\t" \
- "sw $4, 36($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 48 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 48\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 40(%1) \n\t" \
- "sw $4, 36($29) \n\t" \
- "lw $4, 44(%1) \n\t" \
- "sw $4, 40($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 48 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 56\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 40(%1) \n\t" \
- "sw $4, 36($29) \n\t" \
- "lw $4, 44(%1) \n\t" \
- "sw $4, 40($29) \n\t" \
- "lw $4, 48(%1) \n\t" \
- "sw $4, 44($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 56 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_mips32_linux */
-
-/* ------------------------- mips64-linux ------------------------- */
-
-#if defined(PLAT_mips64_linux)
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
-"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-"$25", "$31"
-
-/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
- long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" /* arg1*/ \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "dsubu $29, $29, 8\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 8\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "dsubu $29, $29, 16\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 16\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "dsubu $29, $29, 24\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 88(%1)\n\t" \
- "sd $4, 16($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 24\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- "dsubu $29, $29, 32\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 88(%1)\n\t" \
- "sd $4, 16($29)\n\t" \
- "ld $4, 96(%1)\n\t" \
- "sd $4, 24($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 32\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_mips64_linux */
-
-
-/* ------------------------------------------------------------------ */
-/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
-/* */
-/* ------------------------------------------------------------------ */
-
-/* Some request codes. There are many more of these, but most are not
- exposed to end-user view. These are the public ones, all of the
- form 0x1000 + small_number.
-
- Core ones are in the range 0x00000000--0x0000ffff. The non-public
- ones start at 0x2000.
-*/
-
-/* These macros are used by tools -- they must be public, but don't
- embed them into other programs. */
-#define VG_USERREQ_TOOL_BASE(a,b) \
- ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
-#define VG_IS_TOOL_USERREQ(a, b, v) \
- (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
- This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
- VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
-
- /* These allow any function to be called from the simulated
- CPU but run on the real CPU. Nb: the first arg passed to
- the function is always the ThreadId of the running
- thread! So CLIENT_CALL0 actually requires a 1 arg
- function, etc. */
- VG_USERREQ__CLIENT_CALL0 = 0x1101,
- VG_USERREQ__CLIENT_CALL1 = 0x1102,
- VG_USERREQ__CLIENT_CALL2 = 0x1103,
- VG_USERREQ__CLIENT_CALL3 = 0x1104,
-
- /* Can be useful in regression testing suites -- eg. can
- send Valgrind's output to /dev/null and still count
- errors. */
- VG_USERREQ__COUNT_ERRORS = 0x1201,
-
- /* Allows the client program and/or gdbserver to execute a monitor
- command. */
- VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
-
- /* These are useful and can be interpreted by any tool that
- tracks malloc() et al, by using vg_replace_malloc.c. */
- VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
- VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
- VG_USERREQ__FREELIKE_BLOCK = 0x1302,
- /* Memory pool support. */
- VG_USERREQ__CREATE_MEMPOOL = 0x1303,
- VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
- VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
- VG_USERREQ__MEMPOOL_FREE = 0x1306,
- VG_USERREQ__MEMPOOL_TRIM = 0x1307,
- VG_USERREQ__MOVE_MEMPOOL = 0x1308,
- VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
- VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
-
- /* Allow printfs to valgrind log. */
- /* The first two pass the va_list argument by value, which
- assumes it is the same size as or smaller than a UWord,
- which generally isn't the case. Hence are deprecated.
- The second two pass the vargs by reference and so are
- immune to this problem. */
- /* both :: char* fmt, va_list vargs (DEPRECATED) */
- VG_USERREQ__PRINTF = 0x1401,
- VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
- /* both :: char* fmt, va_list* vargs */
- VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
-
- /* Stack support. */
- VG_USERREQ__STACK_REGISTER = 0x1501,
- VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503,
-
- /* Wine support */
- VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
-
- /* Querying of debug info. */
- VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
-
- /* Disable/enable error reporting level. Takes a single
- Word arg which is the delta to this thread's error
- disablement indicator. Hence 1 disables or further
- disables errors, and -1 moves back towards enablement.
- Other values are not allowed. */
- VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
-
- /* Initialise IR injection */
- VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901
- } Vg_ClientRequest;
-
-#if !defined(__GNUC__)
-# define __extension__ /* */
-#endif
-
-
-/* Returns the number of Valgrinds this code is running under. That
- is, 0 if running natively, 1 if running under Valgrind, 2 if
- running under Valgrind which is running under another Valgrind,
- etc. */
-#define RUNNING_ON_VALGRIND \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0) \
-
-
-/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
- _qzz_len - 1]. Useful if you are debugging a JITter or some such,
- since it provides a way to make sure valgrind will retranslate the
- invalidated area. Returns no value. */
-#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0)
-
-
-/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. The return value
- is the number of characters printed, excluding the "**<pid>** " part at the
- start and the backtrace (if present). */
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
-/* Modern GCC will optimize the static routine out if unused,
- and unused attribute will shut down warnings about it. */
-static int VALGRIND_PRINTF(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-#endif
-static int
-#if defined(_MSC_VER)
-__inline
-#endif
-VALGRIND_PRINTF(const char *format, ...)
-{
-#if defined(NVALGRIND)
- return 0;
-#else /* NVALGRIND */
-#if defined(_MSC_VER) || defined(__MINGW64__)
- uintptr_t _qzz_res;
-#else
- unsigned long _qzz_res;
-#endif
- va_list vargs;
- va_start(vargs, format);
-#if defined(_MSC_VER) || defined(__MINGW64__)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
-#else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
-#endif
- va_end(vargs);
- return (int)_qzz_res;
-#endif /* NVALGRIND */
-}
-
-#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
-static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
-#endif
-static int
-#if defined(_MSC_VER)
-__inline
-#endif
-VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
-{
-#if defined(NVALGRIND)
- return 0;
-#else /* NVALGRIND */
-#if defined(_MSC_VER) || defined(__MINGW64__)
- uintptr_t _qzz_res;
-#else
- unsigned long _qzz_res;
-#endif
- va_list vargs;
- va_start(vargs, format);
-#if defined(_MSC_VER) || defined(__MINGW64__)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
-#else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
-#endif
- va_end(vargs);
- return (int)_qzz_res;
-#endif /* NVALGRIND */
-}
-
-
-/* These requests allow control to move from the simulated CPU to the
- real CPU, calling an arbitrary function.
-
- Note that the current ThreadId is inserted as the first argument.
- So this call:
-
- VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
-
- requires f to have this signature:
-
- Word f(Word tid, Word arg1, Word arg2)
-
- where "Word" is a word-sized type.
-
- Note that these client requests are not entirely reliable. For example,
- if you call a function with them that subsequently calls printf(),
- there's a high chance Valgrind will crash. Generally, your prospects of
- these working are made higher if the called function does not refer to
- any global variables, and does not refer to any libc or other functions
- (printf et al). Any kind of entanglement with libc or dynamic linking is
- likely to have a bad outcome, for tricky reasons which we've grappled
- with a lot in the past.
-*/
-#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0)
-
-#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0)
-
-
-/* Counts the number of errors that have been recorded by a tool. Nb:
- the tool must record the errors with VG_(maybe_record_error)() or
- VG_(unique_error)() for them to be counted. */
-#define VALGRIND_COUNT_ERRORS \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- 0 /* default return */, \
- VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0)
-
-/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
- when heap blocks are allocated in order to give accurate results. This
- happens automatically for the standard allocator functions such as
- malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
- delete[], etc.
-
- But if your program uses a custom allocator, this doesn't automatically
- happen, and Valgrind will not do as well. For example, if you allocate
- superblocks with mmap() and then allocates chunks of the superblocks, all
- Valgrind's observations will be at the mmap() level and it won't know that
- the chunks should be considered separate entities. In Memcheck's case,
- that means you probably won't get heap block overrun detection (because
- there won't be redzones marked as unaddressable) and you definitely won't
- get any leak detection.
-
- The following client requests allow a custom allocator to be annotated so
- that it can be handled accurately by Valgrind.
-
- VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
- by a malloc()-like function. For Memcheck (an illustrative case), this
- does two things:
-
- - It records that the block has been allocated. This means any addresses
- within the block mentioned in error messages will be
- identified as belonging to the block. It also means that if the block
- isn't freed it will be detected by the leak checker.
-
- - It marks the block as being addressable and undefined (if 'is_zeroed' is
- not set), or addressable and defined (if 'is_zeroed' is set). This
- controls how accesses to the block by the program are handled.
-
- 'addr' is the start of the usable block (ie. after any
- redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
- can apply redzones -- these are blocks of padding at the start and end of
- each block. Adding redzones is recommended as it makes it much more likely
- Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
- zeroed (or filled with another predictable value), as is the case for
- calloc().
-
- VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
- heap block -- that will be used by the client program -- is allocated.
- It's best to put it at the outermost level of the allocator if possible;
- for example, if you have a function my_alloc() which calls
- internal_alloc(), and the client request is put inside internal_alloc(),
- stack traces relating to the heap block will contain entries for both
- my_alloc() and internal_alloc(), which is probably not what you want.
-
- For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
- custom blocks from within a heap block, B, that has been allocated with
- malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
- -- the custom blocks will take precedence.
-
- VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
- Memcheck, it does two things:
-
- - It records that the block has been deallocated. This assumes that the
- block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
-
- - It marks the block as being unaddressable.
-
- VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
- heap block is deallocated.
-
- VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
- Memcheck, it does four things:
-
- - It records that the size of a block has been changed. This assumes that
- the block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
-
- - If the block shrunk, it marks the freed memory as being unaddressable.
-
- - If the block grew, it marks the new area as undefined and defines a red
- zone past the end of the new block.
-
- - The V-bits of the overlap between the old and the new block are preserved.
-
- VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
- and before deallocation of the old block.
-
- In many cases, these three client requests will not be enough to get your
- allocator working well with Memcheck. More specifically, if your allocator
- writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
- will be necessary to mark the memory as addressable just before the zeroing
- occurs, otherwise you'll get a lot of invalid write errors. For example,
- you'll need to do this if your allocator recycles freed blocks, but it
- zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
- Alternatively, if your allocator reuses freed blocks for allocator-internal
- data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
-
- Really, what's happening is a blurring of the lines between the client
- program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
- memory should be considered unaddressable to the client program, but the
- allocator knows more than the rest of the client program and so may be able
- to safely access it. Extra client requests are necessary for Valgrind to
- understand the distinction between the allocator and the rest of the
- program.
-
- Ignored if addr == 0.
-*/
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0)
-
-/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
-*/
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \
- addr, oldSizeB, newSizeB, rzB, 0)
-
-/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
-*/
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0)
-
-/* Create a memory pool. */
-#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0)
-
-/* Destroy a memory pool. */
-#define VALGRIND_DESTROY_MEMPOOL(pool) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0)
-
-/* Associate a piece of memory with a memory pool. */
-#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0)
-
-/* Disassociate a piece of memory from a memory pool. */
-#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0)
-
-/* Disassociate any pieces outside a particular range. */
-#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0)
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0)
-
-/* Resize and/or move a piece associated with a memory pool. */
-#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0)
-
-/* Return 1 if a mempool exists, else 0. */
-#define VALGRIND_MEMPOOL_EXISTS(pool) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0)
-
-/* Mark a piece of memory as being a stack. Returns a stack id. */
-#define VALGRIND_STACK_REGISTER(start, end) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0)
-
-/* Unmark the piece of memory associated with a stack id as being a
- stack. */
-#define VALGRIND_STACK_DEREGISTER(id) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0)
-
-/* Change the start and end address of the stack id. */
-#define VALGRIND_STACK_CHANGE(id, start, end) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0)
-
-/* Load PDB debug info for Wine PE image_map. */
-#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
- fd, ptr, total_size, delta, 0)
-
-/* Map a code address to a source file name and line number. buf64
- must point to a 64-byte buffer in the caller's address space. The
- result will be dumped in there and is guaranteed to be zero
- terminated. If no info is found, the first byte is set to zero. */
-#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MAP_IP_TO_SRCLOC, \
- addr, buf64, 0, 0, 0)
-
-/* Disable error reporting for this thread. Behaves in a stack like
- way, so you can safely call this multiple times provided that
- VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
- to re-enable reporting. The first call of this macro disables
- reporting. Subsequent calls have no effect except to increase the
- number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
- reporting. Child threads do not inherit this setting from their
- parents -- they are always created with reporting enabled. */
-#define VALGRIND_DISABLE_ERROR_REPORTING \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
- 1, 0, 0, 0, 0)
-
-/* Re-enable error reporting, as per comments on
- VALGRIND_DISABLE_ERROR_REPORTING. */
-#define VALGRIND_ENABLE_ERROR_REPORTING \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
- -1, 0, 0, 0, 0)
-
-/* Execute a monitor command from the client program.
- If a connection is opened with GDB, the output will be sent
- according to the output mode set for vgdb.
- If no connection is opened, output will go to the log output.
- Returns 1 if command not recognised, 0 otherwise. */
-#define VALGRIND_MONITOR_COMMAND(command) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
- command, 0, 0, 0, 0)
-
-
-#undef PLAT_x86_darwin
-#undef PLAT_amd64_darwin
-#undef PLAT_x86_win32
-#undef PLAT_amd64_win64
-#undef PLAT_x86_linux
-#undef PLAT_amd64_linux
-#undef PLAT_ppc32_linux
-#undef PLAT_ppc64_linux
-#undef PLAT_arm_linux
-#undef PLAT_s390x_linux
-#undef PLAT_mips32_linux
-#undef PLAT_mips64_linux
-
-#endif /* __VALGRIND_H */
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
--- /dev/null
+Copyright (c) 2015 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
installs/uninstalls.
* Fix argument handling for spaces.
* Add --bindir.
+
+# License
+
+This software is distributed under the terms of both the MIT license
+and the Apache License (Version 2.0), at your option.
+
+See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details.
_ostype=unknown-bitrig
;;
+ NetBSD)
+ _ostype=unknown-netbsd
+ ;;
+
OpenBSD)
_ostype=unknown-openbsd
;;
verbose_msg "removing manifest directory $_md"
run rm -r "$_md"
want_ok "failed to remove $_md"
+
+ maybe_unconfigure_ld
fi
fi
done
}
-maybe_run_ldconfig() {
+maybe_configure_ld() {
+ local _abs_libdir="$1"
+
get_host_triple
local _ostype="$RETVAL"
assert_nz "$_ostype" "ostype"
if [ "$_ostype" = "unknown-linux-gnu" -a ! -n "${CFG_DISABLE_LDCONFIG-}" ]; then
+
+ # Fedora-based systems do not configure the dynamic linker to look in
+ # /usr/local/lib, which is our default installation directory. To
+ # make things just work, try to put that directory in
+ # /etc/ld.so.conf.d/rust-installer-v1 so ldconfig picks it up.
+ # Issue #30.
+ #
+ # This will get rm'd when the last component is uninstalled in
+ # maybe_unconfigure_ld.
+ if [ "$_abs_libdir" = "/usr/local/lib" -a -d "/etc/ld.so.conf.d" ]; then
+ echo "$_abs_libdir" > "/etc/ld.so.conf.d/rust-installer-v1-$TEMPLATE_REL_MANIFEST_DIR.conf"
+ if [ $? -ne 0 ]; then
+ # This shouldn't happen if we've gotten this far
+ # installing to /usr/local
+ warn "failed to update /etc/ld.so.conf.d. this is unexpected"
+ fi
+ fi
+
verbose_msg "running ldconfig"
if [ -n "${CFG_VERBOSE-}" ]; then
ldconfig
fi
}
+maybe_unconfigure_ld() {
+ get_host_triple
+ local _ostype="$RETVAL"
+ assert_nz "$_ostype" "ostype"
+
+ if [ "$_ostype" != "unknown-linux-gnu" ]; then
+ return 0
+ fi
+
+ rm "/etc/ld.so.conf.d/rust-installer-v1-$TEMPLATE_REL_MANIFEST_DIR.conf" 2> /dev/null
+ # Above may fail since that file may not have been created on install
+}
+
# Doing our own 'install'-like backup that is consistent across platforms
maybe_backup_path() {
local _file_install_path="$1"
# Install each component
install_components "$src_dir" "$abs_libdir" "$dest_prefix" "$components"
-# Run ldconfig to make dynamic libraries available to the linker
-maybe_run_ldconfig
+# Make dynamic libraries available to the linker
+maybe_configure_ld "$abs_libdir"
echo
echo " $TEMPLATE_SUCCESS_MESSAGE"
use error::{err, CliResult, CommandResult};
use book;
use book::{Book, BookItem};
-use css;
+
use javascript;
use rustdoc;
}
try!(fs::create_dir(&tgt));
- try!(File::create(&tgt.join("rust-book.css")).and_then(|mut f| {
- f.write_all(css::STYLE.as_bytes())
- }));
+ // Copy static files
+ let css = include_bytes!("static/rustbook.css");
+ let js = include_bytes!("static/rustbook.js");
+
+ let mut css_file = try!(File::create(tgt.join("rust-book.css")));
+ try!(css_file.write_all(css));
+
+ let mut js_file = try!(File::create(tgt.join("rust-book.js")));
+ try!(js_file.write_all(js));
+
let mut summary = try!(File::open(&src.join("SUMMARY.md")));
match book::parse_summary(&mut summary, &src) {
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// The rust-book CSS in string form.
-
-pub static STYLE: &'static str = r#"
-@import url("../rust.css");
-
-body {
- max-width:none;
-}
-
-@media only screen {
- #toc {
- position: absolute;
- left: 0px;
- top: 0px;
- bottom: 0px;
- width: 250px;
- overflow-y: auto;
- border-right: 1px solid rgba(0, 0, 0, 0.07);
- padding: 10px 10px;
- font-size: 16px;
- background: none repeat scroll 0% 0% #FFF;
- box-sizing: border-box;
- -webkit-overflow-scrolling: touch;
- }
-
- #page-wrapper {
- position: absolute;
- overflow-y: auto;
- left: 260px;
- right: 0px;
- top: 0px;
- bottom: 0px;
- box-sizing: border-box;
- background: none repeat scroll 0% 0% #FFF;
- -webkit-overflow-scrolling: touch;
- }
-}
-
-@media only print {
- #toc, #nav {
- display: none;
- }
-}
-
-@media only screen and (max-width: 1060px) {
- #toc {
- width: 100%;
- margin-right: 0;
- top: 40px;
- }
- #page-wrapper {
- top: 40px;
- left: 15px;
- padding-right: 15px;
- }
- .mobile-hidden {
- display: none;
- }
-}
-
-#page {
- margin-left: auto;
- margin-right:auto;
- max-width: 750px;
- padding-bottom: 50px;
-}
-
-.chapter {
- list-style: none outside none;
- padding-left: 0px;
- line-height: 30px;
-}
-
-.section {
- list-style: none outside none;
- padding-left: 20px;
- line-height: 30px;
-}
-
-.section li {
- text-overflow: ellipsis;
- overflow: hidden;
- white-space: nowrap;
-}
-
-.chapter li a {
- color: #000000;
-}
-
-.chapter li a.active {
- text-decoration: underline;
- font-weight: bold;
-}
-
-#toggle-nav {
- height: 20px;
- width: 30px;
- padding: 3px 3px 0 3px;
-}
-
-#toggle-nav {
- margin-top: 5px;
- width: 30px;
- height: 30px;
- background-color: #FFF;
- border: 1px solid #666;
- border-radius: 3px 3px 3px 3px;
-}
-
-.sr-only {
- position: absolute;
- width: 1px;
- height: 1px;
- margin: -1px;
- padding: 0;
- overflow: hidden;
- clip: rect(0, 0, 0, 0);
- border: 0;
-}
-
-.bar {
- display: block;
- background-color: #000;
- border-radius: 2px;
- width: 100%;
- height: 2px;
- margin: 2px 0 3px;
- padding: 0;
-}
-
-.left {
- float: left;
-}
-
-.right {
- float: right;
-}
-"#;
// The rust-book JavaScript in string form.
pub static JAVASCRIPT: &'static str = r#"
-<script type="text/javascript">
-document.addEventListener("DOMContentLoaded", function(event) {
- document.getElementById("toggle-nav").onclick = toggleNav;
- function toggleNav() {
- var toc = document.getElementById("toc");
- var pagewrapper = document.getElementById("page-wrapper");
- toggleClass(toc, "mobile-hidden");
- toggleClass(pagewrapper, "mobile-hidden");
- };
-
- function toggleClass(el, className) {
- // from http://youmightnotneedjquery.com/
- if (el.classList) {
- el.classList.toggle(className);
- } else {
- var classes = el.className.split(' ');
- var existingIndex = classes.indexOf(className);
-
- if (existingIndex >= 0) {
- classes.splice(existingIndex, 1);
- } else {
- classes.push(className);
- }
-
- el.className = classes.join(' ');
- }
- }
-
- // The below code is used to add prev and next navigation links to the bottom
- // of each of the sections.
- // It works by extracting the current page based on the url and iterates over
- // the menu links until it finds the menu item for the current page. We then
- // create a copy of the preceding and following menu links and add the
- // correct css class and insert them into the bottom of the page.
- var toc = document.getElementById('toc').getElementsByTagName('a');
- var href = document.location.pathname.split('/').pop();
- if (href === 'index.html' || href === '') {
- href = 'README.html';
- }
-
- for (var i = 0; i < toc.length; i++) {
- if (toc[i].attributes['href'].value.split('/').pop() === href) {
- var nav = document.createElement('p');
- if (i > 0) {
- var prevNode = toc[i-1].cloneNode(true);
- prevNode.className = 'left';
- nav.appendChild(prevNode);
- }
- if (i < toc.length - 1) {
- var nextNode = toc[i+1].cloneNode(true);
- nextNode.className = 'right';
- nav.appendChild(nextNode);
- }
- document.getElementById('page').appendChild(nav);
- break;
- }
- }
-
-});
-</script>
+<script type="text/javascript" src="rust-book.js"></script>
<script type="text/javascript" src="playpen.js"></script>
"#;
mod serve;
mod test;
-mod css;
mod javascript;
static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
--- /dev/null
+/**
+ * Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+ * file at the top-level directory of this distribution and at
+ * http://rust-lang.org/COPYRIGHT.
+ *
+ * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+ * http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+ * <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+ * option. This file may not be copied, modified, or distributed
+ * except according to those terms.
+ */
+
+@import url("../rust.css");
+
+body {
+ max-width:none;
+ font: 16px/1.4 'Source Serif Pro', Georgia, Times, 'Times New Roman', serif;
+ line-height: 1.6;
+ color: #333;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ font-family: 'Open Sans', 'Fira Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
+ font-weight: bold;
+ color: #333;
+}
+
+@media only screen {
+ #toc {
+ position: absolute;
+ left: 0px;
+ top: 0px;
+ bottom: 0px;
+ width: 300px;
+ overflow-y: auto;
+ border-right: 1px solid rgba(0, 0, 0, 0.07);
+ padding: 10px 10px;
+ font-size: 14px;
+ box-sizing: border-box;
+ -webkit-overflow-scrolling: touch;
+ background-color: #fafafa;
+ color: #364149;
+ }
+
+ #page-wrapper {
+ position: absolute;
+ overflow-y: auto;
+ left: 310px;
+ right: 0px;
+ top: 0px;
+ bottom: 0px;
+ box-sizing: border-box;
+ background: none repeat scroll 0% 0% #FFF;
+ -webkit-overflow-scrolling: touch;
+ }
+}
+
+@media only print {
+ #toc, #nav, #menu-bar {
+ display: none;
+ }
+}
+
+@media only screen and (max-width: 1060px) {
+ #toc {
+ width: 100%;
+ margin-right: 0;
+ top: 40px;
+ }
+ #page-wrapper {
+ top: 40px;
+ left: 15px;
+ padding-right: 15px;
+ }
+ .mobile-hidden {
+ display: none;
+ }
+}
+
+#page {
+ margin-left: auto;
+ margin-right:auto;
+ max-width: 750px;
+ padding-bottom: 50px;
+}
+
+.chapter {
+ list-style: none outside none;
+ padding-left: 0px;
+ line-height: 30px;
+}
+
+.section {
+ list-style: none outside none;
+ padding-left: 20px;
+ line-height: 40px;
+}
+
+.section li {
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+}
+
+.chapter li a {
+ color: #333;
+ padding: 5px 0;
+}
+
+.chapter li a.active {
+ color: #008cff;
+}
+
+.chapter li a:hover {
+ color: #008cff;
+ text-decoration: none;
+}
+
+#toggle-nav {
+ height: 20px;
+ width: 30px;
+ padding: 3px 3px 0 3px;
+}
+
+#toggle-nav {
+ margin-top: 5px;
+ width: 30px;
+ height: 30px;
+ background-color: #FFF;
+ border: 1px solid #666;
+ border-radius: 3px 3px 3px 3px;
+}
+
+.sr-only {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ margin: -1px;
+ padding: 0;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ border: 0;
+}
+
+.bar {
+ display: block;
+ background-color: #000;
+ border-radius: 2px;
+ width: 100%;
+ height: 2px;
+ margin: 2px 0 3px;
+ padding: 0;
+}
+
+pre {
+ padding: 16px;
+ overflow: auto;
+ font-size: 85%;
+ line-height: 1.45;
+ background-color: #f7f7f7;
+ border: 0;
+ border-radius: 3px;
+}
+
+.nav-previous-next {
+ margin-top: 60px;
+}
+
+.left {
+ float: left;
+}
+
+.right {
+ float: right;
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+document.addEventListener("DOMContentLoaded", function(event) {
+
+ document.getElementById("toggle-nav").onclick = toggleNav;
+
+ function toggleNav() {
+ var toc = document.getElementById("toc");
+ var pagewrapper = document.getElementById("page-wrapper");
+ toggleClass(toc, "mobile-hidden");
+ toggleClass(pagewrapper, "mobile-hidden");
+ }
+
+ function toggleClass(el, className) {
+ // from http://youmightnotneedjquery.com/
+ if (el.classList) {
+ el.classList.toggle(className);
+ } else {
+ var classes = el.className.split(' ');
+ var existingIndex = classes.indexOf(className);
+
+ if (existingIndex >= 0) {
+ classes.splice(existingIndex, 1);
+ } else {
+ classes.push(className);
+ }
+
+ el.className = classes.join(' ');
+ }
+ }
+
+ // The below code is used to add prev and next navigation links to the bottom
+ // of each of the sections.
+ // It works by extracting the current page based on the url and iterates over
+ // the menu links until it finds the menu item for the current page. We then
+ // create a copy of the preceding and following menu links and add the
+ // correct css class and insert them into the bottom of the page.
+ var toc = document.getElementById('toc').getElementsByTagName('a');
+ var href = document.location.pathname.split('/').pop();
+ if (href === 'index.html' || href === '') {
+ href = 'README.html';
+ }
+
+ for (var i = 0; i < toc.length; i++) {
+ if (toc[i].attributes.href.value.split('/').pop() === href) {
+ var nav = document.createElement('p');
+ if (i > 0) {
+ var prevNode = toc[i-1].cloneNode(true);
+ prevNode.className = 'left';
+ prevNode.setAttribute('rel', 'prev');
+ nav.appendChild(prevNode);
+ }
+ if (i < toc.length - 1) {
+ var nextNode = toc[i+1].cloneNode(true);
+ nextNode.className = 'right';
+ nextNode.setAttribute('rel', 'next');
+ nav.appendChild(nextNode);
+ }
+ document.getElementById('page').appendChild(nav);
+ break;
+ }
+ }
+
+});
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#include "rustllvm.h"
+
+#include "llvm/Object/Archive.h"
+
+#if LLVM_VERSION_MINOR >= 7
+#include "llvm/Object/ArchiveWriter.h"
+#endif
+
+using namespace llvm;
+using namespace llvm::object;
+
+struct LLVMRustArchiveMember {
+ const char *filename;
+ const char *name;
+ Archive::Child child;
+
+ LLVMRustArchiveMember(): filename(NULL), name(NULL), child(NULL, NULL) {}
+ ~LLVMRustArchiveMember() {}
+};
+
+#if LLVM_VERSION_MINOR >= 6
+typedef OwningBinary<Archive> RustArchive;
+#define GET_ARCHIVE(a) ((a)->getBinary())
+#else
+typedef Archive RustArchive;
+#define GET_ARCHIVE(a) (a)
+#endif
+
+extern "C" void*
+LLVMRustOpenArchive(char *path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> buf_or = MemoryBuffer::getFile(path,
+ -1,
+ false);
+ if (!buf_or) {
+ LLVMRustSetLastError(buf_or.getError().message().c_str());
+ return nullptr;
+ }
+
+#if LLVM_VERSION_MINOR >= 6
+ ErrorOr<std::unique_ptr<Archive>> archive_or =
+ Archive::create(buf_or.get()->getMemBufferRef());
+
+ if (!archive_or) {
+ LLVMRustSetLastError(archive_or.getError().message().c_str());
+ return nullptr;
+ }
+
+ OwningBinary<Archive> *ret = new OwningBinary<Archive>(
+ std::move(archive_or.get()), std::move(buf_or.get()));
+#else
+ std::error_code err;
+ Archive *ret = new Archive(std::move(buf_or.get()), err);
+ if (err) {
+ LLVMRustSetLastError(err.message().c_str());
+ return nullptr;
+ }
+#endif
+
+ return ret;
+}
+
+extern "C" void
+LLVMRustDestroyArchive(RustArchive *ar) {
+ delete ar;
+}
+
+struct RustArchiveIterator {
+ Archive::child_iterator cur;
+ Archive::child_iterator end;
+};
+
+extern "C" RustArchiveIterator*
+LLVMRustArchiveIteratorNew(RustArchive *ra) {
+ Archive *ar = GET_ARCHIVE(ra);
+ RustArchiveIterator *rai = new RustArchiveIterator();
+ rai->cur = ar->child_begin();
+ rai->end = ar->child_end();
+ return rai;
+}
+
+extern "C" const Archive::Child*
+LLVMRustArchiveIteratorNext(RustArchiveIterator *rai) {
+ if (rai->cur == rai->end)
+ return NULL;
+ const Archive::Child *cur = rai->cur.operator->();
+ Archive::Child *ret = new Archive::Child(*cur);
+ ++rai->cur;
+ return ret;
+}
+
+extern "C" void
+LLVMRustArchiveChildFree(Archive::Child *child) {
+ delete child;
+}
+
+extern "C" void
+LLVMRustArchiveIteratorFree(RustArchiveIterator *rai) {
+ delete rai;
+}
+
+extern "C" const char*
+LLVMRustArchiveChildName(const Archive::Child *child, size_t *size) {
+ ErrorOr<StringRef> name_or_err = child->getName();
+ if (name_or_err.getError())
+ return NULL;
+ StringRef name = name_or_err.get();
+ *size = name.size();
+ return name.data();
+}
+
+extern "C" const char*
+LLVMRustArchiveChildData(Archive::Child *child, size_t *size) {
+ StringRef buf;
+#if LLVM_VERSION_MINOR >= 7
+ ErrorOr<StringRef> buf_or_err = child->getBuffer();
+ if (buf_or_err.getError()) {
+ LLVMRustSetLastError(buf_or_err.getError().message().c_str());
+ return NULL;
+ }
+ buf = buf_or_err.get();
+#else
+ buf = child->getBuffer();
+#endif
+ *size = buf.size();
+ return buf.data();
+}
+
+extern "C" LLVMRustArchiveMember*
+LLVMRustArchiveMemberNew(char *Filename, char *Name, Archive::Child *child) {
+ LLVMRustArchiveMember *Member = new LLVMRustArchiveMember;
+ Member->filename = Filename;
+ Member->name = Name;
+ if (child)
+ Member->child = *child;
+ return Member;
+}
+
+extern "C" void
+LLVMRustArchiveMemberFree(LLVMRustArchiveMember *Member) {
+ delete Member;
+}
+
+extern "C" int
+LLVMRustWriteArchive(char *Dst,
+ size_t NumMembers,
+ const LLVMRustArchiveMember **NewMembers,
+ bool WriteSymbtab,
+ Archive::Kind Kind) {
+#if LLVM_VERSION_MINOR >= 7
+ std::vector<NewArchiveIterator> Members;
+
+ for (size_t i = 0; i < NumMembers; i++) {
+ auto Member = NewMembers[i];
+ assert(Member->name);
+ if (Member->filename) {
+ Members.push_back(NewArchiveIterator(Member->filename, Member->name));
+ } else {
+ Members.push_back(NewArchiveIterator(Member->child, Member->name));
+ }
+ }
+ auto pair = writeArchive(Dst, Members, WriteSymbtab, Kind, true);
+ if (!pair.second)
+ return 0;
+ LLVMRustSetLastError(pair.second.message().c_str());
+#else
+ LLVMRustSetLastError("writing archives not supported with this LLVM version");
+#endif
+ return -1;
+}
#else
#include "llvm/Target/TargetLibraryInfo.h"
#endif
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
}
}
}
+
+extern "C" void
+LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
+ LLVMTargetMachineRef TMR) {
+ TargetMachine *Target = unwrap(TMR);
+#if LLVM_VERSION_MINOR >= 7
+ if (const DataLayout *DL = Target->getDataLayout())
+ unwrap(Module)->setDataLayout(*DL);
+#elif LLVM_VERSION_MINOR >= 6
+ if (const DataLayout *DL = Target->getSubtargetImpl()->getDataLayout())
+ unwrap(Module)->setDataLayout(DL);
+#else
+ if (const DataLayout *DL = Target->getDataLayout())
+ unwrap(Module)->setDataLayout(DL);
+#endif
+}
+
+extern "C" LLVMTargetDataRef
+LLVMRustGetModuleDataLayout(LLVMModuleRef M) {
+#if LLVM_VERSION_MINOR >= 7
+ return wrap(&unwrap(M)->getDataLayout());
+#else
+ return wrap(unwrap(M)->getDataLayout());
+#endif
+}
idx, B)));
}
-extern "C" void LLVMAddFunctionAttribute(LLVMValueRef Fn, unsigned index, uint64_t Val) {
+extern "C" void LLVMAddFunctionAttribute(LLVMValueRef Fn, unsigned index,
+ uint64_t Val) {
Function *A = unwrap<Function>(Fn);
AttrBuilder B;
B.addRawValue(Val);
return true;
}
-extern "C" void*
-LLVMRustOpenArchive(char *path) {
- ErrorOr<std::unique_ptr<MemoryBuffer>> buf_or = MemoryBuffer::getFile(path,
- -1,
- false);
- if (!buf_or) {
- LLVMRustSetLastError(buf_or.getError().message().c_str());
- return nullptr;
- }
-
-#if LLVM_VERSION_MINOR >= 6
- ErrorOr<std::unique_ptr<Archive>> archive_or =
- Archive::create(buf_or.get()->getMemBufferRef());
-
- if (!archive_or) {
- LLVMRustSetLastError(archive_or.getError().message().c_str());
- return nullptr;
- }
-
- OwningBinary<Archive> *ret = new OwningBinary<Archive>(
- std::move(archive_or.get()), std::move(buf_or.get()));
-#else
- std::error_code err;
- Archive *ret = new Archive(std::move(buf_or.get()), err);
- if (err) {
- LLVMRustSetLastError(err.message().c_str());
- return nullptr;
- }
-#endif
-
- return ret;
-}
-
-#if LLVM_VERSION_MINOR >= 6
-typedef OwningBinary<Archive> RustArchive;
-#define GET_ARCHIVE(a) ((a)->getBinary())
-#else
-typedef Archive RustArchive;
-#define GET_ARCHIVE(a) (a)
-#endif
-
-extern "C" void
-LLVMRustDestroyArchive(RustArchive *ar) {
- delete ar;
-}
-
-struct RustArchiveIterator {
- Archive::child_iterator cur;
- Archive::child_iterator end;
-};
-
-extern "C" RustArchiveIterator*
-LLVMRustArchiveIteratorNew(RustArchive *ra) {
- Archive *ar = GET_ARCHIVE(ra);
- RustArchiveIterator *rai = new RustArchiveIterator();
- rai->cur = ar->child_begin();
- rai->end = ar->child_end();
- return rai;
-}
-
-extern "C" const Archive::Child*
-LLVMRustArchiveIteratorCurrent(RustArchiveIterator *rai) {
- if (rai->cur == rai->end)
- return NULL;
-#if LLVM_VERSION_MINOR >= 6
- const Archive::Child &ret = *rai->cur;
- return &ret;
-#else
- return rai->cur.operator->();
-#endif
-}
-
-extern "C" void
-LLVMRustArchiveIteratorNext(RustArchiveIterator *rai) {
- if (rai->cur == rai->end)
- return;
- ++rai->cur;
-}
-
-extern "C" void
-LLVMRustArchiveIteratorFree(RustArchiveIterator *rai) {
- delete rai;
-}
-
-extern "C" const char*
-LLVMRustArchiveChildName(const Archive::Child *child, size_t *size) {
- ErrorOr<StringRef> name_or_err = child->getName();
- if (name_or_err.getError())
- return NULL;
- StringRef name = name_or_err.get();
- *size = name.size();
- return name.data();
-}
-
-extern "C" const char*
-LLVMRustArchiveChildData(Archive::Child *child, size_t *size) {
- StringRef buf = child->getBuffer();
- *size = buf.size();
- return buf.data();
-}
-
extern "C" void
LLVMRustSetDLLStorageClass(LLVMValueRef Value,
GlobalValue::DLLStorageClassTypes Class) {
raw_rust_string_ostream os(str);
unwrap(d)->print("", os);
}
+
+extern "C" LLVMValueRef
+LLVMRustBuildLandingPad(LLVMBuilderRef Builder,
+ LLVMTypeRef Ty,
+ LLVMValueRef PersFn,
+ unsigned NumClauses,
+ const char* Name,
+ LLVMValueRef F) {
+#if LLVM_VERSION_MINOR >= 7
+ unwrap<Function>(F)->setPersonalityFn(unwrap<Constant>(PersFn));
+ return LLVMBuildLandingPad(Builder, Ty, NumClauses, Name);
+#else
+ return LLVMBuildLandingPad(Builder, Ty, PersFn, NumClauses, Name);
+#endif
+}
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2015-06-16
+2015-06-30
+S 2015-07-26 a5c12f4
+ bitrig-x86_64 8734eb41ffbe6ddc1120aa2910db4162ec9cf270
+ freebsd-i386 2fee22adec101e2f952a5548fd1437ce1bd8d26f
+ freebsd-x86_64 bc50b0f8d7f6d62f4f5ffa136f5387f5bf6524fd
+ linux-i386 3459275cdf3896f678e225843fa56f0d9fdbabe8
+ linux-x86_64 e451e3bd6e5fcef71e41ae6f3da9fb1cf0e13a0c
+ macos-i386 428944a7984c0988e77909d82ca2ef77d96a1fbd
+ macos-x86_64 b0515bb7d2892b9a58282fc865fee11a885406d6
+ winnt-i386 22286e815372c3e03729853af48a2f6d538ed086
+ winnt-x86_64 f13aa3c02a15f8e794b9e180487bdf04378f8f04
+
+S 2015-07-17 d4432b3
+ bitrig-x86_64 af77768e0eb0f4c7ec5a8e36047a08053b54b230
+ freebsd-i386 b049325e5b2efe5f4884f3dafda448c1dac49b4f
+ freebsd-x86_64 a59e397188dbfe67456a6301df5ca13c7e238ab9
+ linux-i386 93f6216a35d3bed3cedf244c9aff4cd716336bd9
+ linux-x86_64 d8f4967fc71a153c925faecf95a7feadf7e463a4
+ macos-i386 29852c4d4b5a851f16d627856a279cae5bf9bd01
+ macos-x86_64 1a20259899321062a0325edb1d22990f05d18708
+ winnt-i386 df50210f41db9a6f2968be5773b8e3bae32bb823
+ winnt-x86_64 d7774b724988485652781a804bdf8e05d28ead48
+
S 2015-05-24 ba0e1cd
bitrig-x86_64 2a710e16e3e3ef3760df1f724d66b3af34c1ef3f
freebsd-x86_64 370db40613f5c08563ed7e38357826dd42d4e0f8
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "lib"]
+#![crate_name = "default_param_test"]
+
+use std::marker::PhantomData;
+
+pub struct Foo<A, B>(PhantomData<(A, B)>);
+
+pub fn bleh<A=i32, X=char>() -> Foo<A, X> { Foo(PhantomData) }
+
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+
+pub fn foo() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate issue_14344_1;
+
+pub fn bar() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+
+#[link(name = "rust_test_helpers", kind = "static")]
+extern {
+ pub fn rust_dbg_extern_identity_u32(u: u32) -> u32;
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate issue_25185_1;
+
+pub use issue_25185_1::rust_dbg_extern_identity_u32;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(no_std)]
+#![feature(no_std, core, libc)]
#![no_std]
#![feature(lang_items)]
-#![feature(associated_type_defaults)]
-#[lang="sized"]
-pub trait Sized { }
-
-#[lang="panic"]
-fn panic(_: &(&'static str, &'static str, usize)) -> ! { loop {} }
+extern crate core;
+extern crate libc;
#[lang = "stack_exhausted"]
extern fn stack_exhausted() {}
#[lang = "eh_personality"]
extern fn eh_personality() {}
-#[lang="copy"]
-pub trait Copy {
- // Empty.
-}
-
-#[lang="rem"]
-pub trait Rem<RHS=Self> {
- type Output = Self;
- fn rem(self, rhs: RHS) -> Self::Output;
-}
-
-impl Rem for isize {
- type Output = isize;
+#[lang = "eh_unwind_resume"]
+extern fn eh_unwind_resume() {}
- #[inline]
- fn rem(self, other: isize) -> isize {
- // if you use `self % other` here, as one would expect, you
- // get back an error because of potential failure/overflow,
- // which tries to invoke error fns that don't have the
- // appropriate signatures anymore. So...just return 0.
- 0
- }
+#[lang = "panic_fmt"]
+extern fn rust_begin_unwind(msg: core::fmt::Arguments, file: &'static str,
+ line: u32) -> ! {
+ loop {}
}
extern crate rustc;
use syntax::ast;
-use syntax::parse::token;
use rustc::lint::{Context, LintPass, LintPassObject, LintArray};
use rustc::plugin::Registry;
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
- let name = token::get_ident(it.ident);
- if &name[..] == "lintme" {
- cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'");
- } else if &name[..] == "pleaselintme" {
- cx.span_lint(PLEASE_LINT, it.span, "item is named 'pleaselintme'");
+ match &*it.ident.name.as_str() {
+ "lintme" => cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'"),
+ "pleaselintme" => cx.span_lint(PLEASE_LINT, it.span, "item is named 'pleaselintme'"),
+ _ => {}
}
}
}
extern crate rustc;
use syntax::ast;
-use syntax::parse::token;
use rustc::lint::{Context, LintPass, LintPassObject, LintArray};
use rustc::plugin::Registry;
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
- let name = token::get_ident(it.ident);
- if &name[..] == "lintme" {
+ if it.ident.name == "lintme" {
cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'");
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+
+pub fn foo() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// force-host
+
+#![feature(plugin_registrar)]
+#![feature(rustc_private)]
+
+extern crate rustc;
+
+use rustc::plugin::Registry;
+
+#[plugin_registrar]
+pub fn plugin_registrar(_reg: &mut Registry) {}
sp: Span,
_: &[ast::TokenTree]) -> Box<MacResult+'cx> {
let args = self.args.iter().map(|i| pprust::meta_item_to_string(&*i))
- .collect::<Vec<_>>().connect(", ");
+ .collect::<Vec<_>>().join(", ");
let interned = token::intern_and_get_ident(&args[..]);
MacEager::expr(ecx.expr_str(sp, interned))
}
extern crate rustc;
use syntax::codemap::Span;
-use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
+use syntax::parse::token;
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder; // trait for expr_usize
use rustc::plugin::Registry;
("I", 1)];
let text = match args {
- [TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
+ [TtToken(_, token::Ident(s, _))] => s.to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(optin_builtin_traits)]
+
+pub trait AnOibit {}
+
+impl AnOibit for .. {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(staged_api)]
+#![staged_api]
+#![stable(feature = "foo", since = "1.2.0")]
+
+
+#[unstable(feature = "foo", issue = "1")]
+pub fn unstable() {}
+
+#[unstable(feature = "foo", reason = "message", issue = "2")]
+pub fn unstable_msg() {}
use std::time::Duration;
fn timed<F>(label: &str, f: F) where F: FnMut() {
- println!(" {}: {}", label, Duration::span(f));
+ println!(" {}: {:?}", label, Duration::span(f));
}
trait MutableMap {
}
fn write_row(label: &str, value: Duration) {
- println!("{:30} {} s\n", label, value);
+ println!("{:30} {:?} s\n", label, value);
}
fn write_results(label: &str, results: &Results) {
#![feature(rand, vec_push_all, duration, duration_span)]
-use std::iter::repeat;
use std::mem::swap;
use std::env;
use std::__rand::{thread_rng, Rng};
let dur = Duration::span(test);
- println!("{}:\t\t{}", name, dur);
+ println!("{}:\t\t{:?}", name, dur);
}
fn shift_push() {
- let mut v1 = repeat(1).take(30000).collect::<Vec<_>>();
+ let mut v1 = vec![1; 30000];
let mut v2 = Vec::new();
while !v1.is_empty() {
let mut v = Vec::new();
let mut i = 0;
while i < 1500 {
- let rv = repeat(i).take(r.gen_range(0, i + 1)).collect::<Vec<_>>();
+ let rv = vec![i; r.gen_range(0, i + 1)];
if r.gen() {
v.extend(rv);
} else {
let mut v = Vec::new();
let mut i = 0;
while i < 1500 {
- let rv = repeat(i).take(r.gen_range(0, i + 1)).collect::<Vec<_>>();
+ let rv = vec![i; r.gen_range(0, i + 1)];
if r.gen() {
let mut t = v.clone();
t.push_all(&rv);
let mut v = Vec::new();
for i in 0..1500 {
- let mut rv = repeat(i).take(r.gen_range(0, i + 1)).collect::<Vec<_>>();
+ let mut rv = vec![i; r.gen_range(0, i + 1)];
if r.gen() {
v.push_all(&rv);
}
});
let result = result.unwrap();
print!("Count is {}\n", result);
- print!("Test took {}\n", dur);
- let thruput = ((size / workers * workers) as f64) / (dur.secs() as f64);
+ print!("Test took {:?}\n", dur);
+ let thruput = ((size / workers * workers) as f64) / (dur.as_secs() as f64);
print!("Throughput={} per sec\n", thruput);
assert_eq!(result, num_bytes * size);
}
});
let result = result.unwrap();
print!("Count is {}\n", result);
- print!("Test took {}\n", dur);
- let thruput = ((size / workers * workers) as f64) / (dur.secs() as f64);
+ print!("Test took {:?}\n", dur);
+ let thruput = ((size / workers * workers) as f64) / (dur.as_secs() as f64);
print!("Throughput={} per sec\n", thruput);
assert_eq!(result, num_bytes * size);
}
// all done, report stats.
let num_msgs = num_tasks * msg_per_task;
- let rate = (num_msgs as f64) / (dur.secs() as f64);
+ let rate = (num_msgs as f64) / (dur.as_secs() as f64);
- println!("Sent {} messages in {}", num_msgs, dur);
+ println!("Sent {} messages in {:?}", num_msgs, dur);
println!(" {} messages / second", rate);
println!(" {} μs / message", 1000000. / rate);
}
use std::env;
use std::io;
use std::io::prelude::*;
-use std::iter::repeat;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
fn make(&mut self, n: usize) -> io::Result<()> {
let alu_len = self.alu.len();
- let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
+ let mut buf = vec![0; alu_len + LINE_LEN];
let alu: &[u8] = self.alu.as_bytes();
for (slot, val) in buf.iter_mut().zip(alu) {
#![feature(iter_cmp)]
-use std::iter::repeat;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
// Converts a list of mask to a Vec<u8>.
fn to_vec(raw_sol: &List<u64>) -> Vec<u8> {
- let mut sol = repeat('.' as u8).take(50).collect::<Vec<_>>();
+ let mut sol = vec![b'.'; 50];
for &m in raw_sol.iter() {
let id = '0' as u8 + get_id(m);
for i in 0..50 {
let dur = Duration::span(|| fibn = Some(fib(n)));
let fibn = fibn.unwrap();
- println!("{}\t{}\t{}", n, fibn, dur);
+ println!("{}\t{}\t{:?}", n, fibn, dur);
}
}
}
#![allow(non_snake_case)]
#![feature(unboxed_closures, iter_arith, core_simd, scoped)]
-use std::iter::repeat;
use std::thread;
use std::env;
use std::simd::f64x2;
fn spectralnorm(n: usize) -> f64 {
assert!(n % 2 == 0, "only even lengths are accepted");
- let mut u = repeat(1.0).take(n).collect::<Vec<_>>();
+ let mut u = vec![1.0; n];
let mut v = u.clone();
let mut tmp = v.clone();
for _ in 0..10 {
let maxf = max as f64;
- println!("insert(): {} seconds\n", checkf);
- println!(" : {} op/s\n", maxf / checkf.secs() as f64);
- println!("get() : {} seconds\n", appendf);
- println!(" : {} op/s\n", maxf / appendf.secs() as f64);
+ println!("insert(): {:?} seconds\n", checkf);
+ println!(" : {} op/s\n", maxf / checkf.as_secs() as f64);
+ println!("get() : {:?} seconds\n", appendf);
+ println!(" : {} op/s\n", maxf / appendf.as_secs() as f64);
}
use std::io::prelude::*;
use std::io;
-use std::iter::repeat;
use std::env;
// Computes a single solution to a given 9x9 sudoku
reader.read_line(&mut s).unwrap();
assert_eq!(s, "9,9\n");
- let mut g = repeat(vec![0, 0, 0, 0, 0, 0, 0, 0, 0])
- .take(10).collect::<Vec<_>>();
+ let mut g = vec![vec![0, 0, 0, 0, 0, 0, 0, 0, 0]; 10];
for line in reader.lines() {
let line = line.unwrap();
let comps: Vec<&str> = line
recurse_or_panic(depth, None)
}).join();
});
- println!("iter: {}", dur);
+ println!("iter: {:?}", dur);
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C no-prepopulate-passes
+
+// CHECK: @VAR1 = constant i32 1, section ".test_one"
+#[no_mangle]
+#[link_section = ".test_one"]
+pub static VAR1: u32 = 1;
+
+pub enum E {
+ A(u32),
+ B(f32)
+}
+
+// CHECK: @VAR2 = constant {{.*}} { i32 0, i32 666, {{.*}} }, section ".test_two"
+#[no_mangle]
+#[link_section = ".test_two"]
+pub static VAR2: E = E::A(666);
+
+// CHECK: @VAR3 = constant {{.*}} { i32 1, float 1.000000e+00, {{.*}} }, section ".test_three"
+#[no_mangle]
+#[link_section = ".test_three"]
+pub static VAR3: E = E::B(1.);
+
+// CHECK: define void @fn1() {{.*}} section ".test_four" {
+#[no_mangle]
+#[link_section = ".test_four"]
+pub fn fn1() {}
impl Foo for SignedBar {
const BAR: i32 = -1;
- //~^ ERROR E0326
+ //~^ ERROR implemented const `BAR` has an incompatible type for trait
+ //~| expected u32,
+ //~| found i32 [E0326]
}
fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+
+struct Foo;
+
+impl Foo {
+ const bar: bool = true;
+ fn bar() {} //~ ERROR duplicate associated function
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Before the introduction of the "duplicate associated type" error, the
+// program below used to result in the "ambiguous associated type" error E0223,
+// which is unexpected.
+
+trait Foo {
+ type Bar;
+}
+
+struct Baz;
+
+impl Foo for Baz {
+ type Bar = i16;
+ type Bar = u16; //~ ERROR duplicate associated type
+}
+
+fn main() {
+ let x: Baz::Bar = 5;
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test for issue #23969
+
+#![feature(associated_consts)]
+
+trait Foo {
+ type Ty;
+ const BAR: u32;
+}
+
+impl Foo for () {
+ type Ty = ();
+ type Ty = usize; //~ ERROR duplicate associated type
+ const BAR: u32 = 7;
+ const BAR: u32 = 8; //~ ERROR duplicate associated constant
+}
+
+fn main() {
+ let _: <() as Foo>::Ty = ();
+ let _: u32 = <() as Foo>::BAR;
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+#![feature(associated_type_defaults)]
+
+pub trait Tr {
+ type Assoc = u8;
+ type Assoc2 = Self::Assoc;
+ const C: u8 = 11;
+ fn foo(&self) {}
+}
+
+impl Tr for () {
+ type Assoc = ();
+ //~^ ERROR need to be reimplemented as `Assoc` was overridden: `Assoc2`, `C`, `foo`
+}
+
+fn main() {}
// except according to those terms.
fn main() {
- 1 = 2; //~ ERROR illegal left-hand side expression
- 1 += 2; //~ ERROR illegal left-hand side expression
- (1, 2) = (3, 4); //~ ERROR illegal left-hand side expression
+ 1 = 2; //~ ERROR invalid left-hand side expression
+ 1 += 2; //~ ERROR invalid left-hand side expression
+ (1, 2) = (3, 4); //~ ERROR invalid left-hand side expression
let (a, b) = (1, 2);
- (a, b) = (3, 4); //~ ERROR illegal left-hand side expression
+ (a, b) = (3, 4); //~ ERROR invalid left-hand side expression
- None = Some(3); //~ ERROR illegal left-hand side expression
+ None = Some(3); //~ ERROR invalid left-hand side expression
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cap-lints test
+// error-pattern: unknown lint level: `test`
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cap-lints deny
+
+#![deny(warnings)]
+
+use std::option; //~ ERROR
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cap-lints warn
+
+#![deny(warnings)]
+#![feature(rustc_attrs)]
+
+use std::option; //~ WARN
+
+#[rustc_error]
+fn main() {} //~ ERROR: compilation successful
+
fn inc(v: &mut Box<isize>) {
*v = box() (**v + 1);
+ //~^ WARN deprecated syntax
}
fn pre_freeze_cond() {
fn inc(v: &mut Box<isize>) {
*v = box() (**v + 1);
+ //~^ WARN deprecated syntax
}
fn loop_overarching_alias_mut() {
fn inc(v: &mut Box<isize>) {
*v = box() (**v + 1);
+ //~^ WARN deprecated syntax
}
fn pre_freeze() {
let foo(box i) = self;
let foo(box j) = f;
foo(box() (i + j))
+ //~^ WARN deprecated syntax
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: cannot cast as `bool`, compare with zero instead
-fn main() { let u = (5 as bool); }
+fn main() {
+ let u = (5 as bool);
+ //~^ ERROR cannot cast as `bool`
+ //~^^ HELP compare with zero instead
+}
fn illegal_cast<U:?Sized,V:?Sized>(u: *const U) -> *const V
{
- u as *const V //~ ERROR vtable kinds
+ u as *const V
+ //~^ ERROR casting
+ //~^^ NOTE vtable kinds
}
fn illegal_cast_2<U:?Sized>(u: *const U) -> *const str
{
- u as *const str //~ ERROR vtable kinds
+ u as *const str
+ //~^ ERROR casting
+ //~^^ NOTE vtable kinds
}
trait Foo { fn foo(&self) {} }
let _ = v as (u32,); //~ ERROR non-scalar
let _ = Some(&v) as *const u8; //~ ERROR non-scalar
- let _ = v as f32; //~ ERROR through a usize first
- let _ = main as f64; //~ ERROR through a usize first
- let _ = &v as usize; //~ ERROR through a raw pointer first
- let _ = f as *const u8; //~ ERROR through a usize first
- let _ = 3 as bool; //~ ERROR compare with zero
- let _ = E::A as bool; //~ ERROR compare with zero
+ let _ = v as f32;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
+ let _ = main as f64;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
+ let _ = &v as usize;
+ //~^ ERROR casting
+ //~^^ HELP through a raw pointer first
+ let _ = f as *const u8;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
+ let _ = 3 as bool;
+ //~^ ERROR cannot cast as `bool`
+ //~^^ HELP compare with zero
+ let _ = E::A as bool;
+ //~^ ERROR cannot cast as `bool`
+ //~^^ HELP compare with zero
let _ = 0x61u32 as char; //~ ERROR only `u8` can be cast
- let _ = false as f32; //~ ERROR through an integer first
- let _ = E::A as f32; //~ ERROR through an integer first
- let _ = 'a' as f32; //~ ERROR through an integer first
+ let _ = false as f32;
+ //~^ ERROR casting
+ //~^^ HELP through an integer first
+ let _ = E::A as f32;
+ //~^ ERROR casting
+ //~^^ HELP through an integer first
+ let _ = 'a' as f32;
+ //~^ ERROR casting
+ //~^^ HELP through an integer first
- let _ = false as *const u8; //~ ERROR through a usize first
- let _ = E::A as *const u8; //~ ERROR through a usize first
- let _ = 'a' as *const u8; //~ ERROR through a usize first
+ let _ = false as *const u8;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
+ let _ = E::A as *const u8;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
+ let _ = 'a' as *const u8;
+ //~^ ERROR casting
+ //~^^ HELP through a usize first
- let _ = 42usize as *const [u8]; //~ ERROR illegal cast
- let _ = v as *const [u8]; //~ ERROR illegal cast
+ let _ = 42usize as *const [u8]; //~ ERROR casting
+ let _ = v as *const [u8]; //~ ERROR casting
let _ = fat_v as *const Foo;
//~^ ERROR `core::marker::Sized` is not implemented for the type `[u8]`
- let _ = foo as *const str; //~ ERROR illegal cast
- let _ = foo as *mut str; //~ ERROR illegal cast
- let _ = main as *mut str; //~ ERROR illegal cast
- let _ = &f as *mut f32; //~ ERROR illegal cast
- let _ = &f as *const f64; //~ ERROR illegal cast
- let _ = fat_v as usize; //~ ERROR through a raw pointer first
+ let _ = foo as *const str; //~ ERROR casting
+ let _ = foo as *mut str; //~ ERROR casting
+ let _ = main as *mut str; //~ ERROR casting
+ let _ = &f as *mut f32; //~ ERROR casting
+ let _ = &f as *const f64; //~ ERROR casting
+ let _ = fat_v as usize;
+ //~^ ERROR casting
+ //~^^ HELP through a raw pointer first
let a : *const str = "hello";
let _ = a as *const Foo;
let _ = main.f as *const u32; //~ ERROR attempted access of field
let cf: *const Foo = &0;
- let _ = cf as *const [u8]; //~ ERROR vtable kinds
- let _ = cf as *const Bar; //~ ERROR vtable kinds
+ let _ = cf as *const [u8];
+ //~^ ERROR casting
+ //~^^ NOTE vtable kinds
+ let _ = cf as *const Bar;
+ //~^ ERROR casting
+ //~^^ NOTE vtable kinds
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Tests that empty codemaps don't ICE (#23301)
+
+// compile-flags: --cfg ""
+
+// error-pattern: expected ident, found
+
+pub fn main() {
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #25954: detect and reject a closure type that
+// references itself.
+
+use std::cell::{Cell, RefCell};
+
+struct A<T: Fn()> {
+ x: RefCell<Option<T>>,
+ b: Cell<i32>,
+}
+
+fn main() {
+ let mut p = A{x: RefCell::new(None), b: Cell::new(4i32)};
+
+ // This is an error about types of infinite size:
+ let q = || p.b.set(5i32); //~ ERROR mismatched types
+
+ *(p.x.borrow_mut()) = Some(q);
+}
impl !Send for Vec<isize> { }
//~^ ERROR E0117
-//~| ERROR E0119
fn main() { }
// except according to those terms.
static a: &'static str = "foo";
-static b: *const u8 = a as *const u8; //~ ERROR illegal cast
-static c: *const u8 = &a as *const u8; //~ ERROR illegal cast
+static b: *const u8 = a as *const u8; //~ ERROR casting
+static c: *const u8 = &a as *const u8; //~ ERROR casting
fn main() {
}
// evaluation below (e.g. that performed by trans and llvm), so if you
// change this warn to a deny, then the compiler will exit before
// those errors are detected.
-#![warn(unsigned_negation)]
use std::fmt;
use std::{i8, i16, i32, i64, isize};
const VALS_U8: (u8, u8, u8, u8) =
(-u8::MIN,
- //~^ WARNING negation of unsigned int variable may be unintentional
- // (The above is separately linted; unsigned negation is defined to be !x+1.)
u8::MIN - 1,
//~^ ERROR attempted to sub with overflow
u8::MAX + 1,
const VALS_U16: (u16, u16, u16, u16) =
(-u16::MIN,
- //~^ WARNING negation of unsigned int variable may be unintentional
- // (The above is separately linted; unsigned negation is defined to be !x+1.)
u16::MIN - 1,
//~^ ERROR attempted to sub with overflow
u16::MAX + 1,
const VALS_U32: (u32, u32, u32, u32) =
(-u32::MIN,
- //~^ WARNING negation of unsigned int variable may be unintentional
- // (The above is separately linted; unsigned negation is defined to be !x+1.)
u32::MIN - 1,
//~^ ERROR attempted to sub with overflow
u32::MAX + 1,
const VALS_U64: (u64, u64, u64, u64) =
(-u64::MIN,
- //~^ WARNING negation of unsigned int variable may be unintentional
- // (The above is separately linted; unsigned negation is defined to be !x+1.)
u64::MIN - 1,
//~^ ERROR attempted to sub with overflow
u64::MAX + 1,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: recursive constant
-static a: isize = b;
-static b: isize = a;
+const a: isize = b; //~ ERROR recursive constant
+const b: isize = a; //~ ERROR recursive constant
fn main() {
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+use std::fmt::Debug;
+
+// Example from the RFC
+fn foo<F:Default=usize>() -> F { F::default() }
+//~^ NOTE: a default was defined here...
+
+fn bar<B:Debug=isize>(b: B) { println!("{:?}", b); }
+//~^ NOTE: a second default was defined here...
+
+fn main() {
+ // Here, F is instantiated with $0=uint
+ let x = foo();
+ //~^ ERROR: mismatched types
+ //~| NOTE: conflicting type parameter defaults `usize` and `isize`
+ //~| NOTE: ...that was applied to an unconstrained type variable here
+
+ // Here, B is instantiated with $1=uint, and constraint $0 <: $1 is added.
+ bar(x);
+ //~^ NOTE: ...that also applies to the same type variable here
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+//aux-build:default_ty_param_cross_crate_crate.rs
+
+#![feature(default_type_parameter_fallback)]
+
+extern crate default_param_test;
+
+use default_param_test::{Foo, bleh};
+
+fn meh<X, B=bool>(x: Foo<X, B>) {}
+//~^ NOTE: a default was defined here...
+
+fn main() {
+ let foo = bleh();
+ //~^ NOTE: ...that also applies to the same type variable here
+
+ meh(foo);
+ //~^ ERROR: mismatched types:
+ //~| NOTE: conflicting type parameter defaults `bool` and `char`
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// check that dropck does the right thing with misc. Ty variants
+
+use std::fmt;
+struct NoisyDrop<T: fmt::Debug>(T);
+impl<T: fmt::Debug> Drop for NoisyDrop<T> {
+ fn drop(&mut self) {
+ let _ = vec!["0wned"];
+ println!("dropping {:?}", self.0)
+ }
+}
+
+trait Associator {
+ type As;
+}
+impl<T: fmt::Debug> Associator for T {
+ type As = NoisyDrop<T>;
+}
+struct Wrap<A: Associator>(<A as Associator>::As);
+
+fn projection() {
+ let (_w, bomb);
+ bomb = vec![""];
+ _w = Wrap::<&[&str]>(NoisyDrop(&bomb));
+ //~^ ERROR `bomb` does not live long enough
+}
+
+fn closure() {
+ let (_w,v);
+ v = vec![""];
+ _w = {
+ let u = NoisyDrop(&v);
+ //~^ ERROR `v` does not live long enough
+ move || u.0.len()
+ };
+}
+
+fn main() { closure(); projection() }
}
pub fn main() {
- let a = E::L0 as f32; //~ ERROR illegal cast
- let c = F::H1 as f32; //~ ERROR illegal cast
+ let a = E::L0 as f32; //~ ERROR casting
+ let c = F::H1 as f32; //~ ERROR casting
assert_eq!(a, -1.0f32);
assert_eq!(c, -1.0f32);
}
H1 = 0xFFFFFFFFFFFFFFFF
}
-static C0: f32 = E::L0 as f32; //~ ERROR illegal cast
-static C1: f32 = F::H1 as f32; //~ ERROR illegal cast
+static C0: f32 = E::L0 as f32; //~ ERROR casting
+static C1: f32 = F::H1 as f32; //~ ERROR casting
pub fn main() {
let b = C0;
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
Bar
}
-fn foo(x: Foo::Bar) {} //~ERROR found value name used as a type
+fn foo(x: Foo::Bar) {} //~ERROR found value `Foo::Bar` used as a type
fn main() {}
let p = a as *const [i32];
let q = a.as_ptr();
- a as usize; //~ ERROR illegal cast
+ a as usize; //~ ERROR casting
b as usize; //~ ERROR non-scalar cast
- p as usize; //~ ERROR illegal cast; cast through a raw pointer
+ p as usize;
+ //~^ ERROR casting
+ //~^^ HELP cast through a raw pointer
// #22955
- q as *const [i32]; //~ ERROR illegal cast
+ q as *const [i32]; //~ ERROR casting
// #21397
- let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR illegal cast
- let mut fail: *const str = 0 as *const str; //~ ERROR illegal cast
+ let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR casting
+ let mut fail: *const str = 0 as *const str; //~ ERROR casting
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-fn main() {
- use std::boxed::HEAP;
+// Check that `box EXPR` is feature-gated.
+//
+// See also feature-gate-placement-expr.rs
+//
+// (Note that the two tests are separated since the checks appear to
+// be performed at distinct phases, with an abort_if_errors call
+// separating them.)
+fn main() {
let x = box 'c'; //~ ERROR box expression syntax is experimental
println!("x: {}", x);
let x = box () 'c'; //~ ERROR box expression syntax is experimental
- println!("x: {}", x);
-
- let x = box (HEAP) 'c'; //~ ERROR box expression syntax is experimental
+ //~^ WARN deprecated syntax
println!("x: {}", x);
}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Test that negating unsigned integers is gated by `negate_unsigned` feature
-// gate
-
-const MAX: usize = -1;
-//~^ ERROR unary negation of unsigned integers may be removed in the future
-
-fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that `in PLACE { EXPR }` is feature-gated.
+//
+// See also feature-gate-box-expr.rs
+//
+// (Note that the two tests are separated since the checks appear to
+// be performed at distinct phases, with an abort_if_errors call
+// separating them.)
+
+fn main() {
+ use std::boxed::HEAP;
+
+ let x = box (HEAP) 'c'; //~ ERROR placement-in expression syntax is experimental
+ //~^ WARN deprecated syntax
+ println!("x: {}", x);
+
+ let x = in HEAP { 'c' }; //~ ERROR placement-in expression syntax is experimental
+ println!("x: {}", x);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[prelude_import] //~ ERROR `#[prelude_import]` is for use by rustc only
+use std::prelude::v1::*;
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let c = push_unsafe!('c'); //~ ERROR push/pop_unsafe macros are experimental
+ let c = pop_unsafe!('c'); //~ ERROR push/pop_unsafe macros are experimental
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test to make sure the names of the lifetimes are correctly resolved
+// in extern blocks.
+
+extern {
+ pub fn life<'a>(x:&'a i32);
+ pub fn life2<'b>(x:&'a i32, y:&'b i32); //~ ERROR use of undeclared lifetime name `'a`
+ pub fn life3<'a>(x:&'a i32, y:&i32) -> &'a i32;
+ pub fn life4<'b>(x: for<'c> fn(&'a i32)); //~ ERROR use of undeclared lifetime name `'a`
+ pub fn life5<'b>(x: for<'c> fn(&'b i32));
+ pub fn life6<'b>(x: for<'c> fn(&'c i32));
+ pub fn life7<'b>() -> for<'c> fn(&'a i32); //~ ERROR use of undeclared lifetime name `'a`
+ pub fn life8<'b>() -> for<'c> fn(&'b i32);
+ pub fn life9<'b>() -> for<'c> fn(&'c i32);
+}
+fn main() {}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Test what happens when a HR obligation is applied to an impl with
-// "outlives" bounds. Currently we're pretty conservative here; this
-// will probably improve in time.
-
-trait Foo<X> {
- fn foo(&self, x: X) { }
-}
-
-fn want_foo<T>()
- where T : for<'a> Foo<&'a isize>
-{
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Expressed as a where clause
-
-struct SomeStruct<X> {
- x: X
-}
-
-impl<'a,X> Foo<&'a isize> for SomeStruct<X>
- where X : 'a
-{
-}
-
-fn one() {
- // In fact there is no good reason for this to be an error, but
- // whatever, I'm mostly concerned it doesn't ICE right now:
- want_foo::<SomeStruct<usize>>();
- //~^ ERROR requirement `for<'a> usize : 'a` is not satisfied
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Expressed as shorthand
-
-struct AnotherStruct<X> {
- x: X
-}
-
-impl<'a,X:'a> Foo<&'a isize> for AnotherStruct<X>
-{
-}
-
-fn two() {
- want_foo::<AnotherStruct<usize>>();
- //~^ ERROR requirement `for<'a> usize : 'a` is not satisfied
-}
-
-fn main() { }
// except according to those terms.
-// error-pattern: illegal recursive enum type; wrap the inner value in a box
+// error-pattern: invalid recursive enum type
enum mlist { cons(isize, mlist), nil, }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! invalid {
+ _ => (); //~^ ERROR Invalid macro matcher
+}
+
+fn main() {
+}
fn main() {
A::C = 1;
- //~^ ERROR: illegal left-hand side expression
+ //~^ ERROR: invalid left-hand side expression
//~| ERROR: mismatched types
}
// except according to those terms.
#![feature(box_syntax)]
+#![feature(placement_in_syntax)]
fn main() {
box ( () ) 0;
- //~^ ERROR: only the exchange heap is currently supported
+ //~^ ERROR: the trait `core::ops::Placer<_>` is not implemented
+ //~| ERROR: the trait `core::ops::Placer<_>` is not implemented
+ //~| WARN deprecated syntax
}
}
extern "C" {
- fn foo(x: A); //~ ERROR found type without foreign-function-safe
+ fn foo(x: A); //~ ERROR found struct without foreign-function-safe
fn bar(x: B); //~ ERROR foreign-function-safe
fn baz(x: C);
fn qux(x: A2); //~ ERROR foreign-function-safe
fn main() {
let x = X { a: [0] };
- let _f = &x.a as *mut u8; //~ ERROR illegal cast
+ let _f = &x.a as *mut u8; //~ ERROR casting
let local: [u8; 1] = [0];
- let _v = &local as *mut u8; //~ ERROR illegal cast
+ let _v = &local as *mut u8; //~ ERROR casting
}
impl<T: fmt::Debug> ops::FnOnce<(),> for Debuger<T> {
type Output = ();
fn call_once(self, _args: ()) {
-//~^ ERROR `call_once` has an incompatible type for trait: expected "rust-call" fn, found "Rust" fn
+ //~^ ERROR `call_once` has an incompatible type for trait
+ //~| expected "rust-call" fn,
+ //~| found "Rust" fn
println!("{:?}", self.x);
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern: too big for the current architecture
+
+#[cfg(target_pointer_width = "32")]
+fn main() {
+ let x = [0usize; 0xffff_ffff];
+}
+
+#[cfg(target_pointer_width = "64")]
+fn main() {
+ let x = [0usize; 0xffff_ffff_ffff_ffff];
+}
#![deny(warnings)]
extern {
- pub fn foo(x: (isize)); //~ ERROR found rust type `isize` in foreign module
+ pub fn foo(x: (isize)); //~ ERROR found Rust type `isize` in foreign module
}
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-static FOO: usize = FOO; //~ ERROR recursive constant
+const FOO: usize = FOO; //~ ERROR recursive constant
fn main() {
let _x: [u8; FOO]; // caused stack overflow prior to fix
let _y: usize = 1 + {
- static BAR: usize = BAR; //~ ERROR recursive constant
+ const BAR: usize = BAR; //~ ERROR recursive constant
let _z: [u8; BAR]; // caused stack overflow prior to fix
1
};
// except according to those terms.
struct Foo { foo: Option<Option<Foo>> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
impl Foo { fn bar(&self) {} }
// except according to those terms.
struct Baz { q: Option<Foo> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
struct Foo { q: Option<Baz> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
impl Foo { fn bar(&self) {} }
use std::sync::Mutex;
struct Foo { foo: Mutex<Option<Foo>> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
impl Foo { fn bar(&self) {} }
use std::marker;
struct Foo<T> { foo: Option<Option<Foo<T>>>, marker: marker::PhantomData<T> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
impl<T> Foo<T> { fn bar(&self) {} }
struct Foo { foo: Bar<Foo> }
struct Bar<T> { x: Bar<Foo> , marker: marker::PhantomData<T> }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive struct type
impl Foo { fn foo(&self) {} }
use std::sync::Mutex;
enum Foo { X(Mutex<Option<Foo>>) }
-//~^ ERROR illegal recursive enum type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive enum type
impl Foo { fn bar(self) {} }
// except according to those terms.
enum Foo { Voo(Option<Option<Foo>>) }
-//~^ ERROR illegal recursive enum type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive enum type
impl Foo { fn bar(&self) {} }
fn main() {
let _x = Test::Foo as *const isize;
- //~^ ERROR illegal cast; cast through a usize first: `Test` as `*const isize`
+ //~^ ERROR casting `Test` as `*const isize` is invalid
+ //~^^ HELP cast through a usize first
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use foo::MyEnum::Result;
+use foo::NoResult; // Through a re-export
+
+mod foo {
+ pub use self::MyEnum::NoResult;
+
+ enum MyEnum {
+ Result,
+ NoResult
+ }
+
+ fn new() -> NoResult<MyEnum, String> {
+ //~^ ERROR: found value `foo::MyEnum::NoResult` used as a type
+ unimplemented!()
+ }
+}
+
+mod bar {
+ use foo::MyEnum::Result;
+ use foo;
+
+ fn new() -> Result<foo::MyEnum, String> {
+ //~^ ERROR: found value `foo::MyEnum::Result` used as a type
+ unimplemented!()
+ }
+}
+
+fn new() -> Result<foo::MyEnum, String> {
+ //~^ ERROR: found value `foo::MyEnum::Result` used as a type
+ unimplemented!()
+}
+
+fn newer() -> NoResult<foo::MyEnum, String> {
+ //~^ ERROR: found value `foo::MyEnum::NoResult` used as a type
+ unimplemented!()
+}
+
+fn main() {
+ let _ = new();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const X: u8 = 1;
+static Y: u8 = 1;
+fn foo() {}
+
+impl X {}
+//~^ ERROR use of undeclared type name `X`
+impl Y {}
+//~^ ERROR use of undeclared type name `Y`
+impl foo {}
+//~^ ERROR use of undeclared type name `foo`
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Foo<Bar=Bar>; //~ ERROR E0128
+pub struct Baz(Foo);
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {
+ fn foo<T>(&self, val: T);
+}
+
+trait Bar: Foo { }
+
+pub struct Thing;
+
+impl Foo for Thing {
+ fn foo<T>(&self, val: T) { }
+}
+
+impl Bar for Thing { }
+
+fn main() {
+ let mut thing = Thing;
+ let test: &mut Bar = &mut thing;
+ //~^ ERROR cannot convert to a trait object because trait `Bar` is not object-safe
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct X { x: i32 }
+
+fn main() {
+ let mut b: Vec<X> = vec![];
+ b.sort();
+ //~^ ERROR the trait `core::cmp::Ord` is not implemented for the type `X`
+}
impl<'a, T> Fn<(&'a T,)> for Foo {
extern "rust-call" fn call(&self, (_,): (T,)) {}
- //~^ ERROR: has an incompatible type for trait: expected &-ptr
+ //~^ ERROR: has an incompatible type for trait
+ //~| expected &-ptr
}
impl<'a, T> FnMut<(&'a T,)> for Foo {
extern "rust-call" fn call_mut(&mut self, (_,): (T,)) {}
- //~^ ERROR: has an incompatible type for trait: expected &-ptr
+ //~^ ERROR: has an incompatible type for trait
+ //~| expected &-ptr
}
impl<'a, T> FnOnce<(&'a T,)> for Foo {
type Output = ();
extern "rust-call" fn call_once(self, (_,): (T,)) {}
- //~^ ERROR: has an incompatible type for trait: expected &-ptr
+ //~^ ERROR: has an incompatible type for trait
+ //~| expected &-ptr
}
fn main() {}
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-// test that autoderef of a type like this does not
-// cause compiler to loop. Note that no instances
-// of such a type could ever be constructed.
-struct S { //~ ERROR this type cannot be instantiated
- x: X,
- to_str: (),
-}
-
-struct X(Box<S>); //~ ERROR this type cannot be instantiated
-
-fn main() {}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// test that autoderef of a type like this does not
-// cause compiler to loop. Note that no instances
-// of such a type could ever be constructed.
-
-struct t(Box<t>); //~ ERROR this type cannot be instantiated
-
-trait to_str_2 {
- fn my_to_string() -> String;
-}
-
-// I use an impl here because it will cause
-// the compiler to attempt autoderef and then
-// try to resolve the method.
-impl to_str_2 for t {
- fn my_to_string() -> String { "t".to_string() }
-}
-
-fn new_t(x: t) {
- x.my_to_string();
- // (there used to be an error emitted right here as well. It was
- // spurious, at best; if `t` did exist as a type, it clearly would
- // have an impl of the `to_str_2` trait.)
-}
-
-fn main() {
-}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Subscriber {
+ type Input;
+}
+
+pub trait Processor: Subscriber<Input = Self::Input> {
+ //~^ ERROR unsupported cyclic reference between types/traits detected [E0391]
+ type Input;
+}
+
+fn main() {}
fn foo<'a, T: Trait<'a>>(value: T::A) {
let new: T::B = unsafe { std::mem::transmute(value) };
-//~^ ERROR: cannot transmute to or from a type that contains type parameters in its interior [E0139]
+//~^ ERROR: cannot transmute to or from a type that contains unsubstituted type parameters [E0139]
}
fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct S;
+
+impl Iterator for S {
+ type Item = i32;
+ fn next(&mut self) -> Result<i32, i32> { Ok(7) }
+ //~^ ERROR method `next` has an incompatible type for trait
+ //~| expected enum `core::option::Option`
+ //~| found enum `core::result::Result` [E0053]
+}
+
+fn main() {}
struct Inches(i32);
fn main() {
- Inches as f32; //~ ERROR illegal cast; cast through a usize first
+ Inches as f32;
+ //~^ ERROR casting
+ //~^^ cast through a usize first
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::ops::Index;
+
+pub trait Array2D: Index<usize> {
+ fn rows(&self) -> usize;
+ fn columns(&self) -> usize;
+ fn get<'a>(&'a self, y: usize, x: usize) -> Option<&'a <Self as Index<usize>>::Output> {
+ if y >= self.rows() || x >= self.columns() {
+ return None;
+ }
+ let i = y * self.columns() + x;
+ let indexer = &(*self as &Index<usize, Output = <Self as Index<usize>>::Output>);
+ //~^ERROR non-scalar cast
+ Some(indexer.index(i))
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that an enum with recursion in the discriminant throws
+// the appropriate error (rather than, say, blowing the stack).
+enum X {
+ A = X::A as isize, //~ ERROR E0265
+}
+
+// Since `Y::B` here defaults to `Y::A+1`, this is also a
+// recursive definition.
+enum Y {
+ A = Y::B as isize, //~ ERROR E0265
+ B,
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait ToNbt<T> {
+ fn new(val: T) -> Self;
+}
+
+impl ToNbt<Self> {} //~ ERROR use of `Self` outside of an impl or trait
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_type_defaults)]
+
+use std::ops::{Index};
+
+trait Hierarchy {
+ type Value;
+ type ChildKey;
+ type Children = Index<Self::ChildKey, Output=Hierarchy>;
+ //~^ ERROR: the value of the associated type `ChildKey`
+ //~^^ ERROR: the value of the associated type `Children`
+ //~^^^ ERROR: the value of the associated type `Value`
+
+ fn data(&self) -> Option<(Self::Value, Self::Children)>;
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_type_defaults)]
+
+pub struct C<AType: A> {a:AType}
+
+pub trait A {
+ type B = C<Self::anything_here_kills_it>;
+ //~^ ERROR: associated type `anything_here_kills_it` not found for `Self`
+}
+
+fn main() {}
impl Deref for Thing {
//~^ ERROR not all trait items implemented, missing: `Target` [E0046]
fn deref(&self) -> i8 { self.0 }
- //~^ ERROR method `deref` has an incompatible type for trait: expected &-ptr, found i8 [E0053]
+ //~^ ERROR method `deref` has an incompatible type for trait
+ //~| expected &-ptr
+ //~| found i8 [E0053]
}
let thing = Thing(72);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct NoCopy;
+fn main() {
+ let x = NoCopy;
+ let f = move || { let y = x; };
+ //~^ NOTE `x` moved into closure environment here because it has type `NoCopy`
+ let z = x;
+ //~^ ERROR use of moved value: `x`
+}
--- /dev/null
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::sync::mpsc::channel;
+use std::thread::spawn;
+use std::marker::PhantomData;
+
+struct Foo<T> {foo: PhantomData<T>}
+
+fn main() {
+ let (tx, rx) = channel();
+
+ spawn(move || {
+ tx.send(Foo{ foo: PhantomData }); //~ ERROR E0282
+ });
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct A;
+struct B;
+
+static S: &'static B = &A; //~ ERROR user-defined dereference operators
+
+use std::ops::Deref;
+
+impl Deref for A {
+ type Target = B;
+ fn deref(&self)->&B { static B_: B = B; &B_ }
+}
+
+fn main(){}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo<T>() where for<'a> T: 'a {}
+
+fn main<'a>() {
+ foo::<&'a i32>();
+ //~^ ERROR the type `&'a i32` does not fulfill the required lifetime
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn parse_type(iter: Box<Iterator<Item=&str>+'static>) -> &str { iter.next() }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ HELP 2 elided lifetimes
+
+fn parse_type_2(iter: fn(&u8)->&u8) -> &str { iter() }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ HELP 0 elided free lifetimes
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn avg<T=T::Item>(_: T) {} //~ ERROR associated type `Item` not found for `T`
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ enum Foo { A { x: u32 } }
+ let orig = Foo::A { x: 5 };
+ Foo::A { x: 6, ..orig };
+ //~^ ERROR functional record update syntax requires a struct
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct S;
+
+fn main() {
+ let b = [0; S];
+ //~^ ERROR mismatched types
+ //~| expected `usize`
+ //~| found `S`
+ //~| expected usize
+ //~| found struct `S`
+ //~| ERROR expected positive integer for repeat count, found struct
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ match Some(1) {
+ None @ _ => {} //~ ERROR declaration of `None` shadows an enum variant
+ };
+ const C: u8 = 1;
+ match 1 {
+ C @ 2 => { //~ ERROR only irrefutable patterns allowed here
+ println!("{}", C);
+ }
+ _ => {}
+ };
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #27042. Test that a loop's label is included in its span.
+
+fn main() {
+ let _: i32 =
+ 'a: //~ ERROR mismatched types
+ loop { break };
+ let _: i32 =
+ 'b: //~ ERROR mismatched types
+ while true { break };
+ let _: i32 =
+ 'c: //~ ERROR mismatched types
+ for _ in None { break };
+ let _: i32 =
+ 'd: //~ ERROR mismatched types
+ while let Some(_) = None { break };
+}
use send_packet;
pub type ping = send_packet<pong>;
pub struct pong(send_packet<ping>);
- //~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+ //~^ ERROR invalid recursive struct type
}
fn main() {}
enum foo { foo_(bar) }
enum bar { bar_none, bar_some(bar) }
-//~^ ERROR illegal recursive enum type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive enum type
fn main() {
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![feature(static_recursion)]
+
enum foo { foo_(bar) }
struct bar { x: bar }
-//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
-//~^^ ERROR this type cannot be instantiated without an instance of itself
+//~^ ERROR invalid recursive struct type
fn main() {
}
enum E1 { V1(E2<E1>), }
enum E2<T> { V2(E2<E1>, marker::PhantomData<T>), }
-//~^ ERROR illegal recursive enum type; wrap the inner value in a box to make it representable
+//~^ ERROR invalid recursive enum type
impl E1 { fn foo(&self) {} }
// except according to those terms.
struct S {
- //~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
+ //~^ ERROR invalid recursive struct type
element: Option<S>
}
fn main() {
- let _m = Monster(); //~ ERROR `Monster` is a structure name, but
+ let _m = Monster(); //~ ERROR `Monster` is the name of a struct or
//~^ HELP did you mean to write: `Monster { /* fields */ }`?
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[allow(unused_imports)]
+
+mod foo {
+ use baz::bar;
+ //~^ ERROR import `bar` conflicts with existing submodule
+ mod bar {}
+}
+mod baz { pub mod bar {} }
+
+fn main() {}
// aux-build:lifetime_bound_will_change_warning_lib.rs
-// Test that we get suitable warnings when lifetime bound change will
-// cause breakage.
+// Test that various corner cases cause an error. These are tests
+// that used to pass before we tweaked object defaults.
#![allow(dead_code)]
#![allow(unused_variables)]
fn test2<'a>(x: &'a Box<Fn()+'a>) {
// but ref_obj will not, so warn.
- ref_obj(x) //~ WARNING this code may fail to compile in Rust 1.3
+ ref_obj(x) //~ ERROR mismatched types
}
fn test2cc<'a>(x: &'a Box<Fn()+'a>) {
// same as test2, but cross crate
- lib::ref_obj(x) //~ WARNING this code may fail to compile in Rust 1.3
+ lib::ref_obj(x) //~ ERROR mismatched types
}
fn test3<'a>(x: &'a Box<Fn()+'static>) {
}
#[rustc_error]
-fn main() { //~ ERROR compilation successful
+fn main() {
}
extern {
fn zf(x: Z);
- fn uf(x: U); //~ ERROR found type without foreign-function-safe
- fn bf(x: B); //~ ERROR found type without foreign-function-safe
- fn tf(x: T); //~ ERROR found type without foreign-function-safe
+ fn uf(x: U); //~ ERROR found enum without foreign-function-safe
+ fn bf(x: B); //~ ERROR found enum without foreign-function-safe
+ fn tf(x: T); //~ ERROR found enum without foreign-function-safe
}
pub fn main() { }
extern crate libc;
+trait Mirror { type It; }
+impl<T> Mirror for T { type It = Self; }
+#[repr(C)]
+pub struct StructWithProjection(*mut <StructWithProjection as Mirror>::It);
+#[repr(C)]
+pub struct StructWithProjectionAndLifetime<'a>(
+ &'a mut <StructWithProjectionAndLifetime<'a> as Mirror>::It
+);
+pub type I32Pair = (i32, i32);
+#[repr(C)]
+pub struct ZeroSize;
+pub type RustFn = fn();
+pub type RustBadRet = extern fn() -> Box<u32>;
+pub type CVoidRet = ();
+
extern {
- pub fn bare_type1(size: isize); //~ ERROR: found rust type
- pub fn bare_type2(size: usize); //~ ERROR: found rust type
- pub fn ptr_type1(size: *const isize); //~ ERROR: found rust type
- pub fn ptr_type2(size: *const usize); //~ ERROR: found rust type
+ pub fn bare_type1(size: isize); //~ ERROR: found Rust type
+ pub fn bare_type2(size: usize); //~ ERROR: found Rust type
+ pub fn ptr_type1(size: *const isize); //~ ERROR: found Rust type
+ pub fn ptr_type2(size: *const usize); //~ ERROR: found Rust type
+ pub fn slice_type(p: &[u32]); //~ ERROR: found Rust slice type
+ pub fn str_type(p: &str); //~ ERROR: found Rust type
+ pub fn box_type(p: Box<u32>); //~ ERROR found Rust type
+ pub fn char_type(p: char); //~ ERROR found Rust type
+ pub fn trait_type(p: &Clone); //~ ERROR found Rust trait type
+ pub fn tuple_type(p: (i32, i32)); //~ ERROR found Rust tuple type
+ pub fn tuple_type2(p: I32Pair); //~ ERROR found Rust tuple type
+ pub fn zero_size(p: ZeroSize); //~ ERROR found zero-size struct
+ pub fn fn_type(p: RustFn); //~ ERROR found function pointer with Rust
+ pub fn fn_type2(p: fn()); //~ ERROR found function pointer with Rust
+ pub fn fn_contained(p: RustBadRet); //~ ERROR: found Rust type
pub fn good1(size: *const libc::c_int);
pub fn good2(size: *const libc::c_uint);
+ pub fn good3(fptr: Option<extern fn()>);
+ pub fn good4(aptr: &[u8; 4 as usize]);
+ pub fn good5(s: StructWithProjection);
+ pub fn good6(s: StructWithProjectionAndLifetime);
+ pub fn good7(fptr: extern fn() -> ());
+ pub fn good8(fptr: extern fn() -> !);
+ pub fn good9() -> ();
+ pub fn good10() -> CVoidRet;
}
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(negate_unsigned)]
#![deny(exceeding_bitshifts)]
#![allow(unused_variables)]
#![allow(dead_code)]
-#![feature(num_bits_bytes, negate_unsigned)]
+#![feature(num_bits_bytes)]
fn main() {
let n = 1u8 << 7;
let n = 1_isize << std::isize::BITS; //~ ERROR: bitshift exceeds the type's number of bits
let n = 1_usize << std::usize::BITS; //~ ERROR: bitshift exceeds the type's number of bits
+
+
+ let n = 1i8<<(1isize+-1);
}
//~^ ERROR use of deprecated item
//~^^ ERROR use of unstable
override1: 2,
- override2: 3, //~ ERROR use of unstable
+ //~^ ERROR use of deprecated item
+ override2: 3,
+ //~^ ERROR use of deprecated item
+ //~^^ ERROR use of unstable
};
let _ = x.inherit;
//~^ ERROR use of deprecated item
//~^^ ERROR use of unstable
let _ = x.override1;
- let _ = x.override2; //~ ERROR use of unstable
+ //~^ ERROR use of deprecated item
+ let _ = x.override2;
+ //~^ ERROR use of deprecated item
+ //~^^ ERROR use of unstable
let Deprecated {
//~^ ERROR use of deprecated item
//~^ ERROR use of deprecated item
//~^^ ERROR use of unstable
override1: _,
- override2: _ //~ ERROR use of unstable
+ //~^ ERROR use of deprecated item
+ override2: _
+ //~^ ERROR use of unstable
+ //~^^ ERROR use of deprecated item
} = x;
let Deprecated
//~^ ERROR use of deprecated item
//~^^ ERROR use of unstable
let _ = x.1;
- let _ = x.2; //~ ERROR use of unstable
+ //~^ ERROR use of deprecated item
+ let _ = x.2;
+ //~^ ERROR use of deprecated item
+ //~^^ ERROR use of unstable
let Deprecated2
//~^ ERROR use of deprecated item
//~^ ERROR use of deprecated item
//~^^ ERROR use of unstable
_,
- _) //~ ERROR use of unstable
+ //~^ ERROR use of deprecated item
+ _)
+ //~^ ERROR use of deprecated item
+ //~^^ ERROR use of unstable
= x;
let Deprecated2
//~^ ERROR use of deprecated item
inherit: 1,
//~^ ERROR use of deprecated item
override1: 2,
+ //~^ ERROR use of deprecated item
override2: 3,
+ //~^ ERROR use of deprecated item
};
let _ = x.inherit;
//~^ ERROR use of deprecated item
let _ = x.override1;
+ //~^ ERROR use of deprecated item
let _ = x.override2;
+ //~^ ERROR use of deprecated item
let Deprecated {
//~^ ERROR use of deprecated item
inherit: _,
//~^ ERROR use of deprecated item
override1: _,
+ //~^ ERROR use of deprecated item
override2: _
+ //~^ ERROR use of deprecated item
} = x;
let Deprecated
let _ = x.0;
//~^ ERROR use of deprecated item
let _ = x.1;
+ //~^ ERROR use of deprecated item
let _ = x.2;
+ //~^ ERROR use of deprecated item
let Deprecated2
//~^ ERROR use of deprecated item
(_,
//~^ ERROR use of deprecated item
_,
+ //~^ ERROR use of deprecated item
_)
+ //~^ ERROR use of deprecated item
= x;
let Deprecated2
//~^ ERROR use of deprecated item
<Foo as Trait>::trait_stable_text(&foo);
let _ = DeprecatedStruct { i: 0 }; //~ ERROR use of deprecated item
- let _ = DeprecatedUnstableStruct { i: 0 }; //~ ERROR use of deprecated item
- //~^ ERROR use of unstable library feature
+ let _ = DeprecatedUnstableStruct {
+ //~^ ERROR use of deprecated item
+ //~^^ ERROR use of unstable library feature
+ i: 0 //~ ERROR use of deprecated item
+ };
let _ = UnstableStruct { i: 0 }; //~ ERROR use of unstable library feature
let _ = StableStruct { i: 0 };
<Foo>::trait_stable_text(&foo);
<Foo as Trait>::trait_stable_text(&foo);
- let _ = DeprecatedStruct { i: 0 }; //~ ERROR use of deprecated item
+ let _ = DeprecatedStruct {
+ //~^ ERROR use of deprecated item
+ i: 0 //~ ERROR use of deprecated item
+ };
let _ = UnstableStruct { i: 0 };
let _ = StableStruct { i: 0 };
i += 1;
}
}
-
-fn quy() {
- let i = -23_usize; //~ WARNING negation of unsigned int literal may be unintentional
- //~^ WARNING unused variable
-}
-
-fn quz() {
- let i = 23_usize;
- let j = -i; //~ WARNING negation of unsigned int variable may be unintentional
- //~^ WARNING unused variable
-}
}
}
+// Trait method calls.
trait Foo {
fn bar(&self) { //~ ERROR function cannot return without recurring
self.bar() //~ NOTE recursive call site
self.bar() //~ NOTE recursive call site
}
}
+}
+
+// Trait method call with integer fallback after method resolution.
+impl Foo for i32 {
+ fn bar(&self) { //~ ERROR function cannot return without recurring
+ 0.bar() //~ NOTE recursive call site
+ }
+}
+
+impl Foo for u32 {
+ fn bar(&self) {
+ 0.bar()
+ }
+}
+
+// Trait method calls via paths.
+trait Foo2 {
+ fn bar(&self) { //~ ERROR function cannot return without recurring
+ Foo2::bar(self) //~ NOTE recursive call site
+ }
+}
+impl Foo2 for Box<Foo2+'static> {
+ fn bar(&self) { //~ ERROR function cannot return without recurring
+ loop {
+ Foo2::bar(self) //~ NOTE recursive call site
+ }
+ }
}
struct Baz;
impl Baz {
+ // Inherent method call.
fn qux(&self) { //~ ERROR function cannot return without recurring
self.qux(); //~ NOTE recursive call site
}
+
+ // Inherent method call via path.
+ fn as_ref(&self) -> &Self { //~ ERROR function cannot return without recurring
+ Baz::as_ref(self) //~ NOTE recursive call site
+ }
+}
+
+// Trait method calls to impls via paths.
+impl Default for Baz {
+ fn default() -> Baz { //~ ERROR function cannot return without recurring
+ let x = Default::default(); //~ NOTE recursive call site
+ x
+ }
+}
+
+// Overloaded operators.
+impl std::ops::Deref for Baz {
+ type Target = ();
+ fn deref(&self) -> &() { //~ ERROR function cannot return without recurring
+ &**self //~ NOTE recursive call site
+ }
+}
+
+impl std::ops::Index<usize> for Baz {
+ type Output = Baz;
+ fn index(&self, x: usize) -> &Baz { //~ ERROR function cannot return without recurring
+ &self[x] //~ NOTE recursive call site
+ }
+}
+
+// Overloaded autoderef.
+struct Quux;
+impl std::ops::Deref for Quux {
+ type Target = Baz;
+ fn deref(&self) -> &Baz { //~ ERROR function cannot return without recurring
+ self.as_ref() //~ NOTE recursive call site
+ }
+}
+
+fn all_fine() {
+ let _f = all_fine;
+}
+
+// issue 26333
+trait Bar {
+ fn method<T: Bar>(&self, x: &T) {
+ x.method(x)
+ }
}
fn main() {}
let mut b = 3; //~ ERROR: variable does not need to be mutable
let mut a = vec!(3); //~ ERROR: variable does not need to be mutable
let (mut a, b) = (1, 2); //~ ERROR: variable does not need to be mutable
+ let mut a; //~ ERROR: variable does not need to be mutable
+ a = 3;
+
+ let mut b; //~ ERROR: variable does not need to be mutable
+ if true {
+ b = 3;
+ } else {
+ b = 4;
+ }
match 30 {
mut x => {} //~ ERROR: variable does not need to be mutable
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ match 5 {
+ 6 ... 1 => { }
+ _ => { }
+ };
+ //~^^^ ERROR lower range bound must be less than or equal to upper
+
+ match 5u64 {
+ 0xFFFF_FFFF_FFFF_FFFF ... 1 => { }
+ _ => { }
+ };
+ //~^^^ ERROR lower range bound must be less than or equal to upper
+}
// except according to those terms.
fn main() {
- match 5 {
- 6 ... 1 => { }
- _ => { }
- };
- //~^^^ ERROR lower range bound must be less than or equal to upper
-
match "wow" {
"bar" ... "foo" => { }
};
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
#[rustc_move_fragments]
pub fn test_match_partial(p: Lonely<D, D>) {
//~^ ERROR parent_of_fragments: `$(local p)`
- //~| ERROR assigned_leaf_path: `($(local p) as Zero)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Zero)`
match p {
Zero(..) => {}
_ => {}
#[rustc_move_fragments]
pub fn test_match_full(p: Lonely<D, D>) {
//~^ ERROR parent_of_fragments: `$(local p)`
- //~| ERROR assigned_leaf_path: `($(local p) as Zero)`
- //~| ERROR assigned_leaf_path: `($(local p) as One)`
- //~| ERROR assigned_leaf_path: `($(local p) as Two)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Zero)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::One)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Two)`
match p {
Zero(..) => {}
One(..) => {}
#[rustc_move_fragments]
pub fn test_match_bind_one(p: Lonely<D, D>) {
//~^ ERROR parent_of_fragments: `$(local p)`
- //~| ERROR assigned_leaf_path: `($(local p) as Zero)`
- //~| ERROR parent_of_fragments: `($(local p) as One)`
- //~| ERROR moved_leaf_path: `($(local p) as One).#0`
- //~| ERROR assigned_leaf_path: `($(local p) as Two)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Zero)`
+ //~| ERROR parent_of_fragments: `($(local p) as Lonely::One)`
+ //~| ERROR moved_leaf_path: `($(local p) as Lonely::One).#0`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Two)`
//~| ERROR assigned_leaf_path: `$(local data)`
match p {
Zero(..) => {}
#[rustc_move_fragments]
pub fn test_match_bind_many(p: Lonely<D, D>) {
//~^ ERROR parent_of_fragments: `$(local p)`
- //~| ERROR assigned_leaf_path: `($(local p) as Zero)`
- //~| ERROR parent_of_fragments: `($(local p) as One)`
- //~| ERROR moved_leaf_path: `($(local p) as One).#0`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Zero)`
+ //~| ERROR parent_of_fragments: `($(local p) as Lonely::One)`
+ //~| ERROR moved_leaf_path: `($(local p) as Lonely::One).#0`
//~| ERROR assigned_leaf_path: `$(local data)`
- //~| ERROR parent_of_fragments: `($(local p) as Two)`
- //~| ERROR moved_leaf_path: `($(local p) as Two).#0`
- //~| ERROR moved_leaf_path: `($(local p) as Two).#1`
+ //~| ERROR parent_of_fragments: `($(local p) as Lonely::Two)`
+ //~| ERROR moved_leaf_path: `($(local p) as Lonely::Two).#0`
+ //~| ERROR moved_leaf_path: `($(local p) as Lonely::Two).#1`
//~| ERROR assigned_leaf_path: `$(local left)`
//~| ERROR assigned_leaf_path: `$(local right)`
match p {
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
#[rustc_move_fragments]
pub fn test_match_bind_and_underscore(p: Lonely<D, D>) {
//~^ ERROR parent_of_fragments: `$(local p)`
- //~| ERROR assigned_leaf_path: `($(local p) as Zero)`
- //~| ERROR assigned_leaf_path: `($(local p) as One)`
- //~| ERROR parent_of_fragments: `($(local p) as Two)`
- //~| ERROR moved_leaf_path: `($(local p) as Two).#0`
- //~| ERROR unmoved_fragment: `($(local p) as Two).#1`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::Zero)`
+ //~| ERROR assigned_leaf_path: `($(local p) as Lonely::One)`
+ //~| ERROR parent_of_fragments: `($(local p) as Lonely::Two)`
+ //~| ERROR moved_leaf_path: `($(local p) as Lonely::Two).#0`
+ //~| ERROR unmoved_fragment: `($(local p) as Lonely::Two).#1`
//~| ERROR assigned_leaf_path: `$(local left)`
match p {
#![feature(box_syntax)]
-fn dup(x: Box<isize>) -> Box<(Box<isize>,Box<isize>)> { box() (x, x) } //~ ERROR use of moved value
+fn dup(x: Box<isize>) -> Box<(Box<isize>,Box<isize>)> {
+ box() (x, x) //~ ERROR use of moved value
+ //~^ WARN deprecated syntax
+}
fn main() {
dup(box 3);
}
_x: Port<()>,
}
- impl Drop for foo {
+ impl Drop for foo {
fn drop(&mut self) {}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we don't ICE when we are missing the owned_box lang item.
+
+// error-pattern: requires `owned_box` lang_item
+
+#![no_std]
+#![feature(lang_items, no_std, box_syntax)]
+
+extern crate core;
+
+fn main() {
+ let x = box 1i32;
+}
+
+#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
+#[lang = "eh_personality"] extern fn eh_personality() {}
+#[lang = "eh_unwind_resume"] extern fn eh_unwind_resume() {}
+#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
}
fn e(t: Ref2<Ref0<Test>>) {
- //~^ ERROR lifetime bound for this object type cannot be deduced from context
- //
- // In this case, Ref2 is ambiguous, and Ref0 inherits the
- // ambiguity.
+ // In this case, Ref2 is ambiguous, but Ref0 overrides with 'static.
}
fn f(t: &Ref2<Test>) {
//
// Under new rules the result is:
//
- // for<'a> fn(&'a Box<SomeTrait+'a>) -> Box<SomeTrait+'static>
+ // for<'a> fn(&'a Box<SomeTrait+'static>) -> Box<SomeTrait+'static>
//
- // Therefore, we get a type error attempting to return `deref(ss)`
- // since `SomeTrait+'a <: SomeTrait+'static` does not hold.
+ // Therefore, no type error.
deref(ss)
- //~^ ERROR cannot infer
}
fn load1(ss: &SomeTrait) -> &SomeTrait {
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the lifetime from the enclosing `&` is "inherited"
+// through the `Box` struct.
+
+// pretty-expanded FIXME #23616
+
+#![allow(dead_code)]
+
+trait Test {
+ fn foo(&self) { }
+}
+
+struct SomeStruct<'a> {
+ t: &'a Box<Test>,
+}
+
+fn c<'a>(t: &'a Box<Test+'a>, mut ss: SomeStruct<'a>) {
+ ss.t = t; //~ ERROR mismatched types
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the lifetime from the enclosing `&` is "inherited"
+// through the `MyBox` struct.
+
+// pretty-expanded FIXME #23616
+
+#![allow(dead_code)]
+#![feature(rustc_error)]
+
+trait Test {
+ fn foo(&self) { }
+}
+
+struct SomeStruct<'a> {
+ t: &'a MyBox<Test>,
+ u: &'a MyBox<Test+'a>,
+}
+
+struct MyBox<T:?Sized> {
+ b: Box<T>
+}
+
+fn c<'a>(t: &'a MyBox<Test+'a>, mut ss: SomeStruct<'a>) {
+ ss.t = t; //~ ERROR mismatched types
+}
+
+fn main() {
+}
}
fn load0(ss: &MyBox<SomeTrait>) -> MyBox<SomeTrait> {
- deref(ss) //~ ERROR cannot infer
+ deref(ss)
}
fn load1<'a,'b>(a: &'a MyBox<SomeTrait>,
{
a
//~^ ERROR cannot infer
- //~| ERROR mismatched types
}
fn load2<'a>(ss: &MyBox<SomeTrait+'a>) -> MyBox<SomeTrait+'a> {
- load0(ss) //~ WARNING E0398
+ load0(ss) //~ ERROR mismatched types
}
fn main() {
// except according to those terms.
fn main() {
- let a = 1_is; //~ ERROR illegal suffix
- let b = 2_us; //~ ERROR illegal suffix
+ let a = 1_is; //~ ERROR invalid suffix
+ let b = 2_us; //~ ERROR invalid suffix
}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Basic sanity check for `push_unsafe!(EXPR)` and
+// `pop_unsafe!(EXPR)`: we can call unsafe code when there are a
+// positive number of pushes in the stack, or if we are within a
+// normal `unsafe` block, but otherwise cannot.
+
+#![feature(pushpop_unsafe)]
+
+static mut X: i32 = 0;
+
+unsafe fn f() { X += 1; return; }
+fn g() { unsafe { X += 1_000; } return; }
+
+fn main() {
+ push_unsafe!( {
+ f(); pop_unsafe!({
+ f() //~ ERROR: call to unsafe function
+ })
+ } );
+
+ push_unsafe!({
+ f();
+ pop_unsafe!({
+ g();
+ f(); //~ ERROR: call to unsafe function
+ })
+ } );
+
+ push_unsafe!({
+ g(); pop_unsafe!({
+ unsafe {
+ f();
+ }
+ f(); //~ ERROR: call to unsafe function
+ })
+ });
+
+
+ // Note: For implementation simplicity the compiler just
+ // ICE's if you underflow the push_unsafe stack.
+ //
+ // Thus all of the following cases cause an ICE.
+ //
+ // (The "ERROR" notes are from an earlier version
+ // that used saturated arithmetic rather than checked
+ // arithmetic.)
+
+ // pop_unsafe!{ g() };
+ //
+ // push_unsafe!({
+ // pop_unsafe!(pop_unsafe!{ g() })
+ // });
+ //
+ // push_unsafe!({
+ // g();
+ // pop_unsafe!(pop_unsafe!({
+ // f() // ERROR: call to unsafe function
+ // }))
+ // });
+ //
+ // pop_unsafe!({
+ // f(); // ERROR: call to unsafe function
+ // })
+
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: illegal recursive enum type
+// error-pattern: invalid recursive enum type
enum list<T> { cons(T, list<T>), nil }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x = vec![1];
+ let y = x;
+ //~^ HELP use a `ref` binding as shown
+ //~| SUGGESTION let ref y = x;
+ x; //~ ERROR use of moved value
+
+ let x = vec![1];
+ let mut y = x;
+ //~^ HELP use a `ref` binding as shown
+ //~| SUGGESTION let ref mut y = x;
+ x; //~ ERROR use of moved value
+
+ let x = (Some(vec![1]), ());
+
+ match x {
+ (Some(y), ()) => {},
+ //~^ HELP use a `ref` binding as shown
+ //~| SUGGESTION (Some(ref y), ()) => {},
+ _ => {},
+ }
+ x; //~ ERROR use of partially moved value
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Foo<'static> { //~ ERROR illegal lifetime parameter name: `'static`
+struct Foo<'static> { //~ ERROR invalid lifetime parameter name: `'static`
x: &'static isize
}
fn static_proc(x: &isize) -> Box<FnMut()->(isize) + 'static> {
// This is illegal, because the region bound on `proc` is 'static.
- Box::new(move|| { *x }) //~ ERROR captured variable `x` does not outlive the enclosing closure
+ Box::new(move|| { *x }) //~ ERROR does not fulfill the required lifetime
}
fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn static_id<'a,'b>(t: &'a ()) -> &'static ()
+ where 'a: 'static { t }
+fn static_id_indirect<'a,'b>(t: &'a ()) -> &'static ()
+ where 'a: 'b, 'b: 'static { t }
+fn static_id_wrong_way<'a>(t: &'a ()) -> &'static () where 'static: 'a {
+ t //~ ERROR cannot infer an appropriate lifetime
+}
+
+fn error(u: &(), v: &()) {
+ static_id(&u); //~ ERROR cannot infer an appropriate lifetime
+ static_id_indirect(&v); //~ ERROR cannot infer an appropriate lifetime
+}
+
+fn main() {}
fn main() {
let n = 1;
- let a = [0; n]; //~ ERROR expected constant integer for repeat count, found variable
+ let a = [0; n];
+ //~^ ERROR expected constant integer for repeat count, found variable [E0307]
let b = [0; ()];
-//~^ ERROR mismatched types
-//~| expected `usize`
-//~| found `()`
-//~| expected usize
-//~| found ()
-//~| ERROR expected positive integer for repeat count, found tuple
+ //~^ ERROR mismatched types
+ //~| expected `usize`
+ //~| found `()`
+ //~| expected usize
+ //~| found ()) [E0308]
+ //~| ERROR expected positive integer for repeat count, found tuple [E0306]
let c = [0; true];
//~^ ERROR mismatched types
//~| expected `usize`
//~| found `bool`
//~| expected usize
- //~| found bool
- //~| ERROR expected positive integer for repeat count, found boolean
+ //~| found bool) [E0308]
+ //~| ERROR expected positive integer for repeat count, found boolean [E0306]
let d = [0; 0.5];
//~^ ERROR mismatched types
//~| expected `usize`
//~| found `_`
//~| expected usize
- //~| found floating-point variable
- //~| ERROR expected positive integer for repeat count, found float
+ //~| found floating-point variable) [E0308]
+ //~| ERROR expected positive integer for repeat count, found float [E0306]
let e = [0; "foo"];
//~^ ERROR mismatched types
//~| expected `usize`
//~| found `&'static str`
//~| expected usize
- //~| found &-ptr
- //~| ERROR expected positive integer for repeat count, found string
+ //~| found &-ptr) [E0308]
+ //~| ERROR expected positive integer for repeat count, found string literal [E0306]
let f = [0; -4_isize];
//~^ ERROR mismatched types
//~| expected `usize`
//~| found `isize`
//~| expected usize
- //~| found isize
- //~| ERROR expected positive integer for repeat count, found negative integer
+ //~| found isize) [E0308]
+ //~| ERROR expected positive integer for repeat count, found negative integer [E0306]
let f = [0_usize; -1_isize];
//~^ ERROR mismatched types
//~| expected `usize`
//~| found `isize`
//~| expected usize
- //~| found isize
- //~| ERROR expected positive integer for repeat count, found negative integer
+ //~| found isize) [E0308]
+ //~| ERROR expected positive integer for repeat count, found negative integer [E0306]
+ struct G {
+ g: (),
+ }
+ let g = [0; G { g: () }];
+ //~^ ERROR mismatched types
+ //~| expected `usize`
+ //~| found `main::G`
+ //~| expected usize
+ //~| found struct `main::G`) [E0308]
+ //~| ERROR expected positive integer for repeat count, found struct [E0306]
}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:stability_attribute_issue.rs
+
+#![deny(deprecated)]
+
+extern crate stability_attribute_issue;
+use stability_attribute_issue::*;
+
+fn main() {
+ unstable(); //~ ERROR use of unstable library feature 'foo' (see issue #1)
+ unstable_msg(); //~ ERROR use of unstable library feature 'foo': message (see issue #2)
+}
#[deprecated(since = "b")]
#[deprecated(since = "b")]
fn multiple4() { } //~ ERROR multiple deprecated attributes
+//~^ ERROR Invalid stability or deprecation version found
#[deprecated(since = "a")]
fn deprecated_without_unstable_or_stable() { } //~ ERROR deprecated attribute must be paired
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Z(&'static Z);
+//~^ ERROR this type cannot be instantiated
+
+pub fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut S: *const u8 = unsafe { &S as *const *const u8 as *const u8 };
+//~^ ERROR recursive static
+
+pub fn main() {
+ unsafe { assert_eq!(S, *(S as *const *const u8)); }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn bot<T>() -> T { loop {} }
+
+fn mutate(s: &mut str) {
+ s[1..2] = bot();
+ //~^ ERROR `core::marker::Sized` is not implemented for the type `str`
+ //~| ERROR `core::marker::Sized` is not implemented for the type `str`
+ s[1usize] = bot();
+ //~^ ERROR `core::ops::Index<usize>` is not implemented for the type `str`
+ //~| ERROR `core::ops::IndexMut<usize>` is not implemented for the type `str`
+ //~| ERROR `core::ops::Index<usize>` is not implemented for the type `str`
+}
+
+pub fn main() {}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+
+trait A { }
+
+impl A for isize {
+ const BAR: () = (); //~ ERROR const `BAR` is not a member of trait `A`
+ type Baz = (); //~ ERROR type `Baz` is not a member of trait `A`
+ fn foo(&self) { } //~ ERROR method `foo` is not a member of trait `A`
+}
+
+fn main() { }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-trait A { }
-
-impl A for isize {
- fn foo(&self) { } //~ ERROR method `foo` is not a member of trait `A`
-}
-
-fn main() { }
impl Mumbo for usize {
// Cannot have a larger effect than the trait:
unsafe fn jumbo(&self, x: &usize) { *self + *x; }
- //~^ ERROR expected normal fn, found unsafe fn
+ //~^ ERROR method `jumbo` has an incompatible type for trait
+ //~| expected normal fn,
+ //~| found unsafe fn
}
fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:this type cannot be instantiated
+// error-pattern:invalid recursive struct type
struct t1 {
foo: isize,
foolish: t1
fn main() {
let x : i16 = 22;
((&x) as *const i16) as f32;
- //~^ ERROR illegal cast; cast through a usize first: `*const i16` as `f32`
+ //~^ ERROR casting `*const i16` as `f32` is invalid
+ //~^^ HELP cast through a usize first
}
fn main() {
let x: Box<Foo(isize)>;
- //~^ ERROR parenthetical notation is only stable when used with the `Fn` family
+ //~^ ERROR parenthetical notation is only stable when used with `Fn`-family
// No errors with these:
let x: Box<Fn(isize)>;
// Test that the `Fn` traits require `()` form without a feature gate.
fn bar1(x: &Fn<(), Output=()>) {
- //~^ ERROR angle-bracket notation is not stable when used with the `Fn` family
+ //~^ ERROR of `Fn`-family traits' type parameters is subject to change
}
fn bar2<T>(x: &T) where T: Fn<()> {
- //~^ ERROR angle-bracket notation is not stable when used with the `Fn` family
+ //~^ ERROR of `Fn`-family traits' type parameters is subject to change
}
fn main() { }
impl Foo for u32 {
fn len(&self) -> u32 { *self }
- //~^ ERROR incompatible type for trait: expected unsafe fn, found normal fn
+ //~^ ERROR method `len` has an incompatible type for trait
+ //~| expected unsafe fn,
+ //~| found normal fn
}
fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:illegal cast
+// error-pattern:casting
#![feature(libc)]
let mut x1 = X { y: [0, 0] };
// This is still an error since we don't allow casts from &mut [T; n] to *mut T.
- let p1: *mut u8 = &mut x1.y as *mut _; //~ ERROR illegal cast
+ let p1: *mut u8 = &mut x1.y as *mut _; //~ ERROR casting
let t1: *mut [u8; 2] = &mut x1.y as *mut _;
let h1: *mut [u8; 2] = &mut x1.y as *mut [u8; 2];
}
mod xx {
extern {
- pub fn strlen(str: *const u8) -> usize; //~ ERROR found rust type `usize`
- pub fn foo(x: isize, y: usize); //~ ERROR found rust type `isize`
- //~^ ERROR found rust type `usize`
+ pub fn strlen(str: *const u8) -> usize; //~ ERROR found Rust type `usize`
+ pub fn foo(x: isize, y: usize); //~ ERROR found Rust type `isize`
+ //~^ ERROR found Rust type `usize`
}
}
// gdb-check:type = f32
// gdb-command:whatis f64
// gdb-check:type = f64
+// gdb-command:whatis fnptr
+// gdb-check:type = [...] (*)([...])
// gdb-command:info functions _yyy
-// gdb-check:[...]![...]_yyy([...])([...]);
+// gdb-check:[...]![...]_yyy([...]);
+// gdb-command:ptype closure_0
+// gdb-check: type = struct closure {
+// gdb-check: <no data fields>
+// gdb-check: }
+// gdb-command:ptype closure_1
+// gdb-check: type = struct closure {
+// gdb-check: bool *__0;
+// gdb-check: }
+// gdb-command:ptype closure_2
+// gdb-check: type = struct closure {
+// gdb-check: bool *__0;
+// gdb-check: isize *__1;
+// gdb-check: }
+
+//
// gdb-command:continue
#![allow(unused_variables)]
let u64: u64 = 64;
let f32: f32 = 2.5;
let f64: f64 = 3.5;
+ let fnptr : fn() = _zzz;
+ let closure_0 = || {};
+ let closure_1 = || { b; };
+ let closure_2 = || { if b { i } else { i }; };
_zzz(); // #break
if 1 == 1 { _yyy(); }
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// This test uses only GDB Python API features which should be available in
-// older versions of GDB too. A more extensive test can be found in
-// gdb-pretty-struct-and-enums.rs
-
// ignore-bitrig
// ignore-windows failing on win32 bot
// ignore-freebsd: gdb package too new
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// ignore-lldb: FIXME(#27089)
+// min-lldb-version: 310
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+// gdb-command:run
+
+// gdb-command:print eight_bytes1
+// gdb-check:$1 = {{RUST$ENUM$DISR = Variant1, __0 = 100}, {RUST$ENUM$DISR = Variant1, __0 = 100}}
+// gdb-command:print four_bytes1
+// gdb-check:$2 = {{RUST$ENUM$DISR = Variant1, __0 = 101}, {RUST$ENUM$DISR = Variant1, __0 = 101}}
+// gdb-command:print two_bytes1
+// gdb-check:$3 = {{RUST$ENUM$DISR = Variant1, __0 = 102}, {RUST$ENUM$DISR = Variant1, __0 = 102}}
+// gdb-command:print one_byte1
+// gdb-check:$4 = {{RUST$ENUM$DISR = Variant1, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant1, __0 = 65 'A'}}
+
+// gdb-command:print eight_bytes2
+// gdb-check:$5 = {{RUST$ENUM$DISR = Variant2, __0 = 100}, {RUST$ENUM$DISR = Variant2, __0 = 100}}
+// gdb-command:print four_bytes2
+// gdb-check:$6 = {{RUST$ENUM$DISR = Variant2, __0 = 101}, {RUST$ENUM$DISR = Variant2, __0 = 101}}
+// gdb-command:print two_bytes2
+// gdb-check:$7 = {{RUST$ENUM$DISR = Variant2, __0 = 102}, {RUST$ENUM$DISR = Variant2, __0 = 102}}
+// gdb-command:print one_byte2
+// gdb-check:$8 = {{RUST$ENUM$DISR = Variant2, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant2, __0 = 65 'A'}}
+
+// gdb-command:continue
+
+// === LLDB TESTS ==================================================================================
+// lldb-command:run
+
+// lldb-command:print eight_bytes1
+// lldb-check:[...]$0 = Variant1(100)
+// lldb-command:print four_bytes1
+// lldb-check:[...]$1 = Variant1(101)
+// lldb-command:print two_bytes1
+// lldb-check:[...]$2 = Variant1(102)
+// lldb-command:print one_byte1
+// lldb-check:[...]$3 = Variant1('A')
+
+// lldb-command:print eight_bytes2
+// lldb-check:[...]$4 = Variant2(100)
+// lldb-command:print four_bytes2
+// lldb-check:[...]$5 = Variant2(101)
+// lldb-command:print two_bytes2
+// lldb-check:[...]$6 = Variant2(102)
+// lldb-command:print one_byte2
+// lldb-check:[...]$7 = Variant2('A')
+
+// lldb-command:continue
+
+#![allow(unused_variables)]
+#![allow(dead_code)]
+#![omit_gdb_pretty_printer_section]
+
+// This test case makes sure that we get correct type descriptions for the enum
+// discriminant of different instantiations of the same generic enum type where,
+// depending on the generic type parameter(s), the discriminant has a
+// different size in memory.
+
+enum Enum<T> {
+ Variant1(T),
+ Variant2(T)
+}
+
+fn main() {
+ // These are ordered for descending size on purpose
+ let eight_bytes1 = Enum::Variant1(100.0f64);
+ let four_bytes1 = Enum::Variant1(101i32);
+ let two_bytes1 = Enum::Variant1(102i16);
+ let one_byte1 = Enum::Variant1(65u8);
+
+ let eight_bytes2 = Enum::Variant2(100.0f64);
+ let four_bytes2 = Enum::Variant2(101i32);
+ let two_bytes2 = Enum::Variant2(102i16);
+ let one_byte2 = Enum::Variant2(65u8);
+
+ zzz(); // #break
+}
+
+fn zzz() { () }
extern
- "C"suffix //~ ERROR ABI spec with a suffix is illegal
+ "C"suffix //~ ERROR ABI spec with a suffix is invalid
fn foo() {}
extern
- "C"suffix //~ ERROR ABI spec with a suffix is illegal
+ "C"suffix //~ ERROR ABI spec with a suffix is invalid
{}
fn main() {
- ""suffix; //~ ERROR str literal with a suffix is illegal
- b""suffix; //~ ERROR binary str literal with a suffix is illegal
- r#""#suffix; //~ ERROR str literal with a suffix is illegal
- br#""#suffix; //~ ERROR binary str literal with a suffix is illegal
- 'a'suffix; //~ ERROR char literal with a suffix is illegal
- b'a'suffix; //~ ERROR byte literal with a suffix is illegal
+ ""suffix; //~ ERROR str literal with a suffix is invalid
+ b""suffix; //~ ERROR binary str literal with a suffix is invalid
+ r#""#suffix; //~ ERROR str literal with a suffix is invalid
+ br#""#suffix; //~ ERROR binary str literal with a suffix is invalid
+ 'a'suffix; //~ ERROR char literal with a suffix is invalid
+ b'a'suffix; //~ ERROR byte literal with a suffix is invalid
- 1234u1024; //~ ERROR illegal width `1024` for integer literal
- 1234i1024; //~ ERROR illegal width `1024` for integer literal
- 1234f1024; //~ ERROR illegal width `1024` for float literal
- 1234.5f1024; //~ ERROR illegal width `1024` for float literal
+ 1234u1024; //~ ERROR invalid width `1024` for integer literal
+ 1234i1024; //~ ERROR invalid width `1024` for integer literal
+ 1234f1024; //~ ERROR invalid width `1024` for float literal
+ 1234.5f1024; //~ ERROR invalid width `1024` for float literal
- 1234suffix; //~ ERROR illegal suffix `suffix` for numeric literal
- 0b101suffix; //~ ERROR illegal suffix `suffix` for numeric literal
- 1.0suffix; //~ ERROR illegal suffix `suffix` for float literal
- 1.0e10suffix; //~ ERROR illegal suffix `suffix` for float literal
+ 1234suffix; //~ ERROR invalid suffix `suffix` for numeric literal
+ 0b101suffix; //~ ERROR invalid suffix `suffix` for numeric literal
+ 1.0suffix; //~ ERROR invalid suffix `suffix` for float literal
+ 1.0e10suffix; //~ ERROR invalid suffix `suffix` for float literal
}
pub fn main() {
b'\f'; //~ ERROR unknown byte escape
- b'\x0Z'; //~ ERROR illegal character in numeric character escape: Z
+ b'\x0Z'; //~ ERROR invalid character in numeric character escape: Z
b' '; //~ ERROR byte constant must be escaped
b'''; //~ ERROR byte constant must be escaped
b'é'; //~ ERROR byte constant must be ASCII
pub fn main() {
b"\f"; //~ ERROR unknown byte escape
- b"\x0Z"; //~ ERROR illegal character in numeric character escape: Z
+ b"\x0Z"; //~ ERROR invalid character in numeric character escape: Z
b"é"; //~ ERROR byte constant must be ASCII
b"a //~ ERROR unterminated double quote byte string
}
// compile-flags: -Z parse-only
-impl Foo; //~ ERROR expected one of `(`, `+`, `..`, `::`, `<`, `for`, `where`, or `{`, found `;`
+impl Foo; //~ ERROR expected one of `(`, `+`, `::`, `<`, `for`, `where`, or `{`, found `;`
//~^ ERROR unicode escape sequences cannot be used as a byte or in a byte string
let _ = b'\u';
- //~^ ERROR unknown byte escape: u
+ //~^ ERROR incorrect unicode escape sequence
+ //~^^ ERROR unicode escape sequences cannot be used as a byte or in a byte string
let _ = b'\x5';
//~^ ERROR numeric character escape is too short
let _ = b'\xxy';
- //~^ ERROR illegal character in numeric character escape: x
- //~^^ ERROR illegal character in numeric character escape: y
+ //~^ ERROR invalid character in numeric character escape: x
+ //~^^ ERROR invalid character in numeric character escape: y
let _ = '\x5';
//~^ ERROR numeric character escape is too short
let _ = '\xxy';
- //~^ ERROR illegal character in numeric character escape: x
- //~^^ ERROR illegal character in numeric character escape: y
+ //~^ ERROR invalid character in numeric character escape: x
+ //~^^ ERROR invalid character in numeric character escape: y
let _ = b"\u{a4a4} \xf \u";
//~^ ERROR unicode escape sequences cannot be used as a byte or in a byte string
- //~^^ ERROR illegal character in numeric character escape:
- //~^^^ ERROR unknown byte escape: u
+ //~^^ ERROR invalid character in numeric character escape:
+ //~^^^ ERROR incorrect unicode escape sequence
+ //~^^^^ ERROR unicode escape sequences cannot be used as a byte or in a byte string
let _ = "\u{ffffff} \xf \u";
- //~^ ERROR illegal unicode character escape
- //~^^ ERROR illegal character in numeric character escape:
+ //~^ ERROR invalid unicode character escape
+ //~^^ ERROR invalid character in numeric character escape:
//~^^^ ERROR form of character escape may only be used with characters in the range [\x00-\x7f]
- //~^^^^ ERROR unknown character escape: u
+ //~^^^^ ERROR incorrect unicode escape sequence
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+impl A .. {} //~ ERROR
+
+fn main() {}
// ignore-windows
// ignore-freebsd
// ignore-openbsd
+// ignore-netbsd
// ignore-bitrig
// compile-flags: -Z parse-only
// compile-flags: -Z parse-only
pub extern
- "invalid-ab_isize" //~ ERROR illegal ABI
+ "invalid-ab_isize" //~ ERROR invalid ABI
fn foo() {}
fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! outer {
+ (#[$outer:meta]) => ()
+}
+
+outer! {
+ //! Inner
+} //~^ ERROR no rules expected the token `!`
+
+fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! inner {
+ (#![$inner:meta]) => ()
+}
+
+inner! {
+ /// Outer
+} //~^ ERROR no rules expected the token `[`
+
+fn main() { }
}
impl Cmp, ToString for S {
-//~^ ERROR: expected one of `(`, `+`, `..`, `::`, `<`, `for`, `where`, or `{`, found `,`
+//~^ ERROR: expected one of `(`, `+`, `::`, `<`, `for`, `where`, or `{`, found `,`
fn eq(&&other: S) { false }
fn to_string(&self) -> String { "hi".to_string() }
}
// compile-flags: -Z parse-only
pub fn main() {
- let s = "\u{d805}"; //~ ERROR illegal unicode character escape
+ let s = "\u{d805}"; //~ ERROR invalid unicode character escape
}
pub fn main() {
let s = "\u{lol}";
- //~^ ERROR illegal character in unicode escape: l
- //~^^ ERROR illegal character in unicode escape: o
- //~^^^ ERROR illegal character in unicode escape: l
+ //~^ ERROR invalid character in unicode escape: l
+ //~^^ ERROR invalid character in unicode escape: o
+ //~^^^ ERROR invalid character in unicode escape: l
}
fn main() {
box (1 + 1)
- //~^ HELP try using `box()` instead:
- //~| SUGGESTION box() (1 + 1)
+ //~^ HELP try using `box ()` instead:
+ //~| SUGGESTION box () (1 + 1)
+ //~| WARN deprecated syntax
; //~ ERROR expected expression, found `;`
}
// compile-flags: -Z parse-only
static s: &'static str =
- r#x"#"x# //~ ERROR only `#` is allowed in raw string delimitation; found illegal character
+ r#x"#"x# //~ ERROR found invalid character; only `#` is allowed in raw string delimitation
;
impl Foo + Owned for Bar {
//~^ ERROR not a trait
-//~^^ ERROR expected one of `..`, `where`, or `{`, found `Bar`
+//~^^ ERROR expected one of `where` or `{`, found `Bar`
}
fn main() { }
-#![feature(no_std)]
+#![feature(no_std, prelude_import)]
#![no_std]
#[prelude_import]
use std::prelude::v1::*;
-include ../tools.mk
-ifndef IS_WINDOWS
EXTRAFLAGS := $(EXTRACFLAGS)
-endif
# FIXME: ignore freebsd
ifneq ($(shell uname),FreeBSD)
-include ../tools.mk
+# FIXME: ignore freebsd
# This is a basic test of LLVM ExecutionEngine functionality using compiled
# Rust code built using the `rustc` crate.
+ifneq ($(shell uname),FreeBSD)
all:
$(RUSTC) test.rs
$(call RUN,test $(RUSTC))
+else
+all:
+
+endif
// except according to those terms.
#![feature(rustc_private)]
+#![feature(libc)]
+extern crate libc;
extern crate rustc;
extern crate rustc_driver;
extern crate rustc_lint;
use rustc::session::build_session;
use rustc_driver::driver;
use rustc_resolve::MakeGlobMap;
+use libc::c_void;
use syntax::diagnostics::registry::Registry;
}
/// Returns a raw pointer to the named function.
- pub fn get_function(&mut self, name: &str) -> Option<*const ()> {
+ pub fn get_function(&mut self, name: &str) -> Option<*const c_void> {
let s = CString::new(name.as_bytes()).unwrap();
for &m in &self.modules {
}
/// Returns a raw pointer to the named global item.
- pub fn get_global(&mut self, name: &str) -> Option<*const ()> {
+ pub fn get_global(&mut self, name: &str) -> Option<*const c_void> {
let s = CString::new(name.as_bytes()).unwrap();
for &m in &self.modules {
TARGET_RPATH_DIR:=$(TARGET_RPATH_DIR):$(TMPDIR)
all:
- $(RUSTC) dylib.rs -o $(TMPDIR)/libdylib.so
- $(RUSTC) main.rs
+ $(RUSTC) dylib.rs -o $(TMPDIR)/libdylib.so -C prefer-dynamic
+ $(RUSTC) main.rs -C prefer-dynamic
$(call RUN,main)
all:
$(RUSTC) -C extra-filename=bar foo.rs -C save-temps
- rm $(TMPDIR)/foobar.o
+ rm $(TMPDIR)/foobar.0.o
rm $(TMPDIR)/$(call BIN,foobar)
$(TMPDIR)/%.dot: %.rs
$(eval $(call FIND_LAST_BLOCK,$<))
- $(RUSTC_LIB) -Z unstable-options --xpretty flowgraph,unlabelled=$(LASTBLOCKNUM_$<) $< -o $@.tmp
+ $(RUSTC_LIB) -Z unstable-options --unpretty flowgraph,unlabelled=$(LASTBLOCKNUM_$<) $< -o $@.tmp
cat $@.tmp | sed -e 's@ (id=[0-9]*)@@g' \
-e 's@\[label=""\]@@' \
-e 's@digraph [a-zA-Z0-9_]* @digraph block @' \
+++ /dev/null
--include ../tools.mk
-
-# This test attempts to use syntax extensions, which are known to be
-# incompatible with stage1 at the moment.
-
-ifeq ($(RUST_BUILD_STAGE),1)
-DOTEST=
-else
-DOTEST=dotest
-endif
-
-all: $(DOTEST)
-
-dotest:
- env
- $(RUSTC) lib.rs
- $(RUSTC) main.rs -C lto
- $(call RUN,main)
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_type = "rlib"]
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-
-extern crate lib;
-#[macro_use] extern crate log;
-
-fn main() {}
#[lang = "stack_exhausted"] fn stack_exhausted() {}
#[lang = "eh_personality"] fn eh_personality() {}
+#[lang = "eh_unwind_resume"] fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
#[lang = "stack_exhausted"] fn stack_exhausted() {}
#[lang = "eh_personality"] fn eh_personality() {}
+#[lang = "eh_unwind_resume"] fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
+
}
struct CharSearcher<'a>(<CharEqPattern as Pattern<'a>>::Searcher);
+
+pub trait Error {
+}
+
+impl Error + 'static {
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ panic!()
+ }
+}
+
+impl Error + 'static + Send {
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <Error + 'static>::is::<T>(self)
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:lto-syntax-extension-lib.rs
+// aux-build:lto-syntax-extension-plugin.rs
+// compile-flags:-C lto
+// ignore-stage1
+// no-prefer-dynamic
+
+#![feature(plugin)]
+#![plugin(lto_syntax_extension_plugin)]
+
+extern crate lto_syntax_extension_lib;
+
+fn main() {
+ lto_syntax_extension_lib::foo();
+}
#![allow(unknown_features)]
#![feature(box_syntax)]
+use std::cell::RefCell;
use std::fmt::Debug;
+use std::rc::Rc;
// Check that coercions apply at the pointer level and don't cause
// rvalue expressions to be unsized. See #20169 for more information.
let _: Box<[isize]> = Box::new([1, 2, 3]);
let _: Box<Fn(isize) -> _> = Box::new(|x| (x as u8));
+ let _: Rc<RefCell<[isize]>> = Rc::new(RefCell::new([1, 2, 3]));
+ let _: Rc<RefCell<FnMut(isize) -> _>> = Rc::new(RefCell::new(|x| (x as u8)));
+
let _: Vec<Box<Fn(isize) -> _>> = vec![
Box::new(|x| (x as u8)),
Box::new(|x| (x as i16 as u8)),
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+
+#![feature(default_type_parameter_fallback)]
+
+use std::marker::PhantomData;
+
+trait Id {
+ type This;
+}
+
+impl<A> Id for A {
+ type This = A;
+}
+
+struct Foo<X: Default = usize, Y = <X as Id>::This> {
+ data: PhantomData<(X, Y)>
+}
+
+impl<X: Default, Y> Foo<X, Y> {
+ fn new() -> Foo<X, Y> {
+ Foo { data: PhantomData }
+ }
+}
+
+fn main() {
+ let foo = Foo::new();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+
+#![feature(default_type_parameter_fallback)]
+use std::marker::PhantomData;
+
+struct Foo<T,U=T> { t: T, data: PhantomData<U> }
+
+fn main() {
+ let foo = Foo { t: 'a', data: PhantomData };
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+struct Foo;
+
+impl Foo {
+ fn method<A:Default=String>(&self) -> A {
+ A::default()
+ }
+}
+
+fn main() {
+ let f = Foo.method();
+ println!("{}", f);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+struct Foo<A>(A);
+
+impl<A:Default=i32> Foo<A> {
+ fn new() -> Foo<A> {
+ Foo(A::default())
+ }
+}
+
+fn main() {
+ let foo = Foo::new();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+
+#![feature(default_type_parameter_fallback)]
+
+use std::marker::PhantomData;
+
+struct DeterministicHasher;
+struct RandomHasher;
+
+
+struct MyHashMap<K, V, H=DeterministicHasher> {
+ data: PhantomData<(K, V, H)>
+}
+
+impl<K, V, H> MyHashMap<K, V, H> {
+ fn new() -> MyHashMap<K, V, H> {
+ MyHashMap { data: PhantomData }
+ }
+}
+
+mod mystd {
+ use super::{MyHashMap, RandomHasher};
+ pub type HashMap<K, V, H=RandomHasher> = MyHashMap<K, V, H>;
+}
+
+fn try_me<H>(hash_map: mystd::HashMap<i32, i32, H>) {}
+
+fn main() {
+ let hash_map = mystd::HashMap::new();
+ try_me(hash_map);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+// Another example from the RFC
+trait Foo { }
+trait Bar { }
+
+impl<T:Bar=usize> Foo for Vec<T> {}
+impl Bar for usize {}
+
+fn takes_foo<F:Foo>(f: F) {}
+
+fn main() {
+ let x = Vec::new(); // x: Vec<$0>
+ takes_foo(x); // adds oblig Vec<$0> : Foo
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+// An example from the RFC
+trait Foo { fn takes_foo(&self); }
+trait Bar { }
+
+impl<T:Bar=usize> Foo for Vec<T> {
+ fn takes_foo(&self) {}
+}
+
+impl Bar for usize {}
+
+fn main() {
+ let x = Vec::new(); // x: Vec<$0>
+ x.takes_foo(); // adds oblig Vec<$0> : Foo
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(default_type_parameter_fallback)]
+
+use std::collections::HashMap;
+
+type IntMap<K=usize> = HashMap<K, usize>;
+
+fn main() {
+ let x = IntMap::new();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+thread_local!(static FOO: Foo = Foo);
+thread_local!(static BAR: Bar = Bar(1));
+thread_local!(static BAZ: Baz = Baz);
+
+struct Foo;
+struct Bar(i32);
+struct Baz;
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ BAR.with(|_| {});
+ }
+}
+
+impl Drop for Bar {
+ fn drop(&mut self) {
+ assert_eq!(self.0, 1);
+ self.0 = 2;
+ BAZ.with(|_| {});
+ assert_eq!(self.0, 2);
+ }
+}
+
+fn main() {
+ std::thread::spawn(|| {
+ FOO.with(|_| {});
+ }).join().unwrap();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test exercises cases where cyclic structure is legal,
+// including when the cycles go through data-structures such
+// as `Vec` or `TypedArena`.
+//
+// The intent is to cover as many such cases as possible, ensuring
+// that if the compiler did not complain circa Rust 1.x (1.2 as of
+// this writing), then it will continue to not complain in the future.
+//
+// Note that while some of the tests are only exercising using the
+// given collection as a "backing store" for a set of nodes that hold
+// the actual cycle (and thus the cycle does not go through the
+// collection itself in such cases), in general we *do* want to make
+// sure to have at least one example exercising a cycle that goes
+// through the collection, for every collection type that supports
+// this.
+
+#![feature(vecmap)]
+
+use std::cell::Cell;
+use std::cmp::Ordering;
+use std::collections::BinaryHeap;
+use std::collections::HashMap;
+use std::collections::LinkedList;
+use std::collections::VecDeque;
+use std::collections::VecMap;
+use std::collections::btree_map::BTreeMap;
+use std::collections::btree_set::BTreeSet;
+use std::hash::{Hash, Hasher};
+
+const PRINT: bool = false;
+
+pub fn main() {
+ let c_orig = ContextData {
+ curr_depth: 0,
+ max_depth: 3,
+ visited: 0,
+ max_visits: 1000,
+ skipped: 0,
+ curr_mark: 0,
+ saw_prev_marked: false,
+ };
+
+ // Cycle 1: { v[0] -> v[1], v[1] -> v[0] };
+ // does not exercise `v` itself
+ let v: Vec<S> = vec![Named::new("s0"),
+ Named::new("s1")];
+ v[0].next.set(Some(&v[1]));
+ v[1].next.set(Some(&v[0]));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 10;
+ assert!(!c.saw_prev_marked);
+ v[0].for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(""); }
+
+ // Cycle 2: { v[0] -> v, v[1] -> v }
+ let v: V = Named::new("v");
+ v.contents[0].set(Some(&v));
+ v.contents[1].set(Some(&v));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 20;
+ assert!(!c.saw_prev_marked);
+ v.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(""); }
+
+ // Cycle 3: { hk0 -> hv0, hv0 -> hk0, hk1 -> hv1, hv1 -> hk1 };
+ // does not exercise `h` itself
+
+ let mut h: HashMap<H,H> = HashMap::new();
+ h.insert(Named::new("hk0"), Named::new("hv0"));
+ h.insert(Named::new("hk1"), Named::new("hv1"));
+ for (key, val) in h.iter() {
+ val.next.set(Some(key));
+ key.next.set(Some(val));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 30;
+ for (key, _) in h.iter() {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ key.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ }
+
+ if PRINT { println!(""); }
+
+ // Cycle 4: { h -> (hmk0,hmv0,hmk1,hmv1), {hmk0,hmv0,hmk1,hmv1} -> h }
+
+ let mut h: HashMap<HM,HM> = HashMap::new();
+ h.insert(Named::new("hmk0"), Named::new("hmv0"));
+ h.insert(Named::new("hmk0"), Named::new("hmv0"));
+ for (key, val) in h.iter() {
+ val.contents.set(Some(&h));
+ key.contents.set(Some(&h));
+ }
+
+ let mut c = c_orig.clone();
+ c.max_depth = 2;
+ c.curr_mark = 40;
+ for (key, _) in h.iter() {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ key.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(""); }
+
+ // Cycle 5: { vd[0] -> vd[1], vd[1] -> vd[0] };
+ // does not exercise vd itself
+ let mut vd: VecDeque<S> = VecDeque::new();
+ vd.push_back(Named::new("d0"));
+ vd.push_back(Named::new("d1"));
+ vd[0].next.set(Some(&vd[1]));
+ vd[1].next.set(Some(&vd[0]));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 50;
+ assert!(!c.saw_prev_marked);
+ vd[0].for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(""); }
+
+ // Cycle 6: { vd -> (vd0, vd1), {vd0, vd1} -> vd }
+ let mut vd: VecDeque<VD> = VecDeque::new();
+ vd.push_back(Named::new("vd0"));
+ vd.push_back(Named::new("vd1"));
+ vd[0].contents.set(Some(&vd));
+ vd[1].contents.set(Some(&vd));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 60;
+ assert!(!c.saw_prev_marked);
+ vd[0].for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(""); }
+
+ // Cycle 7: { vm -> (vm0, vm1), {vm0, vm1} -> vm }
+ let mut vm: VecMap<VM> = VecMap::new();
+ vm.insert(0, Named::new("vm0"));
+ vm.insert(1, Named::new("vm1"));
+ vm[0].contents.set(Some(&vm));
+ vm[1].contents.set(Some(&vm));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 70;
+ assert!(!c.saw_prev_marked);
+ vm[0].for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(""); }
+
+ // Cycle 8: { ll -> (ll0, ll1), {ll0, ll1} -> ll }
+ let mut ll: LinkedList<LL> = LinkedList::new();
+ ll.push_back(Named::new("ll0"));
+ ll.push_back(Named::new("ll1"));
+ for e in &ll {
+ e.contents.set(Some(&ll));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 80;
+ for e in &ll {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ e.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(""); }
+
+ // Cycle 9: { bh -> (bh0, bh1), {bh0, bh1} -> bh }
+ let mut bh: BinaryHeap<BH> = BinaryHeap::new();
+ bh.push(Named::new("bh0"));
+ bh.push(Named::new("bh1"));
+ for b in bh.iter() {
+ b.contents.set(Some(&bh));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 90;
+ for b in &bh {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ b.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(""); }
+
+ // Cycle 10: { btm -> (btk0, btv1), {bt0, bt1} -> btm }
+ let mut btm: BTreeMap<BTM, BTM> = BTreeMap::new();
+ btm.insert(Named::new("btk0"), Named::new("btv0"));
+ btm.insert(Named::new("btk1"), Named::new("btv1"));
+ for (k, v) in btm.iter() {
+ k.contents.set(Some(&btm));
+ v.contents.set(Some(&btm));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 100;
+ for (k, _) in &btm {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ k.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(""); }
+
+ // Cycle 10: { bts -> (bts0, bts1), {bts0, bts1} -> btm }
+ let mut bts: BTreeSet<BTS> = BTreeSet::new();
+ bts.insert(Named::new("bts0"));
+ bts.insert(Named::new("bts1"));
+ for v in bts.iter() {
+ v.contents.set(Some(&bts));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 100;
+ for b in &bts {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ b.for_each_child(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+}
+
+trait Named {
+ fn new(&'static str) -> Self;
+ fn name(&self) -> &str;
+}
+
+trait Marked<M> {
+ fn mark(&self) -> M;
+ fn set_mark(&self, mark: M);
+}
+
+struct S<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ next: Cell<Option<&'a S<'a>>>,
+}
+
+impl<'a> Named for S<'a> {
+ fn new<'b>(name: &'static str) -> S<'b> {
+ S { name: name, mark: Cell::new(0), next: Cell::new(None) }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for S<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct V<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Vec<Cell<Option<&'a V<'a>>>>,
+}
+
+impl<'a> Named for V<'a> {
+ fn new<'b>(name: &'static str) -> V<'b> {
+ V { name: name,
+ mark: Cell::new(0),
+ contents: vec![Cell::new(None), Cell::new(None)]
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for V<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+#[derive(Eq)]
+struct H<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ next: Cell<Option<&'a H<'a>>>,
+}
+
+impl<'a> Named for H<'a> {
+ fn new<'b>(name: &'static str) -> H<'b> {
+ H { name: name, mark: Cell::new(0), next: Cell::new(None) }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for H<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> PartialEq for H<'a> {
+ fn eq(&self, rhs: &H<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> Hash for H<'a> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.name.hash(state)
+ }
+}
+
+#[derive(Eq)]
+struct HM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a HashMap<HM<'a>, HM<'a>>>>,
+}
+
+impl<'a> Named for HM<'a> {
+ fn new<'b>(name: &'static str) -> HM<'b> {
+ HM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for HM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> PartialEq for HM<'a> {
+ fn eq(&self, rhs: &HM<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> Hash for HM<'a> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.name.hash(state)
+ }
+}
+
+
+struct VD<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a VecDeque<VD<'a>>>>,
+}
+
+impl<'a> Named for VD<'a> {
+ fn new<'b>(name: &'static str) -> VD<'b> {
+ VD { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for VD<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct VM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a VecMap<VM<'a>>>>,
+}
+
+impl<'a> Named for VM<'a> {
+ fn new<'b>(name: &'static str) -> VM<'b> {
+ VM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for VM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct LL<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a LinkedList<LL<'a>>>>,
+}
+
+impl<'a> Named for LL<'a> {
+ fn new<'b>(name: &'static str) -> LL<'b> {
+ LL { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for LL<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct BH<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BinaryHeap<BH<'a>>>>,
+}
+
+impl<'a> Named for BH<'a> {
+ fn new<'b>(name: &'static str) -> BH<'b> {
+ BH { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BH<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BH<'a> { }
+
+impl<'a> PartialEq for BH<'a> {
+ fn eq(&self, rhs: &BH<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BH<'a> {
+ fn partial_cmp(&self, rhs: &BH<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BH<'a> {
+ fn cmp(&self, rhs: &BH<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+struct BTM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BTreeMap<BTM<'a>, BTM<'a>>>>,
+}
+
+impl<'a> Named for BTM<'a> {
+ fn new<'b>(name: &'static str) -> BTM<'b> {
+ BTM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BTM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BTM<'a> { }
+
+impl<'a> PartialEq for BTM<'a> {
+ fn eq(&self, rhs: &BTM<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BTM<'a> {
+ fn partial_cmp(&self, rhs: &BTM<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BTM<'a> {
+ fn cmp(&self, rhs: &BTM<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+struct BTS<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BTreeSet<BTS<'a>>>>,
+}
+
+impl<'a> Named for BTS<'a> {
+ fn new<'b>(name: &'static str) -> BTS<'b> {
+ BTS { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BTS<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BTS<'a> { }
+
+impl<'a> PartialEq for BTS<'a> {
+ fn eq(&self, rhs: &BTS<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BTS<'a> {
+ fn partial_cmp(&self, rhs: &BTS<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BTS<'a> {
+ fn cmp(&self, rhs: &BTS<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+
+trait Context {
+ fn should_act(&self) -> bool;
+ fn increase_visited(&mut self);
+ fn increase_skipped(&mut self);
+ fn increase_depth(&mut self);
+ fn decrease_depth(&mut self);
+}
+
+trait PrePost<T> {
+ fn pre(&mut self, &T);
+ fn post(&mut self, &T);
+ fn hit_limit(&mut self, &T);
+}
+
+trait Children<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized;
+
+ fn descend_into_self<C>(&self, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ context.pre(self);
+ if context.should_act() {
+ context.increase_visited();
+ context.increase_depth();
+ self.for_each_child(context);
+ context.decrease_depth();
+ } else {
+ context.hit_limit(self);
+ context.increase_skipped();
+ }
+ context.post(self);
+ }
+
+ fn descend<'b, C>(&self, c: &Cell<Option<&'b Self>>, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ if let Some(r) = c.get() {
+ r.descend_into_self(context);
+ }
+ }
+}
+
+impl<'a> Children<'a> for S<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<S<'a>>
+ {
+ self.descend(&self.next, context);
+ }
+}
+
+impl<'a> Children<'a> for V<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<V<'a>>
+ {
+ for r in &self.contents {
+ self.descend(r, context);
+ }
+ }
+}
+
+impl<'a> Children<'a> for H<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<H<'a>>
+ {
+ self.descend(&self.next, context);
+ }
+}
+
+impl<'a> Children<'a> for HM<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<HM<'a>>
+ {
+ if let Some(ref hm) = self.contents.get() {
+ for (k, v) in hm.iter() {
+ for r in &[k, v] {
+ r.descend_into_self(context);
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for VD<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<VD<'a>>
+ {
+ if let Some(ref vd) = self.contents.get() {
+ for r in vd.iter() {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for VM<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<VM<'a>>
+ {
+ if let Some(ref vd) = self.contents.get() {
+ for (_idx, r) in vd.iter() {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for LL<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<LL<'a>>
+ {
+ if let Some(ref ll) = self.contents.get() {
+ for r in ll.iter() {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BH<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<BH<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for r in bh.iter() {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BTM<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<BTM<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for (k, v) in bh.iter() {
+ for r in &[k, v] {
+ r.descend_into_self(context);
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BTS<'a> {
+ fn for_each_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<BTS<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for r in bh.iter() {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+struct ContextData {
+ curr_depth: usize,
+ max_depth: usize,
+ visited: usize,
+ max_visits: usize,
+ skipped: usize,
+ curr_mark: u32,
+ saw_prev_marked: bool,
+}
+
+impl Context for ContextData {
+ fn should_act(&self) -> bool {
+ self.curr_depth < self.max_depth && self.visited < self.max_visits
+ }
+ fn increase_visited(&mut self) { self.visited += 1; }
+ fn increase_skipped(&mut self) { self.skipped += 1; }
+ fn increase_depth(&mut self) { self.curr_depth += 1; }
+ fn decrease_depth(&mut self) { self.curr_depth -= 1; }
+}
+
+impl<T:Named+Marked<u32>> PrePost<T> for ContextData {
+ fn pre(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("prev {}", t.name()); }
+ if t.mark() == self.curr_mark {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("(probably previously marked)"); }
+ self.saw_prev_marked = true;
+ }
+ t.set_mark(self.curr_mark);
+ }
+ fn post(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("post {}", t.name()); }
+ }
+ fn hit_limit(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("LIMIT {}", t.name()); }
+ }
+}
// Test a very simple custom DST coercion.
-#![feature(core)]
+#![feature(core, rc_weak)]
use std::cell::RefCell;
-use std::rc::Rc;
+use std::rc::{Rc, Weak};
trait Baz {
fn get(&self) -> i32;
let b: Rc<Baz> = a.clone();
assert_eq!(b.get(), 42);
+ let c: Weak<i32> = a.downgrade();
+ let d: Weak<Baz> = c.clone();
+
let _c = b.clone();
let a: Rc<RefCell<i32>> = Rc::new(RefCell::new(42));
let b: Rc<RefCell<Baz>> = a.clone();
assert_eq!(b.borrow().get(), 42);
+ let c: Weak<RefCell<Baz>> = a.downgrade();
}
// Test a foreign function that accepts empty struct.
// pretty-expanded FIXME #23616
+// ignore-msvc
struct TwoU8s {
one: u8,
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that negating unsigned integers is gated by `negate_unsigned` feature
+// gate
+
+struct S;
+impl std::ops::Neg for S {
+ type Output = u32;
+ fn neg(self) -> u32 { 0 }
+}
+
+const _MAX: usize = -1;
+//~^ WARN unary negation of unsigned integers will be feature gated in the future
+
+fn main() {
+ let a = -1;
+ //~^ WARN unary negation of unsigned integers will be feature gated in the future
+ let _b : u8 = a; // for infering variable a to u8.
+
+ -a;
+ //~^ WARN unary negation of unsigned integers will be feature gated in the future
+
+ let _d = -1u8;
+ //~^ WARN unary negation of unsigned integers will be feature gated in the future
+
+ for _ in -10..10u8 {}
+ //~^ WARN unary negation of unsigned integers will be feature gated in the future
+
+ -S; // should not trigger the gate; issue 26840
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test what happens when a HR obligation is applied to an impl with
+// "outlives" bounds. Currently we're pretty conservative here; this
+// will probably improve in time.
+
+trait Foo<X> {
+ fn foo(&self, x: X) { }
+}
+
+fn want_foo<T>()
+ where T : for<'a> Foo<&'a isize>
+{
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Expressed as a where clause
+
+struct SomeStruct<X> {
+ x: X
+}
+
+impl<'a,X> Foo<&'a isize> for SomeStruct<X>
+ where X : 'a
+{
+}
+
+fn one() {
+ want_foo::<SomeStruct<usize>>();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Expressed as shorthand
+
+struct AnotherStruct<X> {
+ x: X
+}
+
+impl<'a,X:'a> Foo<&'a isize> for AnotherStruct<X>
+{
+}
+
+fn two() {
+ want_foo::<AnotherStruct<usize>>();
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that when we match a trait reference like `Foo<A>: Foo<_#0t>`,
+// we unify with `_#0t` with `A`. In this code, if we failed to do
+// that, then you get an unconstrained type-variable in `call`.
+//
+// Also serves as a regression test for issue #26952, though the test
+// was derived from another reported regression with the same cause.
+
+use std::marker::PhantomData;
+
+trait Trait<A> { fn foo(&self); }
+
+struct Type<A> { a: PhantomData<A> }
+
+fn as_trait<A>(t: &Type<A>) -> &Trait<A> { loop { } }
+
+fn want<A,T:Trait<A>+?Sized>(t: &T) { }
+
+fn call<A>(p: Type<A>) {
+ let q = as_trait(&p);
+ want(q); // parameter A to `want` *would* be unconstrained
+}
+
+fn main() { }
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod m {
#[main]
// except according to those terms.
-#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(intrinsics)]
-// needed to check for drop fill word.
-#![feature(filling_drop)]
-
-use std::mem::{self, transmute};
mod rusti {
extern "rust-intrinsic" {
pub fn init<T>() -> T;
- pub fn move_val_init<T>(dst: &mut T, src: T);
+ pub fn move_val_init<T>(dst: *mut T, src: T);
}
}
pub fn main() {
unsafe {
- let x: Box<_> = box 1;
- let mut y = rusti::init();
- let mut z: *const usize = transmute(&x);
+ // sanity check
+ check_drops_state(0, None);
+
+ let mut x: Box<D> = box D(1);
+ assert_eq!(x.0, 1);
+
+ // A normal overwrite, to demonstrate `check_drops_state`.
+ x = box D(2);
+
+ // At this point, one destructor has run, because the
+ // overwrite of `x` drops its initial value.
+ check_drops_state(1, Some(1));
+
+ let mut y: Box<D> = rusti::init();
+
+ // An initial binding does not overwrite anything.
+ check_drops_state(1, Some(1));
+
+ // Since `y` has been initialized via the `init` intrinsic, it
+ // would be unsound to directly overwrite its value via normal
+ // assignment.
+ //
+ // The code currently generated by the compiler is overly
+ // accepting, however, in that it will check if `y` is itself
+ // null and thus avoid the unsound action of attempting to
+ // free null. In other words, if we were to do a normal
+ // assignment like `y = box D(4);` here, it probably would not
+ // crash today. But the plan is that it may well crash in the
+ // future, (I believe).
+
+ // `x` is moved here; the manner in which this is tracked by the
+ // compiler is hidden.
rusti::move_val_init(&mut y, x);
- assert_eq!(*y, 1);
- // `x` is nulled out, not directly visible
- assert_eq!(*z, mem::POST_DROP_USIZE);
+
+ // In particular, it may be tracked via a drop-flag embedded
+ // in the value, or via a null pointer, or via
+ // mem::POST_DROP_USIZE, or (most preferably) via a
+ // stack-local drop flag.
+ //
+ // (This test used to build-in knowledge of how it was
+ // tracked, and check that the underlying stack slot had been
+ // set to `mem::POST_DROP_USIZE`.)
+
+ // But what we *can* observe is how many times the destructor
+ // for `D` is invoked, and what the last value we saw was
+ // during such a destructor call. We do so after the end of
+ // this scope.
+
+ assert_eq!(y.0, 2);
+ y.0 = 3;
+ assert_eq!(y.0, 3);
+
+ check_drops_state(1, Some(1));
+ }
+
+ check_drops_state(2, Some(3));
+}
+
+static mut NUM_DROPS: i32 = 0;
+static mut LAST_DROPPED: Option<i32> = None;
+
+fn check_drops_state(num_drops: i32, last_dropped: Option<i32>) {
+ unsafe {
+ assert_eq!(NUM_DROPS, num_drops);
+ assert_eq!(LAST_DROPPED, last_dropped);
+ }
+}
+
+struct D(i32);
+impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ NUM_DROPS += 1;
+ LAST_DROPPED = Some(self.0);
+ }
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn works<T>(x: T) -> Vec<T> { vec![x] }
+
+fn also_works<T: Clone>(x: T) -> Vec<T> { vec![x] }
+
+fn main() {
+ let _: Vec<usize> = works(0);
+ let _: Vec<usize> = also_works(0);
+ let _ = works(0);
+ let _ = also_works(0);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const JSVAL_TAG_CLEAR: u32 = 0xFFFFFF80;
+const JSVAL_TYPE_INT32: u8 = 0x01;
+const JSVAL_TYPE_UNDEFINED: u8 = 0x02;
+#[repr(u32)]
+enum ValueTag {
+ JSVAL_TAG_INT32 = JSVAL_TAG_CLEAR | (JSVAL_TYPE_INT32 as u32),
+ JSVAL_TAG_UNDEFINED = JSVAL_TAG_CLEAR | (JSVAL_TYPE_UNDEFINED as u32),
+}
+
+fn main() {
+ let _ = ValueTag::JSVAL_TAG_INT32;
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo: Sized {
+ fn foo(self) {}
+}
+
+trait Bar: Sized {
+ fn bar(self) {}
+}
+
+struct S;
+
+impl<'l> Foo for &'l S {}
+
+impl<T: Foo> Bar for T {}
+
+fn main() {
+ let s = S;
+ s.foo();
+ (&s).bar();
+ s.bar();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-14344-1.rs
+// aux-build:issue-14344-2.rs
+
+extern crate issue_14344_1;
+extern crate issue_14344_2;
+
+fn main() {
+ issue_14344_1::foo();
+ issue_14344_2::bar();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[derive(Debug)]
+struct Matrix4<S>(S);
+trait POrd<S> {}
+
+fn translate<S: POrd<S>>(s: S) -> Matrix4<S> { Matrix4(s) }
+
+impl POrd<f32> for f32 {}
+impl POrd<f64> for f64 {}
+
+fn main() {
+ let x = 1.0;
+ let m : Matrix4<f32> = translate(x);
+ println!("m: {:?}", m);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait SomeTrait {}
+struct Meow;
+impl SomeTrait for Meow {}
+
+struct Foo<'a> {
+ x: &'a SomeTrait,
+ y: &'a SomeTrait,
+}
+
+impl<'a> Foo<'a> {
+ pub fn new<'b>(x: &'b SomeTrait, y: &'b SomeTrait) -> Foo<'b> { Foo { x: x, y: y } }
+}
+
+fn main() {
+ let r = Meow;
+ let s = Meow;
+ let q = Foo::new(&r as &SomeTrait, &s as &SomeTrait);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const count : usize = 2 as usize;
+fn main() {
+ let larger : [usize; count*2];
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(reflect_marker)]
+
+use std::any::TypeId;
+use std::marker::Reflect;
+use std::rc::Rc;
+
+type Fp<T> = Rc<T>;
+
+struct Engine;
+
+trait Component: 'static + Reflect {}
+impl Component for Engine {}
+
+trait Env {
+ fn get_component_type_id(&self, type_id: TypeId) -> Option<Fp<Component>>;
+}
+
+impl<'a> Env+'a {
+ fn get_component<T: Component>(&self) -> Option<Fp<T>> {
+ let x = self.get_component_type_id(TypeId::of::<T>());
+ None
+ }
+}
+
+trait Figment {
+ fn init(&mut self, env: &Env);
+}
+
+struct MyFigment;
+
+impl Figment for MyFigment {
+ fn init(&mut self, env: &Env) {
+ let engine = env.get_component::<Engine>();
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+#![feature(core)]
+
+struct Fun<F>(F);
+
+impl<F, T> FnOnce<(T,)> for Fun<F> where F: Fn(T) -> T {
+ type Output = T;
+
+ extern "rust-call" fn call_once(self, (t,): (T,)) -> T {
+ (self.0)(t)
+ }
+}
+
+fn main() {
+ let fun = Fun(|i: isize| i * 2);
+ println!("{}", fun(3));
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(static_recursion)]
+
+// test that autoderef of a type like this does not
+// cause compiler to loop. Note that no instances
+// of such a type could ever be constructed.
+
+struct S {
+ x: X,
+ to_str: (),
+}
+
+struct X(Box<S>);
+
+fn main() {}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(static_recursion)]
+
+// test that autoderef of a type like this does not
+// cause compiler to loop. Note that no instances
+// of such a type could ever be constructed.
+
+struct T(Box<T>);
+
+trait ToStr2 {
+ fn my_to_string(&self) -> String;
+}
+
+impl ToStr2 for T {
+ fn my_to_string(&self) -> String { "t".to_string() }
+}
+
+#[allow(dead_code)]
+fn new_t(x: T) {
+ x.my_to_string();
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Subscriber {
+ type Input;
+}
+
+pub trait Processor: Subscriber<Input = <Self as Processor>::Input> {
+ type Input;
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Trait where Self::Out: std::fmt::Display {
+ type Out;
+}
+
+fn main() {}
#[link_name="lgamma_r"]
pub fn lgamma(n: c_double, sign: &mut c_int) -> c_double;
#[cfg(windows)]
- #[link_name="__lgamma_r"]
+ #[link_name="lgamma"]
pub fn lgamma(n: c_double, sign: &mut c_int) -> c_double;
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait A<T: A<T>> {}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(box_syntax)]
+
+struct Node<T: ?Sized>(T);
+
+fn main() {
+ let x: Box<Node<[isize]>> = box Node([]);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Registry<'a> {
+ listener: &'a mut (),
+}
+
+pub struct Listener<'a> {
+ pub announce: Option<Box<FnMut(&mut Registry) + 'a>>,
+ pub remove: Option<Box<FnMut(&mut Registry) + 'a>>,
+}
+
+impl<'a> Drop for Registry<'a> {
+ fn drop(&mut self) {}
+}
+
+fn main() {
+ let mut registry_listener = Listener {
+ announce: None,
+ remove: None,
+ };
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-25185-1.rs
+// aux-build:issue-25185-2.rs
+
+extern crate issue_25185_2;
+
+fn main() {
+ let x = unsafe {
+ issue_25185_2::rust_dbg_extern_identity_u32(1)
+ };
+ assert_eq!(x, 1);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ || {
+ 'label: loop {
+ }
+ };
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+enum FooMode {
+ Check = 0x1001,
+}
+
+enum BarMode {
+ Check = 0x2001,
+}
+
+enum Mode {
+ Foo(FooMode),
+ Bar(BarMode),
+}
+
+#[inline(never)]
+fn broken(mode: &Mode) -> u32 {
+ for _ in 0..1 {
+ if let Mode::Foo(FooMode::Check) = *mode { return 17 }
+ if let Mode::Bar(BarMode::Check) = *mode { return 19 }
+ }
+ return 42;
+}
+
+fn main() {
+ let mode = Mode::Bar(BarMode::Check);
+ assert_eq!(broken(&mode), 19);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-g
+
+fn helper<F: FnOnce(usize) -> bool>(_f: F) {
+ print!("");
+}
+
+fn main() {
+ let cond = 0;
+ helper(|v| v == cond)
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Parser<'a>(Box<FnMut(Parser) + 'a>);
+
+fn main() {
+ let _x = Parser(Box::new(|_|{}));
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(unused_attributes)]
+
+#[repr(C)]
+#[repr(packed)]
+pub struct Foo;
+
+#[repr(packed)]
+#[repr(C)]
+pub struct Bar;
+
+fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+
+// Check that the destructors of simple enums are run on unwinding
+
+use std::sync::atomic::{Ordering, AtomicUsize};
+use std::thread;
+
+static LOG: AtomicUsize = AtomicUsize::new(0);
+
+enum WithDtor { Val }
+impl Drop for WithDtor {
+ fn drop(&mut self) {
+ LOG.store(LOG.load(Ordering::SeqCst)+1,Ordering::SeqCst);
+ }
+}
+
+pub fn main() {
+ thread::spawn(move|| {
+ let _e: WithDtor = WithDtor::Val;
+ panic!("fail");
+ }).join().unwrap_err();
+
+ assert_eq!(LOG.load(Ordering::SeqCst), 1);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Wrapper<'a, T: ?Sized>(&'a mut i32, T);
+
+impl<'a, T: ?Sized> Drop for Wrapper<'a, T> {
+ fn drop(&mut self) {
+ *self.0 = 432;
+ }
+}
+
+fn main() {
+ let mut x = 0;
+ {
+ let wrapper = Box::new(Wrapper(&mut x, 123));
+ let _: Box<Wrapper<Send>> = wrapper;
+ }
+ assert_eq!(432, x)
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo<'a> {
+ fn bar<'b>(&self, x: &'b u8) -> u8 where 'a: 'b { *x+7 }
+}
+
+pub struct FooBar;
+impl Foo<'static> for FooBar {}
+fn test(foobar: FooBar) -> Box<Foo<'static>> {
+ Box::new(foobar)
+}
+
+fn main() {
+ assert_eq!(test(FooBar).bar(&4), 11);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct NonOrd;
+
+fn main() {
+ let _: Box<Iterator<Item = _>> = Box::new(vec![NonOrd].into_iter());
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let mut c = (1, "".to_owned());
+ match c {
+ c2 => {
+ c.0 = 2;
+ assert_eq!(c2.0, 1);
+ }
+ }
+}
p: Option<*const packet<T>>,
}
- impl<T:Send> Drop for send_packet<T> {
+ impl<T:Send> Drop for send_packet<T> {
fn drop(&mut self) {
unsafe {
if self.p != None {
p: Option<*const packet<T>>,
}
- impl<T:Send> Drop for recv_packet<T> {
+ impl<T:Send> Drop for recv_packet<T> {
fn drop(&mut self) {
unsafe {
if self.p != None {
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+struct NoisyDrop<T: fmt::Debug>(T);
+impl<T: fmt::Debug> Drop for NoisyDrop<T> {
+ fn drop(&mut self) {}
+}
+
+struct Bar<T: fmt::Debug>([*const NoisyDrop<T>; 2]);
+
+fn fine() {
+ let (u,b);
+ u = vec![43];
+ b = Bar([&NoisyDrop(&u), &NoisyDrop(&u)]);
+}
+
+struct Bar2<T: fmt::Debug>(*const NoisyDrop<T>, *const NoisyDrop<T>);
+
+fn lolwut() {
+ let (u,v);
+ u = vec![43];
+ v = Bar2(&NoisyDrop(&u), &NoisyDrop(&u));
+}
+
+fn main() { fine(); lolwut() }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ const _C: &'static Fn() = &||{};
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Trait<'a> {
+ type T;
+ type U;
+ fn foo(&self, s: &'a ()) -> &'a ();
+}
+
+impl<'a> Trait<'a> for () {
+ type T = &'a ();
+ type U = Self::T;
+
+ fn foo(&self, s: &'a ()) -> &'a () {
+ let t: Self::T = s; t
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-pretty #27582
+
+// Check that when a `let`-binding occurs in a loop, its associated
+// drop-flag is reinitialized (to indicate "needs-drop" at the end of
+// the owning variable's scope).
+
+struct A<'a>(&'a mut i32);
+
+impl<'a> Drop for A<'a> {
+ fn drop(&mut self) {
+ *self.0 += 1;
+ }
+}
+
+fn main() {
+ let mut cnt = 0;
+ for i in 0..2 {
+ let a = A(&mut cnt);
+ if i == 1 { // Note that
+ break; // both this break
+ } // and also
+ drop(a); // this move of `a`
+ // are necessary to expose the bug
+ }
+ assert_eq!(cnt, 2);
+}
// If there is a canonical constructor it is typically named the same as the type.
// Other constructor sort of functions are typically named from_foo, from_bar, etc.
fn AsciiArt(width: usize, height: usize, fill: char) -> AsciiArt {
- // Use an anonymous function to build a vector of vectors containing
- // blank characters for each position in our canvas.
- let mut lines = Vec::new();
- for _ in 0..height {
- lines.push(repeat('.').take(width).collect::<Vec<_>>());
- }
+ // Build a vector of vectors containing blank characters for each position in
+ // our canvas.
+ let lines = vec![vec!['.'; width]; height];
// Rust code often returns values by omitting the trailing semi-colon
// instead of using an explicit return statement.
.collect::<Vec<String>>();
// Concatenate the lines together using a new-line.
- write!(f, "{}", lines.connect("\n"))
+ write!(f, "{}", lines.join("\n"))
}
}
+++ /dev/null
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:lang-item-public.rs
-// ignore-android
-
-#![feature(lang_items, start, no_std)]
-#![no_std]
-
-extern crate lang_item_public as lang_lib;
-
-#[cfg(target_os = "linux")]
-#[link(name = "c")]
-extern {}
-
-#[cfg(target_os = "android")]
-#[link(name = "c")]
-extern {}
-
-#[cfg(target_os = "freebsd")]
-#[link(name = "execinfo")]
-extern {}
-
-#[cfg(target_os = "freebsd")]
-#[link(name = "c")]
-extern {}
-
-#[cfg(target_os = "dragonfly")]
-#[link(name = "c")]
-extern {}
-
-#[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
-#[link(name = "c")]
-extern {}
-
-#[cfg(target_os = "macos")]
-#[link(name = "System")]
-extern {}
-
-#[start]
-fn main(_: isize, _: *const *const u8) -> isize {
- 1_isize % 1_isize
-}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cap-lints allow
+
+#![deny(warnings)]
+
+use std::option;
+
+fn main() {}
+
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! doc {
+ (
+ $(#[$outer:meta])*
+ mod $i:ident {
+ $(#![$inner:meta])*
+ }
+ ) =>
+ (
+ $(#[$outer])*
+ pub mod $i {
+ $(#![$inner])*
+ }
+ )
+}
+
+doc! {
+ /// Outer doc
+ mod Foo {
+ //! Inner doc
+ }
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// When expanding a macro, documentation attributes (including documentation comments) must be
+// passed "as is" without being parsed. Otherwise, some text will be incorrectly interpreted as
+// escape sequences, leading to an ICE.
+//
+// Related issues: #25929, #25943
+
+macro_rules! homura {
+ (#[$x:meta]) => ()
+}
+
+homura! {
+ /// \madoka \x41
+}
+
+fn main() { }
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
-#![allow(warnings)]
+#![allow(dead_code, unused_variables)]
#![feature(box_syntax, box_heap)]
+#![feature(placement_in_syntax)]
+
+// during check-pretty, the expanded code needs to opt into these
+// features
+#![feature(placement_new_protocol, core_intrinsics)]
// Tests that the new `box` syntax works with unique pointers.
let y: Box<isize> = box 2;
let b: Box<isize> = box()(1 + 2);
let c = box()(3 + 4);
+
+ let s: Box<Structure> = box Structure {
+ x: 3,
+ y: 4,
+ };
}
ss.u = t;
}
-fn c<'a>(t: &'a Box<Test+'a>, mut ss: SomeStruct<'a>) {
- ss.t = t;
-}
+// see also compile-fail/object-lifetime-default-from-rptr-box-error.rs
fn d<'a>(t: &'a Box<Test+'a>, mut ss: SomeStruct<'a>) {
ss.u = t;
ss.u = t;
}
-fn c<'a>(t: &'a MyBox<Test+'a>, mut ss: SomeStruct<'a>) {
- ss.t = t;
-}
+// see also compile-fail/object-lifetime-default-from-rptr-box-error.rs
fn d<'a>(t: &'a MyBox<Test+'a>, mut ss: SomeStruct<'a>) {
ss.u = t;
// Tests parallel codegen - this can fail if the symbol for the anonymous
// closure in `sum` pollutes the second codegen unit from the first.
+// ignore-bitrig
// compile-flags: -C codegen_units=2
#![feature(core)]
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code, unused_variables)]
+#![feature(box_heap)]
+#![feature(placement_in_syntax)]
+
+// Tests that the new `in` syntax works with unique pointers.
+//
+// Compare with new-box-syntax.rs
+
+use std::boxed::{Box, HEAP};
+
+struct Structure {
+ x: isize,
+ y: isize,
+}
+
+pub fn main() {
+ let x: Box<isize> = in HEAP { 2 };
+ let b: Box<isize> = in HEAP { 1 + 2 };
+ let c = in HEAP { 3 + 4 };
+
+ let s: Box<Structure> = in HEAP {
+ Structure {
+ x: 3,
+ y: 4,
+ }
+ };
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Basic sanity check for `push_unsafe!(EXPR)` and
+// `pop_unsafe!(EXPR)`: we can call unsafe code when there are a
+// positive number of pushes in the stack, or if we are within a
+// normal `unsafe` block, but otherwise cannot.
+
+// ignore-pretty because the `push_unsafe!` and `pop_unsafe!` macros
+// are not integrated with the pretty-printer.
+
+#![feature(pushpop_unsafe)]
+
+static mut X: i32 = 0;
+
+unsafe fn f() { X += 1; return; }
+fn g() { unsafe { X += 1_000; } return; }
+
+fn check_reset_x(x: i32) -> bool {
+ #![allow(unused_parens)] // dont you judge my style choices!
+ unsafe {
+ let ret = (x == X);
+ X = 0;
+ ret
+ }
+}
+
+fn main() {
+ // double-check test infrastructure
+ assert!(check_reset_x(0));
+ unsafe { f(); }
+ assert!(check_reset_x(1));
+ assert!(check_reset_x(0));
+ { g(); }
+ assert!(check_reset_x(1000));
+ assert!(check_reset_x(0));
+ unsafe { f(); g(); g(); }
+ assert!(check_reset_x(2001));
+
+ push_unsafe!( { f(); pop_unsafe!( g() ) } );
+ assert!(check_reset_x(1_001));
+ push_unsafe!( { g(); pop_unsafe!( unsafe { f(); f(); } ) } );
+ assert!(check_reset_x(1_002));
+
+ unsafe { push_unsafe!( { f(); pop_unsafe!( { f(); f(); } ) } ); }
+ assert!(check_reset_x(3));
+ push_unsafe!( { f(); push_unsafe!( { pop_unsafe!( { f(); f(); f(); } ) } ); } );
+ assert!(check_reset_x(4));
+}
use alloc::heap;
use std::ptr;
-use std::iter::repeat;
fn main() {
unsafe {
unsafe fn test_triangle() -> bool {
static COUNT : usize = 16;
- let mut ascend = repeat(ptr::null_mut()).take(COUNT).collect::<Vec<_>>();
+ let mut ascend = vec![ptr::null_mut(); COUNT];
let ascend = &mut *ascend;
static ALIGN : usize = 1;
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly",
+ target_os = "netbsd",
target_os = "openbsd"))]
mod m {
#[cfg(target_arch = "x86")]
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn invariant_id<'a,'b>(t: &'b mut &'static ()) -> &'b mut &'a ()
+ where 'a: 'static { t }
+fn static_id<'a>(t: &'a ()) -> &'static ()
+ where 'a: 'static { t }
+fn static_id_indirect<'a,'b>(t: &'a ()) -> &'static ()
+ where 'a: 'b, 'b: 'static { t }
+fn ref_id<'a>(t: &'a ()) -> &'a () where 'static: 'a { t }
+
+static UNIT: () = ();
+
+fn main()
+{
+ let mut val : &'static () = &UNIT;
+ invariant_id(&mut val);
+ static_id(val);
+ static_id_indirect(val);
+ ref_id(val);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! foo {
+ ($t:ty; $p:path;) => {}
+}
+
+fn main() {
+ foo!(i32; i32;);
+}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
+#[lang = "eh_unwind_resume"] extern fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
#[start]
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(static_recursion)]
+
+static mut S: *const u8 = unsafe { &S as *const *const u8 as *const u8 };
+
+struct StaticDoubleLinked {
+ prev: &'static StaticDoubleLinked,
+ next: &'static StaticDoubleLinked,
+ data: i32,
+ head: bool
+}
+
+static L1: StaticDoubleLinked = StaticDoubleLinked{prev: &L3, next: &L2, data: 1, head: true};
+static L2: StaticDoubleLinked = StaticDoubleLinked{prev: &L1, next: &L3, data: 2, head: false};
+static L3: StaticDoubleLinked = StaticDoubleLinked{prev: &L2, next: &L1, data: 3, head: false};
+
+
+pub fn main() {
+ unsafe { assert_eq!(S, *(S as *const *const u8)); }
+
+ let mut test_vec = Vec::new();
+ let mut cur = &L1;
+ loop {
+ test_vec.push(cur.data);
+ cur = cur.next;
+ if cur.head { break }
+ }
+ assert_eq!(&test_vec, &[1,2,3]);
+
+ let mut test_vec = Vec::new();
+ let mut cur = &L1;
+ loop {
+ cur = cur.prev;
+ test_vec.push(cur.data);
+ if cur.head { break }
+ }
+ assert_eq!(&test_vec, &[3,2,1]);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// pretty-expanded FIXME #23616
+
+use std::sync::atomic::*;
+
+trait SendSync: Send + Sync {}
+
+impl SendSync for AtomicBool {}
+impl SendSync for AtomicIsize {}
+impl SendSync for AtomicUsize {}
+impl<T> SendSync for AtomicPtr<T> {}
+
+fn main() {}
// except according to those terms.
// ignore-android needs extra network permissions
-// ignore-openbsd system ulimit (Too many open files)
// ignore-bitrig system ulimit (Too many open files)
+// ignore-netbsd system ulimit (Too many open files)
+// ignore-openbsd system ulimit (Too many open files)
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
self.iter()
.map(|e| e.to_string_())
.collect::<Vec<String>>()
- .connect(", "))
+ .join(", "))
}
}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the right implementation is called through a trait
+// object when supertraits include multiple references to the
+// same trait, with different type parameters.
+
+trait A: PartialEq<Foo> + PartialEq<Bar> { }
+
+struct Foo;
+struct Bar;
+
+struct Aimpl;
+
+impl PartialEq<Foo> for Aimpl {
+ fn eq(&self, _rhs: &Foo) -> bool {
+ true
+ }
+}
+
+impl PartialEq<Bar> for Aimpl {
+ fn eq(&self, _rhs: &Bar) -> bool {
+ false
+ }
+}
+
+impl A for Aimpl { }
+
+fn main() {
+ let a = &Aimpl as &A;
+
+ assert!(*a == Foo);
+}
target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
+ target_os = "netbsd",
target_os = "openbsd",
target_os = "android"))]
pub fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:xcrate_associated_type_defaults.rs
+
+extern crate xcrate_associated_type_defaults;
+use xcrate_associated_type_defaults::Foo;
+
+fn main() {
+ ().bar(5);
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:rustdoc-impl-parts-crosscrate.rs
+// ignore-cross-compile
+
+#![feature(optin_builtin_traits)]
+
+extern crate rustdoc_impl_parts_crosscrate;
+
+pub struct Bar<T> { t: T }
+
+// The output file is html embeded in javascript, so the html tags
+// aren't stripped by the processing script and we can't check for the
+// full impl string. Instead, just make sure something from each part
+// is mentioned.
+
+// @has implementors/rustdoc_impl_parts_crosscrate/trait.AnOibit.js Bar
+// @has - Send
+// @has - !AnOibit
+// @has - Copy
+impl<T: Send> !rustdoc_impl_parts_crosscrate::AnOibit for Bar<T>
+ where T: Copy {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(optin_builtin_traits)]
+
+pub trait AnOibit {}
+
+impl AnOibit for .. {}
+
+pub struct Foo<T> { field: T }
+
+// @has impl_parts/struct.Foo.html '//*[@class="impl"]//code' \
+// "impl<T: Clone> !AnOibit for Foo<T> where T: Sync"
+// @has impl_parts/trait.AnOibit.html '//*[@class="item-list"]//code' \
+// "impl<T: Clone> !AnOibit for Foo<T> where T: Sync"
+impl<T: Clone> !AnOibit for Foo<T> where T: Sync {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! doc {
+ (#[$outer:meta] mod $i:ident { #![$inner:meta] }) =>
+ (
+ #[$outer]
+ pub mod $i {
+ #![$inner]
+ }
+ )
+}
+
+doc! {
+ /// Outer comment
+ mod Foo {
+ //! Inner comment
+ }
+}
+
+// @has issue_23812/Foo/index.html
+// @has - 'Outer comment'
+// @!has - '/// Outer comment'
+// @has - 'Inner comment'
+// @!has - '//! Inner comment'
+
+
+doc! {
+ /** Outer block comment */
+ mod Bar {
+ /*! Inner block comment */
+ }
+}
+
+// @has issue_23812/Bar/index.html
+// @has - 'Outer block comment'
+// @!has - '/** Outer block comment */'
+// @has - 'Inner block comment'
+// @!has - '/*! Inner block comment */'