//! This is an NFA-based parser, which calls out to the main rust parser for named non-terminals
//! (which it commits to fully when it hits one in a grammar). There's a set of current NFA threads
//! and a set of next ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional use of NFA or Earley parsing, but it's an easier
//! fit for Macro-by-Example-style rules.
//!
//! (In order to prevent the pathological case, we'd need to lazily construct the resulting
//! `NamedMatch`es at the very end. It'd be a pain, and require more memory to keep around old
//! items, but it would also save overhead)
//!
//! We don't say this parser uses the Earley algorithm, because it's unnecessarily inaccurate.
//! The macro parser restricts itself to the features of finite state automata. Earley parsers
//! can be described as an extension of NFAs with completion rules, prediction rules, and recursion.
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of threads consistent with the current position in the input string: `cur_items`.
//!
//! As it processes them, it fills up `eof_items` with threads that would be valid if
//! the macro invocation is now over, `bb_items` with threads that are waiting on
//! a Rust non-terminal like `$e:expr`, and `next_items` with threads that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. The rules for moving the · without
//! consuming any input are called epsilon transitions. It only advances or calls
//! out to the real Rust parser when no `cur_items` threads remain.
//!
//! Example:
//!
//! ```text, ignore
//! Start parsing a a a a b against [· a $( a )* a b].
//!
//! Remaining input: a a a a b
//! next: [· a $( a )* a b]
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a a b
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: a b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over a b. - - -
//!
//! Remaining input: ''
//! eof: [a $( a )* a b ·]
//! ```
1a4d82fc | 72 | |
e74abb32 XL |
73 | crate use NamedMatch::*; |
74 | crate use ParseResult::*; | |
9fa01778 XL |
75 | use TokenTreeOrTokenTreeSlice::*; |
76 | ||
e74abb32 XL |
77 | use crate::mbe::{self, TokenTree}; |
78 | ||
74b04a01 | 79 | use rustc_ast::token::{self, DocComment, Nonterminal, Token}; |
3dfed10e | 80 | use rustc_parse::parser::Parser; |
74b04a01 | 81 | use rustc_session::parse::ParseSess; |
3dfed10e | 82 | use rustc_span::symbol::MacroRulesNormalizedIdent; |
970d7e83 | 83 | |
9fa01778 | 84 | use smallvec::{smallvec, SmallVec}; |
223e47cc | 85 | |
b7449926 | 86 | use rustc_data_structures::fx::FxHashMap; |
9fa01778 | 87 | use rustc_data_structures::sync::Lrc; |
74b04a01 | 88 | use std::borrow::Cow; |
b7449926 | 89 | use std::collections::hash_map::Entry::{Occupied, Vacant}; |
1a4d82fc | 90 | use std::mem; |
94b46f34 | 91 | use std::ops::{Deref, DerefMut}; |
223e47cc | 92 | |
// To avoid costly uniqueness checks, we require that `MatchSeq` always has a nonempty body.

/// Either a sequence of token trees or a single one. This is used as the representation of the
/// sequence of tokens that make up a matcher.
#[derive(Clone)]
enum TokenTreeOrTokenTreeSlice<'tt> {
    /// A single owned token tree (e.g. a sequence or delimited tree we have descended into).
    Tt(TokenTree),
    /// A borrowed slice of token trees (used for the top-level matcher).
    TtSeq(&'tt [TokenTree]),
}
223e47cc | 102 | |
a1dfa0c6 | 103 | impl<'tt> TokenTreeOrTokenTreeSlice<'tt> { |
2c00a5a8 XL |
104 | /// Returns the number of constituent top-level token trees of `self` (top-level in that it |
105 | /// will not recursively descend into subtrees). | |
85aaf69f | 106 | fn len(&self) -> usize { |
92a42be0 SL |
107 | match *self { |
108 | TtSeq(ref v) => v.len(), | |
109 | Tt(ref tt) => tt.len(), | |
1a4d82fc JJ |
110 | } |
111 | } | |
223e47cc | 112 | |
a1dfa0c6 | 113 | /// The `index`-th token tree of `self`. |
85aaf69f | 114 | fn get_tt(&self, index: usize) -> TokenTree { |
92a42be0 SL |
115 | match *self { |
116 | TtSeq(ref v) => v[index].clone(), | |
117 | Tt(ref tt) => tt.get_tt(index), | |
1a4d82fc JJ |
118 | } |
119 | } | |
223e47cc LB |
120 | } |
121 | ||
/// An unzipping of `TokenTree`s... see the `stack` field of `MatcherPos`.
///
/// This is used by `inner_parse_loop` to keep track of delimited submatchers that we have
/// descended into.
#[derive(Clone)]
struct MatcherTtFrame<'tt> {
    /// The "parent" matcher that we are descending into.
    elts: TokenTreeOrTokenTreeSlice<'tt>,
    /// The position of the "dot" in `elts` at the time we descended.
    idx: usize,
}
133 | ||
/// A small vector of matches; most matchers bind only a handful of metavars, so four inline
/// slots avoid a heap allocation in the common case.
type NamedMatchVec = SmallVec<[NamedMatch; 4]>;

/// Represents a single "position" (aka "matcher position", aka "item"), as
/// described in the module documentation.
///
/// Here:
///
/// - `'root` represents the lifetime of the stack slot that holds the root
///   `MatcherPos`. As described in `MatcherPosHandle`, the root `MatcherPos`
///   structure is stored on the stack, but subsequent instances are put into
///   the heap.
/// - `'tt` represents the lifetime of the token trees that this matcher
///   position refers to.
///
/// It is important to distinguish these two lifetimes because we have a
/// `SmallVec<TokenTreeOrTokenTreeSlice<'tt>>` below, and the destructor of
/// that is considered to possibly access the data from its elements (it lacks
/// a `#[may_dangle]` attribute). As a result, the compiler needs to know that
/// all the elements in that `SmallVec` strictly outlive the root stack slot
/// lifetime. By separating `'tt` from `'root`, we can show that.
#[derive(Clone)]
struct MatcherPos<'root, 'tt> {
    /// The token or sequence of tokens that make up the matcher
    top_elts: TokenTreeOrTokenTreeSlice<'tt>,

    /// The position of the "dot" in this matcher
    idx: usize,

    /// For each named metavar in the matcher, we keep track of token trees matched against the
    /// metavar by the black box parser. In particular, there may be more than one match per
    /// metavar if we are in a repetition (each repetition matches each of the variables).
    /// Moreover, matchers and repetitions can be nested; the `matches` field is shared (hence the
    /// `Rc`) among all "nested" matchers. `match_lo`, `match_cur`, and `match_hi` keep track of
    /// the current position of the `self` matcher position in the shared `matches` list.
    ///
    /// Also, note that while we are descending into a sequence, matchers are given their own
    /// `matches` vector. Only once we reach the end of a full repetition of the sequence do we add
    /// all bound matches from the submatcher into the shared top-level `matches` vector. If `sep`
    /// and `up` are `Some`, then `matches` is _not_ the shared top-level list. Instead, if one
    /// wants the shared `matches`, one should use `up.matches`.
    matches: Box<[Lrc<NamedMatchVec>]>,
    /// The position in `matches` corresponding to the first metavar in this matcher's sequence of
    /// token trees. In other words, the first metavar in the first token of `top_elts` corresponds
    /// to `matches[match_lo]`.
    match_lo: usize,
    /// The position in `matches` corresponding to the metavar we are currently trying to match
    /// against the source token stream. `match_lo <= match_cur <= match_hi`.
    match_cur: usize,
    /// Similar to `match_lo` except `match_hi` is the position in `matches` of the _last_ metavar
    /// in this matcher.
    match_hi: usize,

    // The following fields are used if we are matching a repetition. If we aren't, they should be
    // `None`.
    /// The KleeneOp of this sequence if we are in a repetition.
    seq_op: Option<mbe::KleeneOp>,

    /// The separator if we are in a repetition.
    sep: Option<Token>,

    /// The "parent" matcher position if we are in a repetition. That is, the matcher position just
    /// before we enter the sequence.
    up: Option<MatcherPosHandle<'root, 'tt>>,

    /// Specifically used to "unzip" token trees. By "unzip", we mean to unwrap the delimiters from
    /// a delimited token tree (e.g., something wrapped in `(` `)`) or to get the contents of a doc
    /// comment...
    ///
    /// When matching against matchers with nested delimited submatchers (e.g., `pat ( pat ( .. )
    /// pat ) pat`), we need to keep track of the matchers we are descending into. This stack does
    /// that where the bottom of the stack is the outermost matcher.
    /// Also, throughout the comments, this "descent" is often referred to as "unzipping"...
    stack: SmallVec<[MatcherTtFrame<'tt>; 1]>,
}
208 | ||
a1dfa0c6 | 209 | impl<'root, 'tt> MatcherPos<'root, 'tt> { |
9fa01778 | 210 | /// Adds `m` as a named match for the `idx`-th metavar. |
041b39d2 | 211 | fn push_match(&mut self, idx: usize, m: NamedMatch) { |
9fa01778 | 212 | let matches = Lrc::make_mut(&mut self.matches[idx]); |
041b39d2 XL |
213 | matches.push(m); |
214 | } | |
215 | } | |
216 | ||
// Lots of MatcherPos instances are created at runtime. Allocating them on the
// heap is slow. Furthermore, using SmallVec<MatcherPos> to allocate them all
// on the stack is also slow, because MatcherPos is quite a large type and
// instances get moved around a lot between vectors, which requires lots of
// slow memcpy calls.
//
// Therefore, the initial MatcherPos is always allocated on the stack,
// subsequent ones (of which there aren't that many) are allocated on the heap,
// and this type is used to encapsulate both cases.
enum MatcherPosHandle<'root, 'tt> {
    /// The root matcher position, borrowed from a stack slot in `parse_tt`.
    Ref(&'root mut MatcherPos<'root, 'tt>),
    /// Any subsequent matcher position, heap-allocated.
    Box(Box<MatcherPos<'root, 'tt>>),
}
230 | ||
a1dfa0c6 | 231 | impl<'root, 'tt> Clone for MatcherPosHandle<'root, 'tt> { |
94b46f34 XL |
232 | // This always produces a new Box. |
233 | fn clone(&self) -> Self { | |
234 | MatcherPosHandle::Box(match *self { | |
235 | MatcherPosHandle::Ref(ref r) => Box::new((**r).clone()), | |
236 | MatcherPosHandle::Box(ref b) => b.clone(), | |
237 | }) | |
238 | } | |
239 | } | |
240 | ||
a1dfa0c6 XL |
241 | impl<'root, 'tt> Deref for MatcherPosHandle<'root, 'tt> { |
242 | type Target = MatcherPos<'root, 'tt>; | |
94b46f34 XL |
243 | fn deref(&self) -> &Self::Target { |
244 | match *self { | |
245 | MatcherPosHandle::Ref(ref r) => r, | |
246 | MatcherPosHandle::Box(ref b) => b, | |
247 | } | |
248 | } | |
249 | } | |
250 | ||
a1dfa0c6 XL |
251 | impl<'root, 'tt> DerefMut for MatcherPosHandle<'root, 'tt> { |
252 | fn deref_mut(&mut self) -> &mut MatcherPos<'root, 'tt> { | |
94b46f34 XL |
253 | match *self { |
254 | MatcherPosHandle::Ref(ref mut r) => r, | |
255 | MatcherPosHandle::Box(ref mut b) => b, | |
256 | } | |
257 | } | |
258 | } | |
259 | ||
/// Represents the possible results of an attempted parse.
crate enum ParseResult<T> {
    /// Parsed successfully.
    Success(T),
    /// Arm failed to match. If the second parameter is `token::Eof`, it indicates an unexpected
    /// end of macro invocation. Otherwise, it indicates that no rules expected the given token.
    Failure(Token, &'static str),
    /// Fatal error (malformed macro?). Abort compilation.
    Error(rustc_span::Span, String),
    /// An error was already reported elsewhere; no further diagnostics needed.
    ErrorReported,
}
271 | ||
/// A `ParseResult` where the `Success` variant contains a mapping of
/// `MacroRulesNormalizedIdent`s to `NamedMatch`es. This represents the mapping
/// of metavars to the token trees they bind to.
crate type NamedParseResult = ParseResult<FxHashMap<MacroRulesNormalizedIdent, NamedMatch>>;
476ff2be | 276 | |
2c00a5a8 | 277 | /// Count how many metavars are named in the given matcher `ms`. |
e74abb32 | 278 | pub(super) fn count_names(ms: &[TokenTree]) -> usize { |
1a4d82fc | 279 | ms.iter().fold(0, |count, elt| { |
dfeec247 XL |
280 | count |
281 | + match *elt { | |
282 | TokenTree::Sequence(_, ref seq) => seq.num_captures, | |
283 | TokenTree::Delimited(_, ref delim) => count_names(&delim.tts), | |
284 | TokenTree::MetaVar(..) => 0, | |
285 | TokenTree::MetaVarDecl(..) => 1, | |
286 | TokenTree::Token(..) => 0, | |
287 | } | |
1a4d82fc | 288 | }) |
223e47cc LB |
289 | } |
290 | ||
a1dfa0c6 | 291 | /// `len` `Vec`s (initially shared and empty) that will store matches of metavars. |
9fa01778 | 292 | fn create_matches(len: usize) -> Box<[Lrc<NamedMatchVec>]> { |
a1dfa0c6 XL |
293 | if len == 0 { |
294 | vec![] | |
295 | } else { | |
9fa01778 | 296 | let empty_matches = Lrc::new(SmallVec::new()); |
0731742a | 297 | vec![empty_matches; len] |
dfeec247 XL |
298 | } |
299 | .into_boxed_slice() | |
2c00a5a8 XL |
300 | } |
301 | ||
/// Generates the top-level matcher position in which the "dot" is before the first token of the
/// matcher `ms`.
fn initial_matcher_pos<'root, 'tt>(ms: &'tt [TokenTree]) -> MatcherPos<'root, 'tt> {
    let match_idx_hi = count_names(ms);
    let matches = create_matches(match_idx_hi);
    MatcherPos {
        // Start with the top level matcher given to us
        top_elts: TtSeq(ms), // "elts" is an abbr. for "elements"
        // The "dot" is before the first token of the matcher
        idx: 0,

        // Initialize `matches` to a bunch of empty `Vec`s -- one for each metavar in `top_elts`.
        // `match_lo` for `top_elts` is 0 and `match_hi` is `matches.len()`. `match_cur` is 0 since
        // we haven't actually matched anything yet.
        matches,
        match_lo: 0,
        match_cur: 0,
        match_hi: match_idx_hi,

        // Haven't descended into any delimiters, so empty stack
        stack: smallvec![],

        // Haven't descended into any sequences, so both of these are `None`.
        seq_op: None,
        sep: None,
        up: None,
    }
}
330 | ||
/// `NamedMatch` is a pattern-match result for a single `token::MATCH_NONTERMINAL`:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the `NamedMatch` have the same non-terminal type
/// (expr, item, etc). Each leaf in a single `NamedMatch` corresponds to a
/// single `token::MATCH_NONTERMINAL` in the `TokenTree` that produced it.
///
/// The in-memory structure of a particular `NamedMatch` represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
/// the `MatchedNonterminal`s, will depend on the token tree it was applied
/// to: each `MatchedSeq` corresponds to a single `TTSeq` in the originating
/// token tree. The depth of the `NamedMatch` structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
#[derive(Debug, Clone)]
crate enum NamedMatch {
    /// One match per repetition of the enclosing sequence (shared via `Lrc`).
    MatchedSeq(Lrc<NamedMatchVec>),
    /// A single nonterminal parsed by the black-box (main Rust) parser.
    MatchedNonterminal(Lrc<Nonterminal>),
}
352 | ||
/// Takes a sequence of token trees `ms` representing a matcher which successfully matched input
/// and an iterator of items that matched input and produces a `NamedParseResult`.
fn nameize<I: Iterator<Item = NamedMatch>>(
    sess: &ParseSess,
    ms: &[TokenTree],
    mut res: I,
) -> NamedParseResult {
    // Recursively descend into each type of matcher (e.g., sequences, delimited, metavars) and make
    // sure that each metavar has _exactly one_ binding. If a metavar does not have exactly one
    // binding, then there is an error. If it does, then we insert the binding into the
    // `NamedParseResult`.
    //
    // NOTE: `res` is consumed in matcher order, so the traversal order here must mirror the
    // order in which matches were produced.
    fn n_rec<I: Iterator<Item = NamedMatch>>(
        sess: &ParseSess,
        m: &TokenTree,
        res: &mut I,
        ret_val: &mut FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
    ) -> Result<(), (rustc_span::Span, String)> {
        match *m {
            TokenTree::Sequence(_, ref seq) => {
                for next_m in &seq.tts {
                    n_rec(sess, next_m, res.by_ref(), ret_val)?
                }
            }
            TokenTree::Delimited(_, ref delim) => {
                for next_m in &delim.tts {
                    n_rec(sess, next_m, res.by_ref(), ret_val)?;
                }
            }
            // A metavar declaration without a fragment specifier (`$x` instead of `$x:expr`).
            TokenTree::MetaVarDecl(span, _, None) => {
                if sess.missing_fragment_specifiers.borrow_mut().remove(&span).is_some() {
                    return Err((span, "missing fragment specifier".to_string()));
                }
            }
            // A well-formed metavar declaration: bind its name to the next match, erroring on
            // duplicate names.
            TokenTree::MetaVarDecl(sp, bind_name, _) => match ret_val
                .entry(MacroRulesNormalizedIdent::new(bind_name))
            {
                Vacant(spot) => {
                    spot.insert(res.next().unwrap());
                }
                Occupied(..) => return Err((sp, format!("duplicated bind name: {}", bind_name))),
            },
            // Metavar uses and plain tokens bind nothing.
            TokenTree::MetaVar(..) | TokenTree::Token(..) => (),
        }

        Ok(())
    }

    let mut ret_val = FxHashMap::default();
    for m in ms {
        match n_rec(sess, m, res.by_ref(), &mut ret_val) {
            Ok(_) => {}
            Err((sp, msg)) => return Error(sp, msg),
        }
    }

    Success(ret_val)
}
410 | ||
9fa01778 | 411 | /// Performs a token equality check, ignoring syntax context (that is, an unhygienic comparison) |
2c00a5a8 | 412 | fn token_name_eq(t1: &Token, t2: &Token) -> bool { |
dc9dc135 XL |
413 | if let (Some((ident1, is_raw1)), Some((ident2, is_raw2))) = (t1.ident(), t2.ident()) { |
414 | ident1.name == ident2.name && is_raw1 == is_raw2 | |
415 | } else if let (Some(ident1), Some(ident2)) = (t1.lifetime(), t2.lifetime()) { | |
416 | ident1.name == ident2.name | |
cc61c64b | 417 | } else { |
dc9dc135 | 418 | t1.kind == t2.kind |
1a4d82fc JJ |
419 | } |
420 | } | |
421 | ||
/// Process the matcher positions of `cur_items` until it is empty. In the process, this will
/// produce more items in `next_items`, `eof_items`, and `bb_items`.
///
/// For more info about the how this happens, see the module-level doc comments and the inline
/// comments of this function.
///
/// # Parameters
///
/// - `sess`: the parsing session into which errors are emitted.
/// - `cur_items`: the set of current items to be processed. This should be empty by the end of a
///   successful execution of this function.
/// - `next_items`: the set of newly generated items. These are used to replenish `cur_items` in
///   the function `parse`.
/// - `eof_items`: the set of items that would be valid if this was the EOF.
/// - `bb_items`: the set of items that are waiting for the black-box parser.
/// - `token`: the current token of the parser.
///
/// # Returns
///
/// A `ParseResult`. Note that matches are kept track of through the items generated.
fn inner_parse_loop<'root, 'tt>(
    sess: &ParseSess,
    cur_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
    next_items: &mut Vec<MatcherPosHandle<'root, 'tt>>,
    eof_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
    bb_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
    token: &Token,
) -> ParseResult<()> {
    // Pop items from `cur_items` until it is empty.
    while let Some(mut item) = cur_items.pop() {
        // When unzipped trees end, remove them. This corresponds to backtracking out of a
        // delimited submatcher into which we already descended. In backtracking out again, we need
        // to advance the "dot" past the delimiters in the outer matcher.
        while item.idx >= item.top_elts.len() {
            match item.stack.pop() {
                Some(MatcherTtFrame { elts, idx }) => {
                    item.top_elts = elts;
                    item.idx = idx + 1;
                }
                None => break,
            }
        }

        // Get the current position of the "dot" (`idx`) in `item` and the number of token trees in
        // the matcher (`len`).
        let idx = item.idx;
        let len = item.top_elts.len();

        // If `idx >= len`, then we are at or past the end of the matcher of `item`.
        if idx >= len {
            // We are repeating iff there is a parent. If the matcher is inside of a repetition,
            // then we could be at the end of a sequence or at the beginning of the next
            // repetition.
            if item.up.is_some() {
                // At this point, regardless of whether there is a separator, we should add all
                // matches from the complete repetition of the sequence to the shared, top-level
                // `matches` list (actually, `up.matches`, which could itself not be the top-level,
                // but anyway...). Moreover, we add another item to `cur_items` in which the "dot"
                // is at the end of the `up` matcher. This ensures that the "dot" in the `up`
                // matcher is also advanced sufficiently.
                //
                // NOTE: removing the condition `idx == len` allows trailing separators.
                if idx == len {
                    // Get the `up` matcher
                    let mut new_pos = item.up.clone().unwrap();

                    // Add matches from this repetition to the `matches` of `up`
                    for idx in item.match_lo..item.match_hi {
                        let sub = item.matches[idx].clone();
                        new_pos.push_match(idx, MatchedSeq(sub));
                    }

                    // Move the "dot" past the repetition in `up`
                    new_pos.match_cur = item.match_hi;
                    new_pos.idx += 1;
                    cur_items.push(new_pos);
                }

                // Check if we need a separator.
                if idx == len && item.sep.is_some() {
                    // We have a separator, and it is the current token. We can advance past the
                    // separator token.
                    if item.sep.as_ref().map(|sep| token_name_eq(token, sep)).unwrap_or(false) {
                        item.idx += 1;
                        next_items.push(item);
                    }
                }
                // We don't need a separator. Move the "dot" back to the beginning of the matcher
                // and try to match again UNLESS we are only allowed to have _one_ repetition.
                else if item.seq_op != Some(mbe::KleeneOp::ZeroOrOne) {
                    item.match_cur = item.match_lo;
                    item.idx = 0;
                    cur_items.push(item);
                }
            }
            // If we are not in a repetition, then being at the end of a matcher means that we have
            // reached the potential end of the input.
            else {
                eof_items.push(item);
            }
        }
        // We are in the middle of a matcher.
        else {
            // Look at what token in the matcher we are trying to match the current token (`token`)
            // against. Depending on that, we may generate new items.
            match item.top_elts.get_tt(idx) {
                // Need to descend into a sequence
                TokenTree::Sequence(sp, seq) => {
                    // Examine the case where there are 0 matches of this sequence. We are
                    // implicitly disallowing OneOrMore from having 0 matches here. Thus, that will
                    // result in a "no rules expected token" error by virtue of this matcher not
                    // working.
                    if seq.kleene.op == mbe::KleeneOp::ZeroOrMore
                        || seq.kleene.op == mbe::KleeneOp::ZeroOrOne
                    {
                        let mut new_item = item.clone();
                        new_item.match_cur += seq.num_captures;
                        new_item.idx += 1;
                        for idx in item.match_cur..item.match_cur + seq.num_captures {
                            new_item.push_match(idx, MatchedSeq(Lrc::new(smallvec![])));
                        }
                        cur_items.push(new_item);
                    }

                    // Descend into the sequence: the new (heap-allocated) position gets its own
                    // `matches` and remembers `item` as its parent (`up`).
                    let matches = create_matches(item.matches.len());
                    cur_items.push(MatcherPosHandle::Box(Box::new(MatcherPos {
                        stack: smallvec![],
                        sep: seq.separator.clone(),
                        seq_op: Some(seq.kleene.op),
                        idx: 0,
                        matches,
                        match_lo: item.match_cur,
                        match_cur: item.match_cur,
                        match_hi: item.match_cur + seq.num_captures,
                        up: Some(item),
                        top_elts: Tt(TokenTree::Sequence(sp, seq)),
                    })));
                }

                // We need to match a metavar (but the identifier is invalid)... this is an error
                TokenTree::MetaVarDecl(span, _, None) => {
                    if sess.missing_fragment_specifiers.borrow_mut().remove(&span).is_some() {
                        return Error(span, "missing fragment specifier".to_string());
                    }
                }

                // We need to match a metavar with a valid ident... call out to the black-box
                // parser by adding an item to `bb_items`.
                TokenTree::MetaVarDecl(_, _, Some(kind)) => {
                    // Built-in nonterminals never start with these tokens,
                    // so we can eliminate them from consideration.
                    if Parser::nonterminal_may_begin_with(kind, token) {
                        bb_items.push(item);
                    }
                }

                // We need to descend into a delimited submatcher or a doc comment. To do this, we
                // push the current matcher onto a stack and push a new item containing the
                // submatcher onto `cur_items`.
                //
                // At the beginning of the loop, if we reach the end of the delimited submatcher,
                // we pop the stack to backtrack out of the descent.
                seq
                @
                (TokenTree::Delimited(..)
                | TokenTree::Token(Token { kind: DocComment(..), .. })) => {
                    let lower_elts = mem::replace(&mut item.top_elts, Tt(seq));
                    let idx = item.idx;
                    item.stack.push(MatcherTtFrame { elts: lower_elts, idx });
                    item.idx = 0;
                    cur_items.push(item);
                }

                // We just matched a normal token. We can just advance the parser.
                TokenTree::Token(t) if token_name_eq(&t, token) => {
                    item.idx += 1;
                    next_items.push(item);
                }

                // There was another token that was not `token`... This means we can't add any
                // rules. NOTE that this is not necessarily an error unless _all_ items in
                // `cur_items` end up doing this. There may still be some other matchers that do
                // end up working out.
                TokenTree::Token(..) | TokenTree::MetaVar(..) => {}
            }
        }
    }

    // Yay a successful parse (so far)!
    Success(())
}
615 | ||
74b04a01 XL |
/// Use the given sequence of token trees (`ms`) as a matcher. Match the token
/// stream from the given `parser` against it and return the match.
///
/// `parser` is a `Cow` so that in the common case where no Rust nonterminal
/// needs parsing and no token is consumed, the caller's parser is never cloned;
/// it is only made mutable (via `to_mut`) when we actually bump a token or
/// invoke the black-box nonterminal parser.
///
/// Returns `Success` with the named match bindings, `Failure` with the
/// offending token and a message, `Error` for ambiguity/diagnostic cases, or
/// `ErrorReported` when a diagnostic has already been emitted.
pub(super) fn parse_tt(parser: &mut Cow<'_, Parser<'_>>, ms: &[TokenTree]) -> NamedParseResult {
    // A queue of possible matcher positions. We initialize it with the matcher position in which
    // the "dot" is before the first token of the first token tree in `ms`. `inner_parse_loop` then
    // processes all of these possible matcher positions and produces possible next positions into
    // `next_items`. After some post-processing, the contents of `next_items` replenish `cur_items`
    // and we start over again.
    //
    // This MatcherPos instance is allocated on the stack. All others -- and
    // there are frequently *no* others! -- are allocated on the heap.
    let mut initial = initial_matcher_pos(ms);
    let mut cur_items = smallvec![MatcherPosHandle::Ref(&mut initial)];
    let mut next_items = Vec::new();

    loop {
        // Matcher positions black-box parsed by parser.rs (`parser`)
        let mut bb_items = SmallVec::new();

        // Matcher positions that would be valid if the macro invocation was over now
        let mut eof_items = SmallVec::new();
        // `next_items` is drained into `cur_items` at the bottom of each iteration,
        // so it must be empty here.
        assert!(next_items.is_empty());

        // Process `cur_items` until either we have finished the input or we need to get some
        // parsing from the black-box parser done. The result is that `next_items` will contain a
        // bunch of possible next matcher positions in `next_items`.
        match inner_parse_loop(
            parser.sess,
            &mut cur_items,
            &mut next_items,
            &mut eof_items,
            &mut bb_items,
            &parser.token,
        ) {
            Success(_) => {}
            // Propagate failures and errors immediately; only `Success` continues the loop.
            Failure(token, msg) => return Failure(token, msg),
            Error(sp, msg) => return Error(sp, msg),
            ErrorReported => return ErrorReported,
        }

        // inner parse loop handled all cur_items, so it's empty
        assert!(cur_items.is_empty());

        // We need to do some post processing after the `inner_parse_loop`.
        //
        // Error messages here could be improved with links to original rules.

        // If we reached the EOF, check that there is EXACTLY ONE possible matcher. Otherwise,
        // either the parse is ambiguous (which should never happen) or there is a syntax error.
        if parser.token == token::Eof {
            if eof_items.len() == 1 {
                // Exactly one thread survived to EOF: extract its matches and bind
                // them to the metavariable names via `nameize`. `Lrc::make_mut`
                // clones only if the match vector is still shared.
                let matches =
                    eof_items[0].matches.iter_mut().map(|dv| Lrc::make_mut(dv).pop().unwrap());
                return nameize(parser.sess, ms, matches);
            } else if eof_items.len() > 1 {
                return Error(
                    parser.token.span,
                    "ambiguity: multiple successful parses".to_string(),
                );
            } else {
                // No thread accepts EOF: the invocation ended too early. Point the
                // failure just past the last token (unless the span is a dummy).
                return Failure(
                    Token::new(
                        token::Eof,
                        if parser.token.span.is_dummy() {
                            parser.token.span
                        } else {
                            parser.token.span.shrink_to_hi()
                        },
                    ),
                    "missing tokens in macro arguments",
                );
            }
        }
        // Performance hack: eof_items may share matchers via Rc with other things that we want
        // to modify. Dropping eof_items now may drop these refcounts to 1, preventing an
        // unnecessary implicit clone later in Rc::make_mut.
        drop(eof_items);

        // If there are no possible next positions AND we aren't waiting for the black-box parser,
        // then there is a syntax error.
        if bb_items.is_empty() && next_items.is_empty() {
            return Failure(parser.token.clone(), "no rules expected this token in macro call");
        }
        // Another possibility is that we need to call out to parse some rust nonterminal
        // (black-box) parser. However, if there is not EXACTLY ONE of these, something is wrong.
        else if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
            // Ambiguity: more than one way to proceed. Build a human-readable list
            // of the competing nonterminal matchers for the diagnostic.
            let nts = bb_items
                .iter()
                .map(|item| match item.top_elts.get_tt(item.idx) {
                    TokenTree::MetaVarDecl(_, bind, Some(kind)) => format!("{} ('{}')", kind, bind),
                    // `inner_parse_loop` only pushes `MetaVarDecl` items into `bb_items`.
                    _ => panic!(),
                })
                .collect::<Vec<String>>()
                .join(" or ");

            return Error(
                parser.token.span,
                format!(
                    "local ambiguity: multiple parsing options: {}",
                    match next_items.len() {
                        0 => format!("built-in NTs {}.", nts),
                        1 => format!("built-in NTs {} or 1 other option.", nts),
                        n => format!("built-in NTs {} or {} other options.", nts, n),
                    }
                ),
            );
        }
        // Dump all possible `next_items` into `cur_items` for the next iteration.
        else if !next_items.is_empty() {
            // Now process the next token
            cur_items.extend(next_items.drain(..));
            parser.to_mut().bump();
        }
        // Finally, we have the case where we need to call the black-box parser to get some
        // nonterminal.
        else {
            // The two branches above ruled out `bb_items.len() != 1`.
            assert_eq!(bb_items.len(), 1);

            let mut item = bb_items.pop().unwrap();
            if let TokenTree::MetaVarDecl(span, _, Some(kind)) = item.top_elts.get_tt(item.idx) {
                let match_cur = item.match_cur;
                // Hand control to the main Rust parser for this fragment kind
                // (e.g. `expr`, `ty`). This commits to the nonterminal parse.
                let nt = match parser.to_mut().parse_nonterminal(kind) {
                    Err(mut err) => {
                        err.span_label(
                            span,
                            format!("while parsing argument for this `{}` macro fragment", kind),
                        )
                        .emit();
                        return ErrorReported;
                    }
                    Ok(nt) => nt,
                };
                // Record the parsed nonterminal and advance both the matcher dot
                // and the metavariable cursor.
                item.push_match(match_cur, MatchedNonterminal(Lrc::new(nt)));
                item.idx += 1;
                item.match_cur += 1;
            } else {
                unreachable!()
            }
            cur_items.push(item);
        }

        // Progress invariant: every branch above either returned or pushed at
        // least one item, so the next iteration always has work to do.
        assert!(!cur_items.is_empty());
    }
}