]>
Commit | Line | Data |
---|---|---|
1a4d82fc | 1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT |
223e47cc LB |
2 | // file at the top-level directory of this distribution and at |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
1a4d82fc JJ |
10 | |
11 | //! This is an Earley-like parser, without support for in-grammar nonterminals, | |
12 | //! only by calling out to the main rust parser for named nonterminals (which it | |
13 | //! commits to fully when it hits one in a grammar). This means that there are no | |
14 | //! completer or predictor rules, and therefore no need to store one column per | |
15 | //! token: instead, there's a set of current Earley items and a set of next | |
16 | //! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in | |
17 | //! pathological cases, is worse than traditional Earley parsing, but it's an | |
18 | //! easier fit for Macro-by-Example-style rules, and I think the overhead is | |
19 | //! lower. (In order to prevent the pathological case, we'd need to lazily | |
20 | //! construct the resulting `NamedMatch`es at the very end. It'd be a pain, | |
21 | //! and require more memory to keep around old items, but it would also save | |
22 | //! overhead) | |
23 | //! | |
24 | //! Quick intro to how the parser works: | |
25 | //! | |
26 | //! A 'position' is a dot in the middle of a matcher, usually represented as a | |
27 | //! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. | |
28 | //! | |
29 | //! The parser walks through the input a character at a time, maintaining a list | |
30 | //! of items consistent with the current position in the input string: `cur_eis`. | |
31 | //! | |
32 | //! As it processes them, it fills up `eof_eis` with items that would be valid if | |
33 | //! the macro invocation is now over, `bb_eis` with items that are waiting on | |
34 | //! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting | |
b039eaaf | 35 | //! on a particular token. Most of the logic concerns moving the · through the |
1a4d82fc JJ |
36 | //! repetitions indicated by Kleene stars. It only advances or calls out to the |
37 | //! real Rust parser when no `cur_eis` items remain | |
38 | //! | |
39 | //! Example: Start parsing `a a a a b` against [· a $( a )* a b]. | |
40 | //! | |
41 | //! Remaining input: `a a a a b` | |
42 | //! next_eis: [· a $( a )* a b] | |
43 | //! | |
44 | //! - - - Advance over an `a`. - - - | |
45 | //! | |
46 | //! Remaining input: `a a a b` | |
47 | //! cur: [a · $( a )* a b] | |
48 | //! Descend/Skip (first item). | |
49 | //! next: [a $( · a )* a b] [a $( a )* · a b]. | |
50 | //! | |
51 | //! - - - Advance over an `a`. - - - | |
52 | //! | |
53 | //! Remaining input: `a a b` | |
54 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
55 | //! Finish/Repeat (first item) | |
56 | //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] | |
57 | //! | |
58 | //! - - - Advance over an `a`. - - - (this looks exactly like the last step) | |
59 | //! | |
60 | //! Remaining input: `a b` | |
61 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
62 | //! Finish/Repeat (first item) | |
63 | //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] | |
64 | //! | |
65 | //! - - - Advance over an `a`. - - - (this looks exactly like the last step) | |
66 | //! | |
67 | //! Remaining input: `b` | |
68 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
69 | //! Finish/Repeat (first item) | |
70 | //! next: [a $( a )* · a b] [a $( · a )* a b] | |
71 | //! | |
72 | //! - - - Advance over a `b`. - - - | |
73 | //! | |
74 | //! Remaining input: `` | |
75 | //! eof: [a $( a )* a b ·] | |
76 | ||
77 | pub use self::NamedMatch::*; | |
78 | pub use self::ParseResult::*; | |
79 | use self::TokenTreeOrTokenTreeVec::*; | |
970d7e83 | 80 | |
223e47cc | 81 | use ast; |
5bcae85e | 82 | use ast::Ident; |
3157f602 XL |
83 | use syntax_pos::{self, BytePos, mk_sp, Span}; |
84 | use codemap::Spanned; | |
9cc50fc6 | 85 | use errors::FatalError; |
223e47cc LB |
86 | use parse::lexer::*; //resolve bug? |
87 | use parse::ParseSess; | |
a7813a04 | 88 | use parse::parser::{PathStyle, Parser}; |
9cc50fc6 | 89 | use parse::token::{DocComment, MatchNt, SubstNt}; |
1a4d82fc | 90 | use parse::token::{Token, Nonterminal}; |
223e47cc | 91 | use parse::token; |
1a4d82fc JJ |
92 | use print::pprust; |
93 | use ptr::P; | |
3157f602 | 94 | use tokenstream::{self, TokenTree}; |
223e47cc | 95 | |
1a4d82fc JJ |
96 | use std::mem; |
97 | use std::rc::Rc; | |
98 | use std::collections::HashMap; | |
99 | use std::collections::hash_map::Entry::{Vacant, Occupied}; | |
223e47cc | 100 | |
1a4d82fc JJ |
101 | // To avoid costly uniqueness checks, we require that `MatchSeq` always has |
102 | // a nonempty body. | |
223e47cc | 103 | |
1a4d82fc JJ |
// Either one token tree or a shared vector of them — the two shapes a
// matcher level can take while the parser "unzips" token trees.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
    // a single tree (e.g. a sequence or delimited group being descended into)
    Tt(tokenstream::TokenTree),
    // a shared vector of trees (used for the top-level matcher)
    TtSeq(Rc<Vec<tokenstream::TokenTree>>),
}
223e47cc | 109 | |
1a4d82fc | 110 | impl TokenTreeOrTokenTreeVec { |
85aaf69f | 111 | fn len(&self) -> usize { |
92a42be0 SL |
112 | match *self { |
113 | TtSeq(ref v) => v.len(), | |
114 | Tt(ref tt) => tt.len(), | |
1a4d82fc JJ |
115 | } |
116 | } | |
223e47cc | 117 | |
85aaf69f | 118 | fn get_tt(&self, index: usize) -> TokenTree { |
92a42be0 SL |
119 | match *self { |
120 | TtSeq(ref v) => v[index].clone(), | |
121 | Tt(ref tt) => tt.get_tt(index), | |
1a4d82fc JJ |
122 | } |
123 | } | |
223e47cc LB |
124 | } |
125 | ||
1a4d82fc JJ |
/// an unzipping of `TokenTree`s: one suspended outer matcher level,
/// saved while the parser walks an inner tree. `elts` is the outer
/// level's trees; `idx` is where to resume in it after popping.
#[derive(Clone)]
struct MatcherTtFrame {
    // token trees of the suspended (outer) level
    elts: TokenTreeOrTokenTreeVec,
    // position within `elts` to continue from once this frame is popped
    idx: usize,
}
132 | ||
/// A single Earley item: one partially-matched position (the "dot") within
/// the matcher, together with the matches accumulated so far.
#[derive(Clone)]
pub struct MatcherPos {
    // suspended outer levels; pushed when descending into a delimited tree
    // or doc comment, popped when the inner tree is exhausted
    stack: Vec<MatcherTtFrame>,
    // the token trees currently being walked
    top_elts: TokenTreeOrTokenTreeVec,
    // separator token of the enclosing repetition, if any
    sep: Option<Token>,
    // index of the dot within `top_elts`
    idx: usize,
    // matcher position to return to when this repetition finishes
    // (`None` for the top level — used as the "is repeating" test in `parse`)
    up: Option<Box<MatcherPos>>,
    // one vector of accumulated submatches per named binder in the matcher
    matches: Vec<Vec<Rc<NamedMatch>>>,
    // binder indices [match_lo, match_hi) belong to this position;
    // match_cur is the next binder index to bind
    match_lo: usize,
    match_cur: usize,
    match_hi: usize,
    // low end of the span matched so far (used to build MatchedSeq spans)
    sp_lo: BytePos,
}
146 | ||
85aaf69f | 147 | pub fn count_names(ms: &[TokenTree]) -> usize { |
1a4d82fc | 148 | ms.iter().fold(0, |count, elt| { |
92a42be0 SL |
149 | count + match *elt { |
150 | TokenTree::Sequence(_, ref seq) => { | |
1a4d82fc JJ |
151 | seq.num_captures |
152 | } | |
92a42be0 | 153 | TokenTree::Delimited(_, ref delim) => { |
c34b1796 | 154 | count_names(&delim.tts) |
1a4d82fc | 155 | } |
92a42be0 | 156 | TokenTree::Token(_, MatchNt(..)) => { |
1a4d82fc JJ |
157 | 1 |
158 | } | |
9e0c209e | 159 | TokenTree::Token(..) => 0, |
1a4d82fc JJ |
160 | } |
161 | }) | |
223e47cc LB |
162 | } |
163 | ||
1a4d82fc JJ |
164 | pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos) |
165 | -> Box<MatcherPos> { | |
85aaf69f SL |
166 | let match_idx_hi = count_names(&ms[..]); |
167 | let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect(); | |
d9579d0f | 168 | Box::new(MatcherPos { |
1a4d82fc JJ |
169 | stack: vec![], |
170 | top_elts: TtSeq(ms), | |
223e47cc | 171 | sep: sep, |
85aaf69f | 172 | idx: 0, |
1a4d82fc | 173 | up: None, |
223e47cc | 174 | matches: matches, |
85aaf69f SL |
175 | match_lo: 0, |
176 | match_cur: 0, | |
223e47cc LB |
177 | match_hi: match_idx_hi, |
178 | sp_lo: lo | |
d9579d0f | 179 | }) |
223e47cc LB |
180 | } |
181 | ||
1a4d82fc JJ |
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.

pub enum NamedMatch {
    /// one sub-match per repetition of the enclosing `$(...)` sequence,
    /// plus the span the whole sequence matched
    MatchedSeq(Vec<Rc<NamedMatch>>, syntax_pos::Span),
    /// a leaf: a nonterminal parsed by the real Rust parser
    MatchedNonterminal(Nonterminal)
}
203 | ||
/// Walk the matcher `ms` in parallel with the flat list of match results
/// `res`, building a map from each `$name` binder to its `NamedMatch`.
/// Produces `Error` on a duplicated bind name or a `$name` that is missing
/// its fragment specifier.
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
               -> ParseResult<HashMap<Ident, Rc<NamedMatch>>> {
    // Recursive helper. `idx` indexes into `res` and advances once per
    // binder encountered, so binders are consumed in matcher order.
    fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
             ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize)
             -> Result<(), (syntax_pos::Span, String)> {
        match *m {
            TokenTree::Sequence(_, ref seq) => {
                for next_m in &seq.tts {
                    n_rec(p_s, next_m, res, ret_val, idx)?
                }
            }
            TokenTree::Delimited(_, ref delim) => {
                for next_m in &delim.tts {
                    n_rec(p_s, next_m, res, ret_val, idx)?;
                }
            }
            TokenTree::Token(sp, MatchNt(bind_name, _)) => {
                // a named binder: claim the next result, rejecting reuse
                // of the same name
                match ret_val.entry(bind_name) {
                    Vacant(spot) => {
                        spot.insert(res[*idx].clone());
                        *idx += 1;
                    }
                    Occupied(..) => {
                        return Err((sp, format!("duplicated bind name: {}", bind_name)))
                    }
                }
            }
            TokenTree::Token(sp, SubstNt(..)) => {
                return Err((sp, "missing fragment specifier".to_string()))
            }
            TokenTree::Token(..) => (),
        }

        Ok(())
    }

    let mut ret_val = HashMap::new();
    let mut idx = 0;
    for m in ms {
        match n_rec(p_s, m, res, &mut ret_val, &mut idx) {
            Ok(_) => {},
            Err((sp, msg)) => return Error(sp, msg),
        }
    }

    Success(ret_val)
}
251 | ||
c34b1796 AL |
/// Outcome of matching a token stream against one macro arm.
pub enum ParseResult<T> {
    /// The arm matched; `T` carries the bound matches.
    Success(T),
    /// Arm failed to match
    Failure(syntax_pos::Span, String),
    /// Fatal error (malformed macro?). Abort compilation.
    Error(syntax_pos::Span, String)
}
259 | ||
/// Match result keyed by binder name, as produced by `nameize`.
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
/// Match result keyed by binder position rather than by name.
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
262 | ||
1a4d82fc JJ |
263 | /// Perform a token equality check, ignoring syntax context (that is, an |
264 | /// unhygienic comparison) | |
265 | pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { | |
266 | match (t1,t2) { | |
a7813a04 | 267 | (&token::Ident(id1),&token::Ident(id2)) |
1a4d82fc JJ |
268 | | (&token::Lifetime(id1),&token::Lifetime(id2)) => |
269 | id1.name == id2.name, | |
270 | _ => *t1 == *t2 | |
271 | } | |
272 | } | |
273 | ||
/// Match the token stream from `rdr` against the matcher `ms`, returning
/// the named bindings on success. This is the main Earley-like loop
/// described in the module docs: it maintains `cur_eis` (items live at the
/// current input position) and, per token, partitions them into `next_eis`
/// (waiting on a concrete token), `bb_eis` (waiting on a black-box
/// nonterminal like `$e:expr`), and `eof_eis` (complete matches).
pub fn parse(sess: &ParseSess,
             cfg: ast::CrateConfig,
             mut rdr: TtReader,
             ms: &[TokenTree])
             -> NamedParseResult {
    let mut cur_eis = Vec::new();
    // single starting item: dot at the far left of the whole matcher
    cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
                                               .cloned()
                                               .collect()),
                                     None,
                                     rdr.peek().sp.lo));

    loop {
        let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
        let mut next_eis = Vec::new(); // or proceed normally
        let mut eof_eis = Vec::new();

        let TokenAndSpan { tok, sp } = rdr.peek();

        /* we append new items to this while we go */
        loop {
            let mut ei = match cur_eis.pop() {
                None => break, /* for each Earley Item */
                Some(ei) => ei,
            };

            // When unzipped trees end, remove them
            while ei.idx >= ei.top_elts.len() {
                match ei.stack.pop() {
                    Some(MatcherTtFrame { elts, idx }) => {
                        ei.top_elts = elts;
                        ei.idx = idx + 1;
                    }
                    None => break
                }
            }

            let idx = ei.idx;
            let len = ei.top_elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if ei.up.is_some() {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)


                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position

                        let mut new_pos = ei.up.clone().unwrap();

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for idx in ei.match_lo..ei.match_hi {
                            let sub = (ei.matches[idx]).clone();
                            (&mut new_pos.matches[idx])
                                   .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
                                                                       sp.hi))));
                        }

                        new_pos.match_cur = ei.match_hi;
                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?

                    // the *_t vars are workarounds for the lack of unary move
                    match ei.sep {
                        Some(ref t) if idx == len => { // we need a separator
                            // i'm conflicted about whether this should be hygienic....
                            // though in this case, if the separators are never legal
                            // idents, it shouldn't matter.
                            if token_name_eq(&tok, t) { //pass the separator
                                let mut ei_t = ei.clone();
                                // ei_t.match_cur = ei_t.match_lo;
                                ei_t.idx += 1;
                                next_eis.push(ei_t);
                            }
                        }
                        _ => { // we don't need a separator
                            // rewind to the start of the repetition body
                            let mut ei_t = ei;
                            ei_t.match_cur = ei_t.match_lo;
                            ei_t.idx = 0;
                            cur_eis.push(ei_t);
                        }
                    }
                } else {
                    // no parent: the whole matcher has been consumed, so this
                    // item is only valid if the invocation ends here
                    eof_eis.push(ei);
                }
            } else {
                match ei.top_elts.get_tt(idx) {
                    /* need to descend into sequence */
                    TokenTree::Sequence(sp, seq) => {
                        if seq.op == tokenstream::KleeneOp::ZeroOrMore {
                            let mut new_ei = ei.clone();
                            new_ei.match_cur += seq.num_captures;
                            new_ei.idx += 1;
                            //we specifically matched zero repeats.
                            for idx in ei.match_cur..ei.match_cur + seq.num_captures {
                                (&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
                            }

                            cur_eis.push(new_ei);
                        }

                        // also try matching one-or-more repeats: a child item
                        // whose `up` pointer is the current item
                        let matches: Vec<_> = (0..ei.matches.len())
                            .map(|_| Vec::new()).collect();
                        let ei_t = ei;
                        cur_eis.push(Box::new(MatcherPos {
                            stack: vec![],
                            sep: seq.separator.clone(),
                            idx: 0,
                            matches: matches,
                            match_lo: ei_t.match_cur,
                            match_cur: ei_t.match_cur,
                            match_hi: ei_t.match_cur + seq.num_captures,
                            up: Some(ei_t),
                            sp_lo: sp.lo,
                            top_elts: Tt(TokenTree::Sequence(sp, seq)),
                        }));
                    }
                    TokenTree::Token(_, MatchNt(..)) => {
                        // Built-in nonterminals never start with these tokens,
                        // so we can eliminate them from consideration.
                        match tok {
                            token::CloseDelim(_) => {},
                            _ => bb_eis.push(ei),
                        }
                    }
                    TokenTree::Token(sp, SubstNt(..)) => {
                        return Error(sp, "missing fragment specifier".to_string())
                    }
                    seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => {
                        // unzip: push the current level onto the stack and
                        // start walking the inner tree
                        let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
                        let idx = ei.idx;
                        ei.stack.push(MatcherTtFrame {
                            elts: lower_elts,
                            idx: idx,
                        });
                        ei.idx = 0;
                        cur_eis.push(ei);
                    }
                    TokenTree::Token(_, ref t) => {
                        // a concrete token: advance past it if it matches
                        let mut ei_t = ei.clone();
                        if token_name_eq(t,&tok) {
                            ei_t.idx += 1;
                            next_eis.push(ei_t);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if token_name_eq(&tok, &token::Eof) {
            if eof_eis.len() == 1 {
                // exactly one complete parse: collect one match per binder
                let mut v = Vec::new();
                for dv in &mut (&mut eof_eis[0]).matches {
                    v.push(dv.pop().unwrap());
                }
                return nameize(sess, ms, &v[..]);
            } else if eof_eis.len() > 1 {
                return Error(sp, "ambiguity: multiple successful parses".to_string());
            } else {
                return Failure(sp, "unexpected end of macro invocation".to_string());
            }
        } else {
            // ambiguity check: a nonterminal may not compete with a token
            // match, and at most one nonterminal may be pending
            if (!bb_eis.is_empty() && !next_eis.is_empty())
                || bb_eis.len() > 1 {
                let nts = bb_eis.iter().map(|ei| match ei.top_elts.get_tt(ei.idx) {
                    TokenTree::Token(_, MatchNt(bind, name)) => {
                        format!("{} ('{}')", name, bind)
                    }
                    _ => panic!()
                }).collect::<Vec<String>>().join(" or ");

                return Error(sp, format!(
                    "local ambiguity: multiple parsing options: {}",
                    match next_eis.len() {
                        0 => format!("built-in NTs {}.", nts),
                        1 => format!("built-in NTs {} or 1 other option.", nts),
                        n => format!("built-in NTs {} or {} other options.", nts, n),
                    }
                ))
            } else if bb_eis.is_empty() && next_eis.is_empty() {
                return Failure(sp, format!("no rules expected the token `{}`",
                            pprust::token_to_string(&tok)));
            } else if !next_eis.is_empty() {
                /* Now process the next token */
                while !next_eis.is_empty() {
                    cur_eis.push(next_eis.pop().unwrap());
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                // hand off to the real Rust parser for the one pending
                // nonterminal, then resync the tt reader past what it ate
                let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));

                let mut ei = bb_eis.pop().unwrap();
                match ei.top_elts.get_tt(ei.idx) {
                    TokenTree::Token(span, MatchNt(_, ident)) => {
                        let match_cur = ei.match_cur;
                        (&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
                            parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
                        ei.idx += 1;
                        ei.match_cur += 1;
                    }
                    _ => panic!()
                }
                cur_eis.push(ei);

                for _ in 0..rust_parser.tokens_consumed {
                    let _ = rdr.next_token();
                }
            }
        }

        assert!(!cur_eis.is_empty());
    }
}
503 | ||
/// Parse a single nonterminal of kind `name` ("expr", "ty", "ident", ...)
/// using the real Rust parser `p`. `sp` is the span of the matcher, used
/// only for the internal-error path. Panics (via `panictry!`/`FatalError`)
/// on parse errors rather than returning them.
pub fn parse_nt<'a>(p: &mut Parser<'a>, sp: Span, name: &str) -> Nonterminal {
    // `tt` is special: it is handled before the unknown-macro-variable
    // check, inside a quote_depth bump
    match name {
        "tt" => {
            p.quote_depth += 1; //but in theory, non-quoted tts might be useful
            let res: ::parse::PResult<'a, _> = p.parse_token_tree();
            let res = token::NtTT(P(panictry!(res)));
            p.quote_depth -= 1;
            return res;
        }
        _ => {}
    }
    // check at the beginning and the parser checks after each bump
    p.check_unknown_macro_variable();
    match name {
        "item" => match panictry!(p.parse_item()) {
            Some(i) => token::NtItem(i),
            None => {
                p.fatal("expected an item keyword").emit();
                panic!(FatalError);
            }
        },
        "block" => token::NtBlock(panictry!(p.parse_block())),
        "stmt" => match panictry!(p.parse_stmt()) {
            Some(s) => token::NtStmt(P(s)),
            None => {
                p.fatal("expected a statement").emit();
                panic!(FatalError);
            }
        },
        "pat" => token::NtPat(panictry!(p.parse_pat())),
        "expr" => token::NtExpr(panictry!(p.parse_expr())),
        "ty" => token::NtTy(panictry!(p.parse_ty())),
        // this could be handled like a token, since it is one
        "ident" => match p.token {
            token::Ident(sn) => {
                p.bump();
                token::NtIdent(Box::new(Spanned::<Ident>{node: sn, span: p.span}))
            }
            _ => {
                let token_str = pprust::token_to_string(&p.token);
                p.fatal(&format!("expected ident, found {}",
                                 &token_str[..])).emit();
                panic!(FatalError)
            }
        },
        "path" => {
            token::NtPath(Box::new(panictry!(p.parse_path(PathStyle::Type))))
        },
        "meta" => token::NtMeta(panictry!(p.parse_meta_item())),
        // this is not supposed to happen, since it has been checked
        // when compiling the macro.
        _ => p.span_bug(sp, "invalid fragment specifier")
    }
}