]>
Commit | Line | Data |
---|---|---|
1a4d82fc | 1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT |
223e47cc LB |
2 | // file at the top-level directory of this distribution and at |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
1a4d82fc JJ |
10 | |
11 | //! This is an Earley-like parser, without support for in-grammar nonterminals, | |
12 | //! only by calling out to the main rust parser for named nonterminals (which it | |
13 | //! commits to fully when it hits one in a grammar). This means that there are no | |
14 | //! completer or predictor rules, and therefore no need to store one column per | |
15 | //! token: instead, there's a set of current Earley items and a set of next | |
16 | //! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in | |
17 | //! pathological cases, is worse than traditional Earley parsing, but it's an | |
18 | //! easier fit for Macro-by-Example-style rules, and I think the overhead is | |
19 | //! lower. (In order to prevent the pathological case, we'd need to lazily | |
20 | //! construct the resulting `NamedMatch`es at the very end. It'd be a pain, | |
21 | //! and require more memory to keep around old items, but it would also save | |
22 | //! overhead) | |
23 | //! | |
24 | //! Quick intro to how the parser works: | |
25 | //! | |
26 | //! A 'position' is a dot in the middle of a matcher, usually represented as a | |
27 | //! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. | |
28 | //! | |
29 | //! The parser walks through the input a character at a time, maintaining a list | |
30 | //! of items consistent with the current position in the input string: `cur_eis`. | |
31 | //! | |
32 | //! As it processes them, it fills up `eof_eis` with items that would be valid if | |
33 | //! the macro invocation is now over, `bb_eis` with items that are waiting on | |
34 | //! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting | |
b039eaaf | 35 | //! on a particular token. Most of the logic concerns moving the · through the |
1a4d82fc JJ |
36 | //! repetitions indicated by Kleene stars. It only advances or calls out to the |
37 | //! real Rust parser when no `cur_eis` items remain | |
38 | //! | |
39 | //! Example: Start parsing `a a a a b` against [· a $( a )* a b]. | |
40 | //! | |
41 | //! Remaining input: `a a a a b` | |
42 | //! next_eis: [· a $( a )* a b] | |
43 | //! | |
44 | //! - - - Advance over an `a`. - - - | |
45 | //! | |
46 | //! Remaining input: `a a a b` | |
47 | //! cur: [a · $( a )* a b] | |
48 | //! Descend/Skip (first item). | |
49 | //! next: [a $( · a )* a b] [a $( a )* · a b]. | |
50 | //! | |
51 | //! - - - Advance over an `a`. - - - | |
52 | //! | |
53 | //! Remaining input: `a a b` | |
54 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
55 | //! Finish/Repeat (first item) | |
56 | //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] | |
57 | //! | |
58 | //! - - - Advance over an `a`. - - - (this looks exactly like the last step) | |
59 | //! | |
60 | //! Remaining input: `a b` | |
61 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
62 | //! Finish/Repeat (first item) | |
63 | //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] | |
64 | //! | |
65 | //! - - - Advance over an `a`. - - - (this looks exactly like the last step) | |
66 | //! | |
67 | //! Remaining input: `b` | |
68 | //! cur: [a $( a · )* a b] next: [a $( a )* a · b] | |
69 | //! Finish/Repeat (first item) | |
70 | //! next: [a $( a )* · a b] [a $( · a )* a b] | |
71 | //! | |
72 | //! - - - Advance over a `b`. - - - | |
73 | //! | |
74 | //! Remaining input: `` | |
75 | //! eof: [a $( a )* a b ·] | |
76 | ||
77 | pub use self::NamedMatch::*; | |
78 | pub use self::ParseResult::*; | |
79 | use self::TokenTreeOrTokenTreeVec::*; | |
970d7e83 | 80 | |
223e47cc | 81 | use ast; |
b039eaaf | 82 | use ast::{TokenTree, Name}; |
85aaf69f | 83 | use codemap::{BytePos, mk_sp, Span}; |
223e47cc LB |
84 | use codemap; |
85 | use parse::lexer::*; //resolve bug? | |
86 | use parse::ParseSess; | |
1a4d82fc JJ |
87 | use parse::parser::{LifetimeAndTypesWithoutColons, Parser}; |
88 | use parse::token::{Eof, DocComment, MatchNt, SubstNt}; | |
89 | use parse::token::{Token, Nonterminal}; | |
223e47cc | 90 | use parse::token; |
1a4d82fc JJ |
91 | use print::pprust; |
92 | use ptr::P; | |
223e47cc | 93 | |
1a4d82fc JJ |
94 | use std::mem; |
95 | use std::rc::Rc; | |
96 | use std::collections::HashMap; | |
97 | use std::collections::hash_map::Entry::{Vacant, Occupied}; | |
223e47cc | 98 | |
1a4d82fc JJ |
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.

/// Either a single token tree or a shared vector of them. Lets the parser
/// treat the top level of a matcher and a descended subtree uniformly
/// (see the `len`/`get_tt` impl below).
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
    // a single tree, used when the dot has descended into a sequence,
    // delimited group, or doc-comment
    Tt(ast::TokenTree),
    // the whole (ref-counted, shared) top-level matcher sequence
    TtSeq(Rc<Vec<ast::TokenTree>>),
}
223e47cc | 107 | |
1a4d82fc | 108 | impl TokenTreeOrTokenTreeVec { |
85aaf69f | 109 | fn len(&self) -> usize { |
92a42be0 SL |
110 | match *self { |
111 | TtSeq(ref v) => v.len(), | |
112 | Tt(ref tt) => tt.len(), | |
1a4d82fc JJ |
113 | } |
114 | } | |
223e47cc | 115 | |
85aaf69f | 116 | fn get_tt(&self, index: usize) -> TokenTree { |
92a42be0 SL |
117 | match *self { |
118 | TtSeq(ref v) => v[index].clone(), | |
119 | Tt(ref tt) => tt.get_tt(index), | |
1a4d82fc JJ |
120 | } |
121 | } | |
223e47cc LB |
122 | } |
123 | ||
1a4d82fc JJ |
/// an unzipping of `TokenTree`s
///
/// One frame of the descent stack: when the parser steps into a delimited
/// group (or doc-comment), it saves the surrounding trees here so it can
/// resume after the inner tree is exhausted.
#[derive(Clone)]
struct MatcherTtFrame {
    // the token trees to return to when this frame is popped
    elts: TokenTreeOrTokenTreeVec,
    // position within `elts` where descent happened; traversal resumes
    // at `idx + 1` when the frame is popped (see the unzip loop in `parse`)
    idx: usize,
}
130 | ||
/// A single Earley item: a dot-position inside a matcher plus the matches
/// accumulated so far. Items are cloned freely as `parse` explores the
/// alternative ways the input can match.
#[derive(Clone)]
pub struct MatcherPos {
    // frames for token trees we have descended into (see `MatcherTtFrame`)
    stack: Vec<MatcherTtFrame>,
    // the token trees the dot is currently traversing
    top_elts: TokenTreeOrTokenTreeVec,
    // separator expected between repetitions, if this item is a sequence
    sep: Option<Token>,
    // index of the dot within `top_elts`
    idx: usize,
    // enclosing matcher position; `Some` iff this item is inside a
    // repetition ("a matcher sequence is repeating iff it has a parent")
    up: Option<Box<MatcherPos>>,
    // one accumulator of `NamedMatch`es per named matcher in the grammar
    matches: Vec<Vec<Rc<NamedMatch>>>,
    // this item may bind the half-open slot range [match_lo, match_hi)
    // of `matches`; `match_cur` is the next slot to bind
    match_lo: usize,
    match_cur: usize,
    match_hi: usize,
    // low end of the source span covered by this (repetition) match
    sp_lo: BytePos,
}
144 | ||
85aaf69f | 145 | pub fn count_names(ms: &[TokenTree]) -> usize { |
1a4d82fc | 146 | ms.iter().fold(0, |count, elt| { |
92a42be0 SL |
147 | count + match *elt { |
148 | TokenTree::Sequence(_, ref seq) => { | |
1a4d82fc JJ |
149 | seq.num_captures |
150 | } | |
92a42be0 | 151 | TokenTree::Delimited(_, ref delim) => { |
c34b1796 | 152 | count_names(&delim.tts) |
1a4d82fc | 153 | } |
92a42be0 | 154 | TokenTree::Token(_, MatchNt(..)) => { |
1a4d82fc JJ |
155 | 1 |
156 | } | |
92a42be0 | 157 | TokenTree::Token(_, _) => 0, |
1a4d82fc JJ |
158 | } |
159 | }) | |
223e47cc LB |
160 | } |
161 | ||
1a4d82fc JJ |
162 | pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos) |
163 | -> Box<MatcherPos> { | |
85aaf69f SL |
164 | let match_idx_hi = count_names(&ms[..]); |
165 | let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect(); | |
d9579d0f | 166 | Box::new(MatcherPos { |
1a4d82fc JJ |
167 | stack: vec![], |
168 | top_elts: TtSeq(ms), | |
223e47cc | 169 | sep: sep, |
85aaf69f | 170 | idx: 0, |
1a4d82fc | 171 | up: None, |
223e47cc | 172 | matches: matches, |
85aaf69f SL |
173 | match_lo: 0, |
174 | match_cur: 0, | |
223e47cc LB |
175 | match_hi: match_idx_hi, |
176 | sp_lo: lo | |
d9579d0f | 177 | }) |
223e47cc LB |
178 | } |
179 | ||
1a4d82fc JJ |
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.

pub enum NamedMatch {
    // one subtree per iteration of a repetition, plus the span it covered
    MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
    // a leaf: the result of calling out to the real Rust parser
    MatchedNonterminal(Nonterminal)
}
201 | ||
/// Pairs the positional match results in `res` with the binder names that
/// appear in the matcher `ms`, producing a name -> match map. `res` is
/// consumed left-to-right, in the order binders occur in `ms`; duplicate
/// binder names and bare `$name` (no fragment specifier) are errors.
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
               -> ParseResult<HashMap<Name, Rc<NamedMatch>>> {
    // Walks one matcher tree: for each `MatchNt` found, inserts
    // `res[*idx]` under the binder's name and advances `idx`.
    fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
             ret_val: &mut HashMap<Name, Rc<NamedMatch>>, idx: &mut usize)
             -> Result<(), (codemap::Span, String)> {
        match *m {
            TokenTree::Sequence(_, ref seq) => {
                for next_m in &seq.tts {
                    try!(n_rec(p_s, next_m, res, ret_val, idx))
                }
            }
            TokenTree::Delimited(_, ref delim) => {
                for next_m in &delim.tts {
                    try!(n_rec(p_s, next_m, res, ret_val, idx));
                }
            }
            TokenTree::Token(sp, MatchNt(bind_name, _, _, _)) => {
                match ret_val.entry(bind_name.name) {
                    Vacant(spot) => {
                        spot.insert(res[*idx].clone());
                        *idx += 1;
                    }
                    // same binder name used twice in one matcher
                    Occupied(..) => {
                        return Err((sp, format!("duplicated bind name: {}", bind_name)))
                    }
                }
            }
            // `$name` with no `:frag` should have been rejected earlier;
            // report it rather than silently skipping
            TokenTree::Token(sp, SubstNt(..)) => {
                return Err((sp, "missing fragment specifier".to_string()))
            }
            TokenTree::Token(_, _) => (),
        }

        Ok(())
    }

    let mut ret_val = HashMap::new();
    let mut idx = 0;
    for m in ms {
        match n_rec(p_s, m, res, &mut ret_val, &mut idx) {
            Ok(_) => {},
            Err((sp, msg)) => return Error(sp, msg),
        }
    }

    Success(ret_val)
}
249 | ||
c34b1796 AL |
/// Outcome of matching token input against one macro arm.
pub enum ParseResult<T> {
    /// The arm matched; `T` carries the bindings.
    Success(T),
    /// Arm failed to match
    Failure(codemap::Span, String),
    /// Fatal error (malformed macro?). Abort compilation.
    Error(codemap::Span, String)
}
257 | ||
/// Match result keyed by binder name (what `parse` ultimately returns).
pub type NamedParseResult = ParseResult<HashMap<Name, Rc<NamedMatch>>>;
/// Match result in matcher (positional) order rather than keyed by name.
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
1a4d82fc JJ |
261 | /// Perform a token equality check, ignoring syntax context (that is, an |
262 | /// unhygienic comparison) | |
263 | pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { | |
264 | match (t1,t2) { | |
265 | (&token::Ident(id1,_),&token::Ident(id2,_)) | |
266 | | (&token::Lifetime(id1),&token::Lifetime(id2)) => | |
267 | id1.name == id2.name, | |
268 | _ => *t1 == *t2 | |
269 | } | |
270 | } | |
271 | ||
/// Parse the token stream `rdr` against the matcher `ms`, returning the
/// named bindings on success. This is the outer loop of the Earley-like
/// parser described in the module docs: `cur_eis` holds the live items,
/// and each pass over one input token sorts items into `next_eis`
/// (expecting a concrete token), `bb_eis` (waiting on a black-box Rust
/// nonterminal like `$e:expr`), or `eof_eis` (complete if input ends here).
pub fn parse(sess: &ParseSess,
             cfg: ast::CrateConfig,
             mut rdr: TtReader,
             ms: &[TokenTree])
             -> NamedParseResult {
    let mut cur_eis = Vec::new();
    cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
                                               .cloned()
                                               .collect()),
                                     None,
                                     rdr.peek().sp.lo));

    loop {
        let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
        let mut next_eis = Vec::new(); // or proceed normally
        let mut eof_eis = Vec::new();

        let TokenAndSpan { tok, sp } = rdr.peek();

        /* we append new items to this while we go */
        loop {
            let mut ei = match cur_eis.pop() {
                None => break, /* for each Earley Item */
                Some(ei) => ei,
            };

            // When unzipped trees end, remove them
            while ei.idx >= ei.top_elts.len() {
                match ei.stack.pop() {
                    Some(MatcherTtFrame { elts, idx }) => {
                        ei.top_elts = elts;
                        ei.idx = idx + 1;
                    }
                    None => break
                }
            }

            let idx = ei.idx;
            let len = ei.top_elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if ei.up.is_some() {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)


                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position

                        let mut new_pos = ei.up.clone().unwrap();

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for idx in ei.match_lo..ei.match_hi {
                            let sub = (ei.matches[idx]).clone();
                            (&mut new_pos.matches[idx])
                                   .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
                                                                       sp.hi))));
                        }

                        new_pos.match_cur = ei.match_hi;
                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?

                    // the *_t vars are workarounds for the lack of unary move
                    match ei.sep {
                        Some(ref t) if idx == len => { // we need a separator
                            // i'm conflicted about whether this should be hygienic....
                            // though in this case, if the separators are never legal
                            // idents, it shouldn't matter.
                            if token_name_eq(&tok, t) { //pass the separator
                                let mut ei_t = ei.clone();
                                // ei_t.match_cur = ei_t.match_lo;
                                ei_t.idx += 1;
                                next_eis.push(ei_t);
                            }
                        }
                        _ => { // we don't need a separator
                            // rewind the dot to the start of the sequence
                            // body so another repetition can be tried
                            let mut ei_t = ei;
                            ei_t.match_cur = ei_t.match_lo;
                            ei_t.idx = 0;
                            cur_eis.push(ei_t);
                        }
                    }
                } else {
                    // top-level matcher fully consumed: valid iff input
                    // ends here
                    eof_eis.push(ei);
                }
            } else {
                match ei.top_elts.get_tt(idx) {
                    /* need to descend into sequence */
                    TokenTree::Sequence(sp, seq) => {
                        if seq.op == ast::ZeroOrMore {
                            let mut new_ei = ei.clone();
                            new_ei.match_cur += seq.num_captures;
                            new_ei.idx += 1;
                            //we specifically matched zero repeats.
                            for idx in ei.match_cur..ei.match_cur + seq.num_captures {
                                (&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
                            }

                            cur_eis.push(new_ei);
                        }

                        // also try matching one-or-more repetitions: a
                        // child position whose `up` is the current item
                        let matches: Vec<_> = (0..ei.matches.len())
                            .map(|_| Vec::new()).collect();
                        let ei_t = ei;
                        cur_eis.push(Box::new(MatcherPos {
                            stack: vec![],
                            sep: seq.separator.clone(),
                            idx: 0,
                            matches: matches,
                            match_lo: ei_t.match_cur,
                            match_cur: ei_t.match_cur,
                            match_hi: ei_t.match_cur + seq.num_captures,
                            up: Some(ei_t),
                            sp_lo: sp.lo,
                            top_elts: Tt(TokenTree::Sequence(sp, seq)),
                        }));
                    }
                    TokenTree::Token(_, MatchNt(..)) => {
                        // Built-in nonterminals never start with these tokens,
                        // so we can eliminate them from consideration.
                        match tok {
                            token::CloseDelim(_) => {},
                            _ => bb_eis.push(ei),
                        }
                    }
                    TokenTree::Token(sp, SubstNt(..)) => {
                        return Error(sp, "missing fragment specifier".to_string())
                    }
                    // unzip delimited groups and doc-comments: push a frame
                    // and restart the dot inside the inner tree
                    seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => {
                        let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
                        let idx = ei.idx;
                        ei.stack.push(MatcherTtFrame {
                            elts: lower_elts,
                            idx: idx,
                        });
                        ei.idx = 0;
                        cur_eis.push(ei);
                    }
                    // an ordinary token: the item survives only if the
                    // input token agrees
                    TokenTree::Token(_, ref t) => {
                        let mut ei_t = ei.clone();
                        if token_name_eq(t,&tok) {
                            ei_t.idx += 1;
                            next_eis.push(ei_t);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if token_name_eq(&tok, &token::Eof) {
            if eof_eis.len() == 1 {
                // exactly one complete parse: collect one match per binder
                let mut v = Vec::new();
                for dv in &mut (&mut eof_eis[0]).matches {
                    v.push(dv.pop().unwrap());
                }
                return nameize(sess, ms, &v[..]);
            } else if eof_eis.len() > 1 {
                return Error(sp, "ambiguity: multiple successful parses".to_string());
            } else {
                return Failure(sp, "unexpected end of macro invocation".to_string());
            }
        } else {
            // ambiguity: a nonterminal and a token both viable, or two
            // nonterminals competing for the same input
            if (!bb_eis.is_empty() && !next_eis.is_empty())
                || bb_eis.len() > 1 {
                let nts = bb_eis.iter().map(|ei| match ei.top_elts.get_tt(ei.idx) {
                    TokenTree::Token(_, MatchNt(bind, name, _, _)) => {
                        format!("{} ('{}')", name, bind)
                    }
                    _ => panic!()
                }).collect::<Vec<String>>().join(" or ");

                return Error(sp, format!(
                    "local ambiguity: multiple parsing options: {}",
                    match next_eis.len() {
                        0 => format!("built-in NTs {}.", nts),
                        1 => format!("built-in NTs {} or 1 other option.", nts),
                        n => format!("built-in NTs {} or {} other options.", nts, n),
                    }
                ))
            } else if bb_eis.is_empty() && next_eis.is_empty() {
                return Failure(sp, format!("no rules expected the token `{}`",
                                           pprust::token_to_string(&tok)));
            } else if !next_eis.is_empty() {
                /* Now process the next token */
                while !next_eis.is_empty() {
                    cur_eis.push(next_eis.pop().unwrap());
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                // exactly one nonterminal wants the input: commit to it by
                // calling out to the real Rust parser
                let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));

                let mut ei = bb_eis.pop().unwrap();
                match ei.top_elts.get_tt(ei.idx) {
                    TokenTree::Token(span, MatchNt(_, ident, _, _)) => {
                        let match_cur = ei.match_cur;
                        (&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
                            parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
                        ei.idx += 1;
                        ei.match_cur += 1;
                    }
                    _ => panic!()
                }
                cur_eis.push(ei);

                // resynchronize our reader with what the parser consumed
                for _ in 0..rust_parser.tokens_consumed {
                    let _ = rdr.next_token();
                }
            }
        }

        assert!(!cur_eis.is_empty());
    }
}
501 | ||
/// Parse a single nonterminal of kind `name` (the fragment specifier from
/// `$x:name`) using the real Rust parser `p`. `sp` is the matcher's span,
/// used for the invalid-specifier diagnostic. Panics (via `panictry!` /
/// `p.fatal`) on parse errors rather than returning them.
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
    match name {
        // `tt` is special: bump quote_depth so the token tree is captured
        // verbatim instead of being interpreted
        "tt" => {
            p.quote_depth += 1; //but in theory, non-quoted tts might be useful
            let res = token::NtTT(P(panictry!(p.parse_token_tree())));
            p.quote_depth -= 1;
            return res;
        }
        _ => {}
    }
    // check at the beginning and the parser checks after each bump
    panictry!(p.check_unknown_macro_variable());
    match name {
        "item" => match panictry!(p.parse_item()) {
            Some(i) => token::NtItem(i),
            None => panic!(p.fatal("expected an item keyword"))
        },
        "block" => token::NtBlock(panictry!(p.parse_block())),
        "stmt" => match panictry!(p.parse_stmt()) {
            Some(s) => token::NtStmt(s),
            None => panic!(p.fatal("expected a statement"))
        },
        "pat" => token::NtPat(panictry!(p.parse_pat())),
        "expr" => token::NtExpr(panictry!(p.parse_expr())),
        "ty" => token::NtTy(panictry!(p.parse_ty())),
        // this could be handled like a token, since it is one
        "ident" => match p.token {
            token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
            _ => {
                let token_str = pprust::token_to_string(&p.token);
                panic!(p.fatal(&format!("expected ident, found {}",
                                        &token_str[..])))
            }
        },
        "path" => {
            token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
        },
        "meta" => token::NtMeta(panictry!(p.parse_meta_item())),
        // unknown specifier: report the full list of valid ones
        _ => {
            panic!(p.span_fatal_help(sp,
                                     &format!("invalid fragment specifier `{}`", name),
                                     "valid fragment specifiers are `ident`, `block`, \
                                      `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
                                      and `item`"))
        }
    }
}