]> git.proxmox.com Git - rustc.git/blob - compiler/rustc_expand/src/mbe/macro_rules.rs
New upstream version 1.64.0+dfsg1
[rustc.git] / compiler / rustc_expand / src / mbe / macro_rules.rs
1 use crate::base::{DummyResult, ExtCtxt, MacResult, TTMacroExpander};
2 use crate::base::{SyntaxExtension, SyntaxExtensionKind};
3 use crate::expand::{ensure_complete_parse, parse_ast_fragment, AstFragment, AstFragmentKind};
4 use crate::mbe;
5 use crate::mbe::macro_check;
6 use crate::mbe::macro_parser::{Error, ErrorReported, Failure, Success, TtParser};
7 use crate::mbe::macro_parser::{MatchedSeq, MatchedTokenTree, MatcherLoc};
8 use crate::mbe::transcribe::transcribe;
9
10 use rustc_ast as ast;
11 use rustc_ast::token::{self, Delimiter, NonterminalKind, Token, TokenKind, TokenKind::*};
12 use rustc_ast::tokenstream::{DelimSpan, TokenStream};
13 use rustc_ast::{NodeId, DUMMY_NODE_ID};
14 use rustc_ast_pretty::pprust;
15 use rustc_attr::{self as attr, TransparencyError};
16 use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
17 use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
18 use rustc_feature::Features;
19 use rustc_lint_defs::builtin::{
20 RUST_2021_INCOMPATIBLE_OR_PATTERNS, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
21 };
22 use rustc_lint_defs::BuiltinLintDiagnostics;
23 use rustc_parse::parser::Parser;
24 use rustc_session::parse::ParseSess;
25 use rustc_session::Session;
26 use rustc_span::edition::Edition;
27 use rustc_span::hygiene::Transparency;
28 use rustc_span::source_map::SourceMap;
29 use rustc_span::symbol::{kw, sym, Ident, MacroRulesNormalizedIdent};
30 use rustc_span::Span;
31
32 use std::borrow::Cow;
33 use std::collections::hash_map::Entry;
34 use std::{mem, slice};
35 use tracing::debug;
36
/// The result of successfully matching a `macro_rules!` arm: a parser over
/// the transcribed tokens, plus the context needed to produce good
/// diagnostics if parsing that expansion fails.
pub(crate) struct ParserAnyMacro<'a> {
    parser: Parser<'a>,

    /// Span of the expansion site of the macro this parser is for
    site_span: Span,
    /// The ident of the macro we're parsing
    macro_ident: Ident,
    /// Node id used when buffering lints raised while parsing the expansion.
    lint_node_id: NodeId,
    /// Forwarded into the `TrailingMacro` lint diagnostic (see `make`).
    is_trailing_mac: bool,
    /// Span of the macro arm (rhs) that produced the tokens being parsed.
    arm_span: Span,
    /// Whether or not this macro is defined in the current crate
    is_local: bool,
}
50
51 pub(crate) fn annotate_err_with_kind(err: &mut Diagnostic, kind: AstFragmentKind, span: Span) {
52 match kind {
53 AstFragmentKind::Ty => {
54 err.span_label(span, "this macro call doesn't expand to a type");
55 }
56 AstFragmentKind::Pat => {
57 err.span_label(span, "this macro call doesn't expand to a pattern");
58 }
59 _ => {}
60 };
61 }
62
/// Emits a parse error that occurred while parsing the output of a macro
/// expansion, rewriting the diagnostic so it points at the macro rather than
/// at synthesized tokens.
///
/// `parser` is the parser that failed; `orig_parser` is a snapshot from
/// before the failed parse, used to retry the tokens as a different fragment
/// kind; `site_span` is the invocation site; `arm_span` is the rule body that
/// produced the tokens.
fn emit_frag_parse_err(
    mut e: DiagnosticBuilder<'_, rustc_errors::ErrorGuaranteed>,
    parser: &Parser<'_>,
    orig_parser: &mut Parser<'_>,
    site_span: Span,
    arm_span: Span,
    kind: AstFragmentKind,
) {
    // FIXME(davidtwco): avoid depending on the error message text
    if parser.token == token::Eof && e.message[0].0.expect_str().ends_with(", found `<eof>`") {
        if !e.span.is_dummy() {
            // early end of macro arm (#52866)
            e.replace_span_with(parser.sess.source_map().next_point(parser.token.span));
        }
        // Reword the "found `<eof>`" primary message: from the user's point
        // of view the expansion simply ended with an incomplete expression.
        let msg = &e.message[0];
        e.message[0] = (
            rustc_errors::DiagnosticMessage::Str(format!(
                "macro expansion ends with an incomplete expression: {}",
                msg.0.expect_str().replace(", found `<eof>`", ""),
            )),
            msg.1,
        );
    }
    if e.span.is_dummy() {
        // Get around lack of span in error (#30128)
        e.replace_span_with(site_span);
        if !parser.sess.source_map().is_imported(arm_span) {
            e.span_label(arm_span, "in this macro arm");
        }
    } else if parser.sess.source_map().is_imported(parser.token.span) {
        // The failing token came from another crate's macro; the best local
        // anchor is the invocation itself.
        e.span_label(site_span, "in this macro invocation");
    }
    match kind {
        // Try a statement if an expression is wanted but failed and suggest adding `;` to call.
        AstFragmentKind::Expr => match parse_ast_fragment(orig_parser, AstFragmentKind::Stmts) {
            Err(err) => err.cancel(),
            Ok(_) => {
                e.note(
                    "the macro call doesn't expand to an expression, but it can expand to a statement",
                );
                e.span_suggestion_verbose(
                    site_span.shrink_to_hi(),
                    "add `;` to interpret the expansion as a statement",
                    ";",
                    Applicability::MaybeIncorrect,
                );
            }
        },
        _ => annotate_err_with_kind(&mut e, kind, site_span),
    };
    e.emit();
}
115
impl<'a> ParserAnyMacro<'a> {
    /// Parses the expansion's tokens as the AST fragment `kind` expected at
    /// the call site. On failure, emits a tailored diagnostic and returns a
    /// dummy fragment so expansion can continue.
    pub(crate) fn make(mut self: Box<ParserAnyMacro<'a>>, kind: AstFragmentKind) -> AstFragment {
        let ParserAnyMacro {
            site_span,
            macro_ident,
            ref mut parser,
            lint_node_id,
            arm_span,
            is_trailing_mac,
            is_local,
        } = *self;
        // Snapshot taken before parsing, so diagnostics can retry the same
        // tokens as a different fragment kind (see `emit_frag_parse_err`).
        let snapshot = &mut parser.create_snapshot_for_diagnostic();
        let fragment = match parse_ast_fragment(parser, kind) {
            Ok(f) => f,
            Err(err) => {
                emit_frag_parse_err(err, parser, snapshot, site_span, arm_span, kind);
                return kind.dummy(site_span);
            }
        };

        // We allow semicolons at the end of expressions -- e.g., the semicolon in
        // `macro_rules! m { () => { panic!(); } }` isn't parsed by `.parse_expr()`,
        // but `m!()` is allowed in expression positions (cf. issue #34706).
        if kind == AstFragmentKind::Expr && parser.token == token::Semi {
            if is_local {
                // Only lint macros defined in this crate; foreign definitions
                // can't be fixed by the user anyway.
                parser.sess.buffer_lint_with_diagnostic(
                    SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
                    parser.token.span,
                    lint_node_id,
                    "trailing semicolon in macro used in expression position",
                    BuiltinLintDiagnostics::TrailingMacro(is_trailing_mac, macro_ident),
                );
            }
            parser.bump();
        }

        // Make sure we don't have any tokens left to parse so we don't silently drop anything.
        let path = ast::Path::from_ident(macro_ident.with_span_pos(site_span));
        ensure_complete_parse(parser, &path, kind.name(), site_span);
        fragment
    }
}
158
/// The compiled form of a `macro_rules!` definition: its parsed rules plus
/// the metadata needed at expansion time.
struct MacroRulesMacroExpander {
    /// Definition node id; `DUMMY_NODE_ID` for macros from external crates.
    node_id: NodeId,
    name: Ident,
    span: Span,
    transparency: Transparency,
    /// Matchers, pre-lowered to `MatcherLoc` form for fast matching.
    lhses: Vec<Vec<MatcherLoc>>,
    /// Rule bodies, parallel to `lhses`.
    rhses: Vec<mbe::TokenTree>,
    /// `false` if the definition had errors; expansion then yields a dummy result.
    valid: bool,
}
168
impl TTMacroExpander for MacroRulesMacroExpander {
    /// Expands one invocation of this macro at `sp` by delegating to
    /// `expand_macro`. An invalid definition expands silently to a dummy
    /// result (the definition's own errors were already reported).
    fn expand<'cx>(
        &self,
        cx: &'cx mut ExtCtxt<'_>,
        sp: Span,
        input: TokenStream,
    ) -> Box<dyn MacResult + 'cx> {
        if !self.valid {
            return DummyResult::any(sp);
        }
        expand_macro(
            cx,
            sp,
            self.span,
            self.node_id,
            self.name,
            self.transparency,
            input,
            &self.lhses,
            &self.rhses,
        )
    }
}
192
193 fn macro_rules_dummy_expander<'cx>(
194 _: &'cx mut ExtCtxt<'_>,
195 span: Span,
196 _: TokenStream,
197 ) -> Box<dyn MacResult + 'cx> {
198 DummyResult::any(span)
199 }
200
201 fn trace_macros_note(cx_expansions: &mut FxIndexMap<Span, Vec<String>>, sp: Span, message: String) {
202 let sp = sp.macro_backtrace().last().map_or(sp, |trace| trace.call_site);
203 cx_expansions.entry(sp).or_default().push(message);
204 }
205
/// Expands the rules based macro defined by `lhses` and `rhses` for a given
/// input `arg`.
///
/// Tries each rule's matcher in order; the first successful match has its
/// rhs transcribed and returned as a `ParserAnyMacro`. If no rule matches,
/// the failure that progressed furthest through the input is reported.
fn expand_macro<'cx>(
    cx: &'cx mut ExtCtxt<'_>,
    sp: Span,
    def_span: Span,
    node_id: NodeId,
    name: Ident,
    transparency: Transparency,
    arg: TokenStream,
    lhses: &[Vec<MatcherLoc>],
    rhses: &[mbe::TokenTree],
) -> Box<dyn MacResult + 'cx> {
    let sess = &cx.sess.parse_sess;
    // Macros defined in the current crate have a real node id,
    // whereas macros from an external crate have a dummy id.
    let is_local = node_id != DUMMY_NODE_ID;

    if cx.trace_macros() {
        let msg = format!("expanding `{}! {{ {} }}`", name, pprust::tts_to_string(&arg));
        trace_macros_note(&mut cx.expansions, sp, msg);
    }

    // Which arm's failure should we report? (the one furthest along)
    let mut best_failure: Option<(Token, &str)> = None;

    // We create a base parser that can be used for the "black box" parts.
    // Every iteration needs a fresh copy of that parser. However, the parser
    // is not mutated on many of the iterations, particularly when dealing with
    // macros like this:
    //
    // macro_rules! foo {
    //     ("a") => (A);
    //     ("b") => (B);
    //     ("c") => (C);
    //     // ... etc. (maybe hundreds more)
    // }
    //
    // as seen in the `html5ever` benchmark. We use a `Cow` so that the base
    // parser is only cloned when necessary (upon mutation). Furthermore, we
    // reinitialize the `Cow` with the base parser at the start of every
    // iteration, so that any mutated parsers are not reused. This is all quite
    // hacky, but speeds up the `html5ever` benchmark significantly. (Issue
    // 68836 suggests a more comprehensive but more complex change to deal with
    // this situation.)
    let parser = parser_from_cx(sess, arg.clone());

    // Try each arm's matchers.
    let mut tt_parser = TtParser::new(name);
    for (i, lhs) in lhses.iter().enumerate() {
        // Take a snapshot of the state of pre-expansion gating at this point.
        // This is used so that if a matcher is not `Success(..)`ful,
        // then the spans which became gated when parsing the unsuccessful matcher
        // are not recorded. On the first `Success(..)`ful matcher, the spans are merged.
        let mut gated_spans_snapshot = mem::take(&mut *sess.gated_spans.spans.borrow_mut());

        match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs) {
            Success(named_matches) => {
                // The matcher was `Success(..)`ful.
                // Merge the gated spans from parsing the matcher with the pre-existing ones.
                sess.gated_spans.merge(gated_spans_snapshot);

                let (rhs, rhs_span): (&mbe::Delimited, DelimSpan) = match &rhses[i] {
                    mbe::TokenTree::Delimited(span, delimited) => (&delimited, *span),
                    _ => cx.span_bug(sp, "malformed macro rhs"),
                };
                let arm_span = rhses[i].span();

                let rhs_spans = rhs.tts.iter().map(|t| t.span()).collect::<Vec<_>>();
                // rhs has holes ( `$id` and `$(...)` that need filled)
                let mut tts = match transcribe(cx, &named_matches, &rhs, rhs_span, transparency) {
                    Ok(tts) => tts,
                    Err(mut err) => {
                        err.emit();
                        return DummyResult::any(arm_span);
                    }
                };

                // Replace all the tokens for the corresponding positions in the macro, to maintain
                // proper positions in error reporting, while maintaining the macro_backtrace.
                if rhs_spans.len() == tts.len() {
                    tts = tts.map_enumerated(|i, tt| {
                        let mut tt = tt.clone();
                        let mut sp = rhs_spans[i];
                        sp = sp.with_ctxt(tt.span().ctxt());
                        tt.set_span(sp);
                        tt
                    });
                }

                if cx.trace_macros() {
                    let msg = format!("to `{}`", pprust::tts_to_string(&tts));
                    trace_macros_note(&mut cx.expansions, sp, msg);
                }

                let mut p = Parser::new(sess, tts, false, None);
                p.last_type_ascription = cx.current_expansion.prior_type_ascription;

                if is_local {
                    // Feed the unused-rule lint: rule `i` of this macro was used.
                    cx.resolver.record_macro_rule_usage(node_id, i);
                }

                // Let the context choose how to interpret the result.
                // Weird, but useful for X-macros.
                return Box::new(ParserAnyMacro {
                    parser: p,

                    // Pass along the original expansion site and the name of the macro
                    // so we can print a useful error message if the parse of the expanded
                    // macro leaves unparsed tokens.
                    site_span: sp,
                    macro_ident: name,
                    lint_node_id: cx.current_expansion.lint_node_id,
                    is_trailing_mac: cx.current_expansion.is_trailing_mac,
                    arm_span,
                    is_local,
                });
            }
            Failure(token, msg) => match best_failure {
                // Keep the previous failure if it got at least as far.
                Some((ref best_token, _)) if best_token.span.lo() >= token.span.lo() => {}
                _ => best_failure = Some((token, msg)),
            },
            Error(err_sp, ref msg) => {
                let span = err_sp.substitute_dummy(sp);
                cx.struct_span_err(span, &msg).emit();
                return DummyResult::any(span);
            }
            ErrorReported => return DummyResult::any(sp),
        }

        // The matcher was not `Success(..)`ful.
        // Restore to the state before snapshotting and maybe try again.
        mem::swap(&mut gated_spans_snapshot, &mut sess.gated_spans.spans.borrow_mut());
    }
    drop(parser);

    // No rule matched: report the best failure we recorded above.
    let (token, label) = best_failure.expect("ran no matchers");
    let span = token.span.substitute_dummy(sp);
    let mut err = cx.struct_span_err(span, &parse_failure_msg(&token));
    err.span_label(span, label);
    if !def_span.is_dummy() && !cx.source_map().is_imported(def_span) {
        err.span_label(cx.source_map().guess_head_span(def_span), "when calling this macro");
    }
    annotate_doc_comment(&mut err, sess.source_map(), span);
    // Check whether there's a missing comma in this macro call, like `println!("{}" a);`
    if let Some((arg, comma_span)) = arg.add_comma() {
        for lhs in lhses {
            let parser = parser_from_cx(sess, arg.clone());
            if let Success(_) = tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs) {
                if comma_span.is_dummy() {
                    err.note("you might be missing a comma");
                } else {
                    err.span_suggestion_short(
                        comma_span,
                        "missing comma here",
                        ", ",
                        Applicability::MachineApplicable,
                    );
                }
            }
        }
    }
    err.emit();
    cx.trace_macros_diag();
    DummyResult::any(sp)
}
372
373 // Note that macro-by-example's input is also matched against a token tree:
374 // $( $lhs:tt => $rhs:tt );+
375 //
376 // Holy self-referential!
377
/// Converts a macro item into a syntax extension.
///
/// Parses the `macro_rules!` body against a hand-built `$lhs:tt => $rhs:tt`
/// grammar, validates every rule (FOLLOW sets, empty repetitions,
/// meta-variable usage), and returns the extension together with the rule
/// spans consumed by the unused-rule lint.
pub fn compile_declarative_macro(
    sess: &Session,
    features: &Features,
    def: &ast::Item,
    edition: Edition,
) -> (SyntaxExtension, Vec<(usize, Span)>) {
    debug!("compile_declarative_macro: {:?}", def);
    let mk_syn_ext = |expander| {
        SyntaxExtension::new(
            sess,
            SyntaxExtensionKind::LegacyBang(expander),
            def.span,
            Vec::new(),
            edition,
            def.ident.name,
            &def.attrs,
        )
    };
    // Used on every error path: a valid extension whose expansion is a no-op.
    let dummy_syn_ext = || (mk_syn_ext(Box::new(macro_rules_dummy_expander)), Vec::new());

    let diag = &sess.parse_sess.span_diagnostic;
    let lhs_nm = Ident::new(sym::lhs, def.span);
    let rhs_nm = Ident::new(sym::rhs, def.span);
    let tt_spec = Some(NonterminalKind::TT);

    // Parse the macro_rules! invocation
    let (macro_rules, body) = match &def.kind {
        ast::ItemKind::MacroDef(def) => (def.macro_rules, def.body.inner_tokens()),
        _ => unreachable!(),
    };

    // The pattern that macro_rules matches.
    // The grammar for macro_rules! is:
    // $( $lhs:tt => $rhs:tt );+
    // ...quasiquoting this would be nice.
    // These spans won't matter, anyways
    let argument_gram = vec![
        mbe::TokenTree::Sequence(
            DelimSpan::dummy(),
            mbe::SequenceRepetition {
                tts: vec![
                    mbe::TokenTree::MetaVarDecl(def.span, lhs_nm, tt_spec),
                    mbe::TokenTree::token(token::FatArrow, def.span),
                    mbe::TokenTree::MetaVarDecl(def.span, rhs_nm, tt_spec),
                ],
                // `macro_rules!` rules are `;`-separated; `macro` rules use `,`.
                separator: Some(Token::new(
                    if macro_rules { token::Semi } else { token::Comma },
                    def.span,
                )),
                kleene: mbe::KleeneToken::new(mbe::KleeneOp::OneOrMore, def.span),
                num_captures: 2,
            },
        ),
        // to phase into semicolon-termination instead of semicolon-separation
        mbe::TokenTree::Sequence(
            DelimSpan::dummy(),
            mbe::SequenceRepetition {
                tts: vec![mbe::TokenTree::token(
                    if macro_rules { token::Semi } else { token::Comma },
                    def.span,
                )],
                separator: None,
                kleene: mbe::KleeneToken::new(mbe::KleeneOp::ZeroOrMore, def.span),
                num_captures: 0,
            },
        ),
    ];
    // Convert it into `MatcherLoc` form.
    let argument_gram = mbe::macro_parser::compute_locs(&argument_gram);

    let parser = Parser::new(&sess.parse_sess, body, true, rustc_parse::MACRO_ARGUMENTS);
    let mut tt_parser =
        TtParser::new(Ident::with_dummy_span(if macro_rules { kw::MacroRules } else { kw::Macro }));
    let argument_map = match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), &argument_gram) {
        Success(m) => m,
        Failure(token, msg) => {
            let s = parse_failure_msg(&token);
            let sp = token.span.substitute_dummy(def.span);
            let mut err = sess.parse_sess.span_diagnostic.struct_span_err(sp, &s);
            err.span_label(sp, msg);
            annotate_doc_comment(&mut err, sess.source_map(), sp);
            err.emit();
            return dummy_syn_ext();
        }
        Error(sp, msg) => {
            sess.parse_sess
                .span_diagnostic
                .struct_span_err(sp.substitute_dummy(def.span), &msg)
                .emit();
            return dummy_syn_ext();
        }
        ErrorReported => {
            return dummy_syn_ext();
        }
    };

    // Accumulates validation results across all checks; never short-circuits
    // so every rule's errors get reported.
    let mut valid = true;

    // Extract the arguments:
    let lhses = match argument_map[&MacroRulesNormalizedIdent::new(lhs_nm)] {
        MatchedSeq(ref s) => s
            .iter()
            .map(|m| {
                if let MatchedTokenTree(ref tt) = *m {
                    let tt = mbe::quoted::parse(
                        TokenStream::new(vec![tt.clone()]),
                        true,
                        &sess.parse_sess,
                        def.id,
                        features,
                        edition,
                    )
                    .pop()
                    .unwrap();
                    valid &= check_lhs_nt_follows(&sess.parse_sess, &def, &tt);
                    return tt;
                }
                sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
            })
            .collect::<Vec<mbe::TokenTree>>(),
        _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs"),
    };

    let rhses = match argument_map[&MacroRulesNormalizedIdent::new(rhs_nm)] {
        MatchedSeq(ref s) => s
            .iter()
            .map(|m| {
                if let MatchedTokenTree(ref tt) = *m {
                    return mbe::quoted::parse(
                        TokenStream::new(vec![tt.clone()]),
                        false,
                        &sess.parse_sess,
                        def.id,
                        features,
                        edition,
                    )
                    .pop()
                    .unwrap();
                }
                // NOTE(review): this message says "lhs" although this arm walks
                // the *rhs* matches — looks like a copy/paste from the lhs
                // extraction above. Only reachable on an internal invariant
                // violation, but worth fixing for debuggability.
                sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
            })
            .collect::<Vec<mbe::TokenTree>>(),
        _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured rhs"),
    };

    for rhs in &rhses {
        valid &= check_rhs(&sess.parse_sess, rhs);
    }

    // don't abort iteration early, so that errors for multiple lhses can be reported
    for lhs in &lhses {
        valid &= check_lhs_no_empty_seq(&sess.parse_sess, slice::from_ref(lhs));
    }

    valid &= macro_check::check_meta_variables(&sess.parse_sess, def.id, def.span, &lhses, &rhses);

    let (transparency, transparency_error) = attr::find_transparency(&def.attrs, macro_rules);
    match transparency_error {
        Some(TransparencyError::UnknownTransparency(value, span)) => {
            diag.span_err(span, &format!("unknown macro transparency: `{}`", value));
        }
        Some(TransparencyError::MultipleTransparencyAttrs(old_span, new_span)) => {
            diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes");
        }
        None => {}
    }

    // Compute the spans of the macro rules for unused rule linting.
    // To avoid warning noise, only consider the rules of this
    // macro for the lint, if all rules are valid.
    // Also, we are only interested in non-foreign macros.
    let rule_spans = if valid && def.id != DUMMY_NODE_ID {
        lhses
            .iter()
            .zip(rhses.iter())
            .enumerate()
            // If the rhs contains an invocation like compile_error!,
            // don't consider the rule for the unused rule lint.
            .filter(|(_idx, (_lhs, rhs))| !has_compile_error_macro(rhs))
            // We only take the span of the lhs here,
            // so that the spans of created warnings are smaller.
            .map(|(idx, (lhs, _rhs))| (idx, lhs.span()))
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };

    // Convert the lhses into `MatcherLoc` form, which is better for doing the
    // actual matching. Unless the matcher is invalid.
    let lhses = if valid {
        lhses
            .iter()
            .map(|lhs| {
                // Ignore the delimiters around the matcher.
                match lhs {
                    mbe::TokenTree::Delimited(_, delimited) => {
                        mbe::macro_parser::compute_locs(&delimited.tts)
                    }
                    _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "malformed macro lhs"),
                }
            })
            .collect()
    } else {
        vec![]
    };

    let expander = Box::new(MacroRulesMacroExpander {
        name: def.ident,
        span: def.span,
        node_id: def.id,
        transparency,
        lhses,
        rhses,
        valid,
    });
    (mk_syn_ext(expander), rule_spans)
}
596
/// Subdiagnostic labels distinguishing outer (`///`, `/**`) from inner
/// (`//!`, `/*!`) doc comments when one appears inside a failing macro span
/// (attached by `annotate_doc_comment`).
#[derive(SessionSubdiagnostic)]
enum ExplainDocComment {
    #[label(expand::explain_doc_comment_inner)]
    Inner {
        #[primary_span]
        span: Span,
    },
    #[label(expand::explain_doc_comment_outer)]
    Outer {
        #[primary_span]
        span: Span,
    },
}
610
611 fn annotate_doc_comment(
612 err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
613 sm: &SourceMap,
614 span: Span,
615 ) {
616 if let Ok(src) = sm.span_to_snippet(span) {
617 if src.starts_with("///") || src.starts_with("/**") {
618 err.subdiagnostic(ExplainDocComment::Outer { span });
619 } else if src.starts_with("//!") || src.starts_with("/*!") {
620 err.subdiagnostic(ExplainDocComment::Inner { span });
621 }
622 }
623 }
624
625 fn check_lhs_nt_follows(sess: &ParseSess, def: &ast::Item, lhs: &mbe::TokenTree) -> bool {
626 // lhs is going to be like TokenTree::Delimited(...), where the
627 // entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
628 if let mbe::TokenTree::Delimited(_, delimited) = lhs {
629 check_matcher(sess, def, &delimited.tts)
630 } else {
631 let msg = "invalid macro matcher; matchers must be contained in balanced delimiters";
632 sess.span_diagnostic.span_err(lhs.span(), msg);
633 false
634 }
635 // we don't abort on errors on rejection, the driver will do that for us
636 // after parsing/expansion. we can report every error in every macro this way.
637 }
638
639 /// Checks that the lhs contains no repetition which could match an empty token
640 /// tree, because then the matcher would hang indefinitely.
641 fn check_lhs_no_empty_seq(sess: &ParseSess, tts: &[mbe::TokenTree]) -> bool {
642 use mbe::TokenTree;
643 for tt in tts {
644 match *tt {
645 TokenTree::Token(..)
646 | TokenTree::MetaVar(..)
647 | TokenTree::MetaVarDecl(..)
648 | TokenTree::MetaVarExpr(..) => (),
649 TokenTree::Delimited(_, ref del) => {
650 if !check_lhs_no_empty_seq(sess, &del.tts) {
651 return false;
652 }
653 }
654 TokenTree::Sequence(span, ref seq) => {
655 if seq.separator.is_none()
656 && seq.tts.iter().all(|seq_tt| match *seq_tt {
657 TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Vis)) => true,
658 TokenTree::Sequence(_, ref sub_seq) => {
659 sub_seq.kleene.op == mbe::KleeneOp::ZeroOrMore
660 || sub_seq.kleene.op == mbe::KleeneOp::ZeroOrOne
661 }
662 _ => false,
663 })
664 {
665 let sp = span.entire();
666 sess.span_diagnostic.span_err(sp, "repetition matches empty token tree");
667 return false;
668 }
669 if !check_lhs_no_empty_seq(sess, &seq.tts) {
670 return false;
671 }
672 }
673 }
674 }
675
676 true
677 }
678
679 fn check_rhs(sess: &ParseSess, rhs: &mbe::TokenTree) -> bool {
680 match *rhs {
681 mbe::TokenTree::Delimited(..) => return true,
682 _ => {
683 sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited");
684 }
685 }
686 false
687 }
688
689 fn check_matcher(sess: &ParseSess, def: &ast::Item, matcher: &[mbe::TokenTree]) -> bool {
690 let first_sets = FirstSets::new(matcher);
691 let empty_suffix = TokenSet::empty();
692 let err = sess.span_diagnostic.err_count();
693 check_matcher_core(sess, def, &first_sets, matcher, &empty_suffix);
694 err == sess.span_diagnostic.err_count()
695 }
696
/// Returns whether `rhs` (a rule's expansion side) contains an invocation of
/// `compile_error!`. Such rules are excluded from the unused-rule lint, since
/// they exist only to emit an error when matched.
fn has_compile_error_macro(rhs: &mbe::TokenTree) -> bool {
    match rhs {
        mbe::TokenTree::Delimited(_sp, d) => {
            // Look for the three-token window `compile_error` `!` `(...)` at
            // this nesting level (the delimiter must be a real one, not the
            // invisible kind).
            let has_compile_error = d.tts.array_windows::<3>().any(|[ident, bang, args]| {
                if let mbe::TokenTree::Token(ident) = ident &&
                        let TokenKind::Ident(ident, _) = ident.kind &&
                        ident == sym::compile_error &&
                        let mbe::TokenTree::Token(bang) = bang &&
                        let TokenKind::Not = bang.kind &&
                        let mbe::TokenTree::Delimited(_, del) = args &&
                        del.delim != Delimiter::Invisible
                {
                    true
                } else {
                    false
                }
            });
            // Otherwise recurse into nested delimited groups.
            if has_compile_error { true } else { d.tts.iter().any(has_compile_error_macro) }
        }
        _ => false,
    }
}
719
// The `FirstSets` for a matcher is a mapping from subsequences in the
// matcher to the FIRST set for that subsequence.
//
// This mapping is partially precomputed via a backwards scan over the
// token trees of the matcher, which provides a mapping from each
// repetition sequence to its *first* set.
//
// (Hypothetically, sequences should be uniquely identifiable via their
// spans, though perhaps that is false, e.g., for macro-generated macros
// that do not try to inject artificial span information. My plan is
// to try to catch such cases ahead of time and not include them in
// the precomputed mapping.)
struct FirstSets<'tt> {
    // this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its
    // span in the original matcher to the First set for the inner sequence `tt ...`.
    //
    // If two sequences have the same span in a matcher, then map that
    // span to None (invalidating the mapping here and forcing the code to
    // use a slow path).
    first: FxHashMap<Span, Option<TokenSet<'tt>>>,
}
741
impl<'tt> FirstSets<'tt> {
    /// Precomputes the FIRST set of every repetition sequence in `tts` via a
    /// single backwards scan, keyed by each sequence's span.
    fn new(tts: &'tt [mbe::TokenTree]) -> FirstSets<'tt> {
        use mbe::TokenTree;

        let mut sets = FirstSets { first: FxHashMap::default() };
        build_recur(&mut sets, tts);
        return sets;

        // walks backward over `tts`, returning the FIRST for `tts`
        // and updating `sets` at the same time for all sequence
        // substructure we find within `tts`.
        fn build_recur<'tt>(sets: &mut FirstSets<'tt>, tts: &'tt [TokenTree]) -> TokenSet<'tt> {
            let mut first = TokenSet::empty();
            for tt in tts.iter().rev() {
                match *tt {
                    TokenTree::Token(..)
                    | TokenTree::MetaVar(..)
                    | TokenTree::MetaVarDecl(..)
                    | TokenTree::MetaVarExpr(..) => {
                        // A single concrete token: it alone is the FIRST set
                        // of everything scanned so far (reverse order).
                        first.replace_with(TtHandle::TtRef(tt));
                    }
                    TokenTree::Delimited(span, ref delimited) => {
                        build_recur(sets, &delimited.tts);
                        // A delimited group always begins with its open delimiter.
                        first.replace_with(TtHandle::from_token_kind(
                            token::OpenDelim(delimited.delim),
                            span.open,
                        ));
                    }
                    TokenTree::Sequence(sp, ref seq_rep) => {
                        let subfirst = build_recur(sets, &seq_rep.tts);

                        match sets.first.entry(sp.entire()) {
                            Entry::Vacant(vac) => {
                                vac.insert(Some(subfirst.clone()));
                            }
                            Entry::Occupied(mut occ) => {
                                // if there is already an entry, then a span must have collided.
                                // This should not happen with typical macro_rules macros,
                                // but syntax extensions need not maintain distinct spans,
                                // so distinct syntax trees can be assigned the same span.
                                // In such a case, the map cannot be trusted; so mark this
                                // entry as unusable.
                                occ.insert(None);
                            }
                        }

                        // If the sequence contents can be empty, then the first
                        // token could be the separator token itself.

                        if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
                            first.add_one_maybe(TtHandle::from_token(sep.clone()));
                        }

                        // Reverse scan: Sequence comes before `first`.
                        if subfirst.maybe_empty
                            || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
                            || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
                        {
                            // If sequence is potentially empty, then
                            // union them (preserving first emptiness).
                            first.add_all(&TokenSet { maybe_empty: true, ..subfirst });
                        } else {
                            // Otherwise, sequence guaranteed
                            // non-empty; replace first.
                            first = subfirst;
                        }
                    }
                }
            }

            first
        }
    }

    // walks forward over `tts` until all potential FIRST tokens are
    // identified.
    fn first(&self, tts: &'tt [mbe::TokenTree]) -> TokenSet<'tt> {
        use mbe::TokenTree;

        let mut first = TokenSet::empty();
        for tt in tts.iter() {
            assert!(first.maybe_empty);
            match *tt {
                TokenTree::Token(..)
                | TokenTree::MetaVar(..)
                | TokenTree::MetaVarDecl(..)
                | TokenTree::MetaVarExpr(..) => {
                    first.add_one(TtHandle::TtRef(tt));
                    return first;
                }
                TokenTree::Delimited(span, ref delimited) => {
                    first.add_one(TtHandle::from_token_kind(
                        token::OpenDelim(delimited.delim),
                        span.open,
                    ));
                    return first;
                }
                TokenTree::Sequence(sp, ref seq_rep) => {
                    let subfirst_owned;
                    let subfirst = match self.first.get(&sp.entire()) {
                        Some(&Some(ref subfirst)) => subfirst,
                        Some(&None) => {
                            // Span collision invalidated the precomputed
                            // entry; recompute on the fly (slow path).
                            subfirst_owned = self.first(&seq_rep.tts);
                            &subfirst_owned
                        }
                        None => {
                            panic!("We missed a sequence during FirstSets construction");
                        }
                    };

                    // If the sequence contents can be empty, then the first
                    // token could be the separator token itself.
                    if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
                        first.add_one_maybe(TtHandle::from_token(sep.clone()));
                    }

                    assert!(first.maybe_empty);
                    first.add_all(subfirst);
                    if subfirst.maybe_empty
                        || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
                        || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
                    {
                        // Continue scanning for more first
                        // tokens, but also make sure we
                        // restore empty-tracking state.
                        first.maybe_empty = true;
                        continue;
                    } else {
                        return first;
                    }
                }
            }
        }

        // we only exit the loop if `tts` was empty or if every
        // element of `tts` matches the empty sequence.
        assert!(first.maybe_empty);
        first
    }
}
882
// Most `mbe::TokenTree`s are pre-existing in the matcher, but some are defined
// implicitly, such as opening/closing delimiters and sequence repetition ops.
// This type encapsulates both kinds. It implements `Clone` while avoiding the
// need for `mbe::TokenTree` to implement `Clone`.
#[derive(Debug)]
enum TtHandle<'tt> {
    /// This is used in most cases.
    TtRef(&'tt mbe::TokenTree),

    /// This is only used for implicit token trees. The `mbe::TokenTree` *must*
    /// be `mbe::TokenTree::Token`. No other variants are allowed. We store an
    /// `mbe::TokenTree` rather than a `Token` so that `get()` can return a
    /// `&mbe::TokenTree`.
    Token(mbe::TokenTree),
}
898
899 impl<'tt> TtHandle<'tt> {
900 fn from_token(tok: Token) -> Self {
901 TtHandle::Token(mbe::TokenTree::Token(tok))
902 }
903
904 fn from_token_kind(kind: TokenKind, span: Span) -> Self {
905 TtHandle::from_token(Token::new(kind, span))
906 }
907
908 // Get a reference to a token tree.
909 fn get(&'tt self) -> &'tt mbe::TokenTree {
910 match self {
911 TtHandle::TtRef(tt) => tt,
912 TtHandle::Token(token_tt) => &token_tt,
913 }
914 }
915 }
916
917 impl<'tt> PartialEq for TtHandle<'tt> {
918 fn eq(&self, other: &TtHandle<'tt>) -> bool {
919 self.get() == other.get()
920 }
921 }
922
impl<'tt> Clone for TtHandle<'tt> {
    /// Manual `Clone`: borrows copy cheaply, and the owned variant clones
    /// just its token — so `mbe::TokenTree` itself never needs `Clone`.
    fn clone(&self) -> Self {
        match self {
            TtHandle::TtRef(tt) => TtHandle::TtRef(tt),

            // This variant *must* contain a `mbe::TokenTree::Token`, and not
            // any other variant of `mbe::TokenTree`.
            TtHandle::Token(mbe::TokenTree::Token(tok)) => {
                TtHandle::Token(mbe::TokenTree::Token(tok.clone()))
            }

            _ => unreachable!(),
        }
    }
}
938
// A set of `mbe::TokenTree`s, which may include `TokenTree::Match`s
// (for macro-by-example syntactic variables). It also carries the
// `maybe_empty` flag; that is true if and only if the matcher can
// match an empty token sequence.
//
// The First set is computed on submatchers like `$($a:expr b),* $(c)* d`,
// which has corresponding FIRST = {$a:expr, c, d}.
// Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}.
//
// (Notably, we must allow for *-op to occur zero times.)
#[derive(Clone, Debug)]
struct TokenSet<'tt> {
    // Insertion-ordered, duplicate-free list of candidate first tokens.
    tokens: Vec<TtHandle<'tt>>,
    // True iff the matcher this set was computed for can match no tokens.
    maybe_empty: bool,
}
954
955 impl<'tt> TokenSet<'tt> {
956 // Returns a set for the empty sequence.
957 fn empty() -> Self {
958 TokenSet { tokens: Vec::new(), maybe_empty: true }
959 }
960
961 // Returns the set `{ tok }` for the single-token (and thus
962 // non-empty) sequence [tok].
963 fn singleton(tt: TtHandle<'tt>) -> Self {
964 TokenSet { tokens: vec![tt], maybe_empty: false }
965 }
966
967 // Changes self to be the set `{ tok }`.
968 // Since `tok` is always present, marks self as non-empty.
969 fn replace_with(&mut self, tt: TtHandle<'tt>) {
970 self.tokens.clear();
971 self.tokens.push(tt);
972 self.maybe_empty = false;
973 }
974
975 // Changes self to be the empty set `{}`; meant for use when
976 // the particular token does not matter, but we want to
977 // record that it occurs.
978 fn replace_with_irrelevant(&mut self) {
979 self.tokens.clear();
980 self.maybe_empty = false;
981 }
982
983 // Adds `tok` to the set for `self`, marking sequence as non-empy.
984 fn add_one(&mut self, tt: TtHandle<'tt>) {
985 if !self.tokens.contains(&tt) {
986 self.tokens.push(tt);
987 }
988 self.maybe_empty = false;
989 }
990
991 // Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.)
992 fn add_one_maybe(&mut self, tt: TtHandle<'tt>) {
993 if !self.tokens.contains(&tt) {
994 self.tokens.push(tt);
995 }
996 }
997
998 // Adds all elements of `other` to this.
999 //
1000 // (Since this is a set, we filter out duplicates.)
1001 //
1002 // If `other` is potentially empty, then preserves the previous
1003 // setting of the empty flag of `self`. If `other` is guaranteed
1004 // non-empty, then `self` is marked non-empty.
1005 fn add_all(&mut self, other: &Self) {
1006 for tt in &other.tokens {
1007 if !self.tokens.contains(tt) {
1008 self.tokens.push(tt.clone());
1009 }
1010 }
1011 if !other.maybe_empty {
1012 self.maybe_empty = false;
1013 }
1014 }
1015 }
1016
// Checks that `matcher` is internally consistent and that it
// can legally be followed by a token `N`, for all `N` in `follow`.
// (If `follow` is empty, then it imposes no constraint on
// the `matcher`.)
//
// Returns the set of NT tokens that could possibly come last in
// `matcher`. (If `matcher` matches the empty sequence, then
// `maybe_empty` will be set to true.)
//
// Errors are reported through `sess`; recurses into delimited groups
// and repetition sequences.
//
// Requires that `first_sets` is pre-computed for `matcher`;
// see `FirstSets::new`.
fn check_matcher_core<'tt>(
    sess: &ParseSess,
    def: &ast::Item,
    first_sets: &FirstSets<'tt>,
    matcher: &'tt [mbe::TokenTree],
    follow: &TokenSet<'tt>,
) -> TokenSet<'tt> {
    use mbe::TokenTree;

    // Running set of NT tokens that could end the prefix processed so far.
    let mut last = TokenSet::empty();

    // 2. For each token and suffix [T, SUFFIX] in M:
    // ensure that T can be followed by SUFFIX, and if SUFFIX may be empty,
    // then ensure T can also be followed by any element of FOLLOW.
    'each_token: for i in 0..matcher.len() {
        let token = &matcher[i];
        let suffix = &matcher[i + 1..];

        // FIRST(SUFFIX), extended with FOLLOW when SUFFIX can be empty
        // (because then whatever follows the whole matcher can directly
        // follow `token`).
        let build_suffix_first = || {
            let mut s = first_sets.first(suffix);
            if s.maybe_empty {
                s.add_all(follow);
            }
            s
        };

        // (we build `suffix_first` on demand below; you can tell
        // which cases are supposed to fall through by looking for the
        // initialization of this variable.)
        let suffix_first;

        // First, update `last` so that it corresponds to the set
        // of NT tokens that might end the sequence `... token`.
        match *token {
            TokenTree::Token(..)
            | TokenTree::MetaVar(..)
            | TokenTree::MetaVarDecl(..)
            | TokenTree::MetaVarExpr(..) => {
                if token_can_be_followed_by_any(token) {
                    // don't need to track tokens that work with any,
                    last.replace_with_irrelevant();
                    // ... and don't need to check tokens that can be
                    // followed by anything against SUFFIX.
                    continue 'each_token;
                } else {
                    last.replace_with(TtHandle::TtRef(token));
                    suffix_first = build_suffix_first();
                }
            }
            TokenTree::Delimited(span, ref d) => {
                // Inside the delimiters, the only thing that can follow the
                // interior matcher is the closing delimiter itself.
                let my_suffix = TokenSet::singleton(TtHandle::from_token_kind(
                    token::CloseDelim(d.delim),
                    span.close,
                ));
                // Recurse for the interior's own consistency; its `last` set
                // is irrelevant out here because the closing delimiter seals
                // the group off from SUFFIX.
                check_matcher_core(sess, def, first_sets, &d.tts, &my_suffix);
                // don't track non NT tokens
                last.replace_with_irrelevant();

                // also, we don't need to check delimited sequences
                // against SUFFIX
                continue 'each_token;
            }
            TokenTree::Sequence(_, ref seq_rep) => {
                suffix_first = build_suffix_first();
                // The trick here: when we check the interior, we want
                // to include the separator (if any) as a potential
                // (but not guaranteed) element of FOLLOW. So in that
                // case, we make a temp copy of suffix and stuff
                // delimiter in there.
                //
                // FIXME: Should I first scan suffix_first to see if
                // delimiter is already in it before I go through the
                // work of cloning it? But then again, this way I may
                // get a "tighter" span?
                let mut new;
                let my_suffix = if let Some(sep) = &seq_rep.separator {
                    new = suffix_first.clone();
                    new.add_one_maybe(TtHandle::from_token(sep.clone()));
                    &new
                } else {
                    &suffix_first
                };

                // At this point, `suffix_first` is built, and
                // `my_suffix` is some TokenSet that we can use
                // for checking the interior of `seq_rep`.
                let next = check_matcher_core(sess, def, first_sets, &seq_rep.tts, my_suffix);
                if next.maybe_empty {
                    // The repetition may match nothing, so whatever could end
                    // the preceding prefix can still end the overall sequence.
                    last.add_all(&next);
                } else {
                    last = next;
                }

                // the recursive call to check_matcher_core already ran the 'each_last
                // check below, so we can just keep going forward here.
                continue 'each_token;
            }
        }

        // (`suffix_first` guaranteed initialized once reaching here.)

        // Now `last` holds the complete set of NT tokens that could
        // end the sequence before SUFFIX. Check that every one works with `suffix`.
        for tt in &last.tokens {
            if let &TokenTree::MetaVarDecl(span, name, Some(kind)) = tt.get() {
                for next_token in &suffix_first.tokens {
                    let next_token = next_token.get();

                    // Check if the old pat is used and the next token is `|`
                    // to warn about incompatibility with Rust 2021.
                    // We only emit this lint if we're parsing the original
                    // definition of this macro_rules, not while (re)parsing
                    // the macro when compiling another crate that is using the
                    // macro. (See #86567.)
                    // Macros defined in the current crate have a real node id,
                    // whereas macros from an external crate have a dummy id.
                    if def.id != DUMMY_NODE_ID
                        && matches!(kind, NonterminalKind::PatParam { inferred: true })
                        && matches!(next_token, TokenTree::Token(token) if token.kind == BinOp(token::BinOpToken::Or))
                    {
                        // Suggest an explicit `pat_param` specifier,
                        // e.g. `$x:pat` -> `$x:pat_param`.
                        let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
                            span,
                            name,
                            Some(NonterminalKind::PatParam { inferred: false }),
                        ));
                        sess.buffer_lint_with_diagnostic(
                            &RUST_2021_INCOMPATIBLE_OR_PATTERNS,
                            span,
                            ast::CRATE_NODE_ID,
                            "the meaning of the `pat` fragment specifier is changing in Rust 2021, which may affect this macro",
                            BuiltinLintDiagnostics::OrPatternsBackCompat(span, suggestion),
                        );
                    }
                    match is_in_follow(next_token, kind) {
                        IsInFollow::Yes => {}
                        IsInFollow::No(possible) => {
                            // With a unique last token and a unique follower the
                            // bad pairing is certain; otherwise it's only possible.
                            let may_be = if last.tokens.len() == 1 && suffix_first.tokens.len() == 1
                            {
                                "is"
                            } else {
                                "may be"
                            };

                            let sp = next_token.span();
                            let mut err = sess.span_diagnostic.struct_span_err(
                                sp,
                                &format!(
                                    "`${name}:{frag}` {may_be} followed by `{next}`, which \
                                     is not allowed for `{frag}` fragments",
                                    name = name,
                                    frag = kind,
                                    next = quoted_tt_to_string(next_token),
                                    may_be = may_be
                                ),
                            );
                            err.span_label(sp, format!("not allowed after `{}` fragments", kind));

                            // On edition 2021, `pat` means `PatWithOr`; if the
                            // offender is `|`, falling back to `pat_param`
                            // restores the old (or-free) behavior.
                            if kind == NonterminalKind::PatWithOr
                                && sess.edition.rust_2021()
                                && next_token.is_token(&BinOp(token::BinOpToken::Or))
                            {
                                let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
                                    span,
                                    name,
                                    Some(NonterminalKind::PatParam { inferred: false }),
                                ));
                                err.span_suggestion(
                                    span,
                                    "try a `pat_param` fragment specifier instead",
                                    suggestion,
                                    Applicability::MaybeIncorrect,
                                );
                            }

                            // Append a note listing the legal follow tokens.
                            let msg = "allowed there are: ";
                            match possible {
                                &[] => {}
                                &[t] => {
                                    err.note(&format!(
                                        "only {} is allowed after `{}` fragments",
                                        t, kind,
                                    ));
                                }
                                ts => {
                                    err.note(&format!(
                                        "{}{} or {}",
                                        msg,
                                        ts[..ts.len() - 1]
                                            .iter()
                                            .copied()
                                            .collect::<Vec<_>>()
                                            .join(", "),
                                        ts[ts.len() - 1],
                                    ));
                                }
                            }
                            err.emit();
                        }
                    }
                }
            }
        }
    }
    last
}
1234
1235 fn token_can_be_followed_by_any(tok: &mbe::TokenTree) -> bool {
1236 if let mbe::TokenTree::MetaVarDecl(_, _, Some(kind)) = *tok {
1237 frag_can_be_followed_by_any(kind)
1238 } else {
1239 // (Non NT's can always be followed by anything in matchers.)
1240 true
1241 }
1242 }
1243
1244 /// Returns `true` if a fragment of type `frag` can be followed by any sort of
1245 /// token. We use this (among other things) as a useful approximation
1246 /// for when `frag` can be followed by a repetition like `$(...)*` or
1247 /// `$(...)+`. In general, these can be a bit tricky to reason about,
1248 /// so we adopt a conservative position that says that any fragment
1249 /// specifier which consumes at most one token tree can be followed by
1250 /// a fragment specifier (indeed, these fragments can be followed by
1251 /// ANYTHING without fear of future compatibility hazards).
1252 fn frag_can_be_followed_by_any(kind: NonterminalKind) -> bool {
1253 matches!(
1254 kind,
1255 NonterminalKind::Item // always terminated by `}` or `;`
1256 | NonterminalKind::Block // exactly one token tree
1257 | NonterminalKind::Ident // exactly one token tree
1258 | NonterminalKind::Literal // exactly one token tree
1259 | NonterminalKind::Meta // exactly one token tree
1260 | NonterminalKind::Lifetime // exactly one token tree
1261 | NonterminalKind::TT // exactly one token tree
1262 )
1263 }
1264
// Outcome of a FOLLOW-set membership check: either the token may follow
// the fragment, or it may not — in which case we carry the user-facing
// descriptions of the tokens that *would* have been allowed, for the
// diagnostic note.
enum IsInFollow {
    Yes,
    No(&'static [&'static str]),
}
1269
/// Returns `true` if `frag` can legally be followed by the token `tok`. For
/// fragments that can consume an unbounded number of tokens, `tok`
/// must be within a well-defined follow set. This is intended to
/// guarantee future compatibility: for example, without this rule, if
/// we expanded `expr` to include a new binary operator, we might
/// break macros that were relying on that binary operator as a
/// separator.
// when changing this do not forget to update doc/book/macros.md!
fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow {
    use mbe::TokenTree;

    if let TokenTree::Token(Token { kind: token::CloseDelim(_), .. }) = *tok {
        // closing a token tree can never be matched by any fragment;
        // iow, we always require that `(` and `)` match, etc.
        IsInFollow::Yes
    } else {
        match kind {
            NonterminalKind::Item => {
                // since items *must* be followed by either a `;` or a `}`, we can
                // accept anything after them
                IsInFollow::Yes
            }
            NonterminalKind::Block => {
                // anything can follow block, the braces provide an easy boundary to
                // maintain
                IsInFollow::Yes
            }
            NonterminalKind::Stmt | NonterminalKind::Expr => {
                // statements and expressions may only be followed by
                // `=>`, `,`, or `;`.
                const TOKENS: &[&str] = &["`=>`", "`,`", "`;`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Semi => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::PatParam { .. } => {
                // the 2015/2018 `pat` (no top-level or-patterns) also
                // permits `|` in its follow set.
                const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Eq | BinOp(token::Or) => IsInFollow::Yes,
                        Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::PatWithOr { .. } => {
                // the 2021 `pat` consumes top-level `|` itself, so `|`
                // is absent from this follow set (cf. `PatParam` above).
                const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`if`", "`in`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Eq => IsInFollow::Yes,
                        Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::Path | NonterminalKind::Ty => {
                const TOKENS: &[&str] = &[
                    "`{`", "`[`", "`=>`", "`,`", "`>`", "`=`", "`:`", "`;`", "`|`", "`as`",
                    "`where`",
                ];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        // `Shr` (`>>`) is accepted because it can split into
                        // two `>` tokens closing nested generics.
                        OpenDelim(Delimiter::Brace)
                        | OpenDelim(Delimiter::Bracket)
                        | Comma
                        | FatArrow
                        | Colon
                        | Eq
                        | Gt
                        | BinOp(token::Shr)
                        | Semi
                        | BinOp(token::Or) => IsInFollow::Yes,
                        Ident(name, false) if name == kw::As || name == kw::Where => {
                            IsInFollow::Yes
                        }
                        _ => IsInFollow::No(TOKENS),
                    },
                    TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Block)) => IsInFollow::Yes,
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::Ident | NonterminalKind::Lifetime => {
                // being a single token, idents and lifetimes are harmless
                IsInFollow::Yes
            }
            NonterminalKind::Literal => {
                // literals may be of a single token, or two tokens (negative numbers)
                IsInFollow::Yes
            }
            NonterminalKind::Meta | NonterminalKind::TT => {
                // being either a single token or a delimited sequence, tt is
                // harmless
                IsInFollow::Yes
            }
            NonterminalKind::Vis => {
                // Explicitly disallow `priv`, on the off chance it comes back.
                const TOKENS: &[&str] = &["`,`", "an ident", "a type"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        Comma => IsInFollow::Yes,
                        // Raw identifiers are fine even if spelled `r#priv`.
                        Ident(name, is_raw) if is_raw || name != kw::Priv => IsInFollow::Yes,
                        _ => {
                            if token.can_begin_type() {
                                IsInFollow::Yes
                            } else {
                                IsInFollow::No(TOKENS)
                            }
                        }
                    },
                    TokenTree::MetaVarDecl(
                        _,
                        _,
                        Some(NonterminalKind::Ident | NonterminalKind::Ty | NonterminalKind::Path),
                    ) => IsInFollow::Yes,
                    _ => IsInFollow::No(TOKENS),
                }
            }
        }
    }
}
1394
1395 fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String {
1396 match *tt {
1397 mbe::TokenTree::Token(ref token) => pprust::token_to_string(&token).into(),
1398 mbe::TokenTree::MetaVar(_, name) => format!("${}", name),
1399 mbe::TokenTree::MetaVarDecl(_, name, Some(kind)) => format!("${}:{}", name, kind),
1400 mbe::TokenTree::MetaVarDecl(_, name, None) => format!("${}:", name),
1401 _ => panic!(
1402 "{}",
1403 "unexpected mbe::TokenTree::{Sequence or Delimited} \
1404 in follow set checker"
1405 ),
1406 }
1407 }
1408
// Builds a `Parser` over the macro invocation's captured tokens, tagged
// with `rustc_parse::MACRO_ARGUMENTS` as the subparser name.
// NOTE(review): the `true` flag is passed positionally to `Parser::new`;
// presumably it enables doc-comment desugaring — confirm against
// `rustc_parse::parser::Parser::new`.
fn parser_from_cx(sess: &ParseSess, tts: TokenStream) -> Parser<'_> {
    Parser::new(sess, tts, true, rustc_parse::MACRO_ARGUMENTS)
}
1412
1413 /// Generates an appropriate parsing failure message. For EOF, this is "unexpected end...". For
1414 /// other tokens, this is "unexpected token...".
1415 fn parse_failure_msg(tok: &Token) -> String {
1416 match tok.kind {
1417 token::Eof => "unexpected end of macro invocation".to_string(),
1418 _ => format!("no rules expected the token `{}`", pprust::token_to_string(tok),),
1419 }
1420 }