1 // Copyright (c) 2001-2011 Hartmut Kaiser
3 // Distributed under the Boost Software License, Version 1.0. (See accompanying
4 // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 // #define BOOST_SPIRIT_LEXERTL_DEBUG 1
#include <iostream>
#include <string>

#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/phoenix.hpp>
12 namespace lex
= boost::spirit::lex
;
13 namespace qi
= boost::spirit::qi
;
14 namespace phoenix
= boost::phoenix
;
16 ///////////////////////////////////////////////////////////////////////////////
17 template <typename Lexer
>
18 struct language_tokens
: lex::lexer
<Lexer
>
24 floatlit
= "[0-9]+\\.[0-9]*";
27 identifier
= "[a-zA-Z_][a-zA-Z_0-9]*";
29 this->self
= ws
[lex::_pass
= lex::pass_flags::pass_ignore
];
30 this->self
+= tok_float
| tok_int
| floatlit
| intlit
| identifier
;
31 this->self
+= lex::char_('=');
34 lex::token_def
<> tok_float
, tok_int
;
36 lex::token_def
<double> floatlit
;
37 lex::token_def
<int> intlit
;
38 lex::token_def
<> identifier
;
41 ///////////////////////////////////////////////////////////////////////////////
42 template <typename Iterator
>
43 struct language_grammar
: qi::grammar
<Iterator
>
45 template <typename Lexer
>
46 language_grammar(language_tokens
<Lexer
> const& tok
)
47 : language_grammar::base_type(declarations
)
49 declarations
= +number
;
51 tok
.tok_float
>> tok
.identifier
>> '=' >> tok
.floatlit
52 | tok
.tok_int
>> tok
.identifier
>> '=' >> tok
.intlit
55 declarations
.name("declarations");
56 number
.name("number");
61 qi::rule
<Iterator
> declarations
;
62 qi::rule
<Iterator
> number
;
65 ///////////////////////////////////////////////////////////////////////////////
68 // iterator type used to expose the underlying input stream
69 typedef std::string::iterator base_iterator_type
;
72 typedef lex::lexertl::actor_lexer
<
74 base_iterator_type
, boost::mpl::vector2
<double, int>
77 // iterator type exposed by the lexer
78 typedef language_tokens
<lexer_type
>::iterator_type iterator_type
;
80 // now we use the types defined above to create the lexer and grammar
81 // object instances needed to invoke the parsing process
82 language_tokens
<lexer_type
> tokenizer
; // Our lexer
83 language_grammar
<iterator_type
> g (tokenizer
); // Our parser
85 // Parsing is done based on the token stream, not the character
86 // stream read from the input.
87 std::string
str ("float f = 3.4\nint i = 6\n");
88 base_iterator_type first
= str
.begin();
90 bool r
= lex::tokenize_and_parse(first
, str
.end(), tokenizer
, g
);
93 std::cout
<< "-------------------------\n";
94 std::cout
<< "Parsing succeeded\n";
95 std::cout
<< "-------------------------\n";
98 std::string
rest(first
, str
.end());
99 std::cout
<< "-------------------------\n";
100 std::cout
<< "Parsing failed\n";
101 std::cout
<< "stopped at: \"" << rest
<< "\"\n";
102 std::cout
<< "-------------------------\n";
105 std::cout
<< "Bye... :-) \n\n";