]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // Copyright (c) 2001-2010 Hartmut Kaiser |
2 | // | |
3 | // Distributed under the Boost Software License, Version 1.0. (See accompanying | |
4 | // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) | |
5 | ||
6 | // The purpose of this example is to show how it is possible to use a lexer | |
7 | // token definition for two purposes: | |
8 | // | |
9 | // . To generate C++ code implementing a static lexical analyzer allowing | |
10 | // to recognize all defined tokens | |
11 | // . To integrate the generated C++ lexer into the /Spirit/ framework. | |
12 | // | |
13 | ||
14 | // #define BOOST_SPIRIT_DEBUG | |
15 | // #define BOOST_SPIRIT_LEXERTL_DEBUG | |
16 | ||
7c673cae FG |
17 | #include <boost/spirit/include/lex_static_lexertl.hpp> |
18 | ||
19 | #include <iostream> | |
20 | #include <string> | |
21 | ||
22 | #include "../example.hpp" | |
23 | #include "word_count_lexer_tokens.hpp" // token definition | |
24 | #include "word_count_lexer_static.hpp" // generated tokenizer | |
25 | ||
26 | using namespace boost::spirit; | |
27 | ||
28 | /////////////////////////////////////////////////////////////////////////////// | |
29 | //[wcl_static_main | |
30 | int main(int argc, char* argv[]) | |
31 | { | |
32 | // read input from the given file | |
33 | std::string str (read_from_file(1 == argc ? "word_count.input" : argv[1])); | |
34 | ||
35 | // Specifying 'omit' as the token attribute type generates a token class | |
36 | // notholding any token attribute at all (not even the iterator_range of the | |
37 | // matched input sequence), therefor optimizing the token, the lexer, and | |
38 | // possibly the parser implementation as much as possible. | |
39 | // | |
40 | // Specifying mpl::false_ as the 3rd template parameter generates a token | |
41 | // type and an iterator, both holding no lexer state, allowing for even more | |
42 | // aggressive optimizations. | |
43 | // | |
44 | // As a result the token instances contain the token ids as the only data | |
45 | // member. | |
46 | typedef lex::lexertl::token<char const*, lex::omit, boost::mpl::false_> token_type; | |
47 | ||
48 | // Define the lexer type to be used as the base class for our token | |
49 | // definition. | |
50 | // | |
51 | // This is the only place where the code is different from an equivalent | |
52 | // dynamic lexical analyzer. We use the `lexertl::static_lexer<>` instead of | |
f67539c2 | 53 | // the `lexertl::lexer<>` as the base class for our token definition type. |
7c673cae FG |
54 | // |
55 | // As we specified the suffix "wcl" while generating the static tables we | |
56 | // need to pass the type lexertl::static_::lexer_wcl as the second template | |
57 | // parameter below (see word_count_lexer_generate.cpp). | |
58 | typedef lex::lexertl::static_actor_lexer< | |
59 | token_type, lex::lexertl::static_::lexer_wcl | |
60 | > lexer_type; | |
61 | ||
62 | // create the lexer object instance needed to invoke the lexical analysis | |
63 | word_count_lexer_tokens<lexer_type> word_count_lexer; | |
64 | ||
65 | // tokenize the given string, all generated tokens are discarded | |
66 | char const* first = str.c_str(); | |
67 | char const* last = &first[str.size()]; | |
68 | bool r = lex::tokenize(first, last, word_count_lexer); | |
69 | ||
70 | if (r) { | |
71 | std::cout << "lines: " << word_count_lexer.l | |
72 | << ", words: " << word_count_lexer.w | |
73 | << ", characters: " << word_count_lexer.c | |
74 | << "\n"; | |
75 | } | |
76 | else { | |
77 | std::string rest(first, last); | |
78 | std::cout << "Lexical analysis failed\n" << "stopped at: \"" | |
79 | << rest << "\"\n"; | |
80 | } | |
81 | return 0; | |
82 | } | |
83 | //] |