//  Copyright (c) 2001-2010 Hartmut Kaiser
//
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// #define BOOST_SPIRIT_LEXERTL_DEBUG
#define BOOST_VARIANT_MINIMIZE_SIZE

#include <boost/detail/lightweight_test.hpp>
#include <boost/config/warning_disable.hpp>

#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_statement.hpp>
#include <boost/spirit/include/phoenix_container.hpp>

#include <iostream>
#include <string>

namespace qi = boost::spirit::qi;
namespace lex = boost::spirit::lex;

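// Token ids used by both the lexer and the grammar; the first user-defined id
// starts at lex::min_token_id, the lowest id not reserved by Spirit.Lex.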
enum tokenids
{
    IDWORD = lex::min_token_id,
    IDCHAR,
    IDANY
};

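// Token definitions: the named pattern {TEST} (an 'A') is associated with
// IDWORD, the literal 'a' with IDCHAR, and any other single character with
// IDANY. This exercises the explicit token-id overloads of lex::string and
// lex::char_.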
template <typename Lexer>
struct word_count_tokens : lex::lexer<Lexer>
{
    word_count_tokens()
    {
        this->self.add_pattern
            ("TEST", "A")
        ;

        this->self =
                lex::string("{TEST}", IDWORD)
            |   lex::char_('a', IDCHAR)
            |   lex::string(".", IDANY)
            ;
    }
};

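// Grammar counting the tokens it sees: qi::token(id) matches a token with the
// given id, and the attached Phoenix actions increment the matching counter.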
template <typename Iterator>
struct word_count_grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    word_count_grammar(TokenDef const&)
      : word_count_grammar::base_type(start)
      , w(0), c(0), a(0)
    {
        using boost::phoenix::ref;
        using qi::token;

        start =  *(   token(IDWORD) [++ref(w)]
                  |   token(IDCHAR) [++ref(c)]
                  |   token(IDANY)  [++ref(a)]
                  )
              ;
    }
    std::size_t w, c, a;
    qi::rule<Iterator> start;
};

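// Driver: tokenize the input with the lexer above and feed the token stream
// into the grammar in a single pass.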
int main()
{
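    // The token type iterates over plain char const* input and can carry the
    // matched text as a std::string value.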
    typedef lex::lexertl::token<
        const char*, boost::mpl::vector<std::string>
    > token_type;

    typedef lex::lexertl::lexer<token_type> lexer_type;
    typedef word_count_tokens<lexer_type>::iterator_type iterator_type;
    word_count_tokens<lexer_type> word_count;           // Our lexer
    word_count_grammar<iterator_type> g (word_count);   // Our parser

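    // Expected tokenization of "AaBCD": 'A' -> IDWORD, 'a' -> IDCHAR, and
    // 'B', 'C', 'D' -> IDANY, giving the counts checked below.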
    std::string str ("AaBCD");
    char const* first = str.c_str();
    char const* last = &first[str.size()];

    BOOST_TEST(lex::tokenize_and_parse(first, last, word_count, g));
    BOOST_TEST(g.w == 1 && g.c == 1 && g.a == 3);

    return boost::report_errors();
}