]> git.proxmox.com Git - mirror_qemu.git/blob - qobject/json-lexer.c
json: Nicer recovery from lexical errors
[mirror_qemu.git] / qobject / json-lexer.c
1 /*
2 * JSON lexer
3 *
4 * Copyright IBM, Corp. 2009
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
10 * See the COPYING.LIB file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "json-parser-int.h"
16
17 #define MAX_TOKEN_SIZE (64ULL << 20)
18
19 /*
20 * From RFC 8259 "The JavaScript Object Notation (JSON) Data
21 * Interchange Format", with [comments in brackets]:
22 *
23 * The set of tokens includes six structural characters, strings,
24 * numbers, and three literal names.
25 *
26 * These are the six structural characters:
27 *
28 * begin-array = ws %x5B ws ; [ left square bracket
29 * begin-object = ws %x7B ws ; { left curly bracket
30 * end-array = ws %x5D ws ; ] right square bracket
31 * end-object = ws %x7D ws ; } right curly bracket
32 * name-separator = ws %x3A ws ; : colon
33 * value-separator = ws %x2C ws ; , comma
34 *
35 * Insignificant whitespace is allowed before or after any of the six
36 * structural characters.
37 * [This lexer accepts it before or after any token, which is actually
38 * the same, as the grammar always has structural characters between
39 * other tokens.]
40 *
41 * ws = *(
42 * %x20 / ; Space
43 * %x09 / ; Horizontal tab
44 * %x0A / ; Line feed or New line
45 * %x0D ) ; Carriage return
46 *
47 * [...] three literal names:
48 * false null true
49 * [This lexer accepts [a-z]+, and leaves rejecting unknown literal
50 * names to the parser.]
51 *
52 * [Numbers:]
53 *
54 * number = [ minus ] int [ frac ] [ exp ]
55 * decimal-point = %x2E ; .
56 * digit1-9 = %x31-39 ; 1-9
57 * e = %x65 / %x45 ; e E
58 * exp = e [ minus / plus ] 1*DIGIT
59 * frac = decimal-point 1*DIGIT
60 * int = zero / ( digit1-9 *DIGIT )
61 * minus = %x2D ; -
62 * plus = %x2B ; +
63 * zero = %x30 ; 0
64 *
65 * [Strings:]
66 * string = quotation-mark *char quotation-mark
67 *
68 * char = unescaped /
69 * escape (
70 * %x22 / ; " quotation mark U+0022
71 * %x5C / ; \ reverse solidus U+005C
72 * %x2F / ; / solidus U+002F
73 * %x62 / ; b backspace U+0008
74 * %x66 / ; f form feed U+000C
75 * %x6E / ; n line feed U+000A
76 * %x72 / ; r carriage return U+000D
77 * %x74 / ; t tab U+0009
78 * %x75 4HEXDIG ) ; uXXXX U+XXXX
79 * escape = %x5C ; \
80 * quotation-mark = %x22 ; "
81 * unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
82 * [This lexer accepts any non-control character after escape, and
83 * leaves rejecting invalid ones to the parser.]
84 *
85 *
86 * Extensions over RFC 8259:
87 * - Extra escape sequence in strings:
88 * 0x27 (apostrophe) is recognized after escape, too
89 * - Single-quoted strings:
90 * Like double-quoted strings, except they're delimited by %x27
91 * (apostrophe) instead of %x22 (quotation mark), and can't contain
92 * unescaped apostrophe, but can contain unescaped quotation mark.
93 * - Interpolation, if enabled:
94 * The lexer accepts %[A-Za-z0-9]*, and leaves rejecting invalid
95 * ones to the parser.
96 *
97 * Note:
98 * - Input must be encoded in modified UTF-8.
99 * - Decoding and validating is left to the parser.
100 */
101
/*
 * Lexer states.  They share the uint8_t cells of json_lexer[] with
 * the JSONTokenType values, so the two ranges must not overlap; the
 * QEMU_BUILD_BUG_ONs below check the constraints.
 */
enum json_lexer_state {
    IN_ERROR = 0,        /* must really be 0, see json_lexer[] */
    IN_RECOVERY,         /* skipping input to resync after lexical error */
    IN_DQ_STRING_ESCAPE, /* seen backslash within "..." string */
    IN_DQ_STRING,        /* within "..." string */
    IN_SQ_STRING_ESCAPE, /* seen backslash within '...' string */
    IN_SQ_STRING,        /* within '...' string */
    IN_ZERO,             /* seen leading zero of a number */
    IN_EXP_DIGITS,       /* in digits of an exponent */
    IN_EXP_SIGN,         /* seen sign right after 'e' / 'E' */
    IN_EXP_E,            /* seen 'e' / 'E' */
    IN_MANTISSA,         /* seen decimal point, need a digit */
    IN_MANTISSA_DIGITS,  /* in digits of a fraction */
    IN_DIGITS,           /* in digits of an integer */
    IN_SIGN,             /* seen leading minus, need a digit */
    IN_KEYWORD,          /* in [a-z]+ literal name */
    IN_INTERP,           /* in %[A-Za-z0-9]* interpolation token */
    IN_WHITESPACE,       /* in insignificant whitespace */
    IN_START,            /* start state, interpolation disabled */
    IN_START_INTERP,     /* start state, interpolation enabled;
                          * must be IN_START + 1, see json_lexer[] */
};
123
/*
 * Token type values double as transition-table entries, so they must
 * come after all lexer states, and must leave the top bit free for
 * the LOOKAHEAD flag.
 */
QEMU_BUILD_BUG_ON((int)JSON_MIN <= (int)IN_START_INTERP);
QEMU_BUILD_BUG_ON(JSON_MAX >= 0x80);
/* The range designator [IN_START ... IN_START_INTERP] relies on this: */
QEMU_BUILD_BUG_ON(IN_START_INTERP != IN_START + 1);

/* Flag on a transition: current character is not consumed, feed it again */
#define LOOKAHEAD 0x80
/* Fill a whole table row: any character ends the token without being consumed */
#define TERMINAL(state) [0 ... 0xFF] = ((state) | LOOKAHEAD)
130
/*
 * Transition table: json_lexer[state][ch] is the next state, with
 * LOOKAHEAD set when @ch is not consumed and must be fed again from
 * the new state.  Entries below JSON_MIN are lexer states; entries
 * from JSON_MIN up mean "token of this type complete".
 *
 * Note: within a row, later designators override earlier ones, so a
 * broad range like [0x20 ... 0xFD] can be carved up by the single
 * characters listed after it.
 */
static const uint8_t json_lexer[][256] = {
    /* Relies on default initialization to IN_ERROR! */

    /* error recovery */
    [IN_RECOVERY] = {
        /*
         * Skip characters until a structural character, an ASCII
         * control character other than '\t', or impossible UTF-8
         * bytes '\xFE', '\xFF'.  Structural characters and line
         * endings are promising resynchronization points.  Clients
         * may use the others to force the JSON parser into known-good
         * state; see docs/interop/qmp-spec.txt.
         */
        [0 ... 0x1F] = IN_START | LOOKAHEAD,
        [0x20 ... 0xFD] = IN_RECOVERY,
        [0xFE ... 0xFF] = IN_START | LOOKAHEAD,
        ['\t'] = IN_RECOVERY,
        ['['] = IN_START | LOOKAHEAD,
        [']'] = IN_START | LOOKAHEAD,
        ['{'] = IN_START | LOOKAHEAD,
        ['}'] = IN_START | LOOKAHEAD,
        [':'] = IN_START | LOOKAHEAD,
        [','] = IN_START | LOOKAHEAD,
    },

    /* double quote string */
    [IN_DQ_STRING_ESCAPE] = {
        /* Accept any non-control character after escape; the parser
         * rejects invalid escape sequences. */
        [0x20 ... 0xFD] = IN_DQ_STRING,
    },
    [IN_DQ_STRING] = {
        [0x20 ... 0xFD] = IN_DQ_STRING,
        ['\\'] = IN_DQ_STRING_ESCAPE,
        ['"'] = JSON_STRING,
    },

    /* single quote string */
    [IN_SQ_STRING_ESCAPE] = {
        [0x20 ... 0xFD] = IN_SQ_STRING,
    },
    [IN_SQ_STRING] = {
        [0x20 ... 0xFD] = IN_SQ_STRING,
        ['\\'] = IN_SQ_STRING_ESCAPE,
        ['\''] = JSON_STRING,
    },

    /* Zero */
    [IN_ZERO] = {
        TERMINAL(JSON_INTEGER),
        /* RFC 8259 forbids leading zeros: 0 followed by a digit errors out */
        ['0' ... '9'] = IN_ERROR,
        ['.'] = IN_MANTISSA,
    },

    /* Float */
    [IN_EXP_DIGITS] = {
        TERMINAL(JSON_FLOAT),
        ['0' ... '9'] = IN_EXP_DIGITS,
    },

    [IN_EXP_SIGN] = {
        /* No TERMINAL: sign without digits is a lexical error */
        ['0' ... '9'] = IN_EXP_DIGITS,
    },

    [IN_EXP_E] = {
        ['-'] = IN_EXP_SIGN,
        ['+'] = IN_EXP_SIGN,
        ['0' ... '9'] = IN_EXP_DIGITS,
    },

    [IN_MANTISSA_DIGITS] = {
        TERMINAL(JSON_FLOAT),
        ['0' ... '9'] = IN_MANTISSA_DIGITS,
        ['e'] = IN_EXP_E,
        ['E'] = IN_EXP_E,
    },

    [IN_MANTISSA] = {
        /* No TERMINAL: decimal point without digits is a lexical error */
        ['0' ... '9'] = IN_MANTISSA_DIGITS,
    },

    /* Number */
    [IN_DIGITS] = {
        TERMINAL(JSON_INTEGER),
        ['0' ... '9'] = IN_DIGITS,
        ['e'] = IN_EXP_E,
        ['E'] = IN_EXP_E,
        ['.'] = IN_MANTISSA,
    },

    [IN_SIGN] = {
        /* No TERMINAL: minus without digits is a lexical error */
        ['0'] = IN_ZERO,
        ['1' ... '9'] = IN_DIGITS,
    },

    /* keywords */
    [IN_KEYWORD] = {
        TERMINAL(JSON_KEYWORD),
        ['a' ... 'z'] = IN_KEYWORD,
    },

    /* whitespace */
    [IN_WHITESPACE] = {
        TERMINAL(JSON_SKIP),
        [' '] = IN_WHITESPACE,
        ['\t'] = IN_WHITESPACE,
        ['\r'] = IN_WHITESPACE,
        ['\n'] = IN_WHITESPACE,
    },

    /* interpolation */
    [IN_INTERP] = {
        TERMINAL(JSON_INTERP),
        ['A' ... 'Z'] = IN_INTERP,
        ['a' ... 'z'] = IN_INTERP,
        ['0' ... '9'] = IN_INTERP,
    },

    /*
     * Two start states:
     * - IN_START recognizes JSON tokens with our string extensions
     * - IN_START_INTERP additionally recognizes interpolation.
     */
    [IN_START ... IN_START_INTERP] = {
        ['"'] = IN_DQ_STRING,
        ['\''] = IN_SQ_STRING,
        ['0'] = IN_ZERO,
        ['1' ... '9'] = IN_DIGITS,
        ['-'] = IN_SIGN,
        ['{'] = JSON_LCURLY,
        ['}'] = JSON_RCURLY,
        ['['] = JSON_LSQUARE,
        [']'] = JSON_RSQUARE,
        [','] = JSON_COMMA,
        [':'] = JSON_COLON,
        ['a' ... 'z'] = IN_KEYWORD,
        [' '] = IN_WHITESPACE,
        ['\t'] = IN_WHITESPACE,
        ['\r'] = IN_WHITESPACE,
        ['\n'] = IN_WHITESPACE,
    },
    /* Only the interpolation start state accepts '%' */
    [IN_START_INTERP]['%'] = IN_INTERP,
};
272
273 static inline uint8_t next_state(JSONLexer *lexer, char ch, bool flush,
274 bool *char_consumed)
275 {
276 uint8_t next;
277
278 assert(lexer->state <= ARRAY_SIZE(json_lexer));
279 next = json_lexer[lexer->state][(uint8_t)ch];
280 *char_consumed = !flush && !(next & LOOKAHEAD);
281 return next & ~LOOKAHEAD;
282 }
283
284 void json_lexer_init(JSONLexer *lexer, bool enable_interpolation)
285 {
286 lexer->start_state = lexer->state = enable_interpolation
287 ? IN_START_INTERP : IN_START;
288 lexer->token = g_string_sized_new(3);
289 lexer->x = lexer->y = 0;
290 }
291
/*
 * Feed one character @ch into the state machine.  Completed tokens
 * are handed to json_message_process_token() along with the current
 * position (lexer->x, lexer->y); lexical errors emit a JSON_ERROR
 * token and switch to error recovery.  With @flush, @ch is a dummy
 * NUL and the machine is stepped until it is back in its start state,
 * forcing out any pending token.
 */
static void json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
{
    int new_state;
    bool char_consumed = false;

    /* Track column (x) and line (y) for error reporting */
    lexer->x++;
    if (ch == '\n') {
        lexer->x = 0;
        lexer->y++;
    }

    /*
     * Normally, loop until @ch is consumed (a LOOKAHEAD transition
     * leaves it unconsumed, so the token boundary is processed and
     * @ch is fed again from the new state).  On @flush, @ch is never
     * consumed; loop until we reach the start state instead.
     */
    while (flush ? lexer->state != lexer->start_state : !char_consumed) {
        new_state = next_state(lexer, ch, flush, &char_consumed);
        if (char_consumed) {
            assert(!flush);
            g_string_append_c(lexer->token, ch);
        }

        switch (new_state) {
        case JSON_LCURLY:
        case JSON_RCURLY:
        case JSON_LSQUARE:
        case JSON_RSQUARE:
        case JSON_COLON:
        case JSON_COMMA:
        case JSON_INTERP:
        case JSON_INTEGER:
        case JSON_FLOAT:
        case JSON_KEYWORD:
        case JSON_STRING:
            /* Token complete: hand it to the parser */
            json_message_process_token(lexer, lexer->token, new_state,
                                       lexer->x, lexer->y);
            /* fall through */
        case JSON_SKIP:
            /* Whitespace run ends: discard it, no token emitted */
            g_string_truncate(lexer->token, 0);
            /* fall through */
        case IN_START:
            /*
             * The table always targets IN_START; map that to the
             * actual start state, which may be IN_START_INTERP.
             */
            new_state = lexer->start_state;
            break;
        case IN_ERROR:
            /* Lexical error: report it, then skip to a resync point */
            json_message_process_token(lexer, lexer->token, JSON_ERROR,
                                       lexer->x, lexer->y);
            new_state = IN_RECOVERY;
            /* fall through */
        case IN_RECOVERY:
            g_string_truncate(lexer->token, 0);
            break;
        default:
            break;
        }
        lexer->state = new_state;
    }

    /* Do not let a single token grow to an arbitrarily large size,
     * this is a security consideration.
     */
    if (lexer->token->len > MAX_TOKEN_SIZE) {
        json_message_process_token(lexer, lexer->token, lexer->state,
                                   lexer->x, lexer->y);
        g_string_truncate(lexer->token, 0);
        lexer->state = lexer->start_state;
    }
}
355
356 void json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size)
357 {
358 size_t i;
359
360 for (i = 0; i < size; i++) {
361 json_lexer_feed_char(lexer, buffer[i], false);
362 }
363 }
364
/*
 * Force out any pending token by stepping the state machine back to
 * its start state, then signal end of input to the parser.
 */
void json_lexer_flush(JSONLexer *lexer)
{
    json_lexer_feed_char(lexer, 0, true);
    assert(lexer->state == lexer->start_state);
    json_message_process_token(lexer, lexer->token, JSON_END_OF_INPUT,
                               lexer->x, lexer->y);
}
372
/* Free the lexer's resources (the token buffer and its contents). */
void json_lexer_destroy(JSONLexer *lexer)
{
    g_string_free(lexer->token, true);
}