/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"


enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */
    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume the QemuOpt
                          * itself and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};


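/* Convert the generic Visitor pointer back to the enclosing OptsVisitor. */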
static OptsVisitor *to_ov(Visitor *v)
{
    return container_of(v, OptsVisitor, visitor);
}


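/* GDestroyNotify for the GQueue values of "unprocessed_opts"; the queued
 * QemuOpt elements themselves are not owned by the hash table, so only the
 * queue is freed.
 */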
static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}


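/* Index @opt under its name in @unprocessed_opts, appending it to the queue
 * of occurrences sharing that name.
 */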
static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}


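/* Visitor callback: allocate the struct being visited and, at the outermost
 * nesting level, index every QemuOpt of "opts_root" (plus a fake "id" option,
 * if an ID is set) for later consumption by the scalar callbacks.
 */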
static void
opts_start_struct(Visitor *v, const char *name, void **obj,
                  size_t size, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size > 0 ? size : 1);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);

        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}


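/* Visitor callback: when leaving the outermost struct, report any QemuOpt
 * that was never consumed as an invalid parameter, then drop the bookkeeping.
 */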
static void
opts_end_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    GHashTableIter iter;
    GQueue *any;

    if (--ov->depth > 0) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    g_hash_table_iter_init(&iter, ov->unprocessed_opts);
    if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}


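/* Return the queue of unprocessed QemuOpt occurrences named @name, or NULL
 * with a "missing parameter" error if no such occurrence remains.
 */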
static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}


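/* Visitor callback: begin traversing the repeated option named @name as a
 * list. Nested lists are not supported.
 */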
static void
opts_start_list(Visitor *v, const char *name, Error **errp)
{
    OptsVisitor *ov = to_ov(v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts != NULL) {
        ov->list_mode = LM_STARTED;
    }
}


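/* Visitor callback: advance to the next list element. A pending interval
 * ("a-b") is expanded one element at a time; otherwise the most recently
 * parsed QemuOpt is popped, and NULL is returned once the repeated option is
 * exhausted.
 */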
static GenericList *
opts_next_list(Visitor *v, GenericList **list, size_t size)
{
    OptsVisitor *ov = to_ov(v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(size);
    return *link;
}


static void
opts_end_list(Visitor *v)
{
    OptsVisitor *ov = to_ov(v);

    assert(ov->list_mode == LM_STARTED ||
           ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}


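/* Return the QemuOpt supplying the value for @name: the head of the repeated
 * option queue while a list is being traversed, otherwise the last distinct
 * occurrence of @name.
 */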
static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}


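/* Mark the option named @name as consumed. Repeated options are removed from
 * "unprocessed_opts" by opts_next_list() instead, so nothing needs to happen
 * while a list is in progress.
 */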
static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}


static void
opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    /* Note that we consume a string even if this is called as part of
     * an enum visit that later fails because the string is not a
     * valid enum value; this is harmless because tracking what gets
     * consumed only matters to visit_end_struct() as the final error
     * check if there were no other failures during the visit. */
    processed(ov, name);
}


/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}


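/* Visitor callback for int64 scalars. While traversing a repeated option, a
 * value of the form "a-b" (for example "0-3") is accepted as a closed
 * interval and expanded one element per opts_next_list() call, provided the
 * interval spans no more than OPTS_VISITOR_RANGE_MAX elements.
 */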
static void
opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering at the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
               "an int64 value or range");
}


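/* Visitor callback for uint64 scalars; accepts the same "a-b" interval
 * notation as opts_type_int64() when traversing a repeated option.
 */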
static void
opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering at the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
               "a uint64 value or range");
}


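/* Visitor callback for size values: parses the usual size suffixes (e.g. "k",
 * "M", "G"), defaulting to bytes when no suffix is given.
 */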
static void
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = qemu_strtosz_suffix(opt->str ? opt->str : "", &endptr,
                              QEMU_STRTOSZ_DEFSUFFIX_B);
    if (val < 0 || *endptr) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value representable as a non-negative int64");
        return;
    }

    *obj = val;
    processed(ov, name);
}


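/* Visitor callback for optional members: report whether an occurrence of
 * @name exists, without consuming it.
 */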
static void
opts_optional(Visitor *v, const char *name, bool *present)
{
    OptsVisitor *ov = to_ov(v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}


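/* Create an input visitor over @opts; ownership of @opts stays with the
 * caller. Typical usage sketch, where FooOptions and visit_type_FooOptions()
 * stand in for qapi-generated names and are illustrative only:
 *
 *     OptsVisitor *ov = opts_visitor_new(opts);
 *     visit_type_FooOptions(opts_get_visitor(ov), NULL, &foo, &err);
 *     opts_visitor_cleanup(ov);
 */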
OptsVisitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    ov = g_malloc0(sizeof *ov);

    ov->visitor.type = VISITOR_INPUT;

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.end_struct   = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list  = &opts_next_list;
    ov->visitor.end_list   = &opts_end_list;

    ov->visitor.type_int64  = &opts_type_int64;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size   = &opts_type_size;
    ov->visitor.type_bool   = &opts_type_bool;
    ov->visitor.type_str    = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;

    ov->opts_root = opts;

    return ov;
}


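/* Free the visitor and any remaining bookkeeping; the caller's QemuOpts are
 * left untouched.
 */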
void
opts_visitor_cleanup(OptsVisitor *ov)
{
    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}


Visitor *
opts_get_visitor(OptsVisitor *ov)
{
    return &ov->visitor;
}