/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"


enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */

    LM_IN_PROGRESS,      /*
                          * opts_next_list() ready to be called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /*
                          * opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL, /* Same as above, only for an unsigned interval. */

    LM_TRAVERSED          /*
                           * opts_next_list() has been called.
                           *
                           * No more QemuOpt instance in the list.
                           * The traversal has been completed.
                           */
};

typedef enum ListMode ListMode;
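
/*
 * Illustration (the option name is purely hypothetical): with a repeated
 * integer option given as "cpus=0-2,cpus=7", the list visit produces 0, 1, 2
 * while in LM_SIGNED_INTERVAL or LM_UNSIGNED_INTERVAL (the "0-2" range is
 * expanded one element at a time), returns to LM_IN_PROGRESS to consume the
 * next QemuOpt, yields 7, and ends in LM_TRAVERSED once no QemuOpt instances
 * of the repeated option remain.
 */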

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};


static OptsVisitor *to_ov(Visitor *v)
{
    return container_of(v, OptsVisitor, visitor);
}


static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}


static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}


static void
opts_start_struct(Visitor *v, const char *name, void **obj,
                  size_t size, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);

        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}


static void
opts_check_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    GHashTableIter iter;
    GQueue *any;

    if (ov->depth > 1) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    g_hash_table_iter_init(&iter, ov->unprocessed_opts);
    if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
}


static void
opts_end_struct(Visitor *v, void **obj)
{
    OptsVisitor *ov = to_ov(v);

    if (--ov->depth > 0) {
        return;
    }

    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}


static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}


static void
opts_start_list(Visitor *v, const char *name, GenericList **list, size_t size,
                Error **errp)
{
    OptsVisitor *ov = to_ov(v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    /* we don't support visits without a list */
    assert(list);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts) {
        ov->list_mode = LM_IN_PROGRESS;
        *list = g_malloc0(size);
    } else {
        *list = NULL;
    }
}


static GenericList *
opts_next_list(Visitor *v, GenericList *tail, size_t size)
{
    OptsVisitor *ov = to_ov(v);

    switch (ov->list_mode) {
    case LM_TRAVERSED:
        return NULL;
    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            ov->repeated_opts = NULL;
            ov->list_mode = LM_TRAVERSED;
            return NULL;
        }
        break;
    }

    default:
        abort();
    }

    tail->next = g_malloc0(size);
    return tail->next;
}


static void
opts_check_list(Visitor *v, Error **errp)
{
    /*
     * Unvisited list elements will be reported later when checking
     * whether unvisited struct members remain.
     */
}


static void
opts_end_list(Visitor *v, void **obj)
{
    OptsVisitor *ov = to_ov(v);

    assert(ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL ||
           ov->list_mode == LM_TRAVERSED);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}


static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    if (ov->list_mode == LM_TRAVERSED) {
        error_setg(errp, "Fewer list elements than expected");
        return NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}


static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}


static void
opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        *obj = NULL;
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    /* Note that we consume a string even if this is called as part of
     * an enum visit that later fails because the string is not a
     * valid enum value; this is harmless because tracking what gets
     * consumed only matters to visit_end_struct() as the final error
     * check if there were no other failures during the visit. */
    processed(ov, name);
}


/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}
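
/*
 * For example, an option given as "share=on", "share=yes" or "share=y"
 * yields true, "share=off", "share=no" or "share=n" yields false, and an
 * option recorded without a string value is treated as true. ("share" is
 * an illustrative option name, not one required by this visitor.)
 */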


static void
opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
               "an int64 value or range");
}
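
/*
 * For example (illustrative option name), "val=5" stores 5. While a list of
 * repeated options is being visited (LM_IN_PROGRESS), "val=5-8" is also
 * accepted: it switches to LM_SIGNED_INTERVAL and produces 5, 6, 7, 8 as
 * successive list elements. Ranges spanning more than OPTS_VISITOR_RANGE_MAX
 * elements are rejected.
 */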


static void
opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
               "a uint64 value or range");
}


static void
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    int err;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    err = qemu_strtosz(opt->str ? opt->str : "", NULL, obj);
    if (err < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value");
        return;
    }

    processed(ov, name);
}
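
/*
 * For example (illustrative option name), "mem=1048576" stores 1048576,
 * and suffixed values such as "mem=4G" are accepted as well, with the
 * suffix interpreted by qemu_strtosz().
 */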


static void
opts_optional(Visitor *v, const char *name, bool *present)
{
    OptsVisitor *ov = to_ov(v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}


static void
opts_free(Visitor *v)
{
    OptsVisitor *ov = to_ov(v);

    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}


Visitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    assert(opts);
    ov = g_malloc0(sizeof *ov);

    ov->visitor.type = VISITOR_INPUT;

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.check_struct = &opts_check_struct;
    ov->visitor.end_struct = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list = &opts_next_list;
    ov->visitor.check_list = &opts_check_list;
    ov->visitor.end_list = &opts_end_list;

    ov->visitor.type_int64 = &opts_type_int64;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size = &opts_type_size;
    ov->visitor.type_bool = &opts_type_bool;
    ov->visitor.type_str = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;
    ov->visitor.free = opts_free;

    ov->opts_root = opts;

    return &ov->visitor;
}
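
/*
 * Typical usage, as a minimal sketch: assume a QAPI-generated struct type
 * "Foo" with a generated visit_type_Foo() function (the type name is
 * illustrative; real callers substitute their own QAPI type).
 *
 *     Foo *foo = NULL;
 *     Error *err = NULL;
 *     Visitor *v = opts_visitor_new(opts);
 *
 *     visit_type_Foo(v, NULL, &foo, &err);
 *     visit_free(v);
 *
 * On success, foo points to a heap-allocated Foo filled in from the
 * QemuOpts; visit_free() releases the visitor through opts_free().
 */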