1 /*
2 * Options Visitor
3 *
4 * Copyright Red Hat, Inc. 2012-2016
5 *
6 * Author: Laszlo Ersek <lersek@redhat.com>
7 *
8 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
9 * See the COPYING.LIB file in the top-level directory.
10 *
11 */
12
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "qemu/cutils.h"
16 #include "qapi/qmp/qerror.h"
17 #include "qapi/opts-visitor.h"
18 #include "qemu/queue.h"
19 #include "qemu/option_int.h"
20 #include "qapi/visitor-impl.h"
21
22
/*
 * Traversal state for a list of repeated options (e.g. "foo=1,foo=2"),
 * including element-by-element expansion of closed integer intervals
 * written as "a-b" (see opts_type_int64() / opts_type_uint64()).
 */
enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */

    LM_IN_PROGRESS,      /*
                          * opts_next_list() ready to be called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /*
                          * opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL, /* Same as above, only for an unsigned interval. */

    LM_TRAVERSED          /*
                           * opts_next_list() has been called.
                           *
                           * No more QemuOpt instance in the list.
                           * The traversal has been completed.
                           */
};

typedef enum ListMode ListMode;
64
/*
 * Input visitor that parses a flat QemuOpts collection into a QAPI
 * object.  Created by opts_visitor_new(); freed via visit_free().
 */
struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    /* Nesting depth of visited structs; bookkeeping below is only
     * built/torn down at the outermost level (depth 0 -> 1 -> 0). */
    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.  Which union member is active is
     * determined by list_mode (LM_SIGNED_INTERVAL vs LM_UNSIGNED_INTERVAL).
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};
100
101
to_ov(Visitor * v)102 static OptsVisitor *to_ov(Visitor *v)
103 {
104 return container_of(v, OptsVisitor, visitor);
105 }
106
107
108 static void
destroy_list(gpointer list)109 destroy_list(gpointer list)
110 {
111 g_queue_free(list);
112 }
113
114
115 static void
opts_visitor_insert(GHashTable * unprocessed_opts,const QemuOpt * opt)116 opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
117 {
118 GQueue *list;
119
120 list = g_hash_table_lookup(unprocessed_opts, opt->name);
121 if (list == NULL) {
122 list = g_queue_new();
123
124 /* GHashTable will never try to free the keys -- we supply NULL as
125 * "key_destroy_func" in opts_start_struct(). Thus cast away key
126 * const-ness in order to suppress gcc's warning.
127 */
128 g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
129 }
130
131 /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
132 g_queue_push_tail(list, (gpointer)opt);
133 }
134
135
136 static bool
opts_start_struct(Visitor * v,const char * name,void ** obj,size_t size,Error ** errp)137 opts_start_struct(Visitor *v, const char *name, void **obj,
138 size_t size, Error **errp)
139 {
140 OptsVisitor *ov = to_ov(v);
141 const QemuOpt *opt;
142
143 if (obj) {
144 *obj = g_malloc0(size);
145 }
146 if (ov->depth++ > 0) {
147 return true;
148 }
149
150 ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
151 NULL, &destroy_list);
152 QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
153 /* ensured by qemu-option.c::opts_do_parse() */
154 assert(strcmp(opt->name, "id") != 0);
155
156 opts_visitor_insert(ov->unprocessed_opts, opt);
157 }
158
159 if (ov->opts_root->id != NULL) {
160 ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);
161
162 ov->fake_id_opt->name = g_strdup("id");
163 ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
164 opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
165 }
166 return true;
167 }
168
169
170 static bool
opts_check_struct(Visitor * v,Error ** errp)171 opts_check_struct(Visitor *v, Error **errp)
172 {
173 OptsVisitor *ov = to_ov(v);
174 GHashTableIter iter;
175 GQueue *any;
176
177 if (ov->depth > 1) {
178 return true;
179 }
180
181 /* we should have processed all (distinct) QemuOpt instances */
182 g_hash_table_iter_init(&iter, ov->unprocessed_opts);
183 if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) {
184 const QemuOpt *first;
185
186 first = g_queue_peek_head(any);
187 error_setg(errp, "Invalid parameter '%s'", first->name);
188 return false;
189 }
190 return true;
191 }
192
193
194 static void
opts_end_struct(Visitor * v,void ** obj)195 opts_end_struct(Visitor *v, void **obj)
196 {
197 OptsVisitor *ov = to_ov(v);
198
199 if (--ov->depth > 0) {
200 return;
201 }
202
203 g_hash_table_destroy(ov->unprocessed_opts);
204 ov->unprocessed_opts = NULL;
205 if (ov->fake_id_opt) {
206 g_free(ov->fake_id_opt->name);
207 g_free(ov->fake_id_opt->str);
208 g_free(ov->fake_id_opt);
209 }
210 ov->fake_id_opt = NULL;
211 }
212
213
214 static GQueue *
lookup_distinct(const OptsVisitor * ov,const char * name,Error ** errp)215 lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
216 {
217 GQueue *list;
218
219 list = g_hash_table_lookup(ov->unprocessed_opts, name);
220 if (!list) {
221 error_setg(errp, QERR_MISSING_PARAMETER, name);
222 }
223 return list;
224 }
225
226
227 static bool
opts_start_list(Visitor * v,const char * name,GenericList ** list,size_t size,Error ** errp)228 opts_start_list(Visitor *v, const char *name, GenericList **list, size_t size,
229 Error **errp)
230 {
231 OptsVisitor *ov = to_ov(v);
232
233 /* we can't traverse a list in a list */
234 assert(ov->list_mode == LM_NONE);
235 /* we don't support visits without a list */
236 assert(list);
237 ov->repeated_opts = lookup_distinct(ov, name, errp);
238 if (!ov->repeated_opts) {
239 *list = NULL;
240 return false;
241 }
242 ov->list_mode = LM_IN_PROGRESS;
243 *list = g_malloc0(size);
244 return true;
245 }
246
247
248 static GenericList *
opts_next_list(Visitor * v,GenericList * tail,size_t size)249 opts_next_list(Visitor *v, GenericList *tail, size_t size)
250 {
251 OptsVisitor *ov = to_ov(v);
252
253 switch (ov->list_mode) {
254 case LM_TRAVERSED:
255 return NULL;
256 case LM_SIGNED_INTERVAL:
257 case LM_UNSIGNED_INTERVAL:
258 if (ov->list_mode == LM_SIGNED_INTERVAL) {
259 if (ov->range_next.s < ov->range_limit.s) {
260 ++ov->range_next.s;
261 break;
262 }
263 } else if (ov->range_next.u < ov->range_limit.u) {
264 ++ov->range_next.u;
265 break;
266 }
267 ov->list_mode = LM_IN_PROGRESS;
268 /* range has been completed, fall through in order to pop option */
269
270 case LM_IN_PROGRESS: {
271 const QemuOpt *opt;
272
273 opt = g_queue_pop_head(ov->repeated_opts);
274 if (g_queue_is_empty(ov->repeated_opts)) {
275 g_hash_table_remove(ov->unprocessed_opts, opt->name);
276 ov->repeated_opts = NULL;
277 ov->list_mode = LM_TRAVERSED;
278 return NULL;
279 }
280 break;
281 }
282
283 default:
284 abort();
285 }
286
287 tail->next = g_malloc0(size);
288 return tail->next;
289 }
290
291
292 static bool
opts_check_list(Visitor * v,Error ** errp)293 opts_check_list(Visitor *v, Error **errp)
294 {
295 /*
296 * Unvisited list elements will be reported later when checking
297 * whether unvisited struct members remain.
298 */
299 return true;
300 }
301
302
303 static void
opts_end_list(Visitor * v,void ** obj)304 opts_end_list(Visitor *v, void **obj)
305 {
306 OptsVisitor *ov = to_ov(v);
307
308 assert(ov->list_mode == LM_IN_PROGRESS ||
309 ov->list_mode == LM_SIGNED_INTERVAL ||
310 ov->list_mode == LM_UNSIGNED_INTERVAL ||
311 ov->list_mode == LM_TRAVERSED);
312 ov->repeated_opts = NULL;
313 ov->list_mode = LM_NONE;
314 }
315
316
317 static const QemuOpt *
lookup_scalar(const OptsVisitor * ov,const char * name,Error ** errp)318 lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
319 {
320 if (ov->list_mode == LM_NONE) {
321 GQueue *list;
322
323 /* the last occurrence of any QemuOpt takes effect when queried by name
324 */
325 list = lookup_distinct(ov, name, errp);
326 return list ? g_queue_peek_tail(list) : NULL;
327 }
328 if (ov->list_mode == LM_TRAVERSED) {
329 error_setg(errp, "Fewer list elements than expected");
330 return NULL;
331 }
332 assert(ov->list_mode == LM_IN_PROGRESS);
333 return g_queue_peek_head(ov->repeated_opts);
334 }
335
336
337 static void
processed(OptsVisitor * ov,const char * name)338 processed(OptsVisitor *ov, const char *name)
339 {
340 if (ov->list_mode == LM_NONE) {
341 g_hash_table_remove(ov->unprocessed_opts, name);
342 return;
343 }
344 assert(ov->list_mode == LM_IN_PROGRESS);
345 /* do nothing */
346 }
347
348
349 static bool
opts_type_str(Visitor * v,const char * name,char ** obj,Error ** errp)350 opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
351 {
352 OptsVisitor *ov = to_ov(v);
353 const QemuOpt *opt;
354
355 opt = lookup_scalar(ov, name, errp);
356 if (!opt) {
357 *obj = NULL;
358 return false;
359 }
360 *obj = g_strdup(opt->str ? opt->str : "");
361 /* Note that we consume a string even if this is called as part of
362 * an enum visit that later fails because the string is not a
363 * valid enum value; this is harmless because tracking what gets
364 * consumed only matters to visit_end_struct() as the final error
365 * check if there were no other failures during the visit. */
366 processed(ov, name);
367 return true;
368 }
369
370
371 static bool
opts_type_bool(Visitor * v,const char * name,bool * obj,Error ** errp)372 opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
373 {
374 OptsVisitor *ov = to_ov(v);
375 const QemuOpt *opt;
376
377 opt = lookup_scalar(ov, name, errp);
378 if (!opt) {
379 return false;
380 }
381 if (opt->str) {
382 if (!qapi_bool_parse(opt->name, opt->str, obj, errp)) {
383 return false;
384 }
385 } else {
386 *obj = true;
387 }
388
389 processed(ov, name);
390 return true;
391 }
392
393
394 static bool
opts_type_int64(Visitor * v,const char * name,int64_t * obj,Error ** errp)395 opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
396 {
397 OptsVisitor *ov = to_ov(v);
398 const QemuOpt *opt;
399 const char *str;
400 long long val;
401 char *endptr;
402
403 if (ov->list_mode == LM_SIGNED_INTERVAL) {
404 *obj = ov->range_next.s;
405 return true;
406 }
407
408 opt = lookup_scalar(ov, name, errp);
409 if (!opt) {
410 return false;
411 }
412 str = opt->str ? opt->str : "";
413
414 /* we've gotten past lookup_scalar() */
415 assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);
416
417 errno = 0;
418 val = strtoll(str, &endptr, 0);
419 if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
420 if (*endptr == '\0') {
421 *obj = val;
422 processed(ov, name);
423 return true;
424 }
425 if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
426 long long val2;
427
428 str = endptr + 1;
429 val2 = strtoll(str, &endptr, 0);
430 if (errno == 0 && endptr > str && *endptr == '\0' &&
431 INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
432 (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
433 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
434 ov->range_next.s = val;
435 ov->range_limit.s = val2;
436 ov->list_mode = LM_SIGNED_INTERVAL;
437
438 /* as if entering on the top */
439 *obj = ov->range_next.s;
440 return true;
441 }
442 }
443 }
444 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
445 (ov->list_mode == LM_NONE) ? "an int64 value" :
446 "an int64 value or range");
447 return false;
448 }
449
450
451 static bool
opts_type_uint64(Visitor * v,const char * name,uint64_t * obj,Error ** errp)452 opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
453 {
454 OptsVisitor *ov = to_ov(v);
455 const QemuOpt *opt;
456 const char *str;
457 uint64_t val;
458 const char *endptr;
459
460 if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
461 *obj = ov->range_next.u;
462 return true;
463 }
464
465 opt = lookup_scalar(ov, name, errp);
466 if (!opt) {
467 return false;
468 }
469 str = opt->str;
470
471 /* we've gotten past lookup_scalar() */
472 assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);
473
474 if (parse_uint(str, &endptr, 0, &val) == 0) {
475 if (*endptr == '\0') {
476 *obj = val;
477 processed(ov, name);
478 return true;
479 }
480 if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
481 uint64_t val2;
482
483 str = endptr + 1;
484 if (parse_uint_full(str, 0, &val2) == 0 &&
485 val <= val2 &&
486 val2 - val < OPTS_VISITOR_RANGE_MAX) {
487 ov->range_next.u = val;
488 ov->range_limit.u = val2;
489 ov->list_mode = LM_UNSIGNED_INTERVAL;
490
491 /* as if entering on the top */
492 *obj = ov->range_next.u;
493 return true;
494 }
495 }
496 }
497 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
498 (ov->list_mode == LM_NONE) ? "a uint64 value" :
499 "a uint64 value or range");
500 return false;
501 }
502
503
504 static bool
opts_type_size(Visitor * v,const char * name,uint64_t * obj,Error ** errp)505 opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
506 {
507 OptsVisitor *ov = to_ov(v);
508 const QemuOpt *opt;
509 int err;
510
511 opt = lookup_scalar(ov, name, errp);
512 if (!opt) {
513 return false;
514 }
515
516 err = qemu_strtosz(opt->str ? opt->str : "", NULL, obj);
517 if (err < 0) {
518 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
519 "a size value");
520 return false;
521 }
522
523 processed(ov, name);
524 return true;
525 }
526
527
528 static void
opts_optional(Visitor * v,const char * name,bool * present)529 opts_optional(Visitor *v, const char *name, bool *present)
530 {
531 OptsVisitor *ov = to_ov(v);
532
533 /* we only support a single mandatory scalar field in a list node */
534 assert(ov->list_mode == LM_NONE);
535 *present = (lookup_distinct(ov, name, NULL) != NULL);
536 }
537
538
539 static void
opts_free(Visitor * v)540 opts_free(Visitor *v)
541 {
542 OptsVisitor *ov = to_ov(v);
543
544 if (ov->unprocessed_opts != NULL) {
545 g_hash_table_destroy(ov->unprocessed_opts);
546 }
547 g_free(ov->fake_id_opt);
548 g_free(ov);
549 }
550
551
552 Visitor *
opts_visitor_new(const QemuOpts * opts)553 opts_visitor_new(const QemuOpts *opts)
554 {
555 OptsVisitor *ov;
556
557 assert(opts);
558 ov = g_malloc0(sizeof *ov);
559
560 ov->visitor.type = VISITOR_INPUT;
561
562 ov->visitor.start_struct = &opts_start_struct;
563 ov->visitor.check_struct = &opts_check_struct;
564 ov->visitor.end_struct = &opts_end_struct;
565
566 ov->visitor.start_list = &opts_start_list;
567 ov->visitor.next_list = &opts_next_list;
568 ov->visitor.check_list = &opts_check_list;
569 ov->visitor.end_list = &opts_end_list;
570
571 ov->visitor.type_int64 = &opts_type_int64;
572 ov->visitor.type_uint64 = &opts_type_uint64;
573 ov->visitor.type_size = &opts_type_size;
574 ov->visitor.type_bool = &opts_type_bool;
575 ov->visitor.type_str = &opts_type_str;
576
577 /* type_number() is not filled in, but this is not the first visitor to
578 * skip some mandatory methods... */
579
580 ov->visitor.optional = &opts_optional;
581 ov->visitor.free = opts_free;
582
583 ov->opts_root = opts;
584
585 return &ov->visitor;
586 }
587