1 /**
2 * @file printer_lyb.c
3 * @author Michal Vasko <mvasko@cesnet.cz>
4 * @brief LYB printer for libyang data structure
5 *
6 * Copyright (c) 2018 CESNET, z.s.p.o.
7 *
8 * This source code is licensed under BSD 3-Clause License (the "License").
9 * You may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * https://opensource.org/licenses/BSD-3-Clause
13 */
14
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdint.h>
18 #include <string.h>
19 #include <assert.h>
20 #include <stdint.h>
21
22 #include "common.h"
23 #include "printer.h"
24 #include "tree_schema.h"
25 #include "tree_data.h"
26 #include "resolve.h"
27 #include "tree_internal.h"
28
29 static int
lyb_hash_equal_cb(void * UNUSED (val1_p),void * UNUSED (val2_p),int UNUSED (mod),void * UNUSED (cb_data))30 lyb_hash_equal_cb(void *UNUSED(val1_p), void *UNUSED(val2_p), int UNUSED(mod), void *UNUSED(cb_data))
31 {
32 /* for this purpose, if hash matches, the value does also, we do not want 2 values to have the same hash */
33 return 1;
34 }
35
36 static int
lyb_ptr_equal_cb(void * val1_p,void * val2_p,int UNUSED (mod),void * UNUSED (cb_data))37 lyb_ptr_equal_cb(void *val1_p, void *val2_p, int UNUSED(mod), void *UNUSED(cb_data))
38 {
39 struct lys_node *val1 = *(struct lys_node **)val1_p;
40 struct lys_node *val2 = *(struct lys_node **)val2_p;
41
42 if (val1 == val2) {
43 return 1;
44 }
45 return 0;
46 }
47
/* check that sibling collision hash is safe to insert into ht
 *
 * ht             - hash table of already inserted siblings
 * sibling        - schema node about to be inserted
 * ht_col_id      - collision ID the already-inserted nodes being checked were stored with
 * compare_col_id - highest collision ID to compare the whole hash sequences up to
 *
 * return: 0 - no whole hash sequence collision, 1 - whole hash sequence collision, -1 - fatal error
 */
static int
lyb_hash_sequence_check(struct hash_table *ht, struct lys_node *sibling, int ht_col_id, int compare_col_id)
{
    int j;
    struct lys_node **col_node;

    /* get the first node inserted with last hash col ID ht_col_id */
    if (lyht_find(ht, &sibling, lyb_hash(sibling, ht_col_id), (void **)&col_node)) {
        /* there is none. valid situation */
        return 0;
    }

    /* temporarily switch to pointer equality so lyht_find_next() can iterate over
     * the distinct nodes sharing this hash (the default callback treats them all as equal) */
    lyht_set_cb(ht, lyb_ptr_equal_cb);
    do {
        /* compare hash sequences from compare_col_id down to 0 */
        for (j = compare_col_id; j > -1; --j) {
            if (lyb_hash(sibling, j) != lyb_hash(*col_node, j)) {
                /* one non-colliding hash */
                break;
            }
        }
        if (j == -1) {
            /* all whole hash sequences of nodes inserted with last hash col ID compare_col_id collide */
            lyht_set_cb(ht, lyb_hash_equal_cb);
            return 1;
        }

        /* get next node inserted with last hash col ID ht_col_id */
    } while (!lyht_find_next(ht, col_node, lyb_hash(*col_node, ht_col_id), (void **)&col_node));

    /* restore the hash-only equality callback */
    lyht_set_cb(ht, lyb_hash_equal_cb);
    return 0;
}
83
84 #ifndef NDEBUG
85
/* debug-build check: warn when two augments (aug1, aug2) from different modules contain
 * nodes whose colliding hashes are actually used in the sibling hash table ht
 *
 * return: 1 - fatal collision found (warning logged) or internal error, 0 - no used collision */
static int
lyb_check_augment_collision(struct hash_table *ht, struct lys_node *aug1, struct lys_node *aug2)
{
    struct lys_node *iter1 = NULL, *iter2 = NULL;
    int i, coliding = 0;
    values_equal_cb cb = NULL;
    LYB_HASH hash1, hash2;

    /* go through combination of all nodes and check if coliding hash is used */
    while ((iter1 = (struct lys_node *)lys_getnext(iter1, aug1, lys_node_module(aug1), 0))) {
        iter2 = NULL;
        while ((iter2 = (struct lys_node *)lys_getnext(iter2, aug2, lys_node_module(aug2), 0))) {
            coliding = 0;
            /* compare the full hash sequences of the two nodes */
            for (i = 0; i < LYB_HASH_BITS; i++) {
                hash1 = lyb_hash(iter1, i);
                hash2 = lyb_hash(iter2, i);
                LY_CHECK_ERR_RETURN(!hash1 || !hash2, LOGINT(aug1->module->ctx), 0);

                if (hash1 == hash2) {
                    coliding++;
                    /* if one of values with coliding hash is in hash table, we have a problem */
                    cb = lyht_set_cb(ht, lyb_ptr_equal_cb);
                    if ((lyht_find(ht, &iter1, hash1, NULL) == 0) || (lyht_find(ht, &iter2, hash2, NULL) == 0)) {
                        LOGWRN(aug1->module->ctx, "Augmentations from modules \"%s\" and \"%s\" have fatal hash collision.",
                               lys_node_module(iter1)->name, lys_node_module(iter2)->name);
                        LOGWRN(aug1->module->ctx, "It will cause no errors if module \"%s\" is always loaded before \"%s\".",
                               lys_node_module(iter1)->name, lys_node_module(iter2)->name);
                        lyht_set_cb(ht, cb);
                        return 1;
                    }
                    lyht_set_cb(ht, cb);
                }
            }
            /* all hashes of the pair collide - this cannot be handled at all */
            LY_CHECK_ERR_RETURN(coliding == LYB_HASH_BITS, LOGINT(aug1->module->ctx), 1);
        }
    }

    /* no used hashes with collision found */
    return 0;
}
126
/* debug-build check: collect one representative node per foreign module augmenting parent
 * and check every augment pair for used hash collisions (warnings only, no return value) */
static void
lyb_check_augments(struct lys_node *parent, struct hash_table *ht)
{
    struct lys_node *sibling = NULL, **augs = NULL;
    void *ret;
    int augs_size = 1, augs_found = 0, i, j, found;
    struct lys_module *mod;

    assert(parent);
    mod = lys_node_module(parent);

    augs = malloc(sizeof sibling * augs_size);
    LY_CHECK_ERR_RETURN(!augs, LOGMEM(mod->ctx), );

    while ((sibling = (struct lys_node *)lys_getnext(sibling, parent, NULL, 0))) {
        /* build array of all augments from different modules */
        if (sibling->parent->nodetype == LYS_AUGMENT && lys_node_module(sibling->parent) != mod) {
            /* keep only one representative node per augmenting module */
            found = 0;
            for (i = 0; i < augs_found; i++) {
                if (lys_node_module(augs[i]) == lys_node_module(sibling)) {
                    found = 1;
                    break;
                }
            }
            if (!found) {
                if (augs_size == augs_found) {
                    /* grow the array geometrically; on failure log, clean up, and bail out */
                    augs_size *= 2;
                    ret = realloc(augs, sizeof sibling * augs_size);
                    if (!ret) {
                        LOGMEM(mod->ctx);
                        free(augs);
                        return;
                    }
                    augs = ret;
                }
                augs[augs_found] = sibling;
                augs_found++;
            }
        }
    }
    /* check collisions for every pair */
    for (i = 0; i < augs_found; i++) {
        for (j = i + 1; j < augs_found; j++) {
            if (lyb_check_augment_collision(ht, augs[i]->parent, augs[j]->parent)) {
                free(augs);
                return;
            }
        }
    }
    free(augs);
    return;
}
179
180 #endif
181
/* build the hash table for one sibling set of schema nodes
 *
 * sibling   - any node of the sibling set (the whole set is iterated internally)
 * models    - if set, only consider nodes from these modules (modules present when printing)
 * mod_count - number of models
 *
 * Every sibling is inserted with the lowest "collision ID" i such that its hash sequence
 * lyb_hash(node, 0..i) does not fully collide with an already-inserted node.
 * return: the new hash table, or NULL on error */
static struct hash_table *
lyb_hash_siblings(struct lys_node *sibling, const struct lys_module **models, int mod_count)
{
    struct hash_table *ht;
    struct lys_node *parent;
    const struct lys_module *mod;
    int i, j;
#ifndef NDEBUG
    int aug_col = 0;
    const struct lys_module *aug_mod = NULL;
#endif

    ht = lyht_new(1, sizeof(struct lys_node *), lyb_hash_equal_cb, NULL, 1);
    LY_CHECK_ERR_RETURN(!ht, LOGMEM(sibling->module->ctx), NULL);

    /* skip schema-only parents so that lys_getnext() iterates the data siblings */
    for (parent = lys_parent(sibling);
         parent && (parent->nodetype & (LYS_USES | LYS_CHOICE | LYS_CASE));
         parent = lys_parent(parent));
    mod = lys_node_module(sibling);

    sibling = NULL;
    /* ignore features so that their state does not affect hashes */
    while ((sibling = (struct lys_node *)lys_getnext(sibling, parent, mod, LYS_GETNEXT_NOSTATECHECK))) {
        if (models && !lyb_has_schema_model(sibling, models, mod_count)) {
            /* ignore models not present during printing */
            continue;
        }

#ifndef NDEBUG
        /* remember whether augments from 2 different foreign modules are present */
        if (sibling->parent && sibling->parent->nodetype == LYS_AUGMENT && lys_node_module(sibling->parent) != mod) {
            if (aug_mod && aug_mod != lys_node_module(sibling->parent)) {
                aug_col = 1;
            }
            aug_mod = lys_node_module(sibling);
        }
#endif

        /* find the first non-colliding hash (or specifically non-colliding hash sequence) */
        for (i = 0; i < LYB_HASH_BITS; ++i) {
            /* check that we are not colliding with nodes inserted with a lower collision ID than ours */
            for (j = i - 1; j > -1; --j) {
                if (lyb_hash_sequence_check(ht, sibling, j, i)) {
                    break;
                }
            }
            if (j > -1) {
                /* some check failed, we must use a higher collision ID */
                continue;
            }

            /* try to insert node with the current collision ID */
            if (!lyht_insert_with_resize_cb(ht, &sibling, lyb_hash(sibling, i), lyb_ptr_equal_cb, NULL)) {
                /* success, no collision */
                break;
            }

            /* make sure we really cannot insert it with this hash col ID (meaning the whole hash sequence is colliding) */
            if (i && !lyb_hash_sequence_check(ht, sibling, i, i)) {
                /* it can be inserted after all, even though there is already a node with the same last collision ID */
                lyht_set_cb(ht, lyb_ptr_equal_cb);
                if (lyht_insert(ht, &sibling, lyb_hash(sibling, i), NULL)) {
                    lyht_set_cb(ht, lyb_hash_equal_cb);
                    LOGINT(sibling->module->ctx);
                    lyht_free(ht);
                    return NULL;
                }
                lyht_set_cb(ht, lyb_hash_equal_cb);
                break;
            }
            /* there is still another colliding schema node with the same hash sequence, try higher collision ID */
        }

        if (i == LYB_HASH_BITS) {
            /* wow */
            LOGINT(sibling->module->ctx);
            lyht_free(ht);
            return NULL;
        }
    }

#ifndef NDEBUG
    /* diagnose the potentially fatal foreign-augment collisions */
    if (aug_col) {
        lyb_check_augments(parent, ht);
    }
#endif

    /* change val equal callback so that the HT is usable for finding value hashes */
    lyht_set_cb(ht, lyb_ptr_equal_cb);

    return ht;
}
273
274 static LYB_HASH
lyb_hash_find(struct hash_table * ht,struct lys_node * node)275 lyb_hash_find(struct hash_table *ht, struct lys_node *node)
276 {
277 LYB_HASH hash;
278 uint32_t i;
279
280 for (i = 0; i < LYB_HASH_BITS; ++i) {
281 hash = lyb_hash(node, i);
282 if (!hash) {
283 LOGINT(node->module->ctx);
284 return 0;
285 }
286
287 if (!lyht_find(ht, &node, hash, NULL)) {
288 /* success, no collision */
289 break;
290 }
291 }
292 /* cannot happen, we already calculated the hash */
293 if (i == LYB_HASH_BITS) {
294 LOGINT(node->module->ctx);
295 return 0;
296 }
297
298 return hash;
299 }
300
/* writing function handles writing size information
 *
 * Writes count bytes from buf, splitting the data so that no open chunk exceeds
 * LYB_SIZE_MAX; whenever a chunk fills up, its meta info (size + inner chunk count)
 * is back-patched at the reserved position and new meta space is skipped.
 * return: total number of bytes emitted, -1 on error */
static int
lyb_write(struct lyout *out, const uint8_t *buf, size_t count, struct lyb_state *lybs)
{
    int ret = 0, i, full_chunk_i;
    size_t r, to_write;
    uint8_t meta_buf[LYB_META_BYTES];

    assert(out && lybs);

    while (1) {
        /* check for full data chunks */
        to_write = count;
        full_chunk_i = -1;
        for (i = 0; i < lybs->used; ++i) {
            /* we want the innermost chunks resolved first, so replace previous full chunks */
            if (lybs->written[i] + to_write >= LYB_SIZE_MAX) {
                /* full chunk, do not write more than allowed */
                to_write = LYB_SIZE_MAX - lybs->written[i];
                full_chunk_i = i;
            }
        }

        if ((full_chunk_i == -1) && !count) {
            /* no full chunk to finish and no data left - done */
            break;
        }

        /* we are actually writing some data, not just finishing another chunk */
        if (to_write) {
            r = ly_write(out, (char *)buf, to_write);
            /* NOTE(review): r is size_t, so a negative ly_write() error return would wrap to
             * a huge value and pass this check - assumes ly_write() reports errors as short
             * writes here; confirm against ly_write() semantics */
            if (r < to_write) {
                return -1;
            }

            for (i = 0; i < lybs->used; ++i) {
                /* increase all written counters */
                lybs->written[i] += r;
                assert(lybs->written[i] <= LYB_SIZE_MAX);
            }
            /* decrease count/buf */
            count -= r;
            buf += r;

            ret += r;
        }

        if (full_chunk_i > -1) {
            /* write the meta information (inner chunk count and chunk size) */
            meta_buf[0] = lybs->written[full_chunk_i] & 0xFF;
            meta_buf[1] = lybs->inner_chunks[full_chunk_i] & 0xFF;

            r = ly_write_skipped(out, lybs->position[full_chunk_i], (char *)meta_buf, LYB_META_BYTES);
            if (r < LYB_META_BYTES) {
                return -1;
            }

            /* zero written and inner chunks */
            lybs->written[full_chunk_i] = 0;
            lybs->inner_chunks[full_chunk_i] = 0;

            /* skip space for another chunk size */
            r = ly_write_skip(out, LYB_META_BYTES, &lybs->position[full_chunk_i]);
            if (r < LYB_META_BYTES) {
                return -1;
            }

            ret += r;

            /* increase inner chunk count */
            for (i = 0; i < full_chunk_i; ++i) {
                if (lybs->inner_chunks[i] == LYB_INCHUNK_MAX) {
                    LOGINT(lybs->ctx);
                    return -1;
                }
                ++lybs->inner_chunks[i];
            }
        }
    }

    return ret;
}
382
383 static int
lyb_write_stop_subtree(struct lyout * out,struct lyb_state * lybs)384 lyb_write_stop_subtree(struct lyout *out, struct lyb_state *lybs)
385 {
386 int r;
387 uint8_t meta_buf[LYB_META_BYTES];
388
389 /* write the meta chunk information */
390 meta_buf[0] = lybs->written[lybs->used - 1] & 0xFF;
391 meta_buf[1] = lybs->inner_chunks[lybs->used - 1] & 0xFF;
392
393 r = ly_write_skipped(out, lybs->position[lybs->used - 1], (char *)&meta_buf, LYB_META_BYTES);
394 if (r < LYB_META_BYTES) {
395 return -1;
396 }
397
398 --lybs->used;
399 return 0;
400 }
401
402 static int
lyb_write_start_subtree(struct lyout * out,struct lyb_state * lybs)403 lyb_write_start_subtree(struct lyout *out, struct lyb_state *lybs)
404 {
405 int i;
406
407 if (lybs->used == lybs->size) {
408 lybs->size += LYB_STATE_STEP;
409 lybs->written = ly_realloc(lybs->written, lybs->size * sizeof *lybs->written);
410 lybs->position = ly_realloc(lybs->position, lybs->size * sizeof *lybs->position);
411 lybs->inner_chunks = ly_realloc(lybs->inner_chunks, lybs->size * sizeof *lybs->inner_chunks);
412 LY_CHECK_ERR_RETURN(!lybs->written || !lybs->position || !lybs->inner_chunks, LOGMEM(lybs->ctx), -1);
413 }
414
415 ++lybs->used;
416 lybs->written[lybs->used - 1] = 0;
417 lybs->inner_chunks[lybs->used - 1] = 0;
418
419 /* another inner chunk */
420 for (i = 0; i < lybs->used - 1; ++i) {
421 if (lybs->inner_chunks[i] == LYB_INCHUNK_MAX) {
422 LOGINT(lybs->ctx);
423 return -1;
424 }
425 ++lybs->inner_chunks[i];
426 }
427
428 return ly_write_skip(out, LYB_META_BYTES, &lybs->position[lybs->used - 1]);
429 }
430
431 static int
lyb_write_number(uint64_t num,size_t bytes,struct lyout * out,struct lyb_state * lybs)432 lyb_write_number(uint64_t num, size_t bytes, struct lyout *out, struct lyb_state *lybs)
433 {
434 /* correct byte order */
435 num = htole64(num);
436
437 return lyb_write(out, (uint8_t *)&num, bytes, lybs);
438 }
439
440 static int
lyb_write_enum(uint32_t enum_idx,uint32_t count,struct lyout * out,struct lyb_state * lybs)441 lyb_write_enum(uint32_t enum_idx, uint32_t count, struct lyout *out, struct lyb_state *lybs)
442 {
443 size_t bytes;
444
445 assert(enum_idx < count);
446
447 if (count < (1 << 8)) {
448 bytes = 1;
449 } else if (count < (1 << 16)) {
450 bytes = 2;
451 } else if (count < (1 << 24)) {
452 bytes = 3;
453 } else {
454 bytes = 4;
455 }
456
457 return lyb_write_number(enum_idx, bytes, out, lybs);
458 }
459
460 static int
lyb_write_string(const char * str,size_t str_len,int with_length,struct lyout * out,struct lyb_state * lybs)461 lyb_write_string(const char *str, size_t str_len, int with_length, struct lyout *out, struct lyb_state *lybs)
462 {
463 int r, ret = 0;
464
465 if (!str_len) {
466 str_len = strlen(str);
467 }
468
469 if (with_length) {
470 /* print length on 2 bytes */
471 if (str_len > UINT16_MAX) {
472 LOGINT(lybs->ctx);
473 return -1;
474 }
475 ret += (r = lyb_write_number(str_len, 2, out, lybs));
476 if (r < 0) {
477 return -1;
478 }
479 }
480
481 ret += (r = lyb_write(out, (const uint8_t *)str, str_len, lybs));
482 if (r < 0) {
483 return -1;
484 }
485
486 return ret;
487 }
488
489 static int
lyb_print_model(struct lyout * out,const struct lys_module * mod,struct lyb_state * lybs)490 lyb_print_model(struct lyout *out, const struct lys_module *mod, struct lyb_state *lybs)
491 {
492 int r, ret = 0;
493 uint16_t revision;
494
495 /* model name length and model name */
496 ret += (r = lyb_write_string(mod->name, 0, 1, out, lybs));
497 if (r < 0) {
498 return -1;
499 }
500
501 /* model revision as XXXX XXXX XXXX XXXX (2B) (year is offset from 2000)
502 * YYYY YYYM MMMD DDDD */
503 revision = 0;
504 if (mod->rev_size) {
505 r = atoi(mod->rev[0].date);
506 r -= 2000;
507 r <<= 9;
508
509 revision |= r;
510
511 r = atoi(mod->rev[0].date + 5);
512 r <<= 5;
513
514 revision |= r;
515
516 r = atoi(mod->rev[0].date + 8);
517
518 revision |= r;
519 }
520 ret += (r = lyb_write_number(revision, sizeof revision, out, lybs));
521 if (r < 0) {
522 return -1;
523 }
524
525 return ret;
526 }
527
/* return 1 if mod is already present (by pointer identity) in models, 0 otherwise */
static int
is_added_model(const struct lys_module **models, size_t mod_count, const struct lys_module *mod)
{
    size_t idx = mod_count;

    while (idx--) {
        if (models[idx] == mod) {
            return 1;
        }
    }

    return 0;
}
541
542 static void
add_model(const struct lys_module *** models,size_t * mod_count,const struct lys_module * mod)543 add_model(const struct lys_module ***models, size_t *mod_count, const struct lys_module *mod)
544 {
545 if (is_added_model(*models, *mod_count, mod)) {
546 return;
547 }
548
549 *models = ly_realloc(*models, ++(*mod_count) * sizeof **models);
550 (*models)[*mod_count - 1] = mod;
551 }
552
/* collect and print all modules whose data (or augments/deviations of used modules)
 * may appear in the printed tree: 2-byte module count followed by each module reference
 * return: number of written bytes (write errors from helpers are folded into the sum) */
static int
lyb_print_data_models(struct lyout *out, const struct lyd_node *root, struct lyb_state *lybs)
{
    int ret = 0;
    const struct lys_module **models = NULL, *mod;
    const struct lys_submodule *submod;
    const struct lyd_node *node;
    size_t mod_count = 0;
    uint32_t idx = 0, i, j;

    /* first, collect all data node modules */
    LY_TREE_FOR(root, node) {
        mod = lyd_node_module(node);
        add_model(&models, &mod_count, mod);
    }

    if (root) {
        /* then add all models augmenting or deviating the used models */
        idx = ly_ctx_internal_modules_count(root->schema->module->ctx);
        while ((mod = ly_ctx_get_module_iter(root->schema->module->ctx, &idx))) {
            if (!mod->implemented) {
                /* note: the label sits inside this if so the gotos below can jump to the
                 * shared "continue" without re-testing the condition */
next_mod:
                continue;
            }

            /* deviations of an already-collected module pull this module in */
            for (i = 0; i < mod->deviation_size; ++i) {
                if (mod->deviation[i].orig_node && is_added_model(models, mod_count, lys_node_module(mod->deviation[i].orig_node))) {
                    add_model(&models, &mod_count, mod);
                    goto next_mod;
                }
            }
            /* the same for augments */
            for (i = 0; i < mod->augment_size; ++i) {
                if (is_added_model(models, mod_count, lys_node_module(mod->augment[i].target))) {
                    add_model(&models, &mod_count, mod);
                    goto next_mod;
                }
            }

            /* submodules */
            for (j = 0; j < mod->inc_size; ++j) {
                submod = mod->inc[j].submodule;

                for (i = 0; i < submod->deviation_size; ++i) {
                    if (submod->deviation[i].orig_node && is_added_model(models, mod_count, lys_node_module(submod->deviation[i].orig_node))) {
                        add_model(&models, &mod_count, mod);
                        goto next_mod;
                    }
                }
                for (i = 0; i < submod->augment_size; ++i) {
                    if (is_added_model(models, mod_count, lys_node_module(submod->augment[i].target))) {
                        add_model(&models, &mod_count, mod);
                        goto next_mod;
                    }
                }
            }
        }
    }

    /* now write module count on 2 bytes */
    ret += lyb_write_number(mod_count, 2, out, lybs);

    /* and all the used models */
    for (i = 0; i < mod_count; ++i) {
        ret += lyb_print_model(out, models[i], lybs);
    }

    free(models);
    return ret;
}
622
/* write the 3-byte LYB magic number */
static int
lyb_print_magic_number(struct lyout *out)
{
    /* 'l', 'y', 'b' - 0x6c7962 */
    char magic[3] = {'l', 'y', 'b'};

    return ly_write(out, magic, 3);
}
635
/* write the 1-byte LYB header (currently all flags zero) */
static int
lyb_print_header(struct lyout *out)
{
    uint8_t flags = 0;

    /* TODO version, some other flags? */
    return ly_write(out, (char *)&flags, sizeof flags);
}
647
/* print an anydata/anyxml node: 1 byte of value type followed by the content
 *
 * NOTE: the node is modified in place - XML and data-tree values are converted to
 * string/LYB representation and the originals are freed, so the printed node keeps
 * the converted value afterwards.
 * return: number of written bytes, -1 on error */
static int
lyb_print_anydata(struct lyd_node_anydata *anydata, struct lyout *out, struct lyb_state *lybs)
{
    int ret = 0, len;
    char *buf;

    if (anydata->value_type == LYD_ANYDATA_XML) {
        /* transform XML into CONSTSTRING */
        lyxml_print_mem(&buf, anydata->value.xml, LYXML_PRINT_SIBLINGS);
        lyxml_free_withsiblings(anydata->schema->module->ctx, anydata->value.xml);

        anydata->value_type = LYD_ANYDATA_CONSTSTRING;
        anydata->value.str = lydict_insert_zc(anydata->schema->module->ctx, buf);
    } else if (anydata->value_type == LYD_ANYDATA_DATATREE) {
        /* print data tree into LYB */
        lyd_print_mem(&buf, anydata->value.tree, LYD_LYB, LYP_WITHSIBLINGS);
        lyd_free_withsiblings(anydata->value.tree);

        anydata->value_type = LYD_ANYDATA_LYB;
        anydata->value.mem = buf;
    } else if (anydata->value_type & LYD_ANYDATA_STRING) {
        /* dynamic value, only used for input */
        LOGERR(lybs->ctx, LY_EINT, "Unsupported anydata value type to print.");
        return -1;
    }

    /* first byte is type */
    ret += lyb_write(out, (uint8_t *)&anydata->value_type, sizeof anydata->value_type, lybs);

    /* followed by the content */
    if (anydata->value_type == LYD_ANYDATA_LYB) {
        /* LYB content is binary, its length must be computed from the data itself */
        len = lyd_lyb_data_length(anydata->value.mem);
        if (len > -1) {
            ret += lyb_write_string(anydata->value.str, (size_t)len, 0, out, lybs);
        } else {
            ret = len;
        }
    } else {
        ret += lyb_write_string(anydata->value.str, 0, 0, out, lybs);
    }

    return ret;
}
691
/* print a single typed value: one type byte followed by a type-specific encoding
 *
 * type        - schema type (leafref chains are resolved to the target type)
 * value_str   - canonical string form of the value
 * value       - parsed value union
 * value_type  - resolved data type of the stored value
 * value_flags - LY_VALUE_USER / LY_VALUE_UNRES flags
 * dflt        - set if the value is a default
 * return: accumulated byte count from the lyb_write*() calls */
static int
lyb_print_value(const struct lys_type *type, const char *value_str, lyd_val value, LY_DATA_TYPE value_type,
                uint8_t value_flags, uint8_t dflt, struct lyout *out, struct lyb_state *lybs)
{
    int ret = 0;
    uint8_t byte = 0;
    size_t count, i, bits_i;
    LY_DATA_TYPE dtype;

    /* value type byte - ABCD DDDD
     *
     * A - dflt flag
     * B - user type flag
     * C - unres flag
     * D (5b) - data type value
     */
    if (dflt) {
        byte |= 0x80;
    }
    if (value_flags & LY_VALUE_USER) {
        byte |= 0x40;
    }
    if (value_flags & LY_VALUE_UNRES) {
        byte |= 0x20;
    }

    /* we have only 5b available, must be enough */
    assert((value_type & 0x1f) == value_type);

    /* find actual type */
    while (type->base == LY_TYPE_LEAFREF) {
        type = &type->info.lref.target->type;
    }

    /* user types and unions are printed as their canonical string */
    if ((value_flags & LY_VALUE_USER) || (type->base == LY_TYPE_UNION)) {
        value_type = LY_TYPE_STRING;
    } else while (value_type == LY_TYPE_LEAFREF) {
        assert(!(value_flags & LY_VALUE_UNRES));

        /* update value_type and value to that of the target */
        value_type = ((struct lyd_node_leaf_list *)value.leafref)->value_type;
        value = ((struct lyd_node_leaf_list *)value.leafref)->value;
    }

    /* store the value type */
    byte |= value_type & 0x1f;

    /* write value type byte */
    ret += lyb_write(out, &byte, sizeof byte, lybs);

    /* print value itself */
    if (value_flags & LY_VALUE_USER) {
        dtype = LY_TYPE_STRING;
    } else {
        dtype = value_type;
    }
    switch (dtype) {
    case LY_TYPE_BINARY:
    case LY_TYPE_INST:
    case LY_TYPE_STRING:
    case LY_TYPE_UNION:
    case LY_TYPE_IDENT:
    case LY_TYPE_UNKNOWN:
        /* store string */
        ret += lyb_write_string(value_str, 0, 0, out, lybs);
        break;
    case LY_TYPE_BITS:
        /* find the correct structure - walk the derivation chain to the one defining the bits */
        for (; !type->info.bits.count; type = &type->der->type);

        /* store a bitfield */
        bits_i = 0;

        /* full bytes, 8 bit flags each, LSB first */
        for (count = type->info.bits.count / 8; count; --count) {
            /* will be a full byte */
            for (byte = 0, i = 0; i < 8; ++i) {
                if (value.bit[bits_i + i]) {
                    byte |= (1 << i);
                }
            }
            ret += lyb_write(out, &byte, sizeof byte, lybs);
            bits_i += 8;
        }

        /* store the remainder */
        if (type->info.bits.count % 8) {
            for (byte = 0, i = 0; i < type->info.bits.count % 8; ++i) {
                if (value.bit[bits_i + i]) {
                    byte |= (1 << i);
                }
            }
            ret += lyb_write(out, &byte, sizeof byte, lybs);
        }
        break;
    case LY_TYPE_BOOL:
        /* store the whole byte */
        byte = 0;
        if (value.bln) {
            byte = 1;
        }
        ret += lyb_write(out, &byte, sizeof byte, lybs);
        break;
    case LY_TYPE_EMPTY:
        /* nothing to store */
        break;
    case LY_TYPE_ENUM:
        /* find the correct structure - walk the derivation chain to the one defining the enums */
        for (; !type->info.enums.count; type = &type->der->type);

        /* store the enum index (save bytes if possible) */
        i = value.enm - type->info.enums.enm;
        ret += lyb_write_enum(i, type->info.enums.count, out, lybs);
        break;
    case LY_TYPE_INT8:
    case LY_TYPE_UINT8:
        ret += lyb_write_number(value.uint8, 1, out, lybs);
        break;
    case LY_TYPE_INT16:
    case LY_TYPE_UINT16:
        ret += lyb_write_number(value.uint16, 2, out, lybs);
        break;
    case LY_TYPE_INT32:
    case LY_TYPE_UINT32:
        ret += lyb_write_number(value.uint32, 4, out, lybs);
        break;
    case LY_TYPE_DEC64:
    case LY_TYPE_INT64:
    case LY_TYPE_UINT64:
        ret += lyb_write_number(value.uint64, 8, out, lybs);
        break;
    default:
        return 0;
    }

    return ret;
}
828
/* print all attributes of a data node: 1-byte attribute count, then one subtree chunk
 * per attribute (module reference, annotation name, typed value)
 * return: number of written bytes, -1 on error */
static int
lyb_print_attributes(struct lyout *out, struct lyd_attr *attr, struct lyb_state *lybs)
{
    int r, ret = 0;
    uint8_t count;
    struct lyd_attr *iter;
    struct lys_type **type;

    /* count attributes - the 1-byte encoding caps them at UINT8_MAX */
    for (count = 0, iter = attr; iter; ++count, iter = iter->next) {
        if (count == UINT8_MAX) {
            LOGERR(lybs->ctx, LY_EINT, "Maximum supported number of data node attributes is %u.", UINT8_MAX);
            return -1;
        }
    }

    /* write number of attributes on 1 byte */
    ret += (r = lyb_write(out, &count, 1, lybs));
    if (r < 0) {
        return -1;
    }

    /* write all the attributes */
    LY_TREE_FOR(attr, iter) {
        /* each attribute is a subtree */
        ret += (r = lyb_write_start_subtree(out, lybs));
        if (r < 0) {
            return -1;
        }

        /* model */
        ret += (r = lyb_print_model(out, iter->annotation->module, lybs));
        if (r < 0) {
            return -1;
        }

        /* annotation name with length */
        ret += (r = lyb_write_string(iter->annotation->arg_value, 0, 1, out, lybs));
        if (r < 0) {
            return -1;
        }

        /* get the type from the annotation extension definition */
        type = (struct lys_type **)lys_ext_complex_get_substmt(LY_STMT_TYPE, iter->annotation, NULL);
        if (!type || !(*type)) {
            return -1;
        }

        /* attribute value */
        ret += (r = lyb_print_value(*type, iter->value_str, iter->value, iter->value_type, iter->value_flags, 0, out, lybs));
        if (r < 0) {
            return -1;
        }

        /* finish attribute subtree */
        ret += (r = lyb_write_stop_subtree(out, lybs));
        if (r < 0) {
            return -1;
        }
    }

    return ret;
}
892
/* print the schema hash identifying a data node
 *
 * A sibling hash table is created (or fetched from the lybs->sib_ht cache) on demand
 * and cached in *sibling_ht for the caller's reuse. The node's hash is written first;
 * if its collision ID bit is not the top bit, all lower-collision-ID hashes follow
 * so the parser can resolve the collision.
 * return: number of written bytes, -1 on error */
static int
lyb_print_schema_hash(struct lyout *out, struct lys_node *schema, struct hash_table **sibling_ht, struct lyb_state *lybs)
{
    int r, ret = 0;
    void *mem;
    uint32_t i;
    LYB_HASH hash;
    struct lys_node *first_sibling, *parent;

    /* create whole sibling HT if not already created and saved */
    if (!*sibling_ht) {
        /* get first schema data sibling (or input/output) */
        for (parent = lys_parent(schema);
             parent && (parent->nodetype & (LYS_USES | LYS_CASE | LYS_CHOICE));
             parent = lys_parent(parent));

        first_sibling = (struct lys_node *)lys_getnext(NULL, parent, lys_node_module(schema), 0);
        /* look the sibling set up in the cache first */
        for (r = 0; r < lybs->sib_ht_count; ++r) {
            if (lybs->sib_ht[r].first_sibling == first_sibling) {
                /* we have already created a hash table for these siblings */
                *sibling_ht = lybs->sib_ht[r].ht;
                break;
            }
        }

        if (!*sibling_ht) {
            /* we must create sibling hash table */
            *sibling_ht = lyb_hash_siblings(first_sibling, NULL, 0);
            if (!*sibling_ht) {
                return -1;
            }

            /* and save it */
            ++lybs->sib_ht_count;
            mem = realloc(lybs->sib_ht, lybs->sib_ht_count * sizeof *lybs->sib_ht);
            LY_CHECK_ERR_RETURN(!mem, LOGMEM(lybs->ctx), -1);
            lybs->sib_ht = mem;

            lybs->sib_ht[lybs->sib_ht_count - 1].first_sibling = first_sibling;
            lybs->sib_ht[lybs->sib_ht_count - 1].ht = *sibling_ht;
        }
    }

    /* get our hash */
    hash = lyb_hash_find(*sibling_ht, schema);
    if (!hash) {
        return -1;
    }

    /* write the hash */
    ret += (r = lyb_write(out, &hash, sizeof hash, lybs));
    if (r < 0) {
        return -1;
    }

    if (hash & LYB_HASH_COLLISION_ID) {
        /* no collision for this hash, we are done */
        return ret;
    }

    /* written hash was a collision, write also all the preceding hashes */
    for (i = 0; !(hash & (LYB_HASH_COLLISION_ID >> i)); ++i);

    for (; i; --i) {
        hash = lyb_hash(schema, i - 1);
        if (!hash) {
            return -1;
        }
        assert(hash & (LYB_HASH_COLLISION_ID >> (i - 1)));

        ret += (r = lyb_write(out, &hash, sizeof hash, lybs));
        if (r < 0) {
            return -1;
        }
    }

    return ret;
}
971
/* recursively print one data subtree as a LYB chunk
 *
 * node       - subtree root to print
 * sibling_ht - cached sibling hash table for node's sibling set (filled on demand)
 * top_level  - nonzero for top-level nodes, which are additionally prefixed with
 *              their module reference
 * return: number of written bytes, -1 on error */
static int
lyb_print_subtree(struct lyout *out, const struct lyd_node *node, struct hash_table **sibling_ht, struct lyb_state *lybs,
                  int top_level)
{
    int r, ret = 0;
    struct lyd_node_leaf_list *leaf;
    struct hash_table *child_ht = NULL;

    /* register a new subtree */
    ret += (r = lyb_write_start_subtree(out, lybs));
    if (r < 0) {
        return -1;
    }

    /*
     * write the node information
     */
    if (top_level) {
        /* write model info first */
        ret += (r = lyb_print_model(out, lyd_node_module(node), lybs));
        if (r < 0) {
            return -1;
        }
    }

    ret += (r = lyb_print_schema_hash(out, node->schema, sibling_ht, lybs));
    if (r < 0) {
        return -1;
    }

    ret += (r = lyb_print_attributes(out, node->attr, lybs));
    if (r < 0) {
        return -1;
    }

    /* write node content */
    switch (node->schema->nodetype) {
    case LYS_CONTAINER:
    case LYS_LIST:
    case LYS_NOTIF:
    case LYS_RPC:
    case LYS_ACTION:
        /* nothing to write */
        break;
    case LYS_LEAF:
    case LYS_LEAFLIST:
        leaf = (struct lyd_node_leaf_list *)node;
        ret += (r = lyb_print_value(&((struct lys_node_leaf *)leaf->schema)->type, leaf->value_str, leaf->value,
                                    leaf->value_type, leaf->value_flags, leaf->dflt, out, lybs));
        if (r < 0) {
            return -1;
        }
        break;
    case LYS_ANYXML:
    case LYS_ANYDATA:
        ret += (r = lyb_print_anydata((struct lyd_node_anydata *)node, out, lybs));
        if (r < 0) {
            return -1;
        }
        break;
    default:
        return -1;
    }

    /* recursively write all the descendants
     * (node is reused as the child iterator; all children share one sibling HT) */
    r = 0;
    if (node->schema->nodetype & (LYS_CONTAINER | LYS_LIST | LYS_NOTIF | LYS_RPC | LYS_ACTION)) {
        LY_TREE_FOR(node->child, node) {
            ret += (r = lyb_print_subtree(out, node, &child_ht, lybs, 0));
            if (r < 0) {
                break;
            }
        }
    }
    if (r < 0) {
        return -1;
    }

    /* finish this subtree */
    ret += (r = lyb_write_stop_subtree(out, lybs));
    if (r < 0) {
        return -1;
    }

    return ret;
}
1058
/* LYB printer entry point: print root (and its siblings with LYP_WITHSIBLINGS)
 * as magic number + header + used-module list + subtree chunks + terminating zero byte
 * return: EXIT_SUCCESS or EXIT_FAILURE */
int
lyb_print_data(struct lyout *out, const struct lyd_node *root, int options)
{
    int r, ret = 0, rc = EXIT_SUCCESS;
    uint8_t zero = 0;
    struct hash_table *top_sibling_ht = NULL;
    const struct lys_module *prev_mod = NULL;
    struct lys_node *parent;
    struct lyb_state lybs;

    memset(&lybs, 0, sizeof lybs);

    if (root) {
        lybs.ctx = lyd_node_module(root)->ctx;

        /* only top-level nodes (or nodes under an extension) can be printed */
        for (parent = lys_parent(root->schema); parent && (parent->nodetype == LYS_USES); parent = lys_parent(parent));
        if (parent && (parent->nodetype != LYS_EXT)) {
            LOGERR(lybs.ctx, LY_EINVAL, "LYB printer supports only printing top-level nodes.");
            return EXIT_FAILURE;
        }
    }

    /* LYB magic number */
    ret += (r = lyb_print_magic_number(out));
    if (r < 0) {
        rc = EXIT_FAILURE;
        goto finish;
    }

    /* LYB header */
    ret += (r = lyb_print_header(out));
    if (r < 0) {
        rc = EXIT_FAILURE;
        goto finish;
    }

    /* all used models */
    ret += (r = lyb_print_data_models(out, root, &lybs));
    if (r < 0) {
        rc = EXIT_FAILURE;
        goto finish;
    }

    LY_TREE_FOR(root, root) {
        /* do not reuse sibling hash tables from different modules */
        if (lyd_node_module(root) != prev_mod) {
            top_sibling_ht = NULL;
            prev_mod = lyd_node_module(root);
        }

        ret += (r = lyb_print_subtree(out, root, &top_sibling_ht, &lybs, 1));
        if (r < 0) {
            rc = EXIT_FAILURE;
            goto finish;
        }

        if (!(options & LYP_WITHSIBLINGS)) {
            break;
        }
    }

    /* ending zero byte */
    ret += (r = lyb_write(out, &zero, sizeof zero, &lybs));
    if (r < 0) {
        rc = EXIT_FAILURE;
    }

finish:
    /* release the chunk state and all cached sibling hash tables */
    free(lybs.written);
    free(lybs.position);
    free(lybs.inner_chunks);
    for (r = 0; r < lybs.sib_ht_count; ++r) {
        lyht_free(lybs.sib_ht[r].ht);
    }
    free(lybs.sib_ht);

    return rc;
}
1137