/*
  Copyright 2008-2015 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

/**
   @file util.h Helper functions for the LV2 Atom extension.

   Note that these functions are all static inline; do not take their address.

   This header is non-normative; it is provided for convenience.
*/

/**
   @defgroup util Utilities
   @ingroup atom
   @{
*/

#ifndef LV2_ATOM_UTIL_H
#define LV2_ATOM_UTIL_H

#include <stdarg.h>
#include <stdint.h>
#include <string.h>

#include "atom.h"

#ifdef __cplusplus
extern "C" {
#else
#    include <stdbool.h>
#endif

/** Pad a size to 64 bits. */
static inline uint32_t
lv2_atom_pad_size(uint32_t size)
{
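	/* Round up to the next multiple of 8, e.g. 1..8 -> 8 and 9..16 -> 16,
	   so atoms stay 64-bit aligned. */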
	return (size + 7U) & (~7U);
}

/** Return the total size of `atom`, including the header. */
static inline uint32_t
lv2_atom_total_size(const LV2_Atom* atom)
{
	return (uint32_t)sizeof(LV2_Atom) + atom->size;
}

/** Return true iff `atom` is null. */
static inline bool
lv2_atom_is_null(const LV2_Atom* atom)
{
	return !atom || (atom->type == 0 && atom->size == 0);
}

/** Return true iff `a` is equal to `b`. */
static inline bool
lv2_atom_equals(const LV2_Atom* a, const LV2_Atom* b)
{
	return (a == b) || ((a->type == b->type) &&
	                    (a->size == b->size) &&
	                    !memcmp(a + 1, b + 1, a->size));
}

/**
   @name Sequence Iterator
   @{
*/

/** Get an iterator pointing to the first event in a Sequence body. */
static inline const LV2_Atom_Event*
lv2_atom_sequence_begin(const LV2_Atom_Sequence_Body* body)
{
	return (const LV2_Atom_Event*)(body + 1);
}

/** Get an iterator pointing to the end of a Sequence body. */
static inline const LV2_Atom_Event*
lv2_atom_sequence_end(const LV2_Atom_Sequence_Body* body, uint32_t size)
{
	return (const LV2_Atom_Event*)((const uint8_t*)body + lv2_atom_pad_size(size));
}

/** Like lv2_atom_sequence_end(), but for a mutable Sequence body. */
static inline LV2_Atom_Event*
lv2_atom_sequence_end2(LV2_Atom_Sequence_Body* body, uint32_t size)
{
	return (LV2_Atom_Event*)((uint8_t*)body + lv2_atom_pad_size(size));
}

/** Return true iff `i` has reached the end of `body`. */
static inline bool
lv2_atom_sequence_is_end(const LV2_Atom_Sequence_Body* body,
                         uint32_t                      size,
                         const LV2_Atom_Event*         i)
{
	return (const uint8_t*)i >= ((const uint8_t*)body + size);
}

/** Return an iterator to the element following `i`. */
static inline const LV2_Atom_Event*
lv2_atom_sequence_next(const LV2_Atom_Event* i)
{
	return (const LV2_Atom_Event*)((const uint8_t*)i
	                               + sizeof(LV2_Atom_Event)
	                               + lv2_atom_pad_size(i->body.size));
}

/**
   A macro for iterating over all events in a Sequence.
   @param seq  The sequence to iterate over
   @param iter The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_SEQUENCE_FOREACH(sequence, ev) {
       // Do something with ev (an LV2_Atom_Event*) here...
   }
   @endcode
*/
#define LV2_ATOM_SEQUENCE_FOREACH(seq, iter) \
	for (const LV2_Atom_Event* iter = lv2_atom_sequence_begin(&(seq)->body); \
	     !lv2_atom_sequence_is_end(&(seq)->body, (seq)->atom.size, (iter)); \
	     iter = lv2_atom_sequence_next(iter))

/** Like LV2_ATOM_SEQUENCE_FOREACH but for a headerless sequence body. */
#define LV2_ATOM_SEQUENCE_BODY_FOREACH(body, size, iter) \
	for (const LV2_Atom_Event* iter = lv2_atom_sequence_begin(body); \
	     !lv2_atom_sequence_is_end(body, size, (iter)); \
	     iter = lv2_atom_sequence_next(iter))

/**
   @}
   @name Sequence Utilities
   @{
*/

/**
   Clear all events from `seq`.

   This simply resets the size field; the other fields are left untouched.
*/
static inline void
lv2_atom_sequence_clear(LV2_Atom_Sequence* seq)
{
	seq->atom.size = sizeof(LV2_Atom_Sequence_Body);
}

/**
   Append an event to the end of `seq`.

   @param seq Sequence to append to.
   @param capacity Total capacity of the sequence atom
   (e.g. as set by the host for sequence output ports).
   @param event Event to write.

   @return A pointer to the newly written event in `seq`,
   or NULL on failure (insufficient space).
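
   For example, a plugin might forward every incoming event to an output
   port roughly like this (a minimal sketch; `in`, `out`, and `out_capacity`
   are hypothetical names, with `out_capacity` being the output buffer size
   provided by the host):
   @code
   lv2_atom_sequence_clear(out);
   LV2_ATOM_SEQUENCE_FOREACH(in, ev) {
       lv2_atom_sequence_append_event(out, out_capacity, ev);
   }
   @endcode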
*/
static inline LV2_Atom_Event*
lv2_atom_sequence_append_event(LV2_Atom_Sequence*    seq,
                               uint32_t              capacity,
                               const LV2_Atom_Event* event)
{
	const uint32_t total_size = (uint32_t)sizeof(*event) + event->body.size;
	if (capacity - seq->atom.size < total_size) {
		return NULL;
	}

	LV2_Atom_Event* e = lv2_atom_sequence_end2(&seq->body, seq->atom.size);
	memcpy(e, event, total_size);

	seq->atom.size += lv2_atom_pad_size(total_size);

	return e;
}

/**
   @}
   @name Tuple Iterator
   @{
*/

/** Get an iterator pointing to the first element in `tup`. */
static inline const LV2_Atom*
lv2_atom_tuple_begin(const LV2_Atom_Tuple* tup)
{
	return (const LV2_Atom*)(LV2_ATOM_BODY_CONST(tup));
}

/** Return true iff `i` has reached the end of `body`. */
static inline bool
lv2_atom_tuple_is_end(const void* body, uint32_t size, const LV2_Atom* i)
{
	return (const uint8_t*)i >= ((const uint8_t*)body + size);
}

/** Return an iterator to the element following `i`. */
static inline const LV2_Atom*
lv2_atom_tuple_next(const LV2_Atom* i)
{
	return (const LV2_Atom*)(
		(const uint8_t*)i + sizeof(LV2_Atom) + lv2_atom_pad_size(i->size));
}

/**
   A macro for iterating over all elements of a Tuple.
   @param tuple The tuple to iterate over
   @param iter The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_TUPLE_FOREACH(tuple, elem) {
       // Do something with elem (an LV2_Atom*) here...
   }
   @endcode
*/
#define LV2_ATOM_TUPLE_FOREACH(tuple, iter) \
	for (const LV2_Atom* iter = lv2_atom_tuple_begin(tuple); \
	     !lv2_atom_tuple_is_end(LV2_ATOM_BODY_CONST(tuple), (tuple)->atom.size, (iter)); \
	     iter = lv2_atom_tuple_next(iter))

/** Like LV2_ATOM_TUPLE_FOREACH but for a headerless tuple body. */
#define LV2_ATOM_TUPLE_BODY_FOREACH(body, size, iter) \
	for (const LV2_Atom* iter = (const LV2_Atom*)body; \
	     !lv2_atom_tuple_is_end(body, size, (iter)); \
	     iter = lv2_atom_tuple_next(iter))

/**
   @}
   @name Object Iterator
   @{
*/

/** Return a pointer to the first property in `body`. */
static inline const LV2_Atom_Property_Body*
lv2_atom_object_begin(const LV2_Atom_Object_Body* body)
{
	return (const LV2_Atom_Property_Body*)(body + 1);
}

/** Return true iff `i` has reached the end of `body`. */
static inline bool
lv2_atom_object_is_end(const LV2_Atom_Object_Body*   body,
                       uint32_t                      size,
                       const LV2_Atom_Property_Body* i)
{
	return (const uint8_t*)i >= ((const uint8_t*)body + size);
}

/** Return an iterator to the property following `i`. */
static inline const LV2_Atom_Property_Body*
lv2_atom_object_next(const LV2_Atom_Property_Body* i)
{
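	/* A property body is a key, a context, and a value atom.  Skip the two
	   leading uint32_t fields (key and context) to reach the value header. */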
	const LV2_Atom* const value = (const LV2_Atom*)(
		(const uint8_t*)i + 2 * sizeof(uint32_t));
	return (const LV2_Atom_Property_Body*)(
		(const uint8_t*)i + lv2_atom_pad_size(
			(uint32_t)sizeof(LV2_Atom_Property_Body) + value->size));
}

/**
   A macro for iterating over all properties of an Object.
   @param obj The object to iterate over
   @param iter The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_OBJECT_FOREACH(object, prop) {
       // Do something with prop (an LV2_Atom_Property_Body*) here...
   }
   @endcode
*/
#define LV2_ATOM_OBJECT_FOREACH(obj, iter) \
	for (const LV2_Atom_Property_Body* iter = lv2_atom_object_begin(&(obj)->body); \
	     !lv2_atom_object_is_end(&(obj)->body, (obj)->atom.size, (iter)); \
	     iter = lv2_atom_object_next(iter))

/** Like LV2_ATOM_OBJECT_FOREACH but for a headerless object body. */
#define LV2_ATOM_OBJECT_BODY_FOREACH(body, size, iter) \
	for (const LV2_Atom_Property_Body* iter = lv2_atom_object_begin(body); \
	     !lv2_atom_object_is_end(body, size, (iter)); \
	     iter = lv2_atom_object_next(iter))

/**
   @}
   @name Object Query
   @{
*/

/** A single entry in an Object query. */
typedef struct {
	uint32_t         key;    /**< Key to query (input set by user) */
	const LV2_Atom** value;  /**< Found value (output set by query function) */
} LV2_Atom_Object_Query;

static const LV2_Atom_Object_Query LV2_ATOM_OBJECT_QUERY_END = { 0, NULL };

/**
   Get an object's values for various keys.

   The value pointer of each item in `query` will be set to the location of
   the corresponding value in `object`.  Every value pointer in `query` MUST
   be initialised to NULL.  This function reads `object` in a single linear
   sweep.  By allocating `query` on the stack, objects can be "queried"
   quickly without allocating any memory.  This function is realtime safe.

   This function can only do "flat" queries; it is not smart enough to match
   variables in nested objects.

   For example:
   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   LV2_Atom_Object_Query q[] = {
       { urids.eg_name, &name },
       { urids.eg_age,  &age },
       LV2_ATOM_OBJECT_QUERY_END
   };
   lv2_atom_object_query(obj, q);
   // name and age are now set to the appropriate values in obj, or NULL.
   @endcode
*/
static inline int
lv2_atom_object_query(const LV2_Atom_Object* object,
                      LV2_Atom_Object_Query* query)
{
	int matches   = 0;
	int n_queries = 0;

	/* Count number of query keys so we can short-circuit when done */
	for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
		++n_queries;
	}

	LV2_ATOM_OBJECT_FOREACH(object, prop) {
		for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
			if (q->key == prop->key && !*q->value) {
				*q->value = &prop->value;
				if (++matches == n_queries) {
					return matches;
				}
				break;
			}
		}
	}
	return matches;
}

/**
   Body-only version of lv2_atom_object_get().
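
   For example (a minimal sketch, assuming `obj` is an LV2_Atom_Object* and
   `uris.name_key` is a hypothetical mapped URID), the following is
   equivalent to calling lv2_atom_object_get() on the whole object:
   @code
   const LV2_Atom* name = NULL;
   lv2_atom_object_body_get(obj->atom.size, &obj->body,
                            uris.name_key, &name,
                            0);
   @endcode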
*/
static inline int
lv2_atom_object_body_get(uint32_t size, const LV2_Atom_Object_Body* body, ...)
{
	int matches   = 0;
	int n_queries = 0;

	/* Count number of keys so we can short-circuit when done */
	va_list args;
	va_start(args, body);
	for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
		if (!va_arg(args, const LV2_Atom**)) {
			va_end(args);
			return -1;
		}
	}
	va_end(args);

	LV2_ATOM_OBJECT_BODY_FOREACH(body, size, prop) {
		va_start(args, body);
		for (int i = 0; i < n_queries; ++i) {
			uint32_t         qkey = va_arg(args, uint32_t);
			const LV2_Atom** qval = va_arg(args, const LV2_Atom**);
			if (qkey == prop->key && !*qval) {
				*qval = &prop->value;
				if (++matches == n_queries) {
					va_end(args);
					return matches;
				}
				break;
			}
		}
		va_end(args);
	}
	return matches;
}

/**
   Variable argument version of lv2_atom_object_query().

   This is nicer-looking in code, but a bit more error-prone since it is not
   type-safe and the argument list must be correctly terminated.

   The arguments should be a series of uint32_t key and const LV2_Atom** value
   pairs, terminated by a zero key.  The value pointers MUST be initialized to
   NULL.  For example:

   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   lv2_atom_object_get(obj,
                       uris.name_key, &name,
                       uris.age_key,  &age,
                       0);
   @endcode
*/
static inline int
lv2_atom_object_get(const LV2_Atom_Object* object, ...)
{
	int matches   = 0;
	int n_queries = 0;

	/* Count number of keys so we can short-circuit when done */
	va_list args;
	va_start(args, object);
	for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
		if (!va_arg(args, const LV2_Atom**)) {
			va_end(args);
			return -1;
		}
	}
	va_end(args);

	LV2_ATOM_OBJECT_FOREACH(object, prop) {
		va_start(args, object);
		for (int i = 0; i < n_queries; ++i) {
			uint32_t         qkey = va_arg(args, uint32_t);
			const LV2_Atom** qval = va_arg(args, const LV2_Atom**);
			if (qkey == prop->key && !*qval) {
				*qval = &prop->value;
				if (++matches == n_queries) {
					va_end(args);
					return matches;
				}
				break;
			}
		}
		va_end(args);
	}
	return matches;
}

/**
   Variable argument version of lv2_atom_object_query() with types.

   This is like lv2_atom_object_get(), but each entry has an additional
   parameter to specify the required type.  Only atoms with a matching type
   will be selected.

   The arguments should be a series of uint32_t key, const LV2_Atom**, uint32_t
   type triples, terminated by a zero key.  The value pointers MUST be
   initialized to NULL.  For example:

   @code
   const LV2_Atom_String* name = NULL;
   const LV2_Atom_Int*    age  = NULL;
   lv2_atom_object_get_typed(obj,
                             uris.name_key, &name, uris.atom_String,
                             uris.age_key,  &age,  uris.atom_Int,
                             0);
   @endcode
*/
static inline int
lv2_atom_object_get_typed(const LV2_Atom_Object* object, ...)
{
	int matches   = 0;
	int n_queries = 0;

	/* Count number of keys so we can short-circuit when done */
	va_list args;
	va_start(args, object);
	for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
		if (!va_arg(args, const LV2_Atom**) ||
		    !va_arg(args, uint32_t)) {
			va_end(args);
			return -1;
		}
	}
	va_end(args);

	LV2_ATOM_OBJECT_FOREACH(object, prop) {
		va_start(args, object);
		for (int i = 0; i < n_queries; ++i) {
			const uint32_t   qkey  = va_arg(args, uint32_t);
			const LV2_Atom** qval  = va_arg(args, const LV2_Atom**);
			const uint32_t   qtype = va_arg(args, uint32_t);
			if (!*qval && qkey == prop->key && qtype == prop->value.type) {
				*qval = &prop->value;
				if (++matches == n_queries) {
					va_end(args);
					return matches;
				}
				break;
			}
		}
		va_end(args);
	}
	return matches;
}

/**
   @}
   @}
*/

#ifdef __cplusplus
}  /* extern "C" */
#endif

#endif /* LV2_ATOM_UTIL_H */