1 /*
2 * Copyright (c) 2016-2021 Hanspeter Portner (dev@open-music-kontrollers.ch)
3 *
4 * This is free software: you can redistribute it and/or modify
5 * it under the terms of the Artistic License 2.0 as published by
6 * The Perl Foundation.
7 *
8 * This source is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * Artistic License 2.0 for more details.
12 *
13 * You should have received a copy of the Artistic License 2.0
14 * along the source as a COPYING file. If not, obtain it from
15 * http://www.perlfoundation.org/artistic_license_2_0.
16 */
17
18 #include <stdio.h>
19 #include <stdlib.h>
20
21 #include <eteroj.h>
22 #include <varchunk.h>
23 #include <osc.lv2/util.h>
24 #include <osc.lv2/forge.h>
25 #include <props.h>
26
27 #define NETATOM_IMPLEMENTATION
28 #include <netatom.lv2/netatom.h>
29
30 #define NS_RDF "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
31 #define BUF_SIZE 8192
32
33 #define MAX_NPROPS 1
34
typedef struct _atom_ser_t atom_ser_t;
typedef struct _plughandle_t plughandle_t;
typedef struct _plugstate_t plugstate_t;

// Growable byte buffer used as a custom sink for the LV2 atom forge
// (see _sink/_deref below).
struct _atom_ser_t {
	uint32_t size;   // current capacity of buf in bytes
	uint8_t *buf;    // heap-allocated storage, grown by _sink on demand
	uint32_t offset; // bytes written so far; next write position
};

// Plugin state exposed as properties (see defs[]).
struct _plugstate_t {
	int32_t synchronous; // bool: convert in run() (rt) instead of the worker thread
};
48
// Per-instance plugin handle.
struct _plughandle_t {
	LV2_URID_Map *map;           // host feature: URI -> URID mapping (required)
	LV2_URID_Unmap *unmap;       // host feature: URID -> URI mapping (required)
	LV2_Worker_Schedule *sched;  // host feature: worker scheduling (required)
	LV2_Log_Log *log;            // host feature: logging (optional)
	LV2_Log_Logger logger;       // convenience wrapper around log

	atom_ser_t ser;              // scratch sink buffer used by the worker forge

	const LV2_Atom_Sequence *event_in; // port 0
	LV2_Atom_Sequence *event_out;      // port 1
	LV2_Atom_Forge forge;
	LV2_OSC_URID osc_urid;

	// Context handed to the _unroll callback while iterating OSC packets.
	struct {
		LV2_Atom_Forge *forge;
		LV2_Atom_Forge_Ref *ref;
		int64_t frames;          // event timestamp to re-emit with
	} unroll;

	netatom_t *netatom;          // atom <-> network-byte-stream (de)serializer

	varchunk_t *to_worker;       // rt -> worker ringbuffer
	varchunk_t *from_worker;     // worker -> rt ringbuffer

	PROPS_T(props, MAX_NPROPS);
	plugstate_t state;
	plugstate_t stash;

	uint8_t buf [BUF_SIZE];      // fixed scratch buffer for (de)serialization
};
80
// OSC path under which serialized atoms are tunneled as blob arguments.
static const char *base_path = "/ninja";
82
83 static inline LV2_Atom_Forge_Ref
_sink(LV2_Atom_Forge_Sink_Handle handle,const void * buf,uint32_t size)84 _sink(LV2_Atom_Forge_Sink_Handle handle, const void *buf, uint32_t size)
85 {
86 atom_ser_t *ser = handle;
87
88 const LV2_Atom_Forge_Ref ref = ser->offset + 1;
89
90 const uint32_t new_offset = ser->offset + size;
91 if(new_offset > ser->size)
92 {
93 uint32_t new_size = ser->size << 1;
94 while(new_offset > new_size)
95 new_size <<= 1;
96
97 if(!(ser->buf = realloc(ser->buf, new_size)))
98 return 0; // realloc failed
99
100 ser->size = new_size;
101 }
102
103 memcpy(ser->buf + ser->offset, buf, size);
104 ser->offset = new_offset;
105
106 return ref;
107 }
108
109 static inline LV2_Atom *
_deref(LV2_Atom_Forge_Sink_Handle handle,LV2_Atom_Forge_Ref ref)110 _deref(LV2_Atom_Forge_Sink_Handle handle, LV2_Atom_Forge_Ref ref)
111 {
112 atom_ser_t *ser = handle;
113
114 const uint32_t offset = ref - 1;
115
116 return (LV2_Atom *)(ser->buf + offset);
117 }
118
// Property definitions exposed via the props extension.
static const props_def_t defs [MAX_NPROPS] = {
	{
		// toggles between synchronous (rt) and asynchronous (worker) conversion
		.property = ETEROJ_URI"#ninja_synchronous",
		.offset = offsetof(plugstate_t, synchronous),
		.type = LV2_ATOM__Bool
	}
};
126
127 static LV2_Handle
instantiate(const LV2_Descriptor * descriptor,double rate,const char * bundle_path,const LV2_Feature * const * features)128 instantiate(const LV2_Descriptor* descriptor, double rate,
129 const char *bundle_path, const LV2_Feature *const *features)
130 {
131 plughandle_t *handle = calloc(1, sizeof(plughandle_t));
132 if(!handle)
133 return NULL;
134 mlock(handle, sizeof(plughandle_t));
135
136 for(unsigned i=0; features[i]; i++)
137 {
138 if(!strcmp(features[i]->URI, LV2_URID__map))
139 handle->map = features[i]->data;
140 else if(!strcmp(features[i]->URI, LV2_URID__unmap))
141 handle->unmap = features[i]->data;
142 else if(!strcmp(features[i]->URI, LV2_WORKER__schedule))
143 handle->sched= features[i]->data;
144 else if(!strcmp(features[i]->URI, LV2_LOG__log))
145 handle->log = features[i]->data;
146 }
147
148 if(!handle->map)
149 {
150 fprintf(stderr, "%s: Host does not support urid:map\n", descriptor->URI);
151 free(handle);
152 return NULL;
153 }
154 if(!handle->unmap)
155 {
156 fprintf(stderr, "%s: Host does not support urid:unmap\n", descriptor->URI);
157 free(handle);
158 return NULL;
159 }
160 if(!handle->sched)
161 {
162 fprintf(stderr, "%s: Host does not support work:schedule\n", descriptor->URI);
163 free(handle);
164 return NULL;
165 }
166
167 lv2_atom_forge_init(&handle->forge, handle->map);
168 lv2_osc_urid_init(&handle->osc_urid, handle->map);
169
170 if(handle->log)
171 {
172 lv2_log_logger_init(&handle->logger, handle->map, handle->log);
173 }
174
175 if(!props_init(&handle->props, descriptor->URI,
176 defs, MAX_NPROPS, &handle->state, &handle->stash,
177 handle->map, handle))
178 {
179 free(handle);
180 return NULL;
181 }
182
183 handle->netatom= netatom_new(handle->map, handle->unmap, true);
184 if(!handle->netatom)
185 {
186 netatom_free(handle->netatom);
187 free(handle);
188 return NULL;
189 }
190
191 handle->to_worker = varchunk_new(BUF_SIZE, true);
192 handle->from_worker = varchunk_new(BUF_SIZE, true);
193
194 if(!handle->to_worker || !handle->from_worker)
195 {
196 free(handle);
197 return NULL;
198 }
199
200 handle->ser.size = 2018;
201 handle->ser.offset = 0;
202 handle->ser.buf = malloc(handle->ser.size); //TODO check
203
204 return handle;
205 }
206
207 static void
connect_port(LV2_Handle instance,uint32_t port,void * data)208 connect_port(LV2_Handle instance, uint32_t port, void *data)
209 {
210 plughandle_t *handle = (plughandle_t *)instance;
211
212 switch(port)
213 {
214 case 0:
215 handle->event_in = (const LV2_Atom_Sequence *)data;
216 break;
217 case 1:
218 handle->event_out = (LV2_Atom_Sequence *)data;
219 break;
220 default:
221 break;
222 }
223 }
224
225 static void
_unroll(const char * path,const LV2_Atom_Tuple * arguments,void * data)226 _unroll(const char *path, const LV2_Atom_Tuple *arguments, void *data)
227 {
228 plughandle_t *handle = data;
229 LV2_Atom_Forge *forge = handle->unroll.forge;
230
231 if(strcmp(path, base_path))
232 return;
233
234 const LV2_Atom *itr = lv2_atom_tuple_begin(arguments);
235 if(itr->type != forge->Chunk)
236 return;
237
238 memcpy(handle->buf, LV2_ATOM_BODY(itr), itr->size);
239
240 const LV2_Atom *atom = netatom_deserialize(handle->netatom,
241 handle->buf, itr->size);
242 if(atom)
243 {
244 if(*handle->unroll.ref)
245 *handle->unroll.ref = lv2_atom_forge_frame_time(forge, handle->unroll.frames);
246 if(*handle->unroll.ref)
247 *handle->unroll.ref = lv2_atom_forge_write(forge, atom, lv2_atom_total_size(atom));
248 }
249 else if(handle->log)
250 {
251 lv2_log_trace(&handle->logger, "%s: failed to deserialize\n", __func__);
252 }
253 }
254
255 static void
_convert_seq(plughandle_t * handle,LV2_Atom_Forge * forge,const LV2_Atom_Sequence * seq,LV2_Atom_Forge_Ref * ref)256 _convert_seq(plughandle_t *handle, LV2_Atom_Forge *forge, const LV2_Atom_Sequence *seq,
257 LV2_Atom_Forge_Ref *ref)
258 {
259 LV2_OSC_URID *osc_urid = &handle->osc_urid;
260
261 LV2_ATOM_SEQUENCE_FOREACH(seq, ev)
262 {
263 const LV2_Atom *atom = &ev->body;
264 const LV2_Atom_Object *obj = (const LV2_Atom_Object *)&ev->body;
265
266 if(lv2_osc_is_message_or_bundle_type(osc_urid, obj->body.otype))
267 {
268 handle->unroll.frames = ev->time.frames;
269 handle->unroll.ref = ref;
270 handle->unroll.forge = forge;
271
272 lv2_osc_unroll(osc_urid, obj, _unroll, handle);
273 }
274 else
275 {
276 memcpy(handle->buf, atom, lv2_atom_total_size(atom)); //FIXME check < BUF_SIZE
277
278 size_t sz;
279 const uint8_t *buf = netatom_serialize(handle->netatom, (LV2_Atom *)handle->buf, BUF_SIZE, &sz);
280 if(buf)
281 {
282 if(*ref)
283 *ref = lv2_atom_forge_frame_time(forge, ev->time.frames);
284 if(*ref)
285 *ref = lv2_osc_forge_message_vararg(forge, osc_urid, base_path, "b", sz, buf);
286 }
287 else if(handle->log)
288 {
289 lv2_log_trace(&handle->logger, "%s: failed to serialize\n", __func__);
290 }
291 }
292 }
293 }
294
295 static void
run(LV2_Handle instance,uint32_t nsamples)296 run(LV2_Handle instance, uint32_t nsamples)
297 {
298 plughandle_t *handle = (plughandle_t *)instance;
299
300 // prepare osc atom forge
301 const uint32_t capacity = handle->event_out->atom.size;
302 lv2_atom_forge_set_buffer(&handle->forge, (uint8_t *)handle->event_out, capacity);
303 LV2_Atom_Forge_Frame frame;
304 LV2_Atom_Forge_Ref ref = lv2_atom_forge_sequence_head(&handle->forge, &frame, 0);
305
306 props_idle(&handle->props, &handle->forge, 0, &ref);
307
308 LV2_ATOM_SEQUENCE_FOREACH(handle->event_in, ev)
309 {
310 const LV2_Atom_Object *obj = (const LV2_Atom_Object *)&ev->body;
311
312 props_advance(&handle->props, &handle->forge, 0, obj, &ref); //NOTE 0!
313 }
314
315 if(handle->state.synchronous)
316 {
317 _convert_seq(handle, &handle->forge, handle->event_in, &ref);
318 }
319 else // asynchronous
320 {
321 // move input events to worker thread
322 if(handle->event_in->atom.size > sizeof(LV2_Atom_Sequence_Body))
323 {
324 const size_t size = lv2_atom_total_size(&handle->event_in->atom);
325 LV2_Atom_Sequence *seq;
326 if((seq = varchunk_write_request(handle->to_worker, size)))
327 {
328 memcpy(seq, handle->event_in, size);
329 varchunk_write_advance(handle->to_worker, size);
330
331 const int32_t dummy;
332 handle->sched->schedule_work(handle->sched->handle, sizeof(int32_t), &dummy);
333 }
334 else if(handle->log)
335 {
336 lv2_log_trace(&handle->logger, "%s: ringbuffer overflow\n", __func__);
337 }
338 }
339
340 {
341 // move output events from worker thread
342 size_t size;
343 const LV2_Atom_Sequence *seq;
344 if((seq = varchunk_read_request(handle->from_worker, &size)))
345 {
346 LV2_ATOM_SEQUENCE_FOREACH(seq, ev)
347 {
348 if(ref)
349 ref = lv2_atom_forge_frame_time(&handle->forge, ev->time.frames);
350 if(ref)
351 ref = lv2_atom_forge_write(&handle->forge, &ev->body, lv2_atom_total_size(&ev->body));
352 }
353 varchunk_read_advance(handle->from_worker);
354 }
355 }
356 }
357
358 if(ref)
359 lv2_atom_forge_pop(&handle->forge, &frame);
360 else
361 lv2_atom_sequence_clear(handle->event_out);
362 }
363
364 static void
cleanup(LV2_Handle instance)365 cleanup(LV2_Handle instance)
366 {
367 plughandle_t *handle = (plughandle_t *)instance;
368
369 if(handle->ser.buf)
370 free(handle->ser.buf);
371 if(handle->to_worker)
372 varchunk_free(handle->to_worker);
373 if(handle->from_worker)
374 varchunk_free(handle->from_worker);
375 netatom_free(handle->netatom);
376 munlock(handle, sizeof(plughandle_t));
377 free(handle);
378 }
379
// non-rt thread
// Worker callback: drain every sequence queued by run() on to_worker,
// convert it with a cloned forge writing into the growable sink buffer,
// and push the resulting sequence onto from_worker for run() to pick up.
// The respond/target/size/body arguments are unused — the queued payload
// lives in the ringbuffer, the scheduled body is only a wakeup token.
static LV2_Worker_Status
_work(LV2_Handle instance,
	LV2_Worker_Respond_Function respond,
	LV2_Worker_Respond_Handle target,
	uint32_t size,
	const void *body)
{
	plughandle_t *handle = instance;
	LV2_Atom_Forge forge = handle->forge; // clone forge
	LV2_OSC_URID *osc_urid = &handle->osc_urid;

	(void)respond;
	(void)target;
	(void)size;
	(void)body;

	size_t _size;
	const LV2_Atom_Sequence *seq;
	while((seq = varchunk_read_request(handle->to_worker, &_size)))
	{
		// reset the sink buffer and point the cloned forge at it
		handle->ser.offset = 0;
		lv2_atom_forge_set_sink(&forge, _sink, _deref, &handle->ser);
		LV2_Atom_Forge_Frame frame;
		LV2_Atom_Forge_Ref ref = lv2_atom_forge_sequence_head(&forge, &frame, 0);

		_convert_seq(handle, &forge, seq, &ref);

		if(ref)
		{
			lv2_atom_forge_pop(&forge, &frame);
		}
		else if(handle->log)
		{
			lv2_log_trace(&handle->logger, "%s: failed to forge\n", __func__);
		}

		// the forged sequence now sits at the start of the sink buffer
		seq = (const LV2_Atom_Sequence *)handle->ser.buf;
		if(seq->atom.size > sizeof(LV2_Atom_Sequence_Body))
		{
			const size_t len = lv2_atom_total_size(&seq->atom);
			LV2_Atom_Sequence *dst;
			if((dst = varchunk_write_request(handle->from_worker, len)))
			{
				memcpy(dst, seq, len);
				varchunk_write_advance(handle->from_worker, len);
			}
			else if(handle->log)
			{
				lv2_log_trace(&handle->logger, "%s: ringbuffer overflow\n", __func__);
			}
		}

		varchunk_read_advance(handle->to_worker);
	}

	return LV2_WORKER_SUCCESS;
}
438
439 // rt-thread
440 static LV2_Worker_Status
_work_response(LV2_Handle instance,uint32_t size,const void * body)441 _work_response(LV2_Handle instance, uint32_t size, const void *body)
442 {
443 // do nothing
444
445 return LV2_WORKER_SUCCESS;
446 }
447
448 // rt-thread
449 static LV2_Worker_Status
_end_run(LV2_Handle instance)450 _end_run(LV2_Handle instance)
451 {
452 // do nothing
453
454 return LV2_WORKER_SUCCESS;
455 }
456
// LV2 worker extension vtable, returned by extension_data().
static const LV2_Worker_Interface work_iface = {
	.work = _work,
	.work_response = _work_response,
	.end_run = _end_run
};
462
463 static LV2_State_Status
_state_save(LV2_Handle instance,LV2_State_Store_Function store,LV2_State_Handle state,uint32_t flags,const LV2_Feature * const * features)464 _state_save(LV2_Handle instance, LV2_State_Store_Function store,
465 LV2_State_Handle state, uint32_t flags,
466 const LV2_Feature *const *features)
467 {
468 plughandle_t *handle = (plughandle_t *)instance;
469
470 return props_save(&handle->props, store, state, flags, features);
471 }
472
473 static LV2_State_Status
_state_restore(LV2_Handle instance,LV2_State_Retrieve_Function retrieve,LV2_State_Handle state,uint32_t flags,const LV2_Feature * const * features)474 _state_restore(LV2_Handle instance, LV2_State_Retrieve_Function retrieve,
475 LV2_State_Handle state, uint32_t flags,
476 const LV2_Feature *const *features)
477 {
478 plughandle_t *handle = (plughandle_t *)instance;
479
480 return props_restore(&handle->props, retrieve, state, flags, features);
481 }
482
// LV2 state extension vtable, returned by extension_data().
static const LV2_State_Interface state_iface = {
	.save = _state_save,
	.restore = _state_restore
};
487
488 static const void*
extension_data(const char * uri)489 extension_data(const char* uri)
490 {
491 if(!strcmp(uri, LV2_WORKER__interface))
492 return &work_iface;
493 else if(!strcmp(uri, LV2_STATE__interface))
494 return &state_iface;
495
496 return NULL;
497 }
498
// Plugin descriptor; picked up by the library's lv2_descriptor() entry point.
const LV2_Descriptor eteroj_ninja = {
	.URI = ETEROJ_NINJA_URI,
	.instantiate = instantiate,
	.connect_port = connect_port,
	.activate = NULL,
	.run = run,
	.deactivate = NULL,
	.cleanup = cleanup,
	.extension_data = extension_data
};
509