/*
 * Copyright (c) 2015-2016 Hanspeter Portner (dev@open-music-kontrollers.ch)
 *
 * This is free software: you can redistribute it and/or modify
 * it under the terms of the Artistic License 2.0 as published by
 * The Perl Foundation.
 *
 * This source is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * Artistic License 2.0 for more details.
 *
 * You should have received a copy of the Artistic License 2.0
 * along the source as a COPYING file. If not, obtain it from
 * http://www.perlfoundation.org/artistic_license_2_0.
 */

#include <synthpod_app_private.h>
#include <synthpod_patcher.h>

#include <osc.lv2/util.h>
#include <osc.lv2/forge.h>

#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__OpenBSD__)
#	include <pthread_np.h>
typedef cpuset_t cpu_set_t;
#endif

// non-rt
void
sp_app_activate(sp_app_t *app)
{
	//TODO
}

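// collect the system output ports of all mods into app->system_sources
// (the sibling function below does the same for inputs into app->system_sinks),
// terminated by a SYSTEM_PORT_NONE sentinel entry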
static inline void
_sp_app_update_system_sources(sp_app_t *app)
{
	int num_system_sources = 0;

	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		if(!mod->system_ports) // has system ports?
			continue; // skip

		for(unsigned p=0; p<mod->num_ports; p++)
		{
			port_t *port = &mod->ports[p];

			if(port->sys.type == SYSTEM_PORT_NONE)
				continue; // skip

			if(port->direction == PORT_DIRECTION_OUTPUT)
			{
				app->system_sources[num_system_sources].type = port->sys.type;
				app->system_sources[num_system_sources].buf = PORT_BASE_ALIGNED(port);
				app->system_sources[num_system_sources].sys_port = port->sys.data;
				num_system_sources += 1;
			}
		}
	}

	// sentinel
	app->system_sources[num_system_sources].type = SYSTEM_PORT_NONE;
	app->system_sources[num_system_sources].buf = NULL;
	app->system_sources[num_system_sources].sys_port = NULL;
}

static inline void
_sp_app_update_system_sinks(sp_app_t *app)
{
	int num_system_sinks = 0;

	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		if(!mod->system_ports) // has system ports?
			continue;

		for(unsigned p=0; p<mod->num_ports; p++)
		{
			port_t *port = &mod->ports[p];

			if(port->sys.type == SYSTEM_PORT_NONE)
				continue; // skip

			if(port->direction == PORT_DIRECTION_INPUT)
			{
				app->system_sinks[num_system_sinks].type = port->sys.type;
				app->system_sinks[num_system_sinks].buf = PORT_BASE_ALIGNED(port);
				app->system_sinks[num_system_sinks].sys_port = port->sys.data;
				num_system_sinks += 1;
			}
		}
	}

	// sentinel
	app->system_sinks[num_system_sinks].type = SYSTEM_PORT_NONE;
	app->system_sinks[num_system_sinks].buf = NULL;
	app->system_sinks[num_system_sinks].sys_port = NULL;
}

const sp_app_system_source_t *
sp_app_get_system_sources(sp_app_t *app)
{
	_sp_app_update_system_sources(app);

	return app->system_sources;
}

const sp_app_system_sink_t *
sp_app_get_system_sinks(sp_app_t *app)
{
	_sp_app_update_system_sinks(app);

	return app->system_sinks;
}

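// bridge the deprecated LV2 uri-map interface to the driver's LV2_URID_Map,
// so plugins still using uri-map get the same URIDs as everybody else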
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
__non_realtime static uint32_t
_uri_to_id(LV2_URI_Map_Callback_Data handle, const char *_, const char *uri)
{
	sp_app_t *app = handle;

	LV2_URID_Map *map = app->driver->map;

	return map->map(map->handle, uri);
}
#pragma GCC diagnostic pop

__realtime static inline bool
_sp_app_has_source_automations(mod_t *mod)
{
	for(unsigned i = 0; i < MAX_AUTOMATIONS; i++)
	{
		auto_t *automation = &mod->automations[i];

		if(  (automation->type != AUTO_TYPE_NONE)
			&& automation->src_enabled )
		{
			return true; // has automations
		}
	}

	return false; // has no automations
}

__realtime static inline auto_t *
_sp_app_find_automation_for_port(mod_t *mod, uint32_t index)
{
	for(unsigned i = 0; i < MAX_AUTOMATIONS; i++)
	{
		auto_t *automation = &mod->automations[i];

		if(automation->type == AUTO_TYPE_NONE)
			continue; // skip empty slot

		if( (automation->property == 0) && (automation->index == index) )
			return automation; // found match
	}

	return NULL;
}

__realtime static inline auto_t *
_sp_app_find_automation_for_property(mod_t *mod, LV2_URID property)
{
	for(unsigned i = 0; i < MAX_AUTOMATIONS; i++)
	{
		auto_t *automation = &mod->automations[i];

		if(automation->type == AUTO_TYPE_NONE)
			continue; // skip empty slot

		if(automation->property == property)
			return automation; // found match
	}

	return NULL;
}

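// forge one automation output event for a changed value: for MIDI automations
// a 3-byte control-change message (0xb0 | channel, controller, floor(value)),
// for OSC automations an OSC message on the automation's path carrying the
// value as a double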
__realtime static inline LV2_Atom_Forge_Ref
_sp_app_automation_out(sp_app_t *app, LV2_Atom_Forge *forge, auto_t *automation, uint32_t frames, double value)
{
	LV2_Atom_Forge_Ref ref = 0;

	if(automation->type == AUTO_TYPE_MIDI)
	{
		midi_auto_t *mauto = &automation->midi;

		const uint8_t channel = (mauto->channel >= 0)
			? mauto->channel
			: 0;
		const uint8_t controller = (mauto->controller >= 0)
			? mauto->controller
			: 0;
		const uint8_t msg [3] = {0xb0 | channel, controller, floor(value)};

		ref = lv2_atom_forge_frame_time(forge, frames);
		if(ref)
			ref = lv2_atom_forge_atom(forge, 3, app->regs.port.midi.urid);
		if(ref)
			ref = lv2_atom_forge_write(forge, msg, 3);
	}
	else if(automation->type == AUTO_TYPE_OSC)
	{
		osc_auto_t *oauto = &automation->osc;

		ref = lv2_atom_forge_frame_time(forge, frames);
		if(ref)
			ref = lv2_osc_forge_message_vararg(forge, &app->osc_urid, oauto->path, "d", value); //FIXME what type should be used?
	}

	return ref;
}

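// run a single mod for one period: multiplex its input ports, flush any
// pending worker responses, call the plugin's run() unless it is bypassed or
// disabled, forge automation output events for changed controls/properties
// and accumulate per-mod profiling data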
__realtime static inline void
_sp_app_process_single_run(mod_t *mod, uint32_t nsamples)
{
	sp_app_t *app = mod->app;

	struct timespec mod_t1;
	struct timespec mod_t2;
	cross_clock_gettime(&app->clk_mono, &mod_t1);

	// multiplex multiple sources to single sink where needed
	for(int p=mod->num_ports-1; p>=0; p--)
	{
		port_t *port = &mod->ports[p];

		if(port->direction == PORT_DIRECTION_OUTPUT)
		{
			if(  (port->type == PORT_TYPE_ATOM)
				&& (port->atom.buffer_type == PORT_BUFFER_TYPE_SEQUENCE)
				&& (!mod->system_ports) ) // don't overwrite source buffer events
			{
				LV2_Atom_Sequence *seq = PORT_BASE_ALIGNED(port);
				seq->atom.size = port->size;
				seq->atom.type = app->forge.Sequence;
				seq->body.unit = 0;
				seq->body.pad = 0;
			}
		}
		else // PORT_DIRECTION_INPUT
		{
			if(port->driver->multiplex)
				port->driver->multiplex(app, port, nsamples);
		}
	}

	mod_worker_t *mod_worker = &mod->mod_worker;
	if(mod_worker->app_from_worker)
	{
		const void *payload;
		size_t size;
		while((payload = varchunk_read_request(mod_worker->app_from_worker, &size)))
		{
			if(mod->worker.iface && mod->worker.iface->work_response)
			{
				mod->worker.iface->work_response(mod->handle, size, payload);
				//TODO check return status
			}

			varchunk_read_advance(mod_worker->app_from_worker);
		}

		// handle end of work
		if(mod->worker.iface && mod->worker.iface->end_run)
		{
			mod->worker.iface->end_run(mod->handle);
		}
	}

	// is module currently loading a preset asynchronously?
	if(!mod->bypassed)
	{
		// run plugin
		if(!mod->disabled)
		{
			lilv_instance_run(mod->inst, nsamples);
		}
	}

	// handle automation output
	{
		const unsigned ao = mod->num_ports - 1;
		port_t *auto_port = &mod->ports[ao];
		LV2_Atom_Sequence *seq = PORT_BASE_ALIGNED(auto_port);
		//const uint32_t capacity = seq->atom.size;
		const uint32_t capacity = PORT_SIZE(auto_port);
		LV2_Atom_Forge_Frame frame;

		LV2_Atom_Forge forge = app->forge; //FIXME do this only once
		lv2_atom_forge_set_buffer(&forge, (uint8_t *)seq, capacity);
		LV2_Atom_Forge_Ref ref = lv2_atom_forge_sequence_head(&forge, &frame, 0);

		if(_sp_app_has_source_automations(mod))
		{
			uint32_t t0 = 0;

			for(unsigned p=0; p<mod->num_ports; p++)
			{
				port_t *port = &mod->ports[p];

				if(port->type == PORT_TYPE_CONTROL)
				{
					const float *val = PORT_BASE_ALIGNED(port);

					if(  (*val != port->control.last)
						|| (port->control.auto_dirty) ) // has changed since last cycle
					{
						auto_t *automation = _sp_app_find_automation_for_port(mod, p);

						if(automation && automation->src_enabled)
						{
							const double value = (*val - automation->add) / automation->mul;

							if(ref)
								ref = _sp_app_automation_out(app, &forge, automation, t0, value);
						}

						port->control.auto_dirty = false;
					}
				}
				else if( (port->type == PORT_TYPE_ATOM)
					&& port->atom.patchable )
				{
					const LV2_Atom_Sequence *patch_seq = PORT_BASE_ALIGNED(port);

					LV2_ATOM_SEQUENCE_FOREACH(patch_seq, ev)
					{
						const LV2_Atom_Object *obj = (const LV2_Atom_Object *)&ev->body;

						if(  lv2_atom_forge_is_object_type(&forge, obj->atom.type)
							&& (obj->body.otype == app->regs.patch.set.urid) ) //FIXME also consider patch:Put
						{
							const LV2_Atom_URID *patch_property = NULL;
							const LV2_Atom *patch_value = NULL;

							lv2_atom_object_get(obj,
								app->regs.patch.property.urid, &patch_property,
								app->regs.patch.value.urid, &patch_value,
								0);

							if(!patch_property || (patch_property->atom.type != forge.URID) || !patch_value)
								continue;

							auto_t *automation = _sp_app_find_automation_for_property(mod, patch_property->body);
							if(automation && automation->src_enabled && (patch_value->type == automation->range))
							{
								double val = 0.0;

								if(patch_value->type == forge.Bool)
									val = ((const LV2_Atom_Bool *)patch_value)->body;
								else if(patch_value->type == forge.Int)
									val = ((const LV2_Atom_Int *)patch_value)->body;
								else if(patch_value->type == forge.Long)
									val = ((const LV2_Atom_Long *)patch_value)->body;
								else if(patch_value->type == forge.Float)
									val = ((const LV2_Atom_Float *)patch_value)->body;
								else if(patch_value->type == forge.Double)
									val = ((const LV2_Atom_Double *)patch_value)->body;
								//FIXME support more types

								const double value = (val - automation->add) / automation->mul;

								if(ref)
									ref = _sp_app_automation_out(app, &forge, automation, ev->time.frames, value);

								t0 = ev->time.frames;
							}
						}
					}
				}
			}
		}

		if(ref)
			lv2_atom_forge_pop(&forge, &frame);
		else
		{
			lv2_atom_sequence_clear(seq);
			sp_app_log_trace(app, "%s: automation out buffer overflow\n", __func__);
		}
	}

	cross_clock_gettime(&app->clk_mono, &mod_t2);

	// profiling
	const unsigned run_time = (mod_t2.tv_sec - mod_t1.tv_sec)*1000000000
		+ mod_t2.tv_nsec - mod_t1.tv_nsec;
	mod->prof.sum += run_time;

	if(run_time < mod->prof.min)
		mod->prof.min = run_time;
	else if(run_time > mod->prof.max)
		mod->prof.max = run_time;
}

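// notify the UI of a learned automation: forge a patcher 'add' object on the
// automation list property containing the MIDI (or, below, OSC) automation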
__realtime static void
_sync_midi_automation_to_ui(sp_app_t *app, mod_t *mod, auto_t *automation)
{
	LV2_Atom *answer = _sp_app_to_ui_request_atom(app);
	if(answer)
	{
		const LV2_URID subj = 0; //FIXME
		const int32_t sn = 0; //FIXME
		const LV2_URID prop = app->regs.synthpod.automation_list.urid;
		port_t *port = &mod->ports[automation->index]; //FIXME handle prop

		LV2_Atom_Forge_Frame frame [3];
		LV2_Atom_Forge_Ref ref = synthpod_patcher_add_object(
			&app->regs, &app->forge, &frame[0], subj, sn, prop);

		if(ref)
			ref = _sp_app_forge_midi_automation(app, &frame[2], mod, port, automation);

		if(ref)
		{
			synthpod_patcher_pop(&app->forge, frame, 2);
			_sp_app_to_ui_advance_atom(app, answer);
		}
		else
		{
			_sp_app_to_ui_overflow(app);
		}
	}
	else
	{
		_sp_app_to_ui_overflow(app);
	}
}

__realtime static void
_sync_osc_automation_to_ui(sp_app_t *app, mod_t *mod, auto_t *automation)
{
	LV2_Atom *answer = _sp_app_to_ui_request_atom(app);
	if(answer)
	{
		const LV2_URID subj = 0; //FIXME
		const int32_t sn = 0; //FIXME
		const LV2_URID prop = app->regs.synthpod.automation_list.urid;
		port_t *port = &mod->ports[automation->index]; //FIXME handle prop

		LV2_Atom_Forge_Frame frame [3];
		LV2_Atom_Forge_Ref ref = synthpod_patcher_add_object(
			&app->regs, &app->forge, &frame[0], subj, sn, prop);

		if(ref)
			ref = _sp_app_forge_osc_automation(app, &frame[2], mod, port, automation);

		if(ref)
		{
			synthpod_patcher_pop(&app->forge, frame, 2);
			_sp_app_to_ui_advance_atom(app, answer);
		}
		else
		{
			_sp_app_to_ui_overflow(app);
		}
	}
	else
	{
		_sp_app_to_ui_overflow(app);
	}
}

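// per-mod post-processing after run(): transfer subscribed port values to the
// UI (rate-limited for sparse updates), publish a new inline-display surface
// if one is ready, and sync freshly learned automations to the UI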
__realtime static inline void
_sp_app_process_single_post(mod_t *mod, uint32_t nsamples, bool sparse_update_timeout)
{
	sp_app_t *app = mod->app;

	// handle mod ui post
	for(unsigned i=0; i<mod->num_ports; i++)
	{
		port_t *port = &mod->ports[i];

		// no notification/subscription and no support for patch:Message
		const bool subscribed = port->subscriptions != 0;
		if(!subscribed)
			continue; // skip this port
		if( (port->type == PORT_TYPE_ATOM) && !port->atom.patchable)
			continue; // skip this port

		if(port->driver->transfer && (port->driver->sparse_update ? sparse_update_timeout : true))
			port->driver->transfer(app, port, nsamples);
	}

	// handle inline display
	if(mod->idisp.iface)
	{
		mod->idisp.counter += nsamples;

		// trylock
		if(!atomic_flag_test_and_set(&mod->idisp.lock))
		{
			const LV2_Inline_Display_Image_Surface *surf = mod->idisp.surf;
			if(surf)
			{
				// to nk
				LV2_Atom *answer = _sp_app_to_ui_request_atom(app);
				if(answer)
				{
					LV2_Atom_Forge_Frame frame [3];

					LV2_Atom_Forge_Ref ref = synthpod_patcher_set_object(&app->regs, &app->forge, &frame[0],
						mod->urn, 0, app->regs.idisp.surface.urid); //TODO seqn
					if(ref)
						ref = lv2_atom_forge_tuple(&app->forge, &frame[1]);
					if(ref)
						ref = lv2_atom_forge_int(&app->forge, surf->width);
					if(ref)
						ref = lv2_atom_forge_int(&app->forge, surf->height);
					if(ref)
						ref = lv2_atom_forge_vector_head(&app->forge, &frame[2], sizeof(int32_t), app->forge.Int);
					if(surf->stride == surf->width * sizeof(uint32_t))
					{
						if(ref)
							ref = lv2_atom_forge_write(&app->forge, surf->data, surf->height * surf->stride);
					}
					else
					{
						for(int h = 0; h < surf->height; h++)
						{
							const uint8_t *row = &surf->data[surf->stride * h];

							if(ref)
								ref = lv2_atom_forge_raw(&app->forge, row, surf->width * sizeof(uint32_t));
						}
						if(ref)
							lv2_atom_forge_pad(&app->forge, surf->height * surf->width * sizeof(uint32_t));
					}

					if(ref)
						synthpod_patcher_pop(&app->forge, frame, 3);

					if(ref)
					{
						_sp_app_to_ui_advance_atom(app, answer);
					}
					else
					{
						_sp_app_to_ui_overflow(app);
					}
				}
				else
				{
					_sp_app_to_ui_overflow(app);
				}

				mod->idisp.surf = NULL; // invalidate
			}

			// unlock
			atomic_flag_clear(&mod->idisp.lock);
		}
	}

	// handle automation learn
	for(unsigned i = 0; i < MAX_AUTOMATIONS; i++)
	{
		auto_t *automation = &mod->automations[i];

		if(automation->sync)
		{
			if(automation->type == AUTO_TYPE_MIDI)
			{
				_sync_midi_automation_to_ui(app, mod, automation);
			}
			else if(automation->type == AUTO_TYPE_OSC)
			{
				_sync_osc_automation_to_ui(app, mod, automation);
			}

			automation->sync = false;
		}
	}
}

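// one scheduling pass of a DSP worker: scan mods from 'head' onward, run
// every mod whose ref_count has dropped to zero (all of its sources have been
// processed) and decrement the ref_count of its sinks; returns the first
// index that still has unmet dependencies, or -1 if no work is left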
__realtime static inline int
_dsp_slave_fetch(dsp_master_t *dsp_master, int head)
{
	sp_app_t *app = (void *)dsp_master - offsetof(sp_app_t, dsp_master);

	const unsigned M = head;
	head = -1; // assume no more work left

	for(unsigned m=M; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];
		dsp_client_t *dsp_client = &mod->dsp_client;

		int expected = 0;
		const int desired = -1; // mark as done
		const bool match = atomic_compare_exchange_weak(&dsp_client->ref_count,
			&expected, desired);
		if(match) // needs to run now
		{
			_sp_app_process_single_run(mod, dsp_master->nsamples);

			for(unsigned j=0; j<dsp_client->num_sinks; j++)
			{
				dsp_client_t *sink = dsp_client->sinks[j];
				const int32_t ref_count = atomic_fetch_sub(&sink->ref_count, 1);
				assert(ref_count >= 0);
			}
		}
		else if(expected >= 0) // needs to run later
		{
			if(head == -1) // only set heading position once per loop
			{
				head = m;
			}
		}
	}

	return head;
}

__realtime static inline void
_dsp_slave_spin(sp_app_t *app, dsp_master_t *dsp_master, bool post)
{
	int head = 0;

	while(!atomic_load(&dsp_master->emergency_exit))
	{
		head = _dsp_slave_fetch(dsp_master, head);
		if(head == -1) // no more work left
		{
			break;
		}
	}

	if(post)
	{
		sem_post(&dsp_master->sem);
	}
}

__non_realtime static void *
_dsp_slave_thread(void *data)
{
	dsp_slave_t *dsp_slave = data;
	dsp_master_t *dsp_master = dsp_slave->dsp_master;
	sp_app_t *app = (void *)dsp_master - offsetof(sp_app_t, dsp_master);
	const int num = dsp_slave - dsp_master->dsp_slaves + 1;
	//printf("thread: %i\n", num);

	struct sched_param schedp;
	memset(&schedp, 0, sizeof(struct sched_param));
	schedp.sched_priority = app->driver->audio_prio - 1;

	const pthread_t self = pthread_self();
	if(pthread_setschedparam(self, SCHED_FIFO, &schedp))
		sp_app_log_error(app, "%s: pthread_setschedparam error\n", __func__);

	if(app->driver->cpu_affinity)
	{
		cpu_set_t cpuset;
		CPU_ZERO(&cpuset);
		CPU_SET(num, &cpuset);
		if(pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset))
			sp_app_log_error(app, "%s: pthread_setaffinity_np error\n", __func__);
	}

	while(true)
	{
		sem_wait(&dsp_slave->sem);

		_dsp_slave_spin(app, dsp_master, true);

		if(atomic_load(&dsp_master->kill))
			break;

		//sched_yield();
	}

	return NULL;
}

__realtime static inline void
_dsp_master_post(dsp_master_t *dsp_master, unsigned num)
{
	for(unsigned i=0; i<num; i++)
	{
		dsp_slave_t *dsp_slave = &dsp_master->dsp_slaves[i];

		sem_post(&dsp_slave->sem);
	}
}

__realtime static inline void
_dsp_master_wait(sp_app_t *app, dsp_master_t *dsp_master, unsigned num)
{
	// derive timeout
	struct timespec to;
	cross_clock_gettime(&app->clk_real, &to);
	to.tv_sec += 1; // if workers have not finished in due 1s, do emergency exit!

	// wait for worker threads to have finished
	for(unsigned c = 0; c < num; )
	{
		if(sem_timedwait(&dsp_master->sem, &to) == -1)
		{
			switch(errno)
			{
				case ETIMEDOUT:
				{
					fprintf(stderr, "%s: taking emergency exit\n", __func__);
					atomic_store(&dsp_master->emergency_exit, true);
				} continue;
				case EINTR:
				{
					// nothing
				} continue;
			}
		}

		c++;
	}
}

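// run one period of the mod graph in parallel: seed each mod's ref_count with
// its number of sources, wake up to (concurrent - 1) slave threads, take part
// in the processing on the calling audio thread, then wait for the slaves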
__realtime static inline void
_dsp_master_process(sp_app_t *app, dsp_master_t *dsp_master, unsigned nsamples)
{
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];
		dsp_client_t *dsp_client = &mod->dsp_client;

		atomic_store(&dsp_client->ref_count, dsp_client->num_sources);
	}

	dsp_master->nsamples = nsamples;

	unsigned num_slaves = dsp_master->concurrent - 1;
	if(num_slaves > dsp_master->num_slaves)
		num_slaves = dsp_master->num_slaves;

	_dsp_master_post(dsp_master, num_slaves); // wake up other slaves
	_dsp_slave_spin(app, dsp_master, false); // runs jobs itself
	_dsp_master_wait(app, dsp_master, num_slaves);
}

void
_sp_app_reset(sp_app_t *app)
{
	// remove existing modules
	int num_mods = app->num_mods;

	app->num_mods = 0;

	for(int m=0; m<num_mods; m++)
		_sp_app_mod_del(app, app->mods[m]);
}

void
_sp_app_populate(sp_app_t *app)
{
	const char *uri_str;
	mod_t *mod;

	// inject source mod
	uri_str = SYNTHPOD_PREFIX"source";
	mod = _sp_app_mod_add(app, uri_str, 0);
	if(mod)
	{
		app->mods[app->num_mods] = mod;
		app->num_mods += 1;
	}
	else
	{
		sp_app_log_error(app, "%s: failed to create system source\n", __func__);
	}

	// inject sink mod
	uri_str = SYNTHPOD_PREFIX"sink";
	mod = _sp_app_mod_add(app, uri_str, 0);
	if(mod)
	{
		app->mods[app->num_mods] = mod;
		app->num_mods += 1;
	}
	else
	{
		sp_app_log_error(app, "%s: failed to create system sink\n", __func__);
	}
}

sp_app_t *
sp_app_new(const LilvWorld *world, sp_app_driver_t *driver, void *data)
{
	if(!driver || !data)
		return NULL;

	srand(time(NULL)); // seed random number generator for UUID generator

	sp_app_t *app = calloc(1, sizeof(sp_app_t));
	if(!app)
		return NULL;

	atomic_init(&app->dirty, false);

	app->dir.home = getenv("HOME");

	//printf("%s %s %s\n", app->dir.home, app->dir.config, app->dir.data);

	app->driver = driver;
	app->data = data;

	if(world)
	{
		app->world = (LilvWorld *)world;
		app->embedded = 1;
	}
	else
	{
		app->world = lilv_world_new();
		if(!app->world)
		{
			free(app);
			return NULL;
		}
		LilvNode *node_false = lilv_new_bool(app->world, false);
		if(node_false)
		{
			lilv_world_set_option(app->world, LILV_OPTION_DYN_MANIFEST, node_false);
			lilv_node_free(node_false);
		}
		lilv_world_load_all(app->world);
		LilvNode *synthpod_bundle = lilv_new_file_uri(app->world, NULL, SYNTHPOD_BUNDLE_DIR);
		if(synthpod_bundle)
		{
			lilv_world_load_bundle(app->world, synthpod_bundle);
			lilv_node_free(synthpod_bundle);
		}
	}
	app->plugs = lilv_world_get_all_plugins(app->world);

	lv2_atom_forge_init(&app->forge, app->driver->map);
	sp_regs_init(&app->regs, app->world, app->driver->map);

	_sp_app_populate(app);

	app->fps.bound = driver->sample_rate / driver->update_rate;
	app->fps.counter = 0;

	app->ramp_samples = driver->sample_rate / 10; // ramp over 0.1s FIXME make this configurable

	// populate uri_to_id
	app->uri_to_id.callback_data = app;
	app->uri_to_id.uri_to_id = _uri_to_id;

	app->sratom = sratom_new(app->driver->map);
	if(app->sratom)
		sratom_set_pretty_numbers(app->sratom, false);

	// initialize DSP load profiler
	cross_clock_init(&app->clk_mono, CROSS_CLOCK_MONOTONIC);
	cross_clock_init(&app->clk_real, CROSS_CLOCK_REALTIME);
	cross_clock_gettime(&app->clk_mono, &app->prof.t0);
	app->prof.min = UINT_MAX;
	app->prof.max = 0;
	app->prof.sum = 0;
	app->prof.count = 0;

	// initialize grid dimensions
	app->ncols = 3;
	app->nrows = 2;
	app->nleft = 0.2;

	// initialize parallel processing
	dsp_master_t *dsp_master = &app->dsp_master;
	atomic_init(&dsp_master->kill, false);
	atomic_init(&dsp_master->emergency_exit, false);
	sem_init(&dsp_master->sem, 0, 0);
	dsp_master->num_slaves = driver->num_slaves;
	dsp_master->concurrent = dsp_master->num_slaves; // this is a safe fallback
	for(unsigned i=0; i<dsp_master->num_slaves; i++)
	{
		dsp_slave_t *dsp_slave = &dsp_master->dsp_slaves[i];

		dsp_slave->dsp_master = dsp_master;
		sem_init(&dsp_slave->sem, 0, 0);
		pthread_attr_t attr;
		pthread_attr_init(&attr);
		pthread_create(&dsp_slave->thread, &attr, _dsp_slave_thread, dsp_slave);
	}

	lv2_osc_urid_init(&app->osc_urid, driver->map);

	return app;
}

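// first half of the audio callback: latch at most one pending module
// deletion, stash dirty control port values and clear all atom sequence
// input buffers before the graph is run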
void
sp_app_run_pre(sp_app_t *app, uint32_t nsamples)
{
	mod_t *del_me = NULL;

	cross_clock_gettime(&app->clk_mono, &app->prof.t1);

	// iterate over all modules
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		if(mod->delete_request && !del_me) // only delete 1 module at once
		{
			del_me = mod;
			mod->delete_request = false;
		}

		for(unsigned p=0; p<mod->num_ports; p++)
		{
			port_t *port = &mod->ports[p];

			// stash control port values
			if( (port->type == PORT_TYPE_CONTROL) && port->control.stashing)
			{
				port->control.stashing = false;
				_sp_app_port_control_stash(port);
			}

			if(port->direction == PORT_DIRECTION_OUTPUT)
				continue; // ignore output ports

			// clear atom sequence input buffers
			if(  (port->type == PORT_TYPE_ATOM)
				&& (port->atom.buffer_type == PORT_BUFFER_TYPE_SEQUENCE) )
			{
				LV2_Atom_Sequence *seq = PORT_BASE_ALIGNED(port);
				seq->atom.size = sizeof(LV2_Atom_Sequence_Body); // empty sequence
				seq->atom.type = app->regs.port.sequence.urid;
				seq->body.unit = 0;
				seq->body.pad = 0;
			}
		}
	}

	if(del_me)
		_sp_app_mod_eject(app, del_me);
}

static inline void
_sp_app_process_serial(sp_app_t *app, uint32_t nsamples, bool sparse_update_timeout)
{
	// iterate over all modules
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		_sp_app_process_single_run(mod, nsamples);
		_sp_app_process_single_post(mod, nsamples, sparse_update_timeout);
	}
}

static inline void
_sp_app_process_parallel(sp_app_t *app, uint32_t nsamples, bool sparse_update_timeout)
{
	_dsp_master_process(app, &app->dsp_master, nsamples);

	// iterate over all modules
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		_sp_app_process_single_post(mod, nsamples, sparse_update_timeout);
	}
}

void
sp_app_run_post(sp_app_t *app, uint32_t nsamples)
{
	bool sparse_update_timeout = false;

	app->fps.counter += nsamples; // increase sample counter
	app->fps.period_cnt += 1; // increase period counter
	if(app->fps.counter >= app->fps.bound) // check whether we reached boundary
	{
		sparse_update_timeout = true;
		app->fps.counter -= app->fps.bound; // reset sample counter
	}

	dsp_master_t *dsp_master = &app->dsp_master;
	if( (dsp_master->num_slaves > 0) && (dsp_master->concurrent > 1) ) // parallel processing makes sense here
		_sp_app_process_parallel(app, nsamples, sparse_update_timeout);
	else
		_sp_app_process_serial(app, nsamples, sparse_update_timeout);

	if(atomic_exchange(&dsp_master->emergency_exit, false))
	{
		app->dsp_master.concurrent = dsp_master->num_slaves; // spin up all cores
		sp_app_log_trace(app, "%s: had to take emergency exit\n", __func__);
	}

	// profiling
	struct timespec app_t2;
	cross_clock_gettime(&app->clk_mono, &app_t2);

	const unsigned run_time = (app_t2.tv_sec - app->prof.t1.tv_sec)*1000000000
		+ app_t2.tv_nsec - app->prof.t1.tv_nsec;
	app->prof.sum += run_time;
	app->prof.count += 1;

	if(run_time < app->prof.min)
		app->prof.min = run_time;
	else if(run_time > app->prof.max)
		app->prof.max = run_time;

	if(app_t2.tv_sec > app->prof.t0.tv_sec) // a second has passed
	{
		const unsigned tot_time = (app_t2.tv_sec - app->prof.t0.tv_sec)*1000000000
			+ app_t2.tv_nsec - app->prof.t0.tv_nsec;
		const float tot_time_1 = 100.f / tot_time;

#if defined(USE_DYNAMIC_PARALLELIZER)
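		// estimate the available parallelism of the mod graph from the last
		// second of profiling: T1 is the total work (sum of all per-mod run
		// times), Tinf the critical path (longest source-to-sink chain of run
		// times); their ratio T1/Tinf bounds how many threads can be kept
		// busy, so it is used as the new concurrency target
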
		// reset DAG weights
		for(unsigned m=0; m<app->num_mods; m++)
		{
			mod_t *mod = app->mods[m];
			dsp_client_t *dsp_client = &mod->dsp_client;

			dsp_client->weight = 0;
		}

		unsigned T1 = 0;
		unsigned Tinf = 0;

		// calculate DAG weights
		for(unsigned m1=0; m1<app->num_mods; m1++)
		{
			mod_t *mod1 = app->mods[m1];
			dsp_client_t *dsp_client1 = &mod1->dsp_client;

			unsigned gsw = 0; // greatest sink weight

			for(unsigned m2=0; m2<m1; m2++)
			{
				mod_t *mod2 = app->mods[m2];
				dsp_client_t *dsp_client2 = &mod2->dsp_client;

				for(unsigned s=0; s<dsp_client2->num_sinks; s++)
				{
					dsp_client_t *dsp_client3 = dsp_client2->sinks[s];

					if(dsp_client3 == dsp_client1) // mod2 is source of mod1
					{
						if(dsp_client2->weight > gsw)
							gsw = dsp_client2->weight;

						break;
					}
				}
			}

			const unsigned w1 = mod1->prof.sum;

			T1 += w1;
			dsp_client1->weight = gsw + w1;

			if(dsp_client1->weight > Tinf)
				Tinf = dsp_client1->weight;
		}

		// derive average parallelism
		const float parallelism = (float)T1 / Tinf; //TODO add some head-room?
		app->dsp_master.concurrent = ceilf(parallelism);

		// to nk
		{
			LV2_Atom *answer;
			answer = _sp_app_to_ui_request_atom(app);
			if(answer)
			{
				const int32_t cpus_used = (app->dsp_master.concurrent > app->dsp_master.num_slaves + 1)
					? app->dsp_master.num_slaves + 1
					: app->dsp_master.concurrent;

				LV2_Atom_Forge_Ref ref = synthpod_patcher_set(
					&app->regs, &app->forge, 0, 0, app->regs.synthpod.cpus_used.urid,
					sizeof(int32_t), app->forge.Int, &cpus_used); //TODO subj, seqn
				if(ref)
				{
					_sp_app_to_ui_advance_atom(app, answer);
				}
				else
				{
					_sp_app_to_ui_overflow(app);
				}
			}
			else
			{
				_sp_app_to_ui_overflow(app);
			}
		}
#endif

		for(unsigned m=0; m<app->num_mods; m++)
		{
			mod_t *mod = app->mods[m];

			const float mod_min = mod->prof.min * app->prof.count * tot_time_1;
			const float mod_avg = mod->prof.sum * tot_time_1;
			const float mod_max = mod->prof.max * app->prof.count * tot_time_1;

			// to nk
			LV2_Atom *answer = _sp_app_to_ui_request_atom(app);
			if(answer)
			{
				const float vec [] = {
					mod_min, mod_avg, mod_max
				};

				LV2_Atom_Forge_Frame frame [1];
				LV2_Atom_Forge_Ref ref = synthpod_patcher_set_object(
					&app->regs, &app->forge, &frame[0], mod->urn, 0, app->regs.synthpod.module_profiling.urid); //TODO seqn
				if(ref)
					ref = lv2_atom_forge_vector(&app->forge, sizeof(float), app->forge.Float, 3, vec);
				if(ref)
				{
					synthpod_patcher_pop(&app->forge, frame, 1);
					_sp_app_to_ui_advance_atom(app, answer);
				}
				else
				{
					_sp_app_to_ui_overflow(app);
				}
			}
			else
			{
				_sp_app_to_ui_overflow(app);
			}

			mod->prof.min = UINT_MAX;
			mod->prof.max = 0;
			mod->prof.sum = 0;
		}

		{
			const float app_min = app->prof.min * app->prof.count * tot_time_1;
			const float app_avg = app->prof.sum * tot_time_1;
			const float app_max = app->prof.max * app->prof.count * tot_time_1;

			// to nk
			LV2_Atom *answer = _sp_app_to_ui_request_atom(app);
			if(answer)
			{
				const float vec [] = {
					app_min, app_avg, app_max
				};

				LV2_Atom_Forge_Frame frame [1];
				LV2_Atom_Forge_Ref ref = synthpod_patcher_set_object(
					&app->regs, &app->forge, &frame[0], 0, 0, app->regs.synthpod.dsp_profiling.urid); //TODO subj, seqn
				if(ref)
					ref = lv2_atom_forge_vector(&app->forge, sizeof(float), app->forge.Float, 3, vec);
				if(ref)
				{
					synthpod_patcher_pop(&app->forge, frame, 1);
					_sp_app_to_ui_advance_atom(app, answer);
				}
				else
				{
					_sp_app_to_ui_overflow(app);
				}
			}
			else
			{
				_sp_app_to_ui_overflow(app);
			}

			app->prof.t0.tv_sec = app_t2.tv_sec;
			app->prof.t0.tv_nsec = app_t2.tv_nsec;
			app->prof.min = UINT_MAX;
			app->prof.max = 0;
			app->prof.sum = 0;
			app->prof.count = 0;
		}
	}

	// handle app ui post
	bool expected = true;
	const bool desired = false;
	if(atomic_compare_exchange_weak(&app->dirty, &expected, desired))
	{
		// to nk
		_sp_app_ui_set_modlist(app, 0, 0); //FIXME subj, seqn

		// recalculate concurrency
		_dsp_master_reorder(app);
		//printf("concurrency: %i\n", app->dsp_master.concurrent);
	}
}

void
sp_app_deactivate(sp_app_t *app)
{
	//TODO
}

void
sp_app_free(sp_app_t *app)
{
	if(!app)
		return;

	// deinit parallel processing
	dsp_master_t *dsp_master = &app->dsp_master;
	atomic_store(&dsp_master->kill, true);
	//printf("finish\n");
	_dsp_master_post(dsp_master, dsp_master->num_slaves);
	_dsp_master_wait(app, dsp_master, dsp_master->num_slaves);

	for(unsigned i=0; i<dsp_master->num_slaves; i++)
	{
		dsp_slave_t *dsp_slave = &dsp_master->dsp_slaves[i];

		void *ret;
		pthread_join(dsp_slave->thread, &ret);
		sem_destroy(&dsp_slave->sem);
	}
	sem_destroy(&dsp_master->sem);

	// free mods
	for(unsigned m=0; m<app->num_mods; m++)
		_sp_app_mod_del(app, app->mods[m]);

	sp_regs_deinit(&app->regs);

	if(!app->embedded)
		lilv_world_free(app->world);

	if(app->bundle_path)
		free(app->bundle_path);
	if(app->bundle_filename)
		free(app->bundle_filename);

	if(app->sratom)
		sratom_free(app->sratom);

	cross_clock_deinit(&app->clk_mono);
	cross_clock_deinit(&app->clk_real);

	free(app);
}

bool
sp_app_bypassed(sp_app_t *app)
{
	return app->load_bundle && (app->block_state == BLOCKING_STATE_WAIT);
}

__realtime uint32_t
sp_app_options_set(sp_app_t *app, const LV2_Options_Option *options)
{
	LV2_Options_Status status = LV2_OPTIONS_SUCCESS;

	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		if(mod->opts.iface && mod->opts.iface->set)
			status |= mod->opts.iface->set(mod->handle, options);
	}

	return status;
}

static void
_sp_app_reinitialize(sp_app_t *app)
{
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		_sp_app_mod_reinitialize(mod);
	}

	// refresh all connections
	for(unsigned m=0; m<app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		for(unsigned i=0; i<mod->num_ports - 2; i++)
		{
			port_t *tar = &mod->ports[i];

			// set port buffer
			lilv_instance_connect_port(mod->inst, i, tar->base);
		}

		lilv_instance_activate(mod->inst);
	}
}

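// propagate a changed host block size to all mods that support the options
// interface: shrink minBlockLength if needed and always update
// nominalBlockLength; if nsamples exceeds the driver's maximum block size,
// all mods have to be reinitialized instead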
int
sp_app_nominal_block_length(sp_app_t *app, uint32_t nsamples)
{
	if(nsamples <= app->driver->max_block_size)
	{
		for(unsigned m=0; m<app->num_mods; m++)
		{
			mod_t *mod = app->mods[m];

			if(mod->opts.iface && mod->opts.iface->set)
			{
				if(nsamples < app->driver->min_block_size)
				{
					// update driver struct
					app->driver->min_block_size = nsamples;

					const LV2_Options_Option options [2] = {{
						.context = LV2_OPTIONS_INSTANCE,
						.subject = 0, // is ignored
						.key = app->regs.bufsz.min_block_length.urid,
						.size = sizeof(int32_t),
						.type = app->forge.Int,
						.value = &app->driver->min_block_size
					}, {
						.key = 0, // sentinel
						.value = NULL // sentinel
					}};

					// notify new minBlockLength
					if(mod->opts.iface->set(mod->handle, options) != LV2_OPTIONS_SUCCESS)
						sp_app_log_error(app, "%s: setting of minBlockLength failed\n", __func__);
				}

				const int32_t nominal_block_length = nsamples;

				const LV2_Options_Option options [2] = {{
					.context = LV2_OPTIONS_INSTANCE,
					.subject = 0, // is ignored
					.key = app->regs.bufsz.nominal_block_length.urid,
					.size = sizeof(int32_t),
					.type = app->forge.Int,
					.value = &nominal_block_length
				}, {
					.key = 0, // sentinel
					.value = NULL // sentinel
				}};

				// notify new nominalBlockLength
				if(mod->opts.iface->set(mod->handle, options) != LV2_OPTIONS_SUCCESS)
					sp_app_log_error(app, "%s: setting of nominalBlockLength failed\n", __func__);
			}
		}
	}
	else // nsamples > max_block_size
	{
		// update driver struct
		app->driver->max_block_size = nsamples;

		_sp_app_reinitialize(app);
	}

	return 0;
}

int
sp_app_com_event(sp_app_t *app, LV2_URID otype)
{
	// it is a com event, if it is not an official port protocol
	if(  (otype == app->regs.port.float_protocol.urid)
		|| (otype == app->regs.port.peak_protocol.urid)
		|| (otype == app->regs.port.atom_transfer.urid)
		|| (otype == app->regs.port.event_transfer.urid) )
		return 0;

	return 1;
}

// sort according to position
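// (in-place quicksort with Hoare partitioning over the mods array, ordering
// by x position first and y position second)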
__realtime static void
_sp_app_mod_qsort(mod_t **A, int n)
{
	if(n < 2)
		return;

	const mod_t *p = A[0];

	int i = -1;
	int j = n;

	while(true)
	{
		do {
			i += 1;
		} while( (A[i]->pos.x < p->pos.x) || ( (A[i]->pos.x == p->pos.x) && (A[i]->pos.y < p->pos.y) ) );

		do {
			j -= 1;
		} while( (A[j]->pos.x > p->pos.x) || ( (A[j]->pos.x == p->pos.x) && (A[j]->pos.y > p->pos.y) ) );

		if(i >= j)
			break;

		mod_t *tmp = A[i];
		A[i] = A[j];
		A[j] = tmp;
	}

	_sp_app_mod_qsort(A, j + 1);
	_sp_app_mod_qsort(A + j + 1, n - j - 1);
}

/*
__non_realtime static void
_sp_app_order_dump(sp_app_t *app)
{
	for(unsigned m = 0; m < app->num_mods; m++)
	{
		mod_t *mod = app->mods[m];

		printf("%u: %u\n", m, mod->uid);
	}
	printf("\n");
}
*/

__realtime void
_sp_app_order(sp_app_t *app)
{
	//_sp_app_order_dump(app);
	_sp_app_mod_qsort(app->mods, app->num_mods);
	//_sp_app_order_dump(app);

	_dsp_master_reorder(app);
}

__non_realtime int
sp_app_log_error(sp_app_t *app, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = app->driver->log->vprintf(app->driver->log->handle, app->regs.log.error.urid, fmt, args);
	va_end(args);

	return ret;
}

__non_realtime int
sp_app_log_note(sp_app_t *app, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = app->driver->log->vprintf(app->driver->log->handle, app->regs.log.note.urid, fmt, args);
	va_end(args);

	return ret;
}

__non_realtime int
sp_app_log_warning(sp_app_t *app, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = app->driver->log->vprintf(app->driver->log->handle, app->regs.log.warning.urid, fmt, args);
	va_end(args);

	return ret;
}

__realtime int
sp_app_log_trace(sp_app_t *app, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = app->driver->log->vprintf(app->driver->log->handle, app->regs.log.trace.urid, fmt, args);
	va_end(args);

	return ret;
}

void
sp_app_set_bundle_path(sp_app_t *app, const char *bundle_path)
{
	if(app->bundle_path)
		free(app->bundle_path);

	app->bundle_path = strdup(bundle_path);
}