1 #ifdef HAVE_CONFIG_H
2 # include <config.h>
3 #endif
4
5 #include <stdlib.h>
6 #include <sys/time.h>
7 #include <assert.h>
8 #include <sys/types.h>
9 #include <unistd.h>
10
11 #include "Ecore.h"
12 #include "ecore_private.h"
13
/* Thin abstraction layer over Eina's synchronization and thread
 * primitives, kept as macros so the rest of this file reads like the
 * historical pthread-based implementation:
 *   LK*   - plain mutex       (Eina_Lock)
 *   SLK*  - spinlock          (Eina_Spinlock)
 *   CD*   - condition variable (Eina_Condition, paired with an Eina_Lock)
 *   LRWK* - read/write lock   (Eina_RWLock)
 *   PH*   - thread handle ops (Eina_Thread: equal/self/create/join)
 */
# define LK(x) Eina_Lock x
# define LKI(x) eina_lock_new(&(x))
# define LKD(x) eina_lock_free(&(x))
# define LKL(x) eina_lock_take(&(x))
# define LKU(x) eina_lock_release(&(x))

# define SLK(x) Eina_Spinlock x
# define SLKI(x) eina_spinlock_new(&(x))
# define SLKD(x) eina_spinlock_free(&(x))
# define SLKL(x) eina_spinlock_take(&(x))
# define SLKU(x) eina_spinlock_release(&(x))

# define CD(x) Eina_Condition x
# define CDI(x, m) eina_condition_new(&(x), &(m))
# define CDD(x) eina_condition_free(&(x))
# define CDB(x) eina_condition_broadcast(&(x))
# define CDW(x, t) eina_condition_timedwait(&(x), t)

# define LRWK(x) Eina_RWLock x
# define LRWKI(x) eina_rwlock_new(&(x));
# define LRWKD(x) eina_rwlock_free(&(x));
# define LRWKWL(x) eina_rwlock_take_write(&(x));
# define LRWKRL(x) eina_rwlock_take_read(&(x));
# define LRWKU(x) eina_rwlock_release(&(x));

/* PHC spawns a background-priority pool worker; PHC2 an urgent-priority
 * dedicated (no-queue) worker. */
# define PH(x) Eina_Thread x
# define PHE(x, y) eina_thread_equal(x, y)
# define PHS() eina_thread_self()
# define PHC(x, f, d) eina_thread_create(&(x), EINA_THREAD_BACKGROUND, -1, (void *)f, d)
# define PHC2(x, f, d)eina_thread_create(&(x), EINA_THREAD_URGENT, -1, (void *)f, d)
# define PHJ(x) eina_thread_join(x)
45
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
typedef struct _Ecore_Thread_Waiter Ecore_Thread_Waiter;

/* Stack-allocated state used by ecore_thread_wait() to intercept a
 * worker's end/cancel callbacks: the worker's original callbacks are
 * parked here while wrapper callbacks flip @waiting to EINA_FALSE. */
struct _Ecore_Thread_Waiter
{
   Ecore_Thread_Cb func_cancel; /* worker's original cancel callback */
   Ecore_Thread_Cb func_end;    /* worker's original end callback */
   Eina_Bool waiting;           /* EINA_TRUE until the worker finishes */
};
57
/* One entry of the per-thread data hash: a user payload plus the
 * optional destructor to call when the entry is removed. */
struct _Ecore_Thread_Data
{
   void *data;      /* user payload */
   Eina_Free_Cb cb; /* optional destructor for @data, may be NULL */
};
63
/* One unit of threaded work.  The union is discriminated by the
 * message_run/feedback_run bitfields at the bottom: when both are
 * EINA_FALSE the worker is a plain short_run job. */
struct _Ecore_Pthread_Worker
{
   union
   {
      struct
      {
         Ecore_Thread_Cb func_blocking; /* runs in a pool thread */
      } short_run;
      struct
      {
         Ecore_Thread_Cb func_heavy;         /* runs in the worker thread */
         Ecore_Thread_Notify_Cb func_notify; /* runs in the main loop */

         Ecore_Pthread_Worker *direct_worker; /* companion struct used in
                                               * no-queue (dedicated thread)
                                               * mode, freed with the owner */

         int send;     /* notifications emitted by the worker thread */
         int received; /* notifications consumed by the main loop; the
                        * worker may only be killed once these match */
      } feedback_run;
      struct
      {
         Ecore_Thread_Cb func_main;
         Ecore_Thread_Notify_Cb func_notify;

         Ecore_Pipe *send; /* main-loop -> worker message channel */
         Ecore_Pthread_Worker *direct_worker;

         struct
         {
            int send;
            int received;
         } from, to; /* message counters for each direction */
      } message_run;
   } u;

   Ecore_Thread_Waiter *waiter; /* non-NULL while ecore_thread_wait() blocks */
   Ecore_Thread_Cb func_cancel; /* main-loop callback on cancellation */
   Ecore_Thread_Cb func_end;    /* main-loop callback on normal completion */
   PH(self);                    /* thread currently executing this work */
   Eina_Hash *hash;             /* per-thread data (ecore_thread_*_data API) */
   CD(cond);                    /* paired with @mutex below */
   LK(mutex);

   const void *data; /* user payload passed to every callback */

   int cancel; /* cancellation flag, guarded by @cancel_mutex */

   SLK(cancel_mutex);

   Eina_Bool message_run : 1;  /* union holds u.message_run */
   Eina_Bool feedback_run : 1; /* union holds u.feedback_run */
   Eina_Bool kill : 1;         /* kill deferred until notifications drain */
   Eina_Bool reschedule : 1;   /* re-queue instead of finishing */
   Eina_Bool no_queue : 1;     /* runs on a dedicated thread, not the pool */
};
118
/* Heap-allocated envelope carrying one notification from a worker
 * thread to the main loop; freed by the main-loop handler. */
typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
struct _Ecore_Pthread_Notify
{
   Ecore_Pthread_Worker *work; /* originating worker */
   const void *user_data;      /* notification payload (or an
                                * Ecore_Pthread_Message for message_run) */
};

/* Synchronous message callback: its return value replaces the message
 * payload before the message is sent back to the worker. */
typedef void *(*Ecore_Thread_Sync_Cb)(void *data, Ecore_Thread *thread);
127
/* One message exchanged between a message_run worker and the main loop.
 * When @callback is set, @u holds a function to invoke instead of a
 * plain payload; @sync selects the union member. */
typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
struct _Ecore_Pthread_Message
{
   union
   {
      Ecore_Thread_Cb async;     /* fire-and-forget callback */
      Ecore_Thread_Sync_Cb sync; /* callback whose result is sent back */
   } u;

   const void *data; /* payload, replaced by the sync callback's result */

   int code; /* set to INT_MAX when a sync reply is routed back */

   Eina_Bool callback : 1; /* EINA_TRUE: invoke u.*; EINA_FALSE: notify */
   Eina_Bool sync : 1;     /* selects u.sync over u.async */
};
144
/* Maximum number of pooled worker threads (see _ecore_thread_init()). */
static int _ecore_thread_count_max = 0;

static void _ecore_thread_handler(void *data);

/* Live pooled workers / live dedicated (no-queue) workers.  Both are
 * guarded by _ecore_pending_job_threads_mutex. */
static int _ecore_thread_count = 0;
static int _ecore_thread_count_no_queue = 0;

/* Job queues: currently-running jobs, pending short jobs and pending
 * feedback jobs, each guarded by the matching spinlock below. */
static Eina_List *_ecore_running_job = NULL;
static Eina_List *_ecore_pending_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads_feedback = NULL;
static SLK(_ecore_pending_job_threads_mutex);
static SLK(_ecore_running_job_mutex);

/* Cross-thread shared data (ecore_thread_global_data API). */
static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static Eina_Bool have_main_loop_thread = 0;

/* Recycling cache of Ecore_Pthread_Worker structs, bounded in
 * _ecore_thread_worker_free(). */
static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;

static void *_ecore_thread_worker(void *, Eina_Thread);
static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);
170
/* Return the Eina_Thread of this process' main loop thread.  The value
 * is cached in a static and refreshed whenever getpid() changes, so a
 * fork()ed child re-learns its own main loop thread on first call.
 * NOTE(review): assumes the first caller after a pid change *is* the
 * main loop thread — true for the call sites in this file. */
static PH(get_main_loop_thread) (void)
{
   static PH(main_loop_thread);
   static pid_t main_loop_pid;
   pid_t pid = getpid();

   if (pid != main_loop_pid)
     {
        main_loop_pid = pid;
        main_loop_thread = PHS();
        have_main_loop_thread = 1;
     }

   return main_loop_thread;
}
186
187 static void
_ecore_thread_worker_free(Ecore_Pthread_Worker * worker)188 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
189 {
190 SLKD(worker->cancel_mutex);
191 CDD(worker->cond);
192 LKD(worker->mutex);
193
194 if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
195 {
196 _ecore_thread_worker_count--;
197 free(worker);
198 return;
199 }
200
201 eina_trash_push(&_ecore_thread_worker_trash, worker);
202 }
203
204 static void
_ecore_thread_data_free(void * data)205 _ecore_thread_data_free(void *data)
206 {
207 Ecore_Thread_Data *d = data;
208
209 if (d->cb) d->cb(d->data);
210 free(d);
211 }
212
/* Main-loop-side helper: join a finished worker thread.  The thread id
 * is smuggled through the async-call void* (cast to an integer at the
 * call sites), so it is converted back to a thread handle here. */
void
_ecore_thread_join(void *data)
{
   PH(thread) = (uintptr_t)data;
   DBG("joining thread=%" PRIu64, (uint64_t)thread);
   PHJ(thread);
}
220
/* Final stage of a worker's life, executed in the main loop: fire
 * exactly one of the user's cancel/end callbacks, then reclaim every
 * resource attached to the worker (companion struct, per-thread hash,
 * the worker itself). */
static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *)work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *)work->data, (Ecore_Thread *)work);
     }

   if (work->feedback_run)
     {
        /* The direct worker only exists for no-queue feedback threads;
         * it is owned by this worker and dies with it. */
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   if (work->hash)
     eina_hash_free(work->hash);
   _ecore_thread_worker_free(work);
}
244
245 static void
_ecore_thread_handler(void * data)246 _ecore_thread_handler(void *data)
247 {
248 Ecore_Pthread_Worker *work = data;
249
250 if (work->feedback_run)
251 {
252 if (work->u.feedback_run.send != work->u.feedback_run.received)
253 {
254 work->kill = EINA_TRUE;
255 return;
256 }
257 }
258
259 _ecore_thread_kill(work);
260 }
261
#if 0
/* Sink for the message_run pipe; compiled out together with
 * ecore_thread_message_run() below. */
static void
_ecore_nothing_handler(void *data EINA_UNUSED, void *buffer EINA_UNUSED, unsigned int nbyte EINA_UNUSED)
{
}

#endif
269
/* Main-loop handler for one feedback notification.  Bumps the
 * "received" counter, delivers the payload to the user's notify
 * callback, and performs the deferred kill once the worker has ended
 * and all of its notifications have been drained. */
static void
_ecore_notify_handler(void *data)
{
   Ecore_Pthread_Notify *notify = data;
   Ecore_Pthread_Worker *work = notify->work;
   void *user_data = (void *)notify->user_data;

   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }

   free(notify);
}
290
/* Main-loop handler for one message_run notification.  Three cases:
 *  - plain payload: forwarded to the user's notify callback;
 *  - sync callback: invoked, its result replaces the payload, and the
 *    message is written back to the worker through the pipe (ownership
 *    transfers back, so it must NOT be freed here);
 *  - async callback: invoked and the message freed.
 * Also performs the deferred kill once all messages are drained. */
static void
_ecore_message_notify_handler(void *data)
{
   Ecore_Pthread_Notify *notify = data;
   Ecore_Pthread_Worker *work = notify->work;
   Ecore_Pthread_Message *user_data = (void *)notify->user_data;
   Eina_Bool delete = EINA_TRUE;

   work->u.message_run.from.received++;

   if (!user_data->callback)
     {
        if (work->u.message_run.func_notify)
          work->u.message_run.func_notify((void *)work->data, (Ecore_Thread *)work, (void *)user_data->data);
     }
   else
     {
        if (user_data->sync)
          {
             user_data->data = user_data->u.sync((void *)user_data->data, (Ecore_Thread *)work);
             user_data->callback = EINA_FALSE;
             user_data->code = INT_MAX;
             ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));

             /* Message now travels back to the worker: keep it alive. */
             delete = EINA_FALSE;
          }
        else
          {
             user_data->u.async((void *)user_data->data, (Ecore_Thread *)work);
          }
     }

   if (delete)
     {
        free(user_data);
     }

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
     {
        _ecore_thread_kill(work);
     }
   free(notify);
}
335
/* Thread-cancellation cleanup for a short job: drop it from the running
 * list, then either re-queue it (worker asked to be rescheduled) or
 * hand completion off to the main loop. */
static void
_ecore_short_job_cleanup(void *data)
{
   Ecore_Pthread_Worker *work = data;

   DBG("cleanup work=%p, thread=%" PRIu64, work, (uint64_t)work->self);

   SLKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_remove(_ecore_running_job, work);
   SLKU(_ecore_running_job_mutex);

   if (work->reschedule)
     {
        work->reschedule = EINA_FALSE;

        SLKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
        SLKU(_ecore_pending_job_threads_mutex);
     }
   else
     {
        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
     }
}
360
361 static void
_ecore_short_job(PH (thread))362 _ecore_short_job(PH(thread))
363 {
364 Ecore_Pthread_Worker *work;
365 int cancel;
366
367 SLKL(_ecore_pending_job_threads_mutex);
368
369 if (!_ecore_pending_job_threads)
370 {
371 SLKU(_ecore_pending_job_threads_mutex);
372 return;
373 }
374
375 work = eina_list_data_get(_ecore_pending_job_threads);
376 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
377 _ecore_pending_job_threads);
378 SLKU(_ecore_pending_job_threads_mutex);
379
380 SLKL(_ecore_running_job_mutex);
381 _ecore_running_job = eina_list_append(_ecore_running_job, work);
382 SLKU(_ecore_running_job_mutex);
383
384 SLKL(work->cancel_mutex);
385 cancel = work->cancel;
386 SLKU(work->cancel_mutex);
387 work->self = thread;
388
389 EINA_THREAD_CLEANUP_PUSH(_ecore_short_job_cleanup, work);
390 if (!cancel)
391 work->u.short_run.func_blocking((void *)work->data, (Ecore_Thread *)work);
392 eina_thread_cancellable_set(EINA_FALSE, NULL);
393 EINA_THREAD_CLEANUP_POP(EINA_TRUE);
394 }
395
/* Thread-cancellation cleanup for a feedback job: mirror image of
 * _ecore_short_job_cleanup(), but rescheduled work goes back on the
 * feedback queue. */
static void
_ecore_feedback_job_cleanup(void *data)
{
   Ecore_Pthread_Worker *work = data;

   DBG("cleanup work=%p, thread=%" PRIu64, work, (uint64_t)work->self);

   SLKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_remove(_ecore_running_job, work);
   SLKU(_ecore_running_job_mutex);

   if (work->reschedule)
     {
        work->reschedule = EINA_FALSE;

        SLKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
        SLKU(_ecore_pending_job_threads_mutex);
     }
   else
     {
        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
     }
}
420
/* Pop one feedback job off the pending feedback queue and run it in the
 * calling worker thread.  Same choreography as _ecore_short_job(): the
 * user callback may be a cancellation point, so completion goes through
 * a pushed cleanup handler. */
static void
_ecore_feedback_job(PH(thread))
{
   Ecore_Pthread_Worker *work;
   int cancel;

   SLKL(_ecore_pending_job_threads_mutex);

   if (!_ecore_pending_job_threads_feedback)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        return;
     }

   work = eina_list_data_get(_ecore_pending_job_threads_feedback);
   _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                               _ecore_pending_job_threads_feedback);
   SLKU(_ecore_pending_job_threads_mutex);
   SLKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_append(_ecore_running_job, work);
   SLKU(_ecore_running_job_mutex);

   /* Jobs cancelled while still queued skip the heavy callback. */
   SLKL(work->cancel_mutex);
   cancel = work->cancel;
   SLKU(work->cancel_mutex);
   work->self = thread;

   EINA_THREAD_CLEANUP_PUSH(_ecore_feedback_job_cleanup, work);
   if (!cancel)
     work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   EINA_THREAD_CLEANUP_POP(EINA_TRUE);
}
454
/* Cleanup for a dedicated (no-queue) worker thread: decrement the
 * no-queue counter, hand completion to the main loop, and ask the main
 * loop to join this thread.  The pending-jobs spinlock guards the
 * counter, which shares it with the pool counter. */
static void
_ecore_direct_worker_cleanup(void *data)
{
   Ecore_Pthread_Worker *work = data;

   DBG("cleanup work=%p, thread=%" PRIu64 " (should join)", work, (uint64_t)work->self);

   SLKL(_ecore_pending_job_threads_mutex);
   _ecore_thread_count_no_queue--;
   ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);

   /* The thread id is smuggled through the void*; see _ecore_thread_join(). */
   ecore_main_loop_thread_safe_call_async((Ecore_Cb)_ecore_thread_join,
                                          (void *)(intptr_t)PHS());
   SLKU(_ecore_pending_job_threads_mutex);
}
470
/* Entry point of a dedicated (no-queue) worker thread: runs exactly one
 * message_run or feedback_run job and then exits.  All teardown happens
 * in _ecore_direct_worker_cleanup(), which runs even on cancellation. */
static void *
_ecore_direct_worker(void *data, Eina_Thread t EINA_UNUSED)
{
   Ecore_Pthread_Worker *work = data;
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   eina_thread_name_set(eina_thread_self(), "Ethread-feedback");
   work->self = PHS();

   EINA_THREAD_CLEANUP_PUSH(_ecore_direct_worker_cleanup, work);
   if (work->message_run)
     work->u.message_run.func_main((void *)work->data, (Ecore_Thread *)work);
   else
     work->u.feedback_run.func_heavy((void *)work->data, (Ecore_Thread *)work);
   /* User code may have re-enabled cancellability; disable it again
    * before popping the cleanup handler. */
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   EINA_THREAD_CLEANUP_POP(EINA_TRUE);

   return NULL;
}
489
/* Cleanup for a pool worker thread: decrement the pool counter and ask
 * the main loop to join this thread once it exits. */
static void
_ecore_thread_worker_cleanup(void *data EINA_UNUSED)
{
   DBG("cleanup thread=%" PRIuPTR " (should join)", PHS());
   SLKL(_ecore_pending_job_threads_mutex);
   _ecore_thread_count--;
   ecore_main_loop_thread_safe_call_async((Ecore_Cb)_ecore_thread_join,
                                          (void *)(intptr_t)PHS());
   SLKU(_ecore_pending_job_threads_mutex);
}
500
/* Entry point of a pool worker thread: loop draining the short and
 * feedback queues, then linger briefly before exiting so a burst of new
 * jobs does not pay thread-creation cost ("premature death" guard). */
static void *
_ecore_thread_worker(void *data EINA_UNUSED, Eina_Thread t EINA_UNUSED)
{
   eina_thread_cancellable_set(EINA_FALSE, NULL);
   EINA_THREAD_CLEANUP_PUSH(_ecore_thread_worker_cleanup, NULL);
restart:

   /* these 2 are cancellation points as user cb may enable */
   _ecore_short_job(PHS());
   _ecore_feedback_job(PHS());

   /* from here on, cancellations are guaranteed to be disabled */

   /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
   eina_thread_name_set(eina_thread_self(), "Ethread-worker");

   SLKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   SLKU(_ecore_pending_job_threads_mutex);

   /* Sleep a little to prevent premature death */
#ifdef _WIN32
   Sleep(1); /* around 50ms */
#else
   usleep(50);
#endif

   /* Re-check once after the nap: a job queued in the meantime keeps
    * this thread alive instead of forcing a new one to spawn. */
   SLKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   SLKU(_ecore_pending_job_threads_mutex);

   EINA_THREAD_CLEANUP_POP(EINA_TRUE);

   return NULL;
}
544
545 static Ecore_Pthread_Worker *
_ecore_thread_worker_new(void)546 _ecore_thread_worker_new(void)
547 {
548 Ecore_Pthread_Worker *result;
549
550 result = eina_trash_pop(&_ecore_thread_worker_trash);
551
552 if (!result)
553 {
554 result = calloc(1, sizeof(Ecore_Pthread_Worker));
555 _ecore_thread_worker_count++;
556 }
557 else
558 {
559 memset(result, 0, sizeof(Ecore_Pthread_Worker));
560 }
561
562 SLKI(result->cancel_mutex);
563 LKI(result->mutex);
564 CDI(result->cond, result->mutex);
565
566 return result;
567 }
568
569 void
_ecore_thread_init(void)570 _ecore_thread_init(void)
571 {
572 _ecore_thread_count_max = eina_cpu_count() * 4;
573 if (_ecore_thread_count_max <= 0)
574 _ecore_thread_count_max = 1;
575
576 SLKI(_ecore_pending_job_threads_mutex);
577 LRWKI(_ecore_thread_global_hash_lock);
578 LKI(_ecore_thread_global_hash_mutex);
579 SLKI(_ecore_running_job_mutex);
580 CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);
581 }
582
583 void
_ecore_thread_shutdown(void)584 _ecore_thread_shutdown(void)
585 {
586 /* FIXME: If function are still running in the background, should we kill them ? */
587 Ecore_Pthread_Worker *work;
588 Eina_List *l;
589 Eina_Bool test;
590 int iteration = 0;
591
592 SLKL(_ecore_pending_job_threads_mutex);
593
594 EINA_LIST_FREE(_ecore_pending_job_threads, work)
595 {
596 if (work->func_cancel)
597 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
598 free(work);
599 }
600
601 EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
602 {
603 if (work->func_cancel)
604 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
605 free(work);
606 }
607
608 SLKU(_ecore_pending_job_threads_mutex);
609 SLKL(_ecore_running_job_mutex);
610
611 EINA_LIST_FOREACH(_ecore_running_job, l, work)
612 ecore_thread_cancel((Ecore_Thread *)work);
613
614 SLKU(_ecore_running_job_mutex);
615
616 do
617 {
618 SLKL(_ecore_pending_job_threads_mutex);
619 if (_ecore_thread_count + _ecore_thread_count_no_queue > 0)
620 {
621 test = EINA_TRUE;
622 }
623 else
624 {
625 test = EINA_FALSE;
626 }
627 SLKU(_ecore_pending_job_threads_mutex);
628 iteration++;
629 if (test)
630 {
631 _ecore_main_call_flush();
632 usleep(1000);
633 }
634 } while (test == EINA_TRUE && iteration < 50);
635
636 if (iteration == 20 && _ecore_thread_count > 0)
637 {
638 ERR("%i of the child thread are still running after 1s. This can lead to a segv. Sorry.", _ecore_thread_count);
639 }
640
641 if (_ecore_thread_global_hash)
642 eina_hash_free(_ecore_thread_global_hash);
643 have_main_loop_thread = 0;
644
645 while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
646 {
647 free(work);
648 }
649
650 SLKD(_ecore_pending_job_threads_mutex);
651 LRWKD(_ecore_thread_global_hash_lock);
652 LKD(_ecore_thread_global_hash_mutex);
653 SLKD(_ecore_running_job_mutex);
654 CDD(_ecore_thread_global_hash_cond);
655 }
656
657 EAPI Ecore_Thread *
ecore_thread_run(Ecore_Thread_Cb func_blocking,Ecore_Thread_Cb func_end,Ecore_Thread_Cb func_cancel,const void * data)658 ecore_thread_run(Ecore_Thread_Cb func_blocking,
659 Ecore_Thread_Cb func_end,
660 Ecore_Thread_Cb func_cancel,
661 const void *data)
662 {
663 Ecore_Pthread_Worker *work;
664 Eina_Bool tried = EINA_FALSE;
665 PH(thread);
666
667 EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
668
669 if (!func_blocking) return NULL;
670
671 work = _ecore_thread_worker_new();
672 if (!work)
673 {
674 if (func_cancel)
675 func_cancel((void *)data, NULL);
676 return NULL;
677 }
678
679 work->u.short_run.func_blocking = func_blocking;
680 work->func_end = func_end;
681 work->func_cancel = func_cancel;
682 work->cancel = EINA_FALSE;
683 work->feedback_run = EINA_FALSE;
684 work->message_run = EINA_FALSE;
685 work->kill = EINA_FALSE;
686 work->reschedule = EINA_FALSE;
687 work->no_queue = EINA_FALSE;
688 work->data = data;
689
690 work->self = 0;
691 work->hash = NULL;
692
693 SLKL(_ecore_pending_job_threads_mutex);
694 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
695
696 if (_ecore_thread_count == _ecore_thread_count_max)
697 {
698 SLKU(_ecore_pending_job_threads_mutex);
699 return (Ecore_Thread *)work;
700 }
701
702 SLKU(_ecore_pending_job_threads_mutex);
703
704 /* One more thread could be created. */
705 eina_threads_init();
706
707 SLKL(_ecore_pending_job_threads_mutex);
708
709 retry:
710 if (PHC(thread, _ecore_thread_worker, NULL))
711 {
712 _ecore_thread_count++;
713 SLKU(_ecore_pending_job_threads_mutex);
714 return (Ecore_Thread *)work;
715 }
716 if (!tried)
717 {
718 _ecore_main_call_flush();
719 tried = EINA_TRUE;
720 goto retry;
721 }
722
723 if (_ecore_thread_count == 0)
724 {
725 _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
726
727 if (work->func_cancel)
728 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
729
730 _ecore_thread_worker_free(work);
731 work = NULL;
732 }
733 SLKU(_ecore_pending_job_threads_mutex);
734
735 eina_threads_shutdown();
736
737 return (Ecore_Thread *)work;
738 }
739
/**
 * Request cancellation of a queued or running thread job.
 *
 * Returns EINA_TRUE when the work is already gone (or was removed from
 * the pending queue and cancelled synchronously here), EINA_FALSE when
 * cancellation was merely requested and the job must notice it itself
 * via ecore_thread_check().
 */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;
   int cancel;

   if (!work)
     return EINA_TRUE;
   SLKL(work->cancel_mutex);
   cancel = work->cancel;
   SLKU(work->cancel_mutex);
   /* Already flagged: nothing more to do, but it is still alive. */
   if (cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE;
        /* Notifications still in flight: only flag, never free. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   SLKL(_ecore_pending_job_threads_mutex);

   /* Only the main loop thread may synchronously unqueue and destroy a
    * still-pending job.  NOTE: `work` is reused as the iteration
    * variable below and compared against `thread` to find the entry. */
   if ((have_main_loop_thread) &&
       (PHE(get_main_loop_thread(), PHS())))
     {
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    SLKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    SLKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   SLKU(_ecore_pending_job_threads_mutex);

   /* Not found in a queue: restore `work` (clobbered by the loops). */
   work = (Ecore_Pthread_Worker *)thread;

   /* Delay the destruction */
on_exit:
   eina_thread_cancel(work->self); /* noop unless eina_thread_cancellable_set() was used by user */
   SLKL(work->cancel_mutex);
   work->cancel = EINA_TRUE;
   SLKU(work->cancel_mutex);

   return EINA_FALSE;
}
815
816 static void
_ecore_thread_wait_reset(Ecore_Thread_Waiter * waiter,Ecore_Pthread_Worker * worker)817 _ecore_thread_wait_reset(Ecore_Thread_Waiter *waiter,
818 Ecore_Pthread_Worker *worker)
819 {
820 worker->func_cancel = waiter->func_cancel;
821 worker->func_end = waiter->func_end;
822 worker->waiter = NULL;
823
824 waiter->func_end = NULL;
825 waiter->func_cancel = NULL;
826 waiter->waiting = EINA_FALSE;
827 }
828
/* Wrapper cancel callback installed by ecore_thread_wait(): forward to
 * the user's original cancel callback, then unwind the interception.
 * NOTE(review): @data is tagged EINA_UNUSED yet is forwarded below —
 * the tag is harmless (it only suppresses warnings) but misleading. */
static void
_ecore_thread_wait_cancel(void *data EINA_UNUSED, Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Waiter *waiter = worker->waiter;

   if (waiter->func_cancel) waiter->func_cancel(data, thread);
   _ecore_thread_wait_reset(waiter, worker);
}
838
/* Wrapper end callback installed by ecore_thread_wait(): forward to the
 * user's original end callback, then unwind the interception.
 * NOTE(review): @data is tagged EINA_UNUSED yet is forwarded below. */
static void
_ecore_thread_wait_end(void *data EINA_UNUSED, Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
   Ecore_Thread_Waiter *waiter = worker->waiter;

   if (waiter->func_end) waiter->func_end(data, thread);
   _ecore_thread_wait_reset(waiter, worker);
}
848
849 EAPI Eina_Bool
ecore_thread_wait(Ecore_Thread * thread,double wait)850 ecore_thread_wait(Ecore_Thread *thread, double wait)
851 {
852 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
853 Ecore_Thread_Waiter waiter;
854
855 if (!thread) return EINA_TRUE;
856
857 waiter.func_end = worker->func_end;
858 waiter.func_cancel = worker->func_cancel;
859 waiter.waiting = EINA_TRUE;
860
861 // Now trick the thread to call the wrapper function
862 worker->waiter = &waiter;
863 worker->func_cancel = _ecore_thread_wait_cancel;
864 worker->func_end = _ecore_thread_wait_end;
865
866 while (waiter.waiting == EINA_TRUE)
867 {
868 double start, end;
869
870 start = ecore_time_get();
871 _ecore_main_call_flush();
872 ecore_main_loop_thread_safe_call_wait(0.0001);
873 end = ecore_time_get();
874
875 wait -= end - start;
876
877 if (wait <= 0) break;
878 }
879
880 if (waiter.waiting == EINA_FALSE)
881 {
882 return EINA_TRUE;
883 }
884 else
885 {
886 _ecore_thread_wait_reset(&waiter, worker);
887 return EINA_FALSE;
888 }
889 }
890
/**
 * Report whether @p thread has been asked to cancel.  Meant to be
 * polled from inside the worker callback so long jobs can bail out
 * cooperatively.  Returns EINA_TRUE for a NULL handle.
 */
EAPI Eina_Bool
ecore_thread_check(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *)thread;
   int cancel;

   if (!worker) return EINA_TRUE;
   SLKL(worker->cancel_mutex);

   cancel = worker->cancel;
   /* FIXME: there is an insane bug driving me nuts here. I don't know if
      it's a race condition, some cache issue or some alien attack on our software.
      But ecore_thread_check will only work correctly with a printf, all the volatile,
      lock and even usleep don't help here... */
   /* fprintf(stderr, "wc: %i\n", cancel); */
   SLKU(worker->cancel_mutex);
   return cancel;
}
909
/**
 * Launch a long-running job that reports progress to the main loop via
 * ecore_thread_feedback().
 *
 * @p func_heavy runs in a worker thread; @p func_notify runs in the
 * main loop for each feedback event; @p func_end / @p func_cancel run
 * in the main loop when the job finishes.  When @p try_no_queue is set,
 * a dedicated (urgent-priority) thread is attempted first; on failure
 * the job falls back to the shared worker pool.  Returns NULL on
 * unrecoverable failure (after invoking @p func_cancel).
 */
EAPI Ecore_Thread *
ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
                          Ecore_Thread_Notify_Cb func_notify,
                          Ecore_Thread_Cb func_end,
                          Ecore_Thread_Cb func_cancel,
                          const void *data,
                          Eina_Bool try_no_queue)
{
   Ecore_Pthread_Worker *worker;
   Eina_Bool tried = EINA_FALSE;
   PH(thread);

   EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);

   if (!func_heavy) return NULL;

   worker = _ecore_thread_worker_new();
   if (!worker) goto on_error;

   worker->u.feedback_run.func_heavy = func_heavy;
   worker->u.feedback_run.func_notify = func_notify;
   worker->hash = NULL;
   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->data = data;
   worker->cancel = EINA_FALSE;
   worker->message_run = EINA_FALSE;
   worker->feedback_run = EINA_TRUE;
   worker->kill = EINA_FALSE;
   worker->reschedule = EINA_FALSE;
   worker->self = 0;

   worker->u.feedback_run.send = 0;
   worker->u.feedback_run.received = 0;

   worker->u.feedback_run.direct_worker = NULL;

   /* First attempt: a dedicated thread outside the pool. */
   if (try_no_queue)
     {
        PH(t);

        worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
        worker->no_queue = EINA_TRUE;

        eina_threads_init();

retry_direct:
        if (PHC2(t, _ecore_direct_worker, worker))
          {
             SLKL(_ecore_pending_job_threads_mutex);
             _ecore_thread_count_no_queue++;
             SLKU(_ecore_pending_job_threads_mutex);
             return (Ecore_Thread *)worker;
          }
        if (!tried)
          {
             /* Flush pending joins once, then retry thread creation. */
             _ecore_main_call_flush();
             tried = EINA_TRUE;
             goto retry_direct;
          }

        /* Dedicated thread failed: discard the companion struct and
         * fall through to the pooled path below. */
        if (worker->u.feedback_run.direct_worker)
          {
             _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
             worker->u.feedback_run.direct_worker = NULL;
          }

        eina_threads_shutdown();
     }

   worker->no_queue = EINA_FALSE;

   SLKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);

   /* Pool at capacity: an existing worker will pick the job up. */
   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        SLKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)worker;
     }

   SLKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   eina_threads_init();

   SLKL(_ecore_pending_job_threads_mutex);
retry:
   if (PHC(thread, _ecore_thread_worker, NULL))
     {
        _ecore_thread_count++;
        SLKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *)worker;
     }
   if (!tried)
     {
        _ecore_main_call_flush();
        tried = EINA_TRUE;
        goto retry;
     }
   SLKU(_ecore_pending_job_threads_mutex);

   eina_threads_shutdown();

on_error:
   /* Unwind: only when no pool thread exists at all is the job doomed
    * (otherwise an existing worker will still service the queue). */
   SLKL(_ecore_pending_job_threads_mutex);
   if (_ecore_thread_count == 0)
     {
        _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
                                                               worker);

        if (func_cancel) func_cancel((void *)data, NULL);

        if (worker)
          {
             CDD(worker->cond);
             LKD(worker->mutex);
             free(worker);
             worker = NULL;
          }
     }
   SLKU(_ecore_pending_job_threads_mutex);

   return (Ecore_Thread *)worker;
}
1035
1036 EAPI Eina_Bool
ecore_thread_feedback(Ecore_Thread * thread,const void * data)1037 ecore_thread_feedback(Ecore_Thread *thread,
1038 const void *data)
1039 {
1040 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1041
1042 if (!worker) return EINA_FALSE;
1043
1044 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1045
1046 if (worker->feedback_run)
1047 {
1048 Ecore_Pthread_Notify *notify;
1049
1050 notify = malloc(sizeof (Ecore_Pthread_Notify));
1051 if (!notify) return EINA_FALSE;
1052
1053 notify->user_data = data;
1054 notify->work = worker;
1055 worker->u.feedback_run.send++;
1056
1057 ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
1058 }
1059 else if (worker->message_run)
1060 {
1061 Ecore_Pthread_Message *msg;
1062 Ecore_Pthread_Notify *notify;
1063
1064 msg = malloc(sizeof (Ecore_Pthread_Message));
1065 if (!msg) return EINA_FALSE;
1066 msg->data = data;
1067 msg->callback = EINA_FALSE;
1068 msg->sync = EINA_FALSE;
1069
1070 notify = malloc(sizeof (Ecore_Pthread_Notify));
1071 if (!notify)
1072 {
1073 free(msg);
1074 return EINA_FALSE;
1075 }
1076 notify->work = worker;
1077 notify->user_data = msg;
1078
1079 worker->u.message_run.from.send++;
1080 ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
1081 }
1082 else
1083 return EINA_FALSE;
1084
1085 return EINA_TRUE;
1086 }
1087
#if 0
/* Disabled draft of a bidirectional message-run API: spawns a dedicated
 * worker whose func_main exchanges messages with the main loop through
 * an Ecore_Pipe.  Compiled out together with _ecore_nothing_handler()
 * above; kept for reference only. */
EAPI Ecore_Thread *
ecore_thread_message_run(Ecore_Thread_Cb func_main,
                         Ecore_Thread_Notify_Cb func_notify,
                         Ecore_Thread_Cb func_end,
                         Ecore_Thread_Cb func_cancel,
                         const void *data)
{
   Ecore_Pthread_Worker *worker;
   PH(t);

   if (!func_main) return NULL;

   worker = _ecore_thread_worker_new();
   if (!worker) return NULL;

   worker->u.message_run.func_main = func_main;
   worker->u.message_run.func_notify = func_notify;
   worker->u.message_run.direct_worker = _ecore_thread_worker_new();
   worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
   worker->u.message_run.from.send = 0;
   worker->u.message_run.from.received = 0;
   worker->u.message_run.to.send = 0;
   worker->u.message_run.to.received = 0;

   /* Pipe is only read explicitly by the worker, never by the loop. */
   ecore_pipe_freeze(worker->u.message_run.send);

   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->hash = NULL;
   worker->data = data;

   worker->cancel = EINA_FALSE;
   worker->message_run = EINA_TRUE;
   worker->feedback_run = EINA_FALSE;
   worker->kill = EINA_FALSE;
   worker->reschedule = EINA_FALSE;
   worker->no_queue = EINA_FALSE;
   worker->self = 0;

   eina_threads_init();

   if (PHC(t, _ecore_direct_worker, worker))
     return (Ecore_Thread *)worker;

   /* Spawn failed: unwind everything and report via func_cancel. */
   eina_threads_shutdown();

   if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
   if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);

   CDD(worker->cond);
   LKD(worker->mutex);

   func_cancel((void *)data, NULL);

   return NULL;
}

#endif
1147
1148 EAPI Eina_Bool
ecore_thread_reschedule(Ecore_Thread * thread)1149 ecore_thread_reschedule(Ecore_Thread *thread)
1150 {
1151 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1152
1153 if (!worker) return EINA_FALSE;
1154
1155 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1156
1157 worker->reschedule = EINA_TRUE;
1158 return EINA_TRUE;
1159 }
1160
1161 EAPI int
ecore_thread_active_get(void)1162 ecore_thread_active_get(void)
1163 {
1164 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1165 return _ecore_thread_count;
1166 }
1167
1168 EAPI int
ecore_thread_pending_get(void)1169 ecore_thread_pending_get(void)
1170 {
1171 int ret;
1172
1173 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1174 SLKL(_ecore_pending_job_threads_mutex);
1175 ret = eina_list_count(_ecore_pending_job_threads);
1176 SLKU(_ecore_pending_job_threads_mutex);
1177 return ret;
1178 }
1179
1180 EAPI int
ecore_thread_pending_feedback_get(void)1181 ecore_thread_pending_feedback_get(void)
1182 {
1183 int ret;
1184
1185 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1186 SLKL(_ecore_pending_job_threads_mutex);
1187 ret = eina_list_count(_ecore_pending_job_threads_feedback);
1188 SLKU(_ecore_pending_job_threads_mutex);
1189 return ret;
1190 }
1191
1192 EAPI int
ecore_thread_pending_total_get(void)1193 ecore_thread_pending_total_get(void)
1194 {
1195 int ret;
1196
1197 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1198 SLKL(_ecore_pending_job_threads_mutex);
1199 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1200 SLKU(_ecore_pending_job_threads_mutex);
1201 return ret;
1202 }
1203
1204 EAPI int
ecore_thread_max_get(void)1205 ecore_thread_max_get(void)
1206 {
1207 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1208 return _ecore_thread_count_max;
1209 }
1210
1211 EAPI void
ecore_thread_max_set(int num)1212 ecore_thread_max_set(int num)
1213 {
1214 EINA_MAIN_LOOP_CHECK_RETURN;
1215 if (num < 1) return;
1216 /* avoid doing something hilarious by blocking dumb users */
1217 if (num > (32 * eina_cpu_count())) num = 32 * eina_cpu_count();
1218
1219 _ecore_thread_count_max = num;
1220 }
1221
1222 EAPI void
ecore_thread_max_reset(void)1223 ecore_thread_max_reset(void)
1224 {
1225 EINA_MAIN_LOOP_CHECK_RETURN;
1226 _ecore_thread_count_max = eina_cpu_count() * 4;
1227 }
1228
1229 EAPI int
ecore_thread_available_get(void)1230 ecore_thread_available_get(void)
1231 {
1232 int ret;
1233
1234 SLKL(_ecore_pending_job_threads_mutex);
1235 ret = _ecore_thread_count_max - _ecore_thread_count;
1236 SLKU(_ecore_pending_job_threads_mutex);
1237 return ret;
1238 }
1239
1240 EAPI Eina_Bool
ecore_thread_local_data_add(Ecore_Thread * thread,const char * key,void * value,Eina_Free_Cb cb,Eina_Bool direct)1241 ecore_thread_local_data_add(Ecore_Thread *thread,
1242 const char *key,
1243 void *value,
1244 Eina_Free_Cb cb,
1245 Eina_Bool direct)
1246 {
1247 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1248 Ecore_Thread_Data *d;
1249 Eina_Bool ret;
1250
1251 if ((!thread) || (!key) || (!value))
1252 return EINA_FALSE;
1253
1254 LKL(worker->mutex);
1255 if (!worker->hash)
1256 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1257 LKU(worker->mutex);
1258
1259 if (!worker->hash)
1260 return EINA_FALSE;
1261
1262 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1263 return EINA_FALSE;
1264
1265 d->data = value;
1266 d->cb = cb;
1267
1268 LKL(worker->mutex);
1269 if (direct)
1270 ret = eina_hash_direct_add(worker->hash, key, d);
1271 else
1272 ret = eina_hash_add(worker->hash, key, d);
1273 LKU(worker->mutex);
1274 CDB(worker->cond);
1275 return ret;
1276 }
1277
1278 EAPI void *
ecore_thread_local_data_set(Ecore_Thread * thread,const char * key,void * value,Eina_Free_Cb cb)1279 ecore_thread_local_data_set(Ecore_Thread *thread,
1280 const char *key,
1281 void *value,
1282 Eina_Free_Cb cb)
1283 {
1284 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1285 Ecore_Thread_Data *d, *r;
1286 void *ret;
1287
1288 if ((!thread) || (!key) || (!value))
1289 return NULL;
1290
1291 LKL(worker->mutex);
1292 if (!worker->hash)
1293 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1294 LKU(worker->mutex);
1295
1296 if (!worker->hash)
1297 return NULL;
1298
1299 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1300 return NULL;
1301
1302 d->data = value;
1303 d->cb = cb;
1304
1305 LKL(worker->mutex);
1306 r = eina_hash_set(worker->hash, key, d);
1307 LKU(worker->mutex);
1308 CDB(worker->cond);
1309
1310 if (r)
1311 {
1312 ret = r->data;
1313 free(r);
1314 return ret;
1315 }
1316 return NULL;
1317 }
1318
1319 EAPI void *
ecore_thread_local_data_find(Ecore_Thread * thread,const char * key)1320 ecore_thread_local_data_find(Ecore_Thread *thread,
1321 const char *key)
1322 {
1323 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1324 Ecore_Thread_Data *d;
1325
1326 if ((!thread) || (!key))
1327 return NULL;
1328
1329 if (!worker->hash)
1330 return NULL;
1331
1332 LKL(worker->mutex);
1333 d = eina_hash_find(worker->hash, key);
1334 LKU(worker->mutex);
1335 if (d)
1336 return d->data;
1337 return NULL;
1338 }
1339
1340 EAPI Eina_Bool
ecore_thread_local_data_del(Ecore_Thread * thread,const char * key)1341 ecore_thread_local_data_del(Ecore_Thread *thread,
1342 const char *key)
1343 {
1344 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1345 Eina_Bool r;
1346
1347 if ((!thread) || (!key))
1348 return EINA_FALSE;
1349
1350 if (!worker->hash)
1351 return EINA_FALSE;
1352
1353 LKL(worker->mutex);
1354 r = eina_hash_del_by_key(worker->hash, key);
1355 LKU(worker->mutex);
1356 return r;
1357 }
1358
1359 EAPI Eina_Bool
ecore_thread_global_data_add(const char * key,void * value,Eina_Free_Cb cb,Eina_Bool direct)1360 ecore_thread_global_data_add(const char *key,
1361 void *value,
1362 Eina_Free_Cb cb,
1363 Eina_Bool direct)
1364 {
1365 Ecore_Thread_Data *d;
1366 Eina_Bool ret;
1367
1368 if ((!key) || (!value))
1369 return EINA_FALSE;
1370
1371 LRWKWL(_ecore_thread_global_hash_lock);
1372 if (!_ecore_thread_global_hash)
1373 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1374 LRWKU(_ecore_thread_global_hash_lock);
1375
1376 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1377 return EINA_FALSE;
1378
1379 d->data = value;
1380 d->cb = cb;
1381
1382 if (!_ecore_thread_global_hash)
1383 {
1384 free(d);
1385 return EINA_FALSE;
1386 }
1387
1388 LRWKWL(_ecore_thread_global_hash_lock);
1389 if (direct)
1390 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1391 else
1392 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1393 LRWKU(_ecore_thread_global_hash_lock);
1394 CDB(_ecore_thread_global_hash_cond);
1395 return ret;
1396 }
1397
1398 EAPI void *
ecore_thread_global_data_set(const char * key,void * value,Eina_Free_Cb cb)1399 ecore_thread_global_data_set(const char *key,
1400 void *value,
1401 Eina_Free_Cb cb)
1402 {
1403 Ecore_Thread_Data *d, *r;
1404 void *ret;
1405
1406 if ((!key) || (!value))
1407 return NULL;
1408
1409 LRWKWL(_ecore_thread_global_hash_lock);
1410 if (!_ecore_thread_global_hash)
1411 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1412 LRWKU(_ecore_thread_global_hash_lock);
1413
1414 if (!_ecore_thread_global_hash)
1415 return NULL;
1416
1417 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1418 return NULL;
1419
1420 d->data = value;
1421 d->cb = cb;
1422
1423 LRWKWL(_ecore_thread_global_hash_lock);
1424 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1425 LRWKU(_ecore_thread_global_hash_lock);
1426 CDB(_ecore_thread_global_hash_cond);
1427
1428 if (r)
1429 {
1430 ret = r->data;
1431 free(r);
1432 return ret;
1433 }
1434 return NULL;
1435 }
1436
1437 EAPI void *
ecore_thread_global_data_find(const char * key)1438 ecore_thread_global_data_find(const char *key)
1439 {
1440 Ecore_Thread_Data *ret;
1441
1442 if (!key)
1443 return NULL;
1444
1445 if (!_ecore_thread_global_hash) return NULL;
1446
1447 LRWKRL(_ecore_thread_global_hash_lock);
1448 ret = eina_hash_find(_ecore_thread_global_hash, key);
1449 LRWKU(_ecore_thread_global_hash_lock);
1450 if (ret)
1451 return ret->data;
1452 return NULL;
1453 }
1454
1455 EAPI Eina_Bool
ecore_thread_global_data_del(const char * key)1456 ecore_thread_global_data_del(const char *key)
1457 {
1458 Eina_Bool ret;
1459
1460 if (!key)
1461 return EINA_FALSE;
1462
1463 if (!_ecore_thread_global_hash)
1464 return EINA_FALSE;
1465
1466 LRWKWL(_ecore_thread_global_hash_lock);
1467 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1468 LRWKU(_ecore_thread_global_hash_lock);
1469 return ret;
1470 }
1471
1472 EAPI void *
ecore_thread_global_data_wait(const char * key,double seconds)1473 ecore_thread_global_data_wait(const char *key,
1474 double seconds)
1475 {
1476 double tm = 0;
1477 Ecore_Thread_Data *ret = NULL;
1478
1479 if (!key)
1480 return NULL;
1481
1482 if (seconds > 0)
1483 tm = ecore_time_get() + seconds;
1484
1485 while (1)
1486 {
1487 LRWKRL(_ecore_thread_global_hash_lock);
1488 if (_ecore_thread_global_hash)
1489 ret = eina_hash_find(_ecore_thread_global_hash, key);
1490 LRWKU(_ecore_thread_global_hash_lock);
1491 if ((ret) ||
1492 (!EINA_DBL_EQ(seconds, 0.0)) ||
1493 ((seconds > 0) && (tm <= ecore_time_get())))
1494 break;
1495 LKL(_ecore_thread_global_hash_mutex);
1496 CDW(_ecore_thread_global_hash_cond, tm - ecore_time_get());
1497 LKU(_ecore_thread_global_hash_mutex);
1498 }
1499 if (ret) return ret->data;
1500 return NULL;
1501 }
1502
1503