1 /*****************************************************************
2  * gmerlin - a general purpose multimedia framework and applications
3  *
4  * Copyright (c) 2001 - 2011 Members of the Gmerlin project
5  * gmerlin-general@lists.sourceforge.net
6  * http://gmerlin.sourceforge.net
7  *
8  * This program is free software: you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation, either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  * *****************************************************************/
21 
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#include <gavl/gavl.h>
#include <gmerlin/parameter.h>
#include <gmerlin/bggavl.h>
#include <gmerlin/bg_sem.h>
30 
/* Per-worker state: one of these exists for each thread in the pool.
   The owner hands work to the thread via func/data/start/len, wakes it
   with run_sem, and waits for completion on done_sem. */
typedef struct
  {
  pthread_t t;                   /* worker thread handle */
  sem_t run_sem;                 /* posted by owner: work (or stop request) is ready */
  sem_t done_sem;                /* posted by worker: current job finished */
  pthread_mutex_t stop_mutex;    /* protects do_stop */
  int do_stop;                   /* nonzero -> worker must exit its loop */
  void (*func)(void*, int, int); /* job callback: func(data, start, len) */
  void * data;                   /* opaque argument passed to func */
  int start;                     /* first index of this worker's slice */
  int len;                       /* number of elements in this worker's slice */
  } thread_t;
43 
/* Thread pool: a fixed-size array of workers, created once and reused.
   Opaque to callers (declared in the public header). */
struct bg_thread_pool_s
  {
  int num_threads;      /* number of entries in threads[] */
  thread_t * threads;   /* worker array, owned by the pool */
  };
49 
thread_func(void * data)50 static void * thread_func(void * data)
51   {
52   thread_t * t = data;
53   int do_stop;
54   while(1)
55     {
56     sem_wait(&t->run_sem);
57 
58     pthread_mutex_lock(&t->stop_mutex);
59     do_stop = t->do_stop;
60     pthread_mutex_unlock(&t->stop_mutex);
61 
62     if(do_stop)
63       break;
64     t->func(t->data, t->start, t->len);
65     sem_post(&t->done_sem);
66     }
67   return NULL;
68   }
69 
bg_thread_pool_create(int num_threads)70 bg_thread_pool_t * bg_thread_pool_create(int num_threads)
71   {
72   int i;
73   bg_thread_pool_t * ret = calloc(1, sizeof(*ret));
74   ret->num_threads = num_threads;
75   ret->threads = calloc(num_threads, sizeof(*ret->threads));
76 
77   for(i = 0; i < ret->num_threads; i++)
78     {
79     pthread_mutex_init(&ret->threads[i].stop_mutex, NULL);
80     sem_init(&ret->threads[i].run_sem, 0, 0);
81     sem_init(&ret->threads[i].done_sem, 0, 0);
82     pthread_create(&ret->threads[i].t,
83                    NULL,
84                    thread_func, &ret->threads[i]);
85     }
86   return ret;
87   }
88 
/* Shut down and free the pool: ask each worker to exit, join it,
   then release its synchronization objects and the pool itself. */
void bg_thread_pool_destroy(bg_thread_pool_t * p)
  {
  int i;

  for(i = 0; i < p->num_threads; i++)
    {
    thread_t * t = &p->threads[i];

    /* Request termination under the lock, then wake the worker */
    pthread_mutex_lock(&t->stop_mutex);
    t->do_stop = 1;
    pthread_mutex_unlock(&t->stop_mutex);
    sem_post(&t->run_sem);

    /* Wait for it to exit before tearing down its resources */
    pthread_join(t->t, NULL);
    pthread_mutex_destroy(&t->stop_mutex);
    sem_destroy(&t->run_sem);
    sem_destroy(&t->done_sem);
    }
  free(p->threads);
  free(p);
  }
108 
bg_thread_pool_run(void (* func)(void *,int start,int len),void * gavl_data,int start,int len,void * client_data,int thread)109 void bg_thread_pool_run(void (*func)(void*,int start, int len),
110                         void * gavl_data,
111                         int start, int len,
112                         void * client_data, int thread)
113   {
114   bg_thread_pool_t * p     = client_data;
115   p->threads[thread].func  = func;
116   p->threads[thread].data  = gavl_data;
117   p->threads[thread].start = start;
118   p->threads[thread].len   = len;
119 
120   sem_post(&p->threads[thread].run_sem);
121   }
122 
bg_thread_pool_stop(void * client_data,int thread)123 void bg_thread_pool_stop(void * client_data, int thread)
124   {
125   bg_thread_pool_t * p     = client_data;
126   sem_wait(&p->threads[thread].done_sem);
127   }
128 
129