/*-
 * Copyright (c) 2009 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

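/*
 * User-space emulation of the Linux workqueue, tasklet and RCU callback
 * APIs.  All work items and tasklets are executed by a single worker
 * pthread; RCU callbacks get their own thread.  Typical use follows the
 * Linux API (sketch only, "my_work" and "my_fn" are placeholder names):
 *
 *	static void my_fn(struct work_struct *w);
 *	static struct work_struct my_work;
 *
 *	INIT_WORK(&my_work, my_fn);
 *	schedule_work(&my_work);
 *	flush_scheduled_work();
 */
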
#include <linux/rcupdate.h>

TAILQ_HEAD(work_head, work_struct);

static struct work_struct *work_curr;	/* work item currently executing, if any */
static struct work_head work_head = TAILQ_HEAD_INITIALIZER(work_head);	/* pending work items */
static pthread_t work_thread;		/* single worker thread */
static pthread_cond_t work_cond;	/* signalled when new work is queued */
static int flush_work_var;		/* set by flush_scheduled_work() */
int
schedule_work(struct work_struct *work)
{
	int retval;

	atomic_lock();
	if (work->entry.tqe_prev == NULL) {
		/* not queued yet - put it at the tail and wake the worker */
		TAILQ_INSERT_TAIL(&work_head, work, entry);
		pthread_cond_signal(&work_cond);
		retval = 1;
	} else {
		/* already pending */
		retval = 0;
	}
	atomic_unlock();
	return (retval);
}

static void
delayed_work_timer_fn(struct timer_list *t)
{
	struct delayed_work *dwork = from_timer(dwork, t, timer);

	schedule_work(&dwork->work);
}

int
queue_delayed_work(struct workqueue_struct *dummy,
    struct delayed_work *pwork, unsigned long delay)
{
	return (schedule_delayed_work(pwork, delay));
}

int
schedule_delayed_work(struct delayed_work *work, unsigned long delay)
{
	int retval;

	if (delay == 0)
		return (schedule_work(&work->work));

	if (timer_pending(&work->timer)) {
		retval = 0;
	} else {
		retval = 1;
	}

	if (retval) {
		/* arm a one-shot timer which queues the work when it fires */
		work->timer.expires = jiffies + delay;
		work->timer.function = delayed_work_timer_fn;
		add_timer(&work->timer);
	}
	return (retval);
}

void
INIT_WORK(struct work_struct *work, work_func_t func)
{
	memset(work, 0, sizeof(*work));
	work->func = func;
}

void
INIT_DELAYED_WORK(struct delayed_work *work, work_func_t func)
{
	memset(work, 0, sizeof(*work));
	work->work.func = func;
}

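/*
 * Main loop of the worker thread: dequeue one work item at a time, run
 * its callback with the lock dropped, and sleep on "work_cond" when the
 * queue is empty.
 */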
static void *
work_exec(void *arg)
{
	struct work_struct *t;

	atomic_lock();
	while (1) {
		t = TAILQ_FIRST(&work_head);
		if (t != NULL) {
			TAILQ_REMOVE(&work_head, t, entry);
			t->entry.tqe_prev = NULL;
			work_curr = t;
			/* run the callback without holding the lock */
			atomic_unlock();
			t->func(t);
			atomic_lock();
			work_curr = NULL;
		} else {
			/* queue drained - let flush_scheduled_work() proceed */
			flush_work_var = 0;
			atomic_pre_sleep();
			pthread_cond_wait(&work_cond, atomic_get_lock());
			atomic_post_sleep();
		}
	}
	/* NOTREACHED */
	atomic_unlock();
	return (NULL);
}

int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	return (schedule_work(work));
}

bool
flush_work(struct work_struct *work)
{
	bool retval;

	atomic_lock();
	retval = (work->entry.tqe_prev != NULL);
	/* wait until the work item is neither queued nor executing */
	while (work->entry.tqe_prev != NULL || work_curr == work)
		schedule();
	atomic_unlock();

	return (retval);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	flush_scheduled_work();
}

void
cancel_delayed_work(struct delayed_work *_work)
{
	cancel_work(&_work->work);
}

void
cancel_delayed_work_sync(struct delayed_work *_work)
{
	cancel_work(&_work->work);

	cancel_work_sync(&_work->work);
}

void
cancel_rearming_delayed_work(struct delayed_work *_work)
{
	cancel_work(&_work->work);
}

void
cancel_work(struct work_struct *work)
{
	atomic_lock();
	if (work->entry.tqe_prev != NULL) {
		TAILQ_REMOVE(&work_head, work, entry);
		work->entry.tqe_prev = NULL;
	}
	atomic_unlock();
}

void
cancel_work_sync(struct work_struct *work)
{
	atomic_lock();
	if (work->entry.tqe_prev != NULL) {
		TAILQ_REMOVE(&work_head, work, entry);
		work->entry.tqe_prev = NULL;
	}
	/* also wait for a currently executing callback to finish */
	while (work == work_curr)
		schedule();
	atomic_unlock();
}

void
flush_scheduled_work(void)
{
	uint32_t drops;

	atomic_lock();
	flush_work_var = 1;
	while (1) {
		pthread_cond_signal(&work_cond);
		if (flush_work_var == 0)
			break;
		/* release recursive lock references while sleeping */
		drops = atomic_drop();
		atomic_unlock();
		usleep(10000);
		atomic_lock();
		atomic_pickup(drops);
	}
	atomic_unlock();
}

void
destroy_workqueue(struct workqueue_struct *wq)
{

}

struct workqueue_struct *
create_workqueue(const char *name)
{
	/* TODO: we currently reuse the existing thread */
	return ((struct workqueue_struct *)1);
}

struct workqueue_struct *
create_singlethread_workqueue(const char *name)
{
	/* TODO: we currently reuse the existing thread */
	return ((struct workqueue_struct *)1);
}

static int
work_init(void)
{
	pthread_cond_init(&work_cond, NULL);

	if (pthread_create(&work_thread, NULL, work_exec, NULL)) {
		printf("Failed creating work thread\n");
	}
	return (0);
}

module_init(work_init);

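/*
 * Tasklet emulation: tasklets are mapped onto the same worker thread by
 * wrapping them in a work_struct.
 */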
static void
tasklet_wrapper_callback(struct work_struct *work)
{
	/* this cast assumes "work" is the first member of "tasklet_struct" */
	struct tasklet_struct *task =
	    (struct tasklet_struct *)work;

	(task->func) (task->data);
}

void
tasklet_schedule(struct tasklet_struct *t)
{
	schedule_work(&t->work);
}

void
tasklet_init(struct tasklet_struct *t, tasklet_func_t *func,
    unsigned long data)
{
	INIT_WORK(&t->work, tasklet_wrapper_callback);

	t->func = func;
	t->data = data;
}

void
tasklet_setup(struct tasklet_struct *t, tasklet_callback_t *func)
{
	INIT_WORK(&t->work, tasklet_wrapper_callback);

	t->func = (tasklet_func_t *)func;
	t->data = (long)t;
}

void
tasklet_kill(struct tasklet_struct *t)
{
	atomic_lock();
	if (t->work.entry.tqe_prev != NULL) {
		TAILQ_REMOVE(&work_head, &t->work, entry);
		t->work.entry.tqe_prev = NULL;
	}
	atomic_unlock();
}

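/*
 * Minimal call_rcu() emulation: callbacks are pushed onto a singly
 * linked list and invoked by a dedicated RCU callback thread.
 */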
static pthread_t rcu_thread;
static pthread_cond_t rcu_cond;
static struct rcu_head *rcu_head;	/* head of the pending callback list */

static void *
rcu_exec(void *arg)
{
	struct rcu_head *t;

	atomic_lock();
	while (1) {
		t = rcu_head;
		if (t != NULL) {
			/* unlink and invoke the callback with the lock dropped */
			rcu_head = t->next;
			t->next = NULL;
			atomic_unlock();
			t->func(t);
			atomic_lock();
		} else {
			atomic_pre_sleep();
			pthread_cond_wait(&rcu_cond, atomic_get_lock());
			atomic_post_sleep();
		}
	}
	/* NOTREACHED */
	atomic_unlock();
	return (NULL);
}

static int
rcu_init(void)
{
	pthread_cond_init(&rcu_cond, NULL);

	if (pthread_create(&rcu_thread, NULL, rcu_exec, NULL)) {
		printf("Failed creating RCU thread\n");
	}
	return (0);
}

module_init(rcu_init);

void
call_rcu(struct rcu_head *head, rcu_func_t *func)
{
	atomic_lock();
	if (head->next == NULL) {
		/* push onto the pending list and wake the RCU callback thread */
		head->next = rcu_head;
		head->func = func;
		rcu_head = head;
		pthread_cond_signal(&rcu_cond);
	}
	atomic_unlock();
}