xref: /dragonfly/sys/dev/drm/linux_workqueue.c (revision c9c5aa9e)
/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * Copyright (c) 2020 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <drm/drmP.h>
#include <linux/workqueue.h>

#include <sys/kthread.h>

/*
   Running behaviour, from the kernel.org documentation:
   - While there are work items on the workqueue, the worker executes the
     functions associated with the work items one after the other.
   - When there is no work item left on the workqueue, the worker becomes idle.

   For each possible CPU there are two worker-pools, one for normal work items
   and the other for high priority ones, plus some extra worker-pools to serve
   work items queued on unbound workqueues - the number of these backing pools
   is dynamic.
 */

/* XXX: Linux functions often enable/disable irqs on the CPU they run on;
 * this should be investigated. */

struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_power_efficient_wq;

/*
 * Linux now uses these worker pools:
 * - (per cpu) regular
 * - (per cpu) regular high priority
 * - ordered
 * - ordered high priority
 * - unbound
 * - unbound high priority
 */
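
/*
 * Illustrative sketch (not compiled): the consumer-side pattern this file
 * implements.  A driver embeds a struct work_struct, initializes it with
 * INIT_WORK(), queues it on one of the system workqueues and later flushes
 * or cancels it.  my_softc, my_irq_work_fn and my_example are hypothetical
 * names.
 */
#if 0
struct my_softc {
	struct work_struct irq_work;
};

static void
my_irq_work_fn(struct work_struct *work)
{
	struct my_softc *sc = container_of(work, struct my_softc, irq_work);

	/* deferred processing, running in a worker kernel thread */
	(void)sc;
}

static void
my_example(struct my_softc *sc)
{
	INIT_WORK(&sc->irq_work, my_irq_work_fn);
	queue_work(system_wq, &sc->irq_work);	/* false if already queued */
	flush_work(&sc->irq_work);		/* wait until it has run */
	cancel_work_sync(&sc->irq_work);	/* or dequeue/wait instead */
}
#endif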

static inline void
process_all_work(struct workqueue_worker *worker)
{
	struct work_struct *work;
	bool didcan;

	while (STAILQ_FIRST(&worker->ws_list_head)) {
		work = STAILQ_FIRST(&worker->ws_list_head);
		STAILQ_REMOVE_HEAD(&worker->ws_list_head, ws_entries);
		work->on_queue = false;

		/* A work item must not be executed concurrently on a single cpu */
		if (work->running)
			continue;

		/* Do not run canceled work items */
		if (work->canceled) {
			/* XXX: should we allow canceled works to be reenabled ? */
			work->canceled = false;
			continue;
		}

		/*
		 * Drop the worker lock while the callback runs so the work
		 * function may sleep or requeue itself, then reacquire it
		 * before touching the work flags again.
		 */
		work->running = true;
		lockmgr(&worker->worker_lock, LK_RELEASE);
		work->func(work);
		lwkt_yield();
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (work->on_queue == false)
			work->worker = NULL;
		didcan = work->canceled;
		cpu_sfence();
		work->running = false;
		if (didcan == true)
			wakeup(work);
	}
}

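/*
 * Main loop of each worker kernel thread: run every queued work item, then
 * sleep on the worker until queue_work() wakes it up again.  The worker
 * lock is held except while sleeping in lksleep() and while a work function
 * is running (see process_all_work() above).
 */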
static void
wq_worker_thread(void *arg)
{
	struct workqueue_worker *worker = arg;

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	while (1) {
		process_all_work(worker);
		lksleep(worker, &worker->worker_lock, 0, "wqidle", 0);
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);
}

/*
 * Return false if work was already on a queue
 * Return true and queue it if this was not the case
 */
int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_worker *worker;
	int ret = false;

	/* XXX: should we block instead ? */
	if (wq->is_draining)
		return false;

	if (wq->num_workers > 1)
		worker = &(*wq->workers)[mycpuid];
	else
		worker = &(*wq->workers)[0];

	lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
	work->canceled = false;
	if (work->on_queue == false || work->running == false) {
		if (work->on_queue == false) {
			STAILQ_INSERT_TAIL(&worker->ws_list_head, work,
					   ws_entries);
			work->on_queue = true;
			work->worker = worker;
			wakeup_one(worker);
		}
		ret = true;
	}
	lockmgr(&worker->worker_lock, LK_RELEASE);

	return ret;
}

static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *dw = arg;

	queue_work(system_wq, &dw->work);
}

/* XXX: the wq argument is currently ignored, the work always goes to system_wq */
int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending = work->work.on_queue; // XXX: running too ?
	if (delay != 0) {
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	} else {
		_delayed_work_fn((void *)work);
	}

	return (!pending);
}
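
/*
 * Illustrative sketch (not compiled): deferring work by a timeout and
 * tearing it down again.  INIT_DELAYED_WORK() and msecs_to_jiffies() are
 * assumed to come from the Linux compatibility headers; my_poll_work,
 * my_poll_fn and the 500ms period are hypothetical.
 */
#if 0
static struct delayed_work my_poll_work;

static void
my_poll_fn(struct work_struct *work)
{
	/* periodic polling: re-arm ourselves */
	queue_delayed_work(system_wq, &my_poll_work, msecs_to_jiffies(500));
}

static void
my_poll_example(void)
{
	INIT_DELAYED_WORK(&my_poll_work, my_poll_fn);
	queue_delayed_work(system_wq, &my_poll_work, msecs_to_jiffies(500));
	/* ... */
	cancel_delayed_work_sync(&my_poll_work);
}
#endif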

static int
init_workqueues(void *arg)
{
	system_wq = alloc_workqueue("system_wq", 0, 1);
	system_highpri_wq = alloc_workqueue("system_highpri_wq", WQ_HIGHPRI, 1);
	system_long_wq = alloc_workqueue("system_long_wq", 0, 1);
	system_unbound_wq = alloc_workqueue("system_unbound_wq", WQ_UNBOUND, 1);
	system_power_efficient_wq = alloc_workqueue("system_power_efficient_wq", 0, 1);

	return 0;
}

static int
destroy_workqueues(void *arg)
{
	destroy_workqueue(system_wq);
	destroy_workqueue(system_highpri_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_unbound_wq);
	destroy_workqueue(system_power_efficient_wq);

	return 0;
}

struct workqueue_struct *
_create_workqueue_common(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int priority, error;

	wq = kmalloc(sizeof(*wq), M_DRM, M_WAITOK | M_ZERO);

	if (flags & WQ_HIGHPRI)
		priority = TDPRI_INT_SUPPORT;
	else
		priority = TDPRI_KERN_DAEMON;

	if (flags & WQ_UNBOUND) {
		wq->num_workers = 1;
	} else {
		wq->num_workers = ncpus;
	}
	wq->workers = kmalloc(sizeof(struct workqueue_worker) * wq->num_workers,
			M_DRM, M_WAITOK | M_ZERO);

	for (int i = 0; i < wq->num_workers; i++) {
		struct workqueue_worker *worker = &(*wq->workers)[i];

		lockinit(&worker->worker_lock, "lwq", 0, 0);
		STAILQ_INIT(&worker->ws_list_head);
		if (wq->num_workers > 1) {
			error = lwkt_create(wq_worker_thread, worker,
				    &worker->worker_thread, NULL, TDF_NOSTART, i, "%s/%d", name, i);
		} else {
			error = lwkt_create(wq_worker_thread, worker,
				    &worker->worker_thread, NULL, TDF_NOSTART, -1, name);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s/%d): error %d\n",
			    __func__, name, i, error);
			/* XXX: destroy kernel threads and free workers[] if applicable */
			kfree(wq);
			return NULL;
		}
		lwkt_setpri_initial(worker->worker_thread, priority);
		lwkt_schedule(worker->worker_thread);
	}

	return wq;
}
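
/*
 * Illustrative sketch (not compiled): a driver creating a private workqueue,
 * which in this compatibility layer is presumably backed by
 * _create_workqueue_common() above, and tearing it down with
 * destroy_workqueue().  my_wq, my_task and my_task_fn are hypothetical names.
 */
#if 0
static struct workqueue_struct *my_wq;
static struct work_struct my_task;

static void
my_task_fn(struct work_struct *work)
{
}

static void
my_wq_example(void)
{
	/* per-cpu workers, normal priority; WQ_HIGHPRI / WQ_UNBOUND change that */
	my_wq = alloc_workqueue("my_wq", 0, 1);
	INIT_WORK(&my_task, my_task_fn);
	queue_work(my_wq, &my_task);
	destroy_workqueue(my_wq);	/* drains queued work items first */
}
#endif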

void
destroy_workqueue(struct workqueue_struct *wq)
{
	drain_workqueue(wq);
//	wq->is_draining = true;
#if 0	/* XXX TODO */
	kill_all_threads;
	kfree(wq->wq_threads);
	kfree(wq);
#endif
}

SYSINIT(linux_workqueue_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, init_workqueues, NULL);
SYSUNINIT(linux_workqueue_destroy, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, destroy_workqueues, NULL);

bool
flush_delayed_work(struct delayed_work *dwork)
{
	callout_drain(&dwork->timer);
	return flush_work(&dwork->work);
}

/* Wait until the wq becomes empty */
void
drain_workqueue(struct workqueue_struct *wq)
{
	struct workqueue_worker *worker;

	wq->is_draining = true;

	for (int i = 0; i < wq->num_workers; i++) {
		worker = &(*wq->workers)[i];

		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		while (!STAILQ_EMPTY(&worker->ws_list_head)) {
			/*
			 * Release the worker lock while waiting so the worker
			 * thread can actually drain its queue.
			 * XXX: polling introduces latency
			 */
			lksleep(&drain_workqueue, &worker->worker_lock, 0,
				"wkdrain", 1);
		}
		lockmgr(&worker->worker_lock, LK_RELEASE);
	}

	/* XXX: No more work will be queued. is that right ? */
//	wq->is_draining = false;
}

bool
work_pending(struct work_struct *work)
{
	/* XXX: is on_queue the only constraint ? */
	return work->on_queue;
}

unsigned int
work_busy(struct work_struct *work)
{
	return (work->on_queue || work->running);
}

static inline void
__flush_work_func(struct work_struct *work)
{
	wakeup_one(work);
}

/*
 * Queue a sentinel work item and wait until the worker has executed it.
 * XXX: introduces latency ?
 * XXX: with per-cpu workers only the current cpu's queue is flushed
 */
void
flush_workqueue(struct workqueue_struct *wq)
{
	struct work_struct __flush_work;

	INIT_WORK(&__flush_work, __flush_work_func);

	queue_work(wq, &__flush_work);
	while (__flush_work.on_queue || __flush_work.running) {
		/* poll: the wakeup from __flush_work_func() can be missed */
		tsleep(&__flush_work, 0, "flshwq", 1);
	}
}

/*
 * Wait until a work item is done (has been executed)
 * Return true if this function had to wait, and false otherwise
 */
bool
flush_work(struct work_struct *work)
{
	int ret = false;

	/* XXX: probably unreliable */
	while (work->on_queue || work->running) {
		ret = true;
		/* XXX: use something more intelligent than tsleep() */
		tsleep(&flush_work, 0, "flshwrk", 1);
	}

	return ret;
}

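/*
 * Common helper for the cancel_*() functions: if the work item is still
 * queued, unlink it from its worker; if it is currently running, mark it
 * canceled and, when sync_wait is true, wait for the worker to finish it.
 * Returns true if the work item was queued or running.
 */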
static inline bool
_cancel_work(struct work_struct *work, bool sync_wait)
{
	struct workqueue_worker *worker;
	bool ret;

	ret = false;

	for (;;) {
		if (work->on_queue) {
			worker = work->worker;
			if (worker == NULL)
				continue;
			lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
			if (worker != work->worker || work->on_queue == false) {
				lockmgr(&worker->worker_lock, LK_RELEASE);
				continue;
			}
			STAILQ_REMOVE(&worker->ws_list_head, work,
				      work_struct, ws_entries);
			work->on_queue = false;
			ret = true;
			lockmgr(&worker->worker_lock, LK_RELEASE);
		}
		if (work->running == false)
			break;

		worker = work->worker;
		if (worker == NULL)
			continue;
		lockmgr(&worker->worker_lock, LK_EXCLUSIVE);
		if (worker != work->worker || work->running == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			continue;
		}
		work->canceled = true;
		ret = true;
		if (sync_wait == false) {
			lockmgr(&worker->worker_lock, LK_RELEASE);
			break;
		}
		/* XXX this races */
		lksleep(work, &worker->worker_lock, 0, "wqcan", 1);
		lockmgr(&worker->worker_lock, LK_RELEASE);
		/* retest */
	}

	return ret;
}

/*
 * If work was queued, remove it from the queue and return true.
 * If work was not queued, return false.
 * In any case, wait for work to complete or be removed from the workqueue,
 * callers may free associated data structures after this call.
 */
bool
cancel_work_sync(struct work_struct *work)
{
	return _cancel_work(work, true);
}

/*
 * Return false if work wasn't pending
 * Return true if work was pending and canceled
 */
bool
cancel_delayed_work(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, false);
}

bool
cancel_delayed_work_sync(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;

	work->canceled = true;
	callout_cancel(&dwork->timer);

	return _cancel_work(work, true);
}

bool
delayed_work_pending(struct delayed_work *dw)
{
	/* XXX: possibly wrong if the timer hasn't yet fired */
	return work_pending(&dw->work);
}

void
destroy_work_on_stack(struct work_struct *work)
{
	/* nothing to do in this implementation */
}

void
destroy_delayed_work_on_stack(struct delayed_work *work)
{
	/* nothing to do in this implementation */
}