// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

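/*
 * Bits set in btrfs_work::flags to track the lifetime of a work item:
 * WORK_DONE_BIT is set once work->func() has finished, WORK_ORDER_DONE_BIT
 * is set before ordered_func() runs so the ordered step happens only once,
 * and WORK_HIGH_PRIO_BIT marks a work item that should go to the high
 * priority sub-queue.
 */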
enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
	WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

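/*
 * Internal half of a btrfs_workqueue: one kernel workqueue plus the state
 * needed for ordered execution and on-demand concurrency (thresholding).
 */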
struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

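/*
 * Exposed workqueue: a normal sub-queue, plus an optional high priority
 * sub-queue that is only allocated when WQ_HIGHPRI was requested.
 */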
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

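/*
 * Generate the work_struct entry point for a helper: it recovers the
 * enclosing btrfs_work from the work_struct and hands it to
 * normal_work_helper().
 */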
#define BTRFS_WORK_HELPER(name)					\
noinline_for_stack void btrfs_##name(struct work_struct *arg)		\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

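/*
 * Return true if the normal sub-queue is considered congested, i.e. it has
 * more than twice its threshold of pending works.
 */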
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the thresh == NO_THRESHOLD case, but it would require
	 * moving the atomic_inc/dec up into thresh_queue/exec_hook. Let's
	 * postpone that until someone needs support for that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

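/*
 * Allocate one sub-queue. A thresh of 0 selects the default threshold;
 * anything below DFT_THRESHOLD disables thresholding and starts with
 * limit_active workers, otherwise the queue starts at max_active == 1 and
 * grows on demand from thresh_exec_hook().
 */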
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For a low threshold, disabling thresholding is the better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For a thresholdable wq, let its concurrency grow on demand.
		 * Use a minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
						 ret->current_active, name);
	else
		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
						 ret->current_active, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

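/*
 * Allocate a btrfs_workqueue. The normal sub-queue is always created; when
 * WQ_HIGHPRI is set in @flags a second, high priority sub-queue is created
 * as well and works flagged with WORK_HIGH_PRIO_BIT are routed to it.
 */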
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook WILL be called from IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active() may be
 * called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active().
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but it doesn't need to be perfectly
	 * accurate to calculate the new current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
}

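/*
 * Run the ordered_func() of completed works in queue order. Each item stays
 * on ordered_list until its ordered step has run, acting as a barrier for
 * the items behind it; ordered_free() is called only after the item has
 * been removed from the list.
 */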
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		void *wtag;

		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with the
		 * lock held though. Save the work as a tag for the trace
		 * event, because the callback could free the structure.
		 */
		wtag = work;
		work->ordered_free(work);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
	}
	spin_unlock_irqrestore(lock, flags);
}

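/*
 * Common body of all the generated helpers: run work->func(), then, if the
 * work takes part in ordered execution, mark it done and drain the ordered
 * list.
 */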
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed in
	 *    other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

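/*
 * Initialize a btrfs_work. @uniq_func is the per-helper wrapper generated
 * by BTRFS_WORK_HELPER(); @ordered_func and @ordered_free may be NULL when
 * no ordered execution is needed.
 */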
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

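/*
 * Queue a work on one sub-queue: bump the pending counter, add the item to
 * the ordered list if it has an ordered_func, then hand it to the kernel
 * workqueue.
 */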
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

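/*
 * Queue a work, routing it to the high priority sub-queue when the work has
 * WORK_HIGH_PRIO_BIT set and such a sub-queue exists.
 */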
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

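/*
 * Adjust the concurrency limit of both sub-queues. Only limit_active is
 * updated here; for thresholded queues thresh_exec_hook() adjusts the
 * underlying max_active towards the new limit as works are executed.
 */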
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}