// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)						\
noinline_for_stack void btrfs_##name(struct work_struct *arg)		\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

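/* Return the fs_info that this workqueue was allocated for. */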
struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

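/* Return the fs_info owning the workqueue that a work item was queued on. */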
struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

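/*
 * Return true if the normal queue has built up more than twice its threshold
 * of pending work items. Queues without a threshold never report congestion.
 */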
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the "thresh == NO_THRESHOLD" case, but that requires
	 * moving the atomic_inc/dec up into thresh_queue_hook and
	 * thresh_exec_hook. Postpone it until someone needs that case
	 * supported.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

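/*
 * Allocate one underlying workqueue. A threshold of 0 selects the default; a
 * threshold below DFT_THRESHOLD disables thresholding and fixes concurrency
 * at limit_active; otherwise concurrency starts at 1 and is grown on demand
 * by thresh_exec_hook().
 */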
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

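/*
 * Allocate a btrfs_workqueue with a normal queue and, if WQ_HIGHPRI is given
 * in @flags, an additional high priority queue.
 */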
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for the threshold, called from btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold, called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * can be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but it doesn't need to be accurate
	 * to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
}

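/*
 * Run the ordered completions of all finished items at the head of the
 * ordered list. @self is the item the current worker is executing; freeing it
 * is deferred until the end so that it cannot be recycled while still in use.
 */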
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	void *wtag;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in normal_work_helper;
		 * this guarantees that the ordered function will see all
		 * updates from the ordinary work function.
		 */
		smp_rmb();

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the
			 * second execution is blocked until the first one
			 * finishes. A work item may be freed and recycled with
			 * the same work function; the workqueue code assumes
			 * that the original work item cannot depend on the
			 * recycled work item in that case (see
			 * find_worker_executing_work()).
			 *
			 * Note that the work of one Btrfs filesystem may depend
			 * on the work of another Btrfs filesystem via, e.g., a
			 * loop device. Therefore, we must not allow the current
			 * work item to be recycled until we are really done,
			 * otherwise we break the above assumption and can
			 * deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held though. Save the work as tag for the
			 * trace event, because the callback could free the
			 * structure.
			 */
			wtag = work;
			work->ordered_free(work);
			trace_btrfs_all_work_done(wq->fs_info, wtag);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		wtag = self;
		self->ordered_free(self);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
	}
}

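/*
 * Common worker body: update the threshold accounting, run the real work
 * function and then process the ordered completions, if any were requested.
 */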
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed
	 *    in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, so that the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

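/*
 * Initialize a work item. @uniq_func is the stub generated by
 * BTRFS_WORK_HELPER(), @func does the actual work, and the optional
 * @ordered_func/@ordered_free run in queueing order once @func has finished.
 */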
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

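/*
 * Queue a work item on one underlying workqueue, accounting it against the
 * threshold and appending it to the ordered list when ordering is requested.
 */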
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

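/*
 * Queue a work item, routing it to the high priority queue when the item was
 * marked high priority and such a queue exists.
 */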
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

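/* Destroy the normal queue and, if present, the high priority queue. */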
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

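/*
 * Update the upper limit of concurrent workers; thresholded queues apply the
 * new limit the next time thresh_exec_hook() adjusts max_active.
 */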
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

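/* Mark a work item for the high priority queue before it is queued. */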
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

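/* Wait for all work currently queued on both queues to finish. */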
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}