// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

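/*
 * Btrfs wrapper around the kernel workqueues: a btrfs_workqueue owns a
 * normal workqueue and, when WQ_HIGHPRI is requested, a second high
 * priority one.  Work items may carry an ordered_func/ordered_free pair,
 * which are run strictly in queueing order once the main function has
 * completed.  A simple thresholding scheme grows and shrinks max_active
 * of the underlying workqueue based on the number of pending items.
 */
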
enum {
        WORK_DONE_BIT,
        WORK_ORDER_DONE_BIT,
        WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
        struct workqueue_struct *normal_wq;

        /* File system this workqueue services */
        struct btrfs_fs_info *fs_info;

        /* List head pointing to ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding related variables */
        atomic_t pending;

        /* Upper limit of concurrent workers */
        int limit_active;

        /* Current number of concurrent workers */
        int current_active;

        /* Threshold to change current_active */
        int thresh;
        unsigned int count;
        spinlock_t thres_lock;
};

struct btrfs_workqueue {
        struct __btrfs_workqueue *normal;
        struct __btrfs_workqueue *high;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
        return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
        return work->wq->fs_info;
}

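/*
 * Return true if the normal workqueue has accumulated more than twice its
 * threshold of pending work items.
 */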
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
        /*
         * We could compare wq->normal->pending with num_online_cpus()
         * to support the "thresh == NO_THRESHOLD" case, but it would
         * require moving the atomic_inc/dec up into thresh_queue/exec_hook.
         * Let's postpone that until someone actually needs it.
         */
        if (wq->normal->thresh == NO_THRESHOLD)
                return false;

        return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

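/*
 * Allocate the internal workqueue representation.  A @thresh of 0 selects
 * DFT_THRESHOLD; anything below DFT_THRESHOLD disables thresholding and
 * the workqueue runs with @limit_active from the start.
 */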
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
                        unsigned int flags, int limit_active, int thresh)
{
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->fs_info = fs_info;
        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For low threshold, disabling threshold is a better choice */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For threshold-able wq, let its concurrency grow on demand.
                 * Use minimal max_active at alloc time to reduce resource
                 * usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        if (flags & WQ_HIGHPRI)
                ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
                                                 ret->current_active, name);
        else
                ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
                                                 ret->current_active, name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
        return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

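/*
 * Allocate a btrfs_workqueue.  The normal queue is always created; if
 * WQ_HIGHPRI is set in @flags, a second high priority queue is created as
 * well, and work items marked with btrfs_set_work_high_priority() are
 * routed to it by btrfs_queue_work().
 */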
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
                                              const char *name,
                                              unsigned int flags,
                                              int limit_active,
                                              int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->normal = __btrfs_alloc_workqueue(fs_info, name,
                                              flags & ~WQ_HIGHPRI,
                                              limit_active, thresh);
        if (!ret->normal) {
                kfree(ret);
                return NULL;
        }

        if (flags & WQ_HIGHPRI) {
                ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
                                                    limit_active, thresh);
                if (!ret->high) {
                        __btrfs_destroy_workqueue(ret->normal);
                        kfree(ret);
                        return NULL;
                }
        }
        return ret;
}

/*
 * Hook for the threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * can be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active.
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change later, but it's OK since we don't need it to
         * be that accurate to calculate new_current_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change) {
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
        }
}

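/*
 * Run the ordered_func of every work item at the head of the ordered list
 * whose main function has already completed (WORK_DONE_BIT set).  Items are
 * processed strictly in queueing order; an unfinished item acts as a
 * barrier for everything queued behind it.
 */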
static void run_ordered_work(struct __btrfs_workqueue *wq,
                             struct btrfs_work *self)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;
        bool free_self = false;

        while (1) {
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /*
                 * we are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* now take the lock again and drop our item from the list */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                if (work == self) {
                        /*
                         * This is the work item that the worker is currently
                         * executing.
                         *
                         * The kernel workqueue code guarantees non-reentrancy
                         * of work items. I.e., if a work item with the same
                         * address and work function is queued twice, the second
                         * execution is blocked until the first one finishes. A
                         * work item may be freed and recycled with the same
                         * work function; the workqueue code assumes that the
                         * original work item cannot depend on the recycled work
                         * item in that case (see find_worker_executing_work()).
                         *
                         * Note that different types of Btrfs work can depend on
                         * each other, and one type of work on one Btrfs
                         * filesystem may even depend on the same type of work
                         * on another Btrfs filesystem via, e.g., a loop device.
                         * Therefore, we must not allow the current work item to
                         * be recycled until we are really done, otherwise we
                         * break the above assumption and can deadlock.
                         */
                        free_self = true;
                } else {
                        /*
                         * We don't want to call the ordered free functions with
                         * the lock held.
                         */
                        work->ordered_free(work);
                        /* NB: work must not be dereferenced past this point. */
                        trace_btrfs_all_work_done(wq->fs_info, work);
                }
        }
        spin_unlock_irqrestore(lock, flags);

        if (free_self) {
                self->ordered_free(self);
                /* NB: self must not be dereferenced past this point. */
                trace_btrfs_all_work_done(wq->fs_info, self);
        }
}

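/*
 * Worker function hooked up by btrfs_init_work().  It runs the threshold
 * hook, calls the work item's main function and then kicks off the ordered
 * pass if an ordered_func was supplied.
 */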
static void btrfs_work_helper(struct work_struct *normal_work)
{
        struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                                               normal_work);
        struct __btrfs_workqueue *wq;
        int need_order = 0;

        /*
         * We must not touch anything inside the work item in the following
         * cases:
         * 1) after work->func(), if it has no ordered_free,
         *    since the struct is freed in work->func();
         * 2) after setting WORK_DONE_BIT,
         *    since the work may be freed by other threads almost instantly.
         * So save everything we need beforehand.
         */
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq, work);
        } else {
                /* NB: work must not be dereferenced past this point. */
                trace_btrfs_all_work_done(wq->fs_info, work);
        }
}

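/*
 * Initialize a work item.  @ordered_func and @ordered_free may be NULL when
 * no ordered processing is needed; when @ordered_func is set, @ordered_free
 * is called once the ordered pass is finished with the item and is expected
 * to free it.
 */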
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
                     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, btrfs_work_helper);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}

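/*
 * Queue a work item on the given internal workqueue, adding it to the
 * ordered list first when it has an ordered_func.
 */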
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                                      struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}

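/*
 * Queue a work item, routing it to the high priority queue when the item
 * was marked high priority and such a queue exists, otherwise to the
 * normal queue.
 */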
void btrfs_queue_work(struct btrfs_workqueue *wq,
                      struct btrfs_work *work)
{
        struct __btrfs_workqueue *dest_wq;

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
                dest_wq = wq->high;
        else
                dest_wq = wq->normal;
        __btrfs_queue_work(dest_wq, work);
}

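/* Destroy the underlying workqueue and free the internal struct. */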
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

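/* Destroy the high priority queue (if any), then the normal queue. */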
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        if (wq->high)
                __btrfs_destroy_workqueue(wq->high);
        __btrfs_destroy_workqueue(wq->normal);
        kfree(wq);
}

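/*
 * Update the concurrency limit for the normal and high priority queues.
 * For thresholded queues the new limit is applied the next time
 * thresh_exec_hook() recalculates max_active.
 */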
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (!wq)
                return;
        wq->normal->limit_active = limit_active;
        if (wq->high)
                wq->high->limit_active = limit_active;
}

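/* Mark a work item for the high priority queue in btrfs_queue_work(). */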
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

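/*
 * Flush the high priority queue (if any) and the normal queue, waiting
 * for queued work to finish.
 */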
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
        if (wq->high)
                flush_workqueue(wq->high->normal_wq);

        flush_workqueue(wq->normal->normal_wq);
}