bdi: Make wb->bdi a proper reference
[sfrench/cifs-2.6.git] mm/backing-dev.c
1
2 #include <linux/wait.h>
3 #include <linux/backing-dev.h>
4 #include <linux/kthread.h>
5 #include <linux/freezer.h>
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/writeback.h>
12 #include <linux/device.h>
13 #include <trace/events/writeback.h>
14
15 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
16
17 struct backing_dev_info noop_backing_dev_info = {
18         .name           = "noop",
19         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
20 };
21 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
22
23 static struct class *bdi_class;
24
25 /*
26  * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
27  * locking.
28  */
29 DEFINE_SPINLOCK(bdi_lock);
30 LIST_HEAD(bdi_list);
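
/*
 * Illustrative reader-side sketch (not part of this file): bdi_list walkers
 * such as wakeup_flusher_threads() do
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		...;
 *	rcu_read_unlock();
 *
 * while updaters take bdi_lock (see bdi_register() and
 * bdi_remove_from_list() below).
 */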
31
32 /* bdi_wq serves all asynchronous writeback tasks */
33 struct workqueue_struct *bdi_wq;
34
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38
39 static struct dentry *bdi_debug_root;
40
41 static void bdi_debug_init(void)
42 {
43         bdi_debug_root = debugfs_create_dir("bdi", NULL);
44 }
45
46 static int bdi_debug_stats_show(struct seq_file *m, void *v)
47 {
48         struct backing_dev_info *bdi = m->private;
49         struct bdi_writeback *wb = &bdi->wb;
50         unsigned long background_thresh;
51         unsigned long dirty_thresh;
52         unsigned long wb_thresh;
53         unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
54         struct inode *inode;
55
56         nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
57         spin_lock(&wb->list_lock);
58         list_for_each_entry(inode, &wb->b_dirty, i_io_list)
59                 nr_dirty++;
60         list_for_each_entry(inode, &wb->b_io, i_io_list)
61                 nr_io++;
62         list_for_each_entry(inode, &wb->b_more_io, i_io_list)
63                 nr_more_io++;
64         list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
65                 if (inode->i_state & I_DIRTY_TIME)
66                         nr_dirty_time++;
67         spin_unlock(&wb->list_lock);
68
69         global_dirty_limits(&background_thresh, &dirty_thresh);
70         wb_thresh = wb_calc_thresh(wb, dirty_thresh);
71
72 #define K(x) ((x) << (PAGE_SHIFT - 10))
73         seq_printf(m,
74                    "BdiWriteback:       %10lu kB\n"
75                    "BdiReclaimable:     %10lu kB\n"
76                    "BdiDirtyThresh:     %10lu kB\n"
77                    "DirtyThresh:        %10lu kB\n"
78                    "BackgroundThresh:   %10lu kB\n"
79                    "BdiDirtied:         %10lu kB\n"
80                    "BdiWritten:         %10lu kB\n"
81                    "BdiWriteBandwidth:  %10lu kBps\n"
82                    "b_dirty:            %10lu\n"
83                    "b_io:               %10lu\n"
84                    "b_more_io:          %10lu\n"
85                    "b_dirty_time:       %10lu\n"
86                    "bdi_list:           %10u\n"
87                    "state:              %10lx\n",
88                    (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
89                    (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
90                    K(wb_thresh),
91                    K(dirty_thresh),
92                    K(background_thresh),
93                    (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
94                    (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
95                    (unsigned long) K(wb->write_bandwidth),
96                    nr_dirty,
97                    nr_io,
98                    nr_more_io,
99                    nr_dirty_time,
100                    !list_empty(&bdi->bdi_list), bdi->wb.state);
101 #undef K
102
103         return 0;
104 }
105
106 static int bdi_debug_stats_open(struct inode *inode, struct file *file)
107 {
108         return single_open(file, bdi_debug_stats_show, inode->i_private);
109 }
110
111 static const struct file_operations bdi_debug_stats_fops = {
112         .open           = bdi_debug_stats_open,
113         .read           = seq_read,
114         .llseek         = seq_lseek,
115         .release        = single_release,
116 };
117
118 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
119 {
120         bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
121         bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
122                                                bdi, &bdi_debug_stats_fops);
123 }
124
125 static void bdi_debug_unregister(struct backing_dev_info *bdi)
126 {
127         debugfs_remove(bdi->debug_stats);
128         debugfs_remove(bdi->debug_dir);
129 }
130 #else
131 static inline void bdi_debug_init(void)
132 {
133 }
134 static inline void bdi_debug_register(struct backing_dev_info *bdi,
135                                       const char *name)
136 {
137 }
138 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
139 {
140 }
141 #endif
142
143 static ssize_t read_ahead_kb_store(struct device *dev,
144                                   struct device_attribute *attr,
145                                   const char *buf, size_t count)
146 {
147         struct backing_dev_info *bdi = dev_get_drvdata(dev);
148         unsigned long read_ahead_kb;
149         ssize_t ret;
150
151         ret = kstrtoul(buf, 10, &read_ahead_kb);
152         if (ret < 0)
153                 return ret;
154
155         bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
156
157         return count;
158 }
159
160 #define K(pages) ((pages) << (PAGE_SHIFT - 10))
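/* e.g. with 4 KiB pages (PAGE_SHIFT == 12): K(32) == 32 << 2 == 128 kB */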
161
162 #define BDI_SHOW(name, expr)                                            \
163 static ssize_t name##_show(struct device *dev,                          \
164                            struct device_attribute *attr, char *page)   \
165 {                                                                       \
166         struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
167                                                                         \
168         return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
169 }                                                                       \
170 static DEVICE_ATTR_RW(name);
171
172 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
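
/*
 * For reference, the invocation above expands (roughly) to:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW() pairs read_ahead_kb_show()/read_ahead_kb_store() into
 * dev_attr_read_ahead_kb, which is listed in bdi_dev_attrs[] below.
 */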
173
174 static ssize_t min_ratio_store(struct device *dev,
175                 struct device_attribute *attr, const char *buf, size_t count)
176 {
177         struct backing_dev_info *bdi = dev_get_drvdata(dev);
178         unsigned int ratio;
179         ssize_t ret;
180
181         ret = kstrtouint(buf, 10, &ratio);
182         if (ret < 0)
183                 return ret;
184
185         ret = bdi_set_min_ratio(bdi, ratio);
186         if (!ret)
187                 ret = count;
188
189         return ret;
190 }
191 BDI_SHOW(min_ratio, bdi->min_ratio)
192
193 static ssize_t max_ratio_store(struct device *dev,
194                 struct device_attribute *attr, const char *buf, size_t count)
195 {
196         struct backing_dev_info *bdi = dev_get_drvdata(dev);
197         unsigned int ratio;
198         ssize_t ret;
199
200         ret = kstrtouint(buf, 10, &ratio);
201         if (ret < 0)
202                 return ret;
203
204         ret = bdi_set_max_ratio(bdi, ratio);
205         if (!ret)
206                 ret = count;
207
208         return ret;
209 }
210 BDI_SHOW(max_ratio, bdi->max_ratio)
211
212 static ssize_t stable_pages_required_show(struct device *dev,
213                                           struct device_attribute *attr,
214                                           char *page)
215 {
216         struct backing_dev_info *bdi = dev_get_drvdata(dev);
217
218         return snprintf(page, PAGE_SIZE-1, "%d\n",
219                         bdi_cap_stable_pages_required(bdi) ? 1 : 0);
220 }
221 static DEVICE_ATTR_RO(stable_pages_required);
222
223 static struct attribute *bdi_dev_attrs[] = {
224         &dev_attr_read_ahead_kb.attr,
225         &dev_attr_min_ratio.attr,
226         &dev_attr_max_ratio.attr,
227         &dev_attr_stable_pages_required.attr,
228         NULL,
229 };
230 ATTRIBUTE_GROUPS(bdi_dev);
231
232 static __init int bdi_class_init(void)
233 {
234         bdi_class = class_create(THIS_MODULE, "bdi");
235         if (IS_ERR(bdi_class))
236                 return PTR_ERR(bdi_class);
237
238         bdi_class->dev_groups = bdi_dev_groups;
239         bdi_debug_init();
240
241         return 0;
242 }
243 postcore_initcall(bdi_class_init);
244
245 static int __init default_bdi_init(void)
246 {
247         int err;
248
249         bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
250                                               WQ_UNBOUND | WQ_SYSFS, 0);
251         if (!bdi_wq)
252                 return -ENOMEM;
253
254         err = bdi_init(&noop_backing_dev_info);
255
256         return err;
257 }
258 subsys_initcall(default_bdi_init);
259
260 /*
261  * This function is used when the first inode for this wb is marked dirty. It
262  * wakes up the corresponding bdi thread which should then take care of the
263  * periodic background write-out of dirty inodes. Since the write-out would
264  * start only 'dirty_writeback_interval' centisecs from now anyway, we just
265  * set up a timer which wakes the bdi thread up later.
266  *
267  * Note, we wouldn't bother setting up the timer, but this function is on the
268  * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
269  * by delaying the wake-up.
270  *
271  * We have to be careful not to postpone flush work if it is scheduled for
272  * earlier. Thus we use queue_delayed_work().
273  */
274 void wb_wakeup_delayed(struct bdi_writeback *wb)
275 {
276         unsigned long timeout;
277
278         timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
279         spin_lock_bh(&wb->work_lock);
280         if (test_bit(WB_registered, &wb->state))
281                 queue_delayed_work(bdi_wq, &wb->dwork, timeout);
282         spin_unlock_bh(&wb->work_lock);
283 }
284
285 /*
286  * Initial write bandwidth: 100 MB/s
287  */
288 #define INIT_BW         (100 << (20 - PAGE_SHIFT))
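/*
 * i.e. (100 * 2^20 bytes/s) / (2^PAGE_SHIFT bytes/page); with 4 KiB pages
 * this comes to 25600 pages/s.
 */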
289
290 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
291                    int blkcg_id, gfp_t gfp)
292 {
293         int i, err;
294
295         memset(wb, 0, sizeof(*wb));
296
297         if (wb != &bdi->wb)
298                 bdi_get(bdi);
299         wb->bdi = bdi;
300         wb->last_old_flush = jiffies;
301         INIT_LIST_HEAD(&wb->b_dirty);
302         INIT_LIST_HEAD(&wb->b_io);
303         INIT_LIST_HEAD(&wb->b_more_io);
304         INIT_LIST_HEAD(&wb->b_dirty_time);
305         spin_lock_init(&wb->list_lock);
306
307         wb->bw_time_stamp = jiffies;
308         wb->balanced_dirty_ratelimit = INIT_BW;
309         wb->dirty_ratelimit = INIT_BW;
310         wb->write_bandwidth = INIT_BW;
311         wb->avg_write_bandwidth = INIT_BW;
312
313         spin_lock_init(&wb->work_lock);
314         INIT_LIST_HEAD(&wb->work_list);
315         INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
316         wb->dirty_sleep = jiffies;
317
318         wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
319         if (!wb->congested) {
320                 err = -ENOMEM;
321                 goto out_put_bdi;
322         }
323
324         err = fprop_local_init_percpu(&wb->completions, gfp);
325         if (err)
326                 goto out_put_cong;
327
328         for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
329                 err = percpu_counter_init(&wb->stat[i], 0, gfp);
330                 if (err)
331                         goto out_destroy_stat;
332         }
333
334         return 0;
335
336 out_destroy_stat:
337         while (i--)
338                 percpu_counter_destroy(&wb->stat[i]);
339         fprop_local_destroy_percpu(&wb->completions);
340 out_put_cong:
341         wb_congested_put(wb->congested);
342 out_put_bdi:
343         if (wb != &bdi->wb)
344                 bdi_put(bdi);
345         return err;
346 }
347
348 /*
349  * Shut down a wb: make sure no new work is queued and drain any pending work
350  */
351 static void wb_shutdown(struct bdi_writeback *wb)
352 {
353         /* Make sure nobody queues further work */
354         spin_lock_bh(&wb->work_lock);
355         if (!test_and_clear_bit(WB_registered, &wb->state)) {
356                 spin_unlock_bh(&wb->work_lock);
357                 return;
358         }
359         spin_unlock_bh(&wb->work_lock);
360
361         /*
362          * Drain work list and shutdown the delayed_work.  !WB_registered
363          * tells wb_workfn() that @wb is dying and its work_list needs to
364          * be drained no matter what.
365          */
366         mod_delayed_work(bdi_wq, &wb->dwork, 0);
367         flush_delayed_work(&wb->dwork);
368         WARN_ON(!list_empty(&wb->work_list));
369 }
370
371 static void wb_exit(struct bdi_writeback *wb)
372 {
373         int i;
374
375         WARN_ON(delayed_work_pending(&wb->dwork));
376
377         for (i = 0; i < NR_WB_STAT_ITEMS; i++)
378                 percpu_counter_destroy(&wb->stat[i]);
379
380         fprop_local_destroy_percpu(&wb->completions);
381         wb_congested_put(wb->congested);
382         if (wb != &wb->bdi->wb)
383                 bdi_put(wb->bdi);
384 }
385
386 #ifdef CONFIG_CGROUP_WRITEBACK
387
388 #include <linux/memcontrol.h>
389
390 /*
391  * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
392  * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
393  * protected.  cgwb_release_wait is used to wait for the completion of cgwb
394  * releases from the bdi destruction path.
395  */
396 static DEFINE_SPINLOCK(cgwb_lock);
397 static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);
398
399 /**
400  * wb_congested_get_create - get or create a wb_congested
401  * @bdi: associated bdi
402  * @blkcg_id: ID of the associated blkcg
403  * @gfp: allocation mask
404  *
405  * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
406  * The returned wb_congested has its reference count incremented.  Returns
407  * NULL on failure.
408  */
409 struct bdi_writeback_congested *
410 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
411 {
412         struct bdi_writeback_congested *new_congested = NULL, *congested;
413         struct rb_node **node, *parent;
414         unsigned long flags;
415 retry:
416         spin_lock_irqsave(&cgwb_lock, flags);
417
418         node = &bdi->cgwb_congested_tree.rb_node;
419         parent = NULL;
420
421         while (*node != NULL) {
422                 parent = *node;
423                 congested = rb_entry(parent, struct bdi_writeback_congested,
424                                      rb_node);
425                 if (congested->blkcg_id < blkcg_id)
426                         node = &parent->rb_left;
427                 else if (congested->blkcg_id > blkcg_id)
428                         node = &parent->rb_right;
429                 else
430                         goto found;
431         }
432
433         if (new_congested) {
434                 /* !found and storage for new one already allocated, insert */
435                 congested = new_congested;
436                 new_congested = NULL;
437                 rb_link_node(&congested->rb_node, parent, node);
438                 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
439                 goto found;
440         }
441
442         spin_unlock_irqrestore(&cgwb_lock, flags);
443
444         /* allocate storage for new one and retry */
445         new_congested = kzalloc(sizeof(*new_congested), gfp);
446         if (!new_congested)
447                 return NULL;
448
449         atomic_set(&new_congested->refcnt, 0);
450         new_congested->__bdi = bdi;
451         new_congested->blkcg_id = blkcg_id;
452         goto retry;
453
454 found:
455         atomic_inc(&congested->refcnt);
456         spin_unlock_irqrestore(&cgwb_lock, flags);
457         kfree(new_congested);
458         return congested;
459 }
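
/*
 * Note for readers: every successful wb_congested_get_create() is balanced
 * by wb_congested_put() below; wb_init() and wb_exit() in this file form the
 * canonical pair.  The unlock/allocate/retry loop above exists because the
 * rb-tree is searched under cgwb_lock while the new node may need a
 * (possibly sleeping) allocation.
 */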
460
461 /**
462  * wb_congested_put - put a wb_congested
463  * @congested: wb_congested to put
464  *
465  * Put @congested and destroy it if the refcnt reaches zero.
466  */
467 void wb_congested_put(struct bdi_writeback_congested *congested)
468 {
469         unsigned long flags;
470
471         local_irq_save(flags);
472         if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
473                 local_irq_restore(flags);
474                 return;
475         }
476
477         /* bdi might already have been destroyed leaving @congested unlinked */
478         if (congested->__bdi) {
479                 rb_erase(&congested->rb_node,
480                          &congested->__bdi->cgwb_congested_tree);
481                 congested->__bdi = NULL;
482         }
483
484         spin_unlock_irqrestore(&cgwb_lock, flags);
485         kfree(congested);
486 }
487
488 static void cgwb_release_workfn(struct work_struct *work)
489 {
490         struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
491                                                 release_work);
492         struct backing_dev_info *bdi = wb->bdi;
493
494         spin_lock_irq(&cgwb_lock);
495         list_del_rcu(&wb->bdi_node);
496         spin_unlock_irq(&cgwb_lock);
497
498         wb_shutdown(wb);
499
500         css_put(wb->memcg_css);
501         css_put(wb->blkcg_css);
502
503         fprop_local_destroy_percpu(&wb->memcg_completions);
504         percpu_ref_exit(&wb->refcnt);
505         wb_exit(wb);
506         kfree_rcu(wb, rcu);
507
508         if (atomic_dec_and_test(&bdi->usage_cnt))
509                 wake_up_all(&cgwb_release_wait);
510 }
511
512 static void cgwb_release(struct percpu_ref *refcnt)
513 {
514         struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
515                                                 refcnt);
516         schedule_work(&wb->release_work);
517 }
518
519 static void cgwb_kill(struct bdi_writeback *wb)
520 {
521         lockdep_assert_held(&cgwb_lock);
522
523         WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
524         list_del(&wb->memcg_node);
525         list_del(&wb->blkcg_node);
526         percpu_ref_kill(&wb->refcnt);
527 }
528
529 static int cgwb_create(struct backing_dev_info *bdi,
530                        struct cgroup_subsys_state *memcg_css, gfp_t gfp)
531 {
532         struct mem_cgroup *memcg;
533         struct cgroup_subsys_state *blkcg_css;
534         struct blkcg *blkcg;
535         struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
536         struct bdi_writeback *wb;
537         unsigned long flags;
538         int ret = 0;
539
540         memcg = mem_cgroup_from_css(memcg_css);
541         blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
542         blkcg = css_to_blkcg(blkcg_css);
543         memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
544         blkcg_cgwb_list = &blkcg->cgwb_list;
545
546         /* look up again under lock and discard on blkcg mismatch */
547         spin_lock_irqsave(&cgwb_lock, flags);
548         wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
549         if (wb && wb->blkcg_css != blkcg_css) {
550                 cgwb_kill(wb);
551                 wb = NULL;
552         }
553         spin_unlock_irqrestore(&cgwb_lock, flags);
554         if (wb)
555                 goto out_put;
556
557         /* need to create a new one */
558         wb = kmalloc(sizeof(*wb), gfp);
559         if (!wb)
560                 return -ENOMEM;
561
562         ret = wb_init(wb, bdi, blkcg_css->id, gfp);
563         if (ret)
564                 goto err_free;
565
566         ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
567         if (ret)
568                 goto err_wb_exit;
569
570         ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
571         if (ret)
572                 goto err_ref_exit;
573
574         wb->memcg_css = memcg_css;
575         wb->blkcg_css = blkcg_css;
576         INIT_WORK(&wb->release_work, cgwb_release_workfn);
577         set_bit(WB_registered, &wb->state);
578
579         /*
580          * The root wb determines the registered state of the whole bdi and
581          * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
582          * whether they're still online.  Don't link @wb if any is dead.
583          * See wb_memcg_offline() and wb_blkcg_offline().
584          */
585         ret = -ENODEV;
586         spin_lock_irqsave(&cgwb_lock, flags);
587         if (test_bit(WB_registered, &bdi->wb.state) &&
588             blkcg_cgwb_list->next && memcg_cgwb_list->next) {
589                 /* we might have raced another instance of this function */
590                 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
591                 if (!ret) {
592                         atomic_inc(&bdi->usage_cnt);
593                         list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
594                         list_add(&wb->memcg_node, memcg_cgwb_list);
595                         list_add(&wb->blkcg_node, blkcg_cgwb_list);
596                         css_get(memcg_css);
597                         css_get(blkcg_css);
598                 }
599         }
600         spin_unlock_irqrestore(&cgwb_lock, flags);
601         if (ret) {
602                 if (ret == -EEXIST)
603                         ret = 0;
604                 goto err_fprop_exit;
605         }
606         goto out_put;
607
608 err_fprop_exit:
609         fprop_local_destroy_percpu(&wb->memcg_completions);
610 err_ref_exit:
611         percpu_ref_exit(&wb->refcnt);
612 err_wb_exit:
613         wb_exit(wb);
614 err_free:
615         kfree(wb);
616 out_put:
617         css_put(blkcg_css);
618         return ret;
619 }
620
621 /**
622  * wb_get_create - get wb for a given memcg, create if necessary
623  * @bdi: target bdi
624  * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
625  * @gfp: allocation mask to use
626  *
627  * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
628  * create one.  The returned wb has its refcount incremented.
629  *
630  * This function uses css_get() on @memcg_css and thus expects its refcnt
631  * to be positive on invocation.  IOW, rcu_read_lock() protection on
632  * @memcg_css isn't enough; take a reference (e.g. css_tryget()) first.
633  *
634  * A wb is keyed by its associated memcg.  As blkcg implicitly enables
635  * memcg on the default hierarchy, memcg association is guaranteed to be
636  * more specific (equal to or a descendant of the associated blkcg) and thus can
637  * identify both the memcg and blkcg associations.
638  *
639  * Because the blkcg associated with a memcg may change as blkcg is enabled
640  * and disabled closer to root in the hierarchy, each wb keeps track of
641  * both the memcg and blkcg associated with it and verifies the blkcg on
642  * each lookup.  On mismatch, the existing wb is discarded and a new one is
643  * created.
644  */
645 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
646                                     struct cgroup_subsys_state *memcg_css,
647                                     gfp_t gfp)
648 {
649         struct bdi_writeback *wb;
650
651         might_sleep_if(gfpflags_allow_blocking(gfp));
652
653         if (!memcg_css->parent)
654                 return &bdi->wb;
655
656         do {
657                 rcu_read_lock();
658                 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
659                 if (wb) {
660                         struct cgroup_subsys_state *blkcg_css;
661
662                         /* see whether the blkcg association has changed */
663                         blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
664                                                      &io_cgrp_subsys);
665                         if (unlikely(wb->blkcg_css != blkcg_css ||
666                                      !wb_tryget(wb)))
667                                 wb = NULL;
668                         css_put(blkcg_css);
669                 }
670                 rcu_read_unlock();
671         } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
672
673         return wb;
674 }
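
/*
 * Illustrative sketch (hypothetical caller, hence under "#if 0"): pin a memcg
 * css, look up or create the matching wb and drop the css again, the way
 * __inode_attach_wb() does.  The returned wb is released with wb_put().
 */
#if 0
static struct bdi_writeback *example_current_wb(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_get_css(current, memory_cgrp_id);
	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	css_put(memcg_css);

	return wb;	/* NULL on allocation failure */
}
#endif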
675
676 static int cgwb_bdi_init(struct backing_dev_info *bdi)
677 {
678         int ret;
679
680         INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
681         bdi->cgwb_congested_tree = RB_ROOT;
682         atomic_set(&bdi->usage_cnt, 1);
683
684         ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
685         if (!ret) {
686                 bdi->wb.memcg_css = &root_mem_cgroup->css;
687                 bdi->wb.blkcg_css = blkcg_root_css;
688         }
689         return ret;
690 }
691
692 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
693 {
694         struct radix_tree_iter iter;
695         void **slot;
696
697         WARN_ON(test_bit(WB_registered, &bdi->wb.state));
698
699         spin_lock_irq(&cgwb_lock);
700         radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
701                 cgwb_kill(*slot);
702         spin_unlock_irq(&cgwb_lock);
703
704         /*
705          * All cgwb's must be shutdown and released before returning.  Drain
706          * the usage counter to wait for all cgwb's ever created on @bdi.
707          */
708         atomic_dec(&bdi->usage_cnt);
709         wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
710         /*
711          * Grab back our reference so that we hold it when @bdi gets
712          * re-registered.
713          */
714         atomic_inc(&bdi->usage_cnt);
715 }
716
717 /**
718  * wb_memcg_offline - kill all wb's associated with a memcg being offlined
719  * @memcg: memcg being offlined
720  *
721  * Also prevents creation of any new wb's associated with @memcg.
722  */
723 void wb_memcg_offline(struct mem_cgroup *memcg)
724 {
725         LIST_HEAD(to_destroy);
726         struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
727         struct bdi_writeback *wb, *next;
728
729         spin_lock_irq(&cgwb_lock);
730         list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
731                 cgwb_kill(wb);
732         memcg_cgwb_list->next = NULL;   /* prevent new wb's */
733         spin_unlock_irq(&cgwb_lock);
734 }
735
736 /**
737  * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
738  * @blkcg: blkcg being offlined
739  *
740  * Also prevents creation of any new wb's associated with @blkcg.
741  */
742 void wb_blkcg_offline(struct blkcg *blkcg)
743 {
744         LIST_HEAD(to_destroy);
745         struct bdi_writeback *wb, *next;
746
747         spin_lock_irq(&cgwb_lock);
748         list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
749                 cgwb_kill(wb);
750         blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
751         spin_unlock_irq(&cgwb_lock);
752 }
753
754 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
755 {
756         struct rb_node *rbn;
757
758         spin_lock_irq(&cgwb_lock);
759         while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
760                 struct bdi_writeback_congested *congested =
761                         rb_entry(rbn, struct bdi_writeback_congested, rb_node);
762
763                 rb_erase(rbn, &bdi->cgwb_congested_tree);
764                 congested->__bdi = NULL;        /* mark @congested unlinked */
765         }
766         spin_unlock_irq(&cgwb_lock);
767 }
768
769 #else   /* CONFIG_CGROUP_WRITEBACK */
770
771 static int cgwb_bdi_init(struct backing_dev_info *bdi)
772 {
773         int err;
774
775         bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
776         if (!bdi->wb_congested)
777                 return -ENOMEM;
778
779         atomic_set(&bdi->wb_congested->refcnt, 1);
780
781         err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
782         if (err) {
783                 wb_congested_put(bdi->wb_congested);
784                 return err;
785         }
786         return 0;
787 }
788
789 static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
790
791 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
792 {
793         wb_congested_put(bdi->wb_congested);
794 }
795
796 #endif  /* CONFIG_CGROUP_WRITEBACK */
797
798 int bdi_init(struct backing_dev_info *bdi)
799 {
800         int ret;
801
802         bdi->dev = NULL;
803
804         kref_init(&bdi->refcnt);
805         bdi->min_ratio = 0;
806         bdi->max_ratio = 100;
807         bdi->max_prop_frac = FPROP_FRAC_BASE;
808         INIT_LIST_HEAD(&bdi->bdi_list);
809         INIT_LIST_HEAD(&bdi->wb_list);
810         init_waitqueue_head(&bdi->wb_waitq);
811
812         ret = cgwb_bdi_init(bdi);
813
814         list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
815
816         return ret;
817 }
818 EXPORT_SYMBOL(bdi_init);
819
820 struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
821 {
822         struct backing_dev_info *bdi;
823
824         bdi = kmalloc_node(sizeof(struct backing_dev_info),
825                            gfp_mask | __GFP_ZERO, node_id);
826         if (!bdi)
827                 return NULL;
828
829         if (bdi_init(bdi)) {
830                 kfree(bdi);
831                 return NULL;
832         }
833         return bdi;
834 }
835
836 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
837                 const char *fmt, ...)
838 {
839         va_list args;
840         struct device *dev;
841
842         if (bdi->dev)   /* The driver needs to use separate queues per device */
843                 return 0;
844
845         va_start(args, fmt);
846         dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
847         va_end(args);
848         if (IS_ERR(dev))
849                 return PTR_ERR(dev);
850
851         bdi->dev = dev;
852
853         bdi_debug_register(bdi, dev_name(dev));
854         set_bit(WB_registered, &bdi->wb.state);
855
856         spin_lock_bh(&bdi_lock);
857         list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
858         spin_unlock_bh(&bdi_lock);
859
860         trace_writeback_bdi_register(bdi);
861         return 0;
862 }
863 EXPORT_SYMBOL(bdi_register);
864
865 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
866 {
867         return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
868 }
869 EXPORT_SYMBOL(bdi_register_dev);
870
871 int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
872 {
873         int rc;
874
875         rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
876                         MINOR(owner->devt));
877         if (rc)
878                 return rc;
879         /* A previously set owner would be leaked; warn if that ever happens */
880         WARN_ON(bdi->owner);
881         bdi->owner = owner;
882         get_device(owner);
883         return 0;
884 }
885 EXPORT_SYMBOL(bdi_register_owner);
886
887 /*
888  * Remove bdi from bdi_list, and ensure that it is no longer visible
889  */
890 static void bdi_remove_from_list(struct backing_dev_info *bdi)
891 {
892         spin_lock_bh(&bdi_lock);
893         list_del_rcu(&bdi->bdi_list);
894         spin_unlock_bh(&bdi_lock);
895
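        /*
         * Make sure any RCU-protected bdi_list walker that may still see
         * @bdi has finished before the caller tears it down.
         */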
896         synchronize_rcu_expedited();
897 }
898
899 void bdi_unregister(struct backing_dev_info *bdi)
900 {
901         /* make sure nobody finds us on the bdi_list anymore */
902         bdi_remove_from_list(bdi);
903         wb_shutdown(&bdi->wb);
904         cgwb_bdi_destroy(bdi);
905
906         if (bdi->dev) {
907                 bdi_debug_unregister(bdi);
908                 device_unregister(bdi->dev);
909                 bdi->dev = NULL;
910         }
911
912         if (bdi->owner) {
913                 put_device(bdi->owner);
914                 bdi->owner = NULL;
915         }
916 }
917
918 static void bdi_exit(struct backing_dev_info *bdi)
919 {
920         WARN_ON_ONCE(bdi->dev);
921         wb_exit(&bdi->wb);
922         cgwb_bdi_exit(bdi);
923 }
924
925 static void release_bdi(struct kref *ref)
926 {
927         struct backing_dev_info *bdi =
928                         container_of(ref, struct backing_dev_info, refcnt);
929
930         bdi_exit(bdi);
931         kfree(bdi);
932 }
933
934 void bdi_put(struct backing_dev_info *bdi)
935 {
936         kref_put(&bdi->refcnt, release_bdi);
937 }
938
939 void bdi_destroy(struct backing_dev_info *bdi)
940 {
941         bdi_unregister(bdi);
942         bdi_exit(bdi);
943 }
944 EXPORT_SYMBOL(bdi_destroy);
945
946 /*
947  * For use from filesystems to quickly init and register a bdi associated
948  * with dirty writeback
949  */
950 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
951 {
952         int err;
953
954         bdi->name = name;
955         bdi->capabilities = 0;
956         err = bdi_init(bdi);
957         if (err)
958                 return err;
959
960         err = bdi_register(bdi, NULL, "%.28s-%ld", name,
961                            atomic_long_inc_return(&bdi_seq));
962         if (err) {
963                 bdi_destroy(bdi);
964                 return err;
965         }
966
967         return 0;
968 }
969 EXPORT_SYMBOL(bdi_setup_and_register);
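
/*
 * Illustrative sketch (hypothetical filesystem, hence under "#if 0"): a
 * simple fill_super() embeds a bdi in its per-sb info, registers it with
 * bdi_setup_and_register() and points sb->s_bdi at it; the matching
 * put_super()/kill_sb() path then calls bdi_destroy().
 */
#if 0
struct examplefs_sb_info {
	struct backing_dev_info bdi;
	/* ... other per-superblock state ... */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;
	int err;

	err = bdi_setup_and_register(&sbi->bdi, "examplefs");
	if (err)
		return err;

	sb->s_bdi = &sbi->bdi;	/* writeback for this sb now uses sbi->bdi */
	return 0;
}
#endif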
970
971 static wait_queue_head_t congestion_wqh[2] = {
972                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
973                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
974         };
975 static atomic_t nr_wb_congested[2];
976
977 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
978 {
979         wait_queue_head_t *wqh = &congestion_wqh[sync];
980         enum wb_congested_state bit;
981
982         bit = sync ? WB_sync_congested : WB_async_congested;
983         if (test_and_clear_bit(bit, &congested->state))
984                 atomic_dec(&nr_wb_congested[sync]);
985         smp_mb__after_atomic();
986         if (waitqueue_active(wqh))
987                 wake_up(wqh);
988 }
989 EXPORT_SYMBOL(clear_wb_congested);
990
991 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
992 {
993         enum wb_congested_state bit;
994
995         bit = sync ? WB_sync_congested : WB_async_congested;
996         if (!test_and_set_bit(bit, &congested->state))
997                 atomic_inc(&nr_wb_congested[sync]);
998 }
999 EXPORT_SYMBOL(set_wb_congested);
1000
1001 /**
1002  * congestion_wait - wait for a backing_dev to become uncongested
1003  * @sync: SYNC or ASYNC IO
1004  * @timeout: timeout in jiffies
1005  *
1006  * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1007  * write congestion.  If no backing_devs are congested then just wait for the
1008  * next write to be completed.
1009  */
1010 long congestion_wait(int sync, long timeout)
1011 {
1012         long ret;
1013         unsigned long start = jiffies;
1014         DEFINE_WAIT(wait);
1015         wait_queue_head_t *wqh = &congestion_wqh[sync];
1016
1017         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1018         ret = io_schedule_timeout(timeout);
1019         finish_wait(wqh, &wait);
1020
1021         trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1022                                         jiffies_to_usecs(jiffies - start));
1023
1024         return ret;
1025 }
1026 EXPORT_SYMBOL(congestion_wait);
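
/*
 * Illustrative sketch (hypothetical caller, hence under "#if 0"): the classic
 * use is a short back-off when writeback cannot keep up, as direct reclaim
 * has historically done.
 */
#if 0
static void example_writeback_backoff(void)
{
	/* sleep up to 100ms or until some backing device leaves congestion */
	congestion_wait(BLK_RW_ASYNC, HZ / 10);
}
#endif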
1027
1028 /**
1029  * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1030  * @pgdat: A pgdat to check if it is heavily congested
1031  * @sync: SYNC or ASYNC IO
1032  * @timeout: timeout in jiffies
1033  *
1034  * If some backing_dev is congested and the given @pgdat has experienced
1035  * recent congestion, this waits for up to @timeout jiffies for either a
1036  * BDI to exit congestion on the given @sync queue or for a write to
1037  * complete.
1038  *
1039  * In the absence of pgdat congestion, this calls cond_resched() to yield
1040  * the processor if necessary but does not sleep.
1041  *
1042  * The return value is 0 if the sleep is for the full timeout. Otherwise,
1043  * it is the number of jiffies that were still remaining when the function
1044  * returned. return_value == timeout implies the function did not sleep.
1045  */
1046 long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
1047 {
1048         long ret;
1049         unsigned long start = jiffies;
1050         DEFINE_WAIT(wait);
1051         wait_queue_head_t *wqh = &congestion_wqh[sync];
1052
1053         /*
1054          * If there is no congestion, or heavy congestion is not being
1055          * encountered in the current pgdat, yield if necessary instead
1056          * of sleeping on the congestion queue
1057          */
1058         if (atomic_read(&nr_wb_congested[sync]) == 0 ||
1059             !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
1060                 cond_resched();
1061
1062                 /* In case we scheduled, work out time remaining */
1063                 ret = timeout - (jiffies - start);
1064                 if (ret < 0)
1065                         ret = 0;
1066
1067                 goto out;
1068         }
1069
1070         /* Sleep until uncongested or a write happens */
1071         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1072         ret = io_schedule_timeout(timeout);
1073         finish_wait(wqh, &wait);
1074
1075 out:
1076         trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1077                                         jiffies_to_usecs(jiffies - start));
1078
1079         return ret;
1080 }
1081 EXPORT_SYMBOL(wait_iff_congested);
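
/*
 * Illustrative note: direct reclaim in mm/vmscan.c is the typical caller,
 * e.g. wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10), so only nodes that
 * were recently marked PGDAT_CONGESTED actually sleep here.
 */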
1082
1083 int pdflush_proc_obsolete(struct ctl_table *table, int write,
1084                         void __user *buffer, size_t *lenp, loff_t *ppos)
1085 {
1086         char kbuf[] = "0\n";
1087
1088         if (*ppos || *lenp < sizeof(kbuf)) {
1089                 *lenp = 0;
1090                 return 0;
1091         }
1092
1093         if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
1094                 return -EFAULT;
1095         pr_warn_once("%s exported in /proc is scheduled for removal\n",
1096                      table->procname);
1097
1098         *lenp = 2;
1099         *ppos += *lenp;
1100         return 2;
1101 }