mm/backing-dev.c
1
2 #include <linux/wait.h>
3 #include <linux/backing-dev.h>
4 #include <linux/kthread.h>
5 #include <linux/freezer.h>
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/writeback.h>
12 #include <linux/device.h>
13 #include <trace/events/writeback.h>
14
15 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
16
17 struct backing_dev_info noop_backing_dev_info = {
18         .name           = "noop",
19         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
20 };
21 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
22
23 static struct class *bdi_class;
24
25 /*
26  * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
27  * locking.
28  */
29 DEFINE_SPINLOCK(bdi_lock);
30 LIST_HEAD(bdi_list);
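/*
 * Illustrative sketch (editorial, not part of the original file): a
 * reader-side walk of bdi_list pairs rcu_read_lock() with the RCU list
 * iterator instead of taking bdi_lock:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 *		... inspect bdi; no sleeping, no list modification ...
 *	}
 *	rcu_read_unlock();
 *
 * Writers take bdi_lock and use list_add_tail_rcu()/list_del_rcu(), as
 * bdi_register() and bdi_remove_from_list() do later in this file.
 */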
31
32 /* bdi_wq serves all asynchronous writeback tasks */
33 struct workqueue_struct *bdi_wq;
34
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38
39 static struct dentry *bdi_debug_root;
40
41 static void bdi_debug_init(void)
42 {
43         bdi_debug_root = debugfs_create_dir("bdi", NULL);
44 }
45
46 static int bdi_debug_stats_show(struct seq_file *m, void *v)
47 {
48         struct backing_dev_info *bdi = m->private;
49         struct bdi_writeback *wb = &bdi->wb;
50         unsigned long background_thresh;
51         unsigned long dirty_thresh;
52         unsigned long wb_thresh;
53         unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
54         struct inode *inode;
55
56         nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
57         spin_lock(&wb->list_lock);
58         list_for_each_entry(inode, &wb->b_dirty, i_io_list)
59                 nr_dirty++;
60         list_for_each_entry(inode, &wb->b_io, i_io_list)
61                 nr_io++;
62         list_for_each_entry(inode, &wb->b_more_io, i_io_list)
63                 nr_more_io++;
64         list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
65                 if (inode->i_state & I_DIRTY_TIME)
66                         nr_dirty_time++;
67         spin_unlock(&wb->list_lock);
68
69         global_dirty_limits(&background_thresh, &dirty_thresh);
70         wb_thresh = wb_calc_thresh(wb, dirty_thresh);
71
72 #define K(x) ((x) << (PAGE_SHIFT - 10))
73         seq_printf(m,
74                    "BdiWriteback:       %10lu kB\n"
75                    "BdiReclaimable:     %10lu kB\n"
76                    "BdiDirtyThresh:     %10lu kB\n"
77                    "DirtyThresh:        %10lu kB\n"
78                    "BackgroundThresh:   %10lu kB\n"
79                    "BdiDirtied:         %10lu kB\n"
80                    "BdiWritten:         %10lu kB\n"
81                    "BdiWriteBandwidth:  %10lu kBps\n"
82                    "b_dirty:            %10lu\n"
83                    "b_io:               %10lu\n"
84                    "b_more_io:          %10lu\n"
85                    "b_dirty_time:       %10lu\n"
86                    "bdi_list:           %10u\n"
87                    "state:              %10lx\n",
88                    (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
89                    (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
90                    K(wb_thresh),
91                    K(dirty_thresh),
92                    K(background_thresh),
93                    (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
94                    (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
95                    (unsigned long) K(wb->write_bandwidth),
96                    nr_dirty,
97                    nr_io,
98                    nr_more_io,
99                    nr_dirty_time,
100                    !list_empty(&bdi->bdi_list), bdi->wb.state);
101 #undef K
102
103         return 0;
104 }
105
106 static int bdi_debug_stats_open(struct inode *inode, struct file *file)
107 {
108         return single_open(file, bdi_debug_stats_show, inode->i_private);
109 }
110
111 static const struct file_operations bdi_debug_stats_fops = {
112         .open           = bdi_debug_stats_open,
113         .read           = seq_read,
114         .llseek         = seq_lseek,
115         .release        = single_release,
116 };
117
118 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
119 {
120         bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
121         bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
122                                                bdi, &bdi_debug_stats_fops);
123 }
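/*
 * Editorial note: with debugfs mounted at /sys/kernel/debug, the entries
 * created above appear as /sys/kernel/debug/bdi/<bdi-name>/stats, e.g.
 * /sys/kernel/debug/bdi/8:0/stats for a bdi registered via bdi_register_dev().
 */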
124
125 static void bdi_debug_unregister(struct backing_dev_info *bdi)
126 {
127         debugfs_remove(bdi->debug_stats);
128         debugfs_remove(bdi->debug_dir);
129 }
130 #else
131 static inline void bdi_debug_init(void)
132 {
133 }
134 static inline void bdi_debug_register(struct backing_dev_info *bdi,
135                                       const char *name)
136 {
137 }
138 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
139 {
140 }
141 #endif
142
143 static ssize_t read_ahead_kb_store(struct device *dev,
144                                   struct device_attribute *attr,
145                                   const char *buf, size_t count)
146 {
147         struct backing_dev_info *bdi = dev_get_drvdata(dev);
148         unsigned long read_ahead_kb;
149         ssize_t ret;
150
151         ret = kstrtoul(buf, 10, &read_ahead_kb);
152         if (ret < 0)
153                 return ret;
154
155         bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
156
157         return count;
158 }
159
160 #define K(pages) ((pages) << (PAGE_SHIFT - 10))
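/*
 * Worked example (editorial note): with 4 KiB pages, PAGE_SHIFT is 12 and
 * PAGE_SHIFT - 10 == 2, so K(pages) == pages * 4, i.e. the size in
 * kilobytes.  read_ahead_kb_store() above applies the inverse shift to
 * turn a kilobyte count back into pages for bdi->ra_pages.
 */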
161
162 #define BDI_SHOW(name, expr)                                            \
163 static ssize_t name##_show(struct device *dev,                          \
164                            struct device_attribute *attr, char *page)   \
165 {                                                                       \
166         struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
167                                                                         \
168         return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
169 }                                                                       \
170 static DEVICE_ATTR_RW(name);
171
172 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
173
174 static ssize_t min_ratio_store(struct device *dev,
175                 struct device_attribute *attr, const char *buf, size_t count)
176 {
177         struct backing_dev_info *bdi = dev_get_drvdata(dev);
178         unsigned int ratio;
179         ssize_t ret;
180
181         ret = kstrtouint(buf, 10, &ratio);
182         if (ret < 0)
183                 return ret;
184
185         ret = bdi_set_min_ratio(bdi, ratio);
186         if (!ret)
187                 ret = count;
188
189         return ret;
190 }
191 BDI_SHOW(min_ratio, bdi->min_ratio)
192
193 static ssize_t max_ratio_store(struct device *dev,
194                 struct device_attribute *attr, const char *buf, size_t count)
195 {
196         struct backing_dev_info *bdi = dev_get_drvdata(dev);
197         unsigned int ratio;
198         ssize_t ret;
199
200         ret = kstrtouint(buf, 10, &ratio);
201         if (ret < 0)
202                 return ret;
203
204         ret = bdi_set_max_ratio(bdi, ratio);
205         if (!ret)
206                 ret = count;
207
208         return ret;
209 }
210 BDI_SHOW(max_ratio, bdi->max_ratio)
211
212 static ssize_t stable_pages_required_show(struct device *dev,
213                                           struct device_attribute *attr,
214                                           char *page)
215 {
216         struct backing_dev_info *bdi = dev_get_drvdata(dev);
217
218         return snprintf(page, PAGE_SIZE-1, "%d\n",
219                         bdi_cap_stable_pages_required(bdi) ? 1 : 0);
220 }
221 static DEVICE_ATTR_RO(stable_pages_required);
222
223 static struct attribute *bdi_dev_attrs[] = {
224         &dev_attr_read_ahead_kb.attr,
225         &dev_attr_min_ratio.attr,
226         &dev_attr_max_ratio.attr,
227         &dev_attr_stable_pages_required.attr,
228         NULL,
229 };
230 ATTRIBUTE_GROUPS(bdi_dev);
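/*
 * Editorial note: these attributes hang off the "bdi" class created below,
 * so they are visible in sysfs as /sys/class/bdi/<name>/read_ahead_kb,
 * min_ratio, max_ratio and stable_pages_required.
 */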
231
232 static __init int bdi_class_init(void)
233 {
234         bdi_class = class_create(THIS_MODULE, "bdi");
235         if (IS_ERR(bdi_class))
236                 return PTR_ERR(bdi_class);
237
238         bdi_class->dev_groups = bdi_dev_groups;
239         bdi_debug_init();
240
241         return 0;
242 }
243 postcore_initcall(bdi_class_init);
244
245 static int __init default_bdi_init(void)
246 {
247         int err;
248
249         bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
250                                               WQ_UNBOUND | WQ_SYSFS, 0);
251         if (!bdi_wq)
252                 return -ENOMEM;
253
254         err = bdi_init(&noop_backing_dev_info);
255
256         return err;
257 }
258 subsys_initcall(default_bdi_init);
259
260 /*
261  * This function is used when the first inode for this wb is marked dirty. It
262  * wakes up the corresponding bdi thread which should then take care of the
263  * periodic background write-out of dirty inodes. Since the write-out would
264  * start only 'dirty_writeback_interval' centisecs from now anyway, we just
265  * set up a timer which wakes the bdi thread up later.
266  *
267  * Note, we wouldn't bother setting up the timer, but this function is on the
268  * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
269  * by delaying the wake-up.
270  *
271  * We have to be careful not to postpone flush work if it is scheduled for
272  * earlier. Thus we use queue_delayed_work().
273  */
274 void wb_wakeup_delayed(struct bdi_writeback *wb)
275 {
276         unsigned long timeout;
277
278         timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
279         spin_lock_bh(&wb->work_lock);
280         if (test_bit(WB_registered, &wb->state))
281                 queue_delayed_work(bdi_wq, &wb->dwork, timeout);
282         spin_unlock_bh(&wb->work_lock);
283 }
284
285 /*
286  * Initial write bandwidth: 100 MB/s
287  */
288 #define INIT_BW         (100 << (20 - PAGE_SHIFT))
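/*
 * Worked example (editorial note): with 4 KiB pages, PAGE_SHIFT is 12, so
 * INIT_BW = 100 << 8 = 25600 pages per second; 25600 pages * 4 KiB is
 * 100 MiB/s, matching the comment above.  The bandwidth and ratelimit
 * fields initialised below are therefore expressed in pages per second.
 */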
289
290 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
291                    int blkcg_id, gfp_t gfp)
292 {
293         int i, err;
294
295         memset(wb, 0, sizeof(*wb));
296
297         if (wb != &bdi->wb)
298                 bdi_get(bdi);
299         wb->bdi = bdi;
300         wb->last_old_flush = jiffies;
301         INIT_LIST_HEAD(&wb->b_dirty);
302         INIT_LIST_HEAD(&wb->b_io);
303         INIT_LIST_HEAD(&wb->b_more_io);
304         INIT_LIST_HEAD(&wb->b_dirty_time);
305         spin_lock_init(&wb->list_lock);
306
307         wb->bw_time_stamp = jiffies;
308         wb->balanced_dirty_ratelimit = INIT_BW;
309         wb->dirty_ratelimit = INIT_BW;
310         wb->write_bandwidth = INIT_BW;
311         wb->avg_write_bandwidth = INIT_BW;
312
313         spin_lock_init(&wb->work_lock);
314         INIT_LIST_HEAD(&wb->work_list);
315         INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
316         wb->dirty_sleep = jiffies;
317
318         wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
319         if (!wb->congested) {
320                 err = -ENOMEM;
321                 goto out_put_bdi;
322         }
323
324         err = fprop_local_init_percpu(&wb->completions, gfp);
325         if (err)
326                 goto out_put_cong;
327
328         for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
329                 err = percpu_counter_init(&wb->stat[i], 0, gfp);
330                 if (err)
331                         goto out_destroy_stat;
332         }
333
334         return 0;
335
336 out_destroy_stat:
337         while (i--)
338                 percpu_counter_destroy(&wb->stat[i]);
339         fprop_local_destroy_percpu(&wb->completions);
340 out_put_cong:
341         wb_congested_put(wb->congested);
342 out_put_bdi:
343         if (wb != &bdi->wb)
344                 bdi_put(bdi);
345         return err;
346 }
347
348 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
349
350 /*
351  * Remove bdi from the global list and shutdown any threads we have running
352  */
353 static void wb_shutdown(struct bdi_writeback *wb)
354 {
355         /* Make sure nobody queues further work */
356         spin_lock_bh(&wb->work_lock);
357         if (!test_and_clear_bit(WB_registered, &wb->state)) {
358                 spin_unlock_bh(&wb->work_lock);
359                 return;
360         }
361         spin_unlock_bh(&wb->work_lock);
362
363         cgwb_remove_from_bdi_list(wb);
364         /*
365  * Drain the work list and shut down the delayed_work.  !WB_registered
366          * tells wb_workfn() that @wb is dying and its work_list needs to
367          * be drained no matter what.
368          */
369         mod_delayed_work(bdi_wq, &wb->dwork, 0);
370         flush_delayed_work(&wb->dwork);
371         WARN_ON(!list_empty(&wb->work_list));
372 }
373
374 static void wb_exit(struct bdi_writeback *wb)
375 {
376         int i;
377
378         WARN_ON(delayed_work_pending(&wb->dwork));
379
380         for (i = 0; i < NR_WB_STAT_ITEMS; i++)
381                 percpu_counter_destroy(&wb->stat[i]);
382
383         fprop_local_destroy_percpu(&wb->completions);
384         wb_congested_put(wb->congested);
385         if (wb != &wb->bdi->wb)
386                 bdi_put(wb->bdi);
387 }
388
389 #ifdef CONFIG_CGROUP_WRITEBACK
390
391 #include <linux/memcontrol.h>
392
393 /*
394  * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
395  * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
396  * protected.  cgwb_release_wait is used to wait for the completion of cgwb
397  * releases from bdi destruction path.
398  */
399 static DEFINE_SPINLOCK(cgwb_lock);
400 static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);
401
402 /**
403  * wb_congested_get_create - get or create a wb_congested
404  * @bdi: associated bdi
405  * @blkcg_id: ID of the associated blkcg
406  * @gfp: allocation mask
407  *
408  * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
409  * The returned wb_congested has its reference count incremented.  Returns
410  * NULL on failure.
411  */
412 struct bdi_writeback_congested *
413 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
414 {
415         struct bdi_writeback_congested *new_congested = NULL, *congested;
416         struct rb_node **node, *parent;
417         unsigned long flags;
418 retry:
419         spin_lock_irqsave(&cgwb_lock, flags);
420
421         node = &bdi->cgwb_congested_tree.rb_node;
422         parent = NULL;
423
424         while (*node != NULL) {
425                 parent = *node;
426                 congested = rb_entry(parent, struct bdi_writeback_congested,
427                                      rb_node);
428                 if (congested->blkcg_id < blkcg_id)
429                         node = &parent->rb_left;
430                 else if (congested->blkcg_id > blkcg_id)
431                         node = &parent->rb_right;
432                 else
433                         goto found;
434         }
435
436         if (new_congested) {
437                 /* !found and storage for new one already allocated, insert */
438                 congested = new_congested;
439                 new_congested = NULL;
440                 rb_link_node(&congested->rb_node, parent, node);
441                 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
442                 goto found;
443         }
444
445         spin_unlock_irqrestore(&cgwb_lock, flags);
446
447         /* allocate storage for new one and retry */
448         new_congested = kzalloc(sizeof(*new_congested), gfp);
449         if (!new_congested)
450                 return NULL;
451
452         atomic_set(&new_congested->refcnt, 0);
453         new_congested->__bdi = bdi;
454         new_congested->blkcg_id = blkcg_id;
455         goto retry;
456
457 found:
458         atomic_inc(&congested->refcnt);
459         spin_unlock_irqrestore(&cgwb_lock, flags);
460         kfree(new_congested);
461         return congested;
462 }
463
464 /**
465  * wb_congested_put - put a wb_congested
466  * @congested: wb_congested to put
467  *
468  * Put @congested and destroy it if the refcnt reaches zero.
469  */
470 void wb_congested_put(struct bdi_writeback_congested *congested)
471 {
472         unsigned long flags;
473
474         local_irq_save(flags);
475         if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
476                 local_irq_restore(flags);
477                 return;
478         }
479
480         /* bdi might already have been destroyed leaving @congested unlinked */
481         if (congested->__bdi) {
482                 rb_erase(&congested->rb_node,
483                          &congested->__bdi->cgwb_congested_tree);
484                 congested->__bdi = NULL;
485         }
486
487         spin_unlock_irqrestore(&cgwb_lock, flags);
488         kfree(congested);
489 }
490
491 static void cgwb_release_workfn(struct work_struct *work)
492 {
493         struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
494                                                 release_work);
495         struct backing_dev_info *bdi = wb->bdi;
496
497         wb_shutdown(wb);
498
499         css_put(wb->memcg_css);
500         css_put(wb->blkcg_css);
501
502         fprop_local_destroy_percpu(&wb->memcg_completions);
503         percpu_ref_exit(&wb->refcnt);
504         wb_exit(wb);
505         kfree_rcu(wb, rcu);
506
507         if (atomic_dec_and_test(&bdi->usage_cnt))
508                 wake_up_all(&cgwb_release_wait);
509 }
510
511 static void cgwb_release(struct percpu_ref *refcnt)
512 {
513         struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
514                                                 refcnt);
515         schedule_work(&wb->release_work);
516 }
517
518 static void cgwb_kill(struct bdi_writeback *wb)
519 {
520         lockdep_assert_held(&cgwb_lock);
521
522         WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
523         list_del(&wb->memcg_node);
524         list_del(&wb->blkcg_node);
525         percpu_ref_kill(&wb->refcnt);
526 }
527
528 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
529 {
530         spin_lock_irq(&cgwb_lock);
531         list_del_rcu(&wb->bdi_node);
532         spin_unlock_irq(&cgwb_lock);
533 }
534
535 static int cgwb_create(struct backing_dev_info *bdi,
536                        struct cgroup_subsys_state *memcg_css, gfp_t gfp)
537 {
538         struct mem_cgroup *memcg;
539         struct cgroup_subsys_state *blkcg_css;
540         struct blkcg *blkcg;
541         struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
542         struct bdi_writeback *wb;
543         unsigned long flags;
544         int ret = 0;
545
546         memcg = mem_cgroup_from_css(memcg_css);
547         blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
548         blkcg = css_to_blkcg(blkcg_css);
549         memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
550         blkcg_cgwb_list = &blkcg->cgwb_list;
551
552         /* look up again under lock and discard on blkcg mismatch */
553         spin_lock_irqsave(&cgwb_lock, flags);
554         wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
555         if (wb && wb->blkcg_css != blkcg_css) {
556                 cgwb_kill(wb);
557                 wb = NULL;
558         }
559         spin_unlock_irqrestore(&cgwb_lock, flags);
560         if (wb)
561                 goto out_put;
562
563         /* need to create a new one */
564         wb = kmalloc(sizeof(*wb), gfp);
565         if (!wb) {
566                 ret = -ENOMEM;
                    goto out_put;   /* don't leak the blkcg_css reference taken above */
            }
567
568         ret = wb_init(wb, bdi, blkcg_css->id, gfp);
569         if (ret)
570                 goto err_free;
571
572         ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
573         if (ret)
574                 goto err_wb_exit;
575
576         ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
577         if (ret)
578                 goto err_ref_exit;
579
580         wb->memcg_css = memcg_css;
581         wb->blkcg_css = blkcg_css;
582         INIT_WORK(&wb->release_work, cgwb_release_workfn);
583         set_bit(WB_registered, &wb->state);
584
585         /*
586          * The root wb determines the registered state of the whole bdi and
587          * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
588          * whether they're still online.  Don't link @wb if any is dead.
589          * See wb_memcg_offline() and wb_blkcg_offline().
590          */
591         ret = -ENODEV;
592         spin_lock_irqsave(&cgwb_lock, flags);
593         if (test_bit(WB_registered, &bdi->wb.state) &&
594             blkcg_cgwb_list->next && memcg_cgwb_list->next) {
595                 /* we might have raced another instance of this function */
596                 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
597                 if (!ret) {
598                         atomic_inc(&bdi->usage_cnt);
599                         list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
600                         list_add(&wb->memcg_node, memcg_cgwb_list);
601                         list_add(&wb->blkcg_node, blkcg_cgwb_list);
602                         css_get(memcg_css);
603                         css_get(blkcg_css);
604                 }
605         }
606         spin_unlock_irqrestore(&cgwb_lock, flags);
607         if (ret) {
608                 if (ret == -EEXIST)
609                         ret = 0;
610                 goto err_fprop_exit;
611         }
612         goto out_put;
613
614 err_fprop_exit:
615         fprop_local_destroy_percpu(&wb->memcg_completions);
616 err_ref_exit:
617         percpu_ref_exit(&wb->refcnt);
618 err_wb_exit:
619         wb_exit(wb);
620 err_free:
621         kfree(wb);
622 out_put:
623         css_put(blkcg_css);
624         return ret;
625 }
626
627 /**
628  * wb_get_create - get wb for a given memcg, create if necessary
629  * @bdi: target bdi
630  * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
631  * @gfp: allocation mask to use
632  *
633  * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
634  * create one.  The returned wb has its refcount incremented.
635  *
636  * This function uses css_get() on @memcg_css and thus expects its refcnt
637  * to be positive on invocation.  IOW, rcu_read_lock() protection on
638  * @memcg_css isn't enough.  try_get it before calling this function.
639  *
640  * A wb is keyed by its associated memcg.  As blkcg implicitly enables
641  * memcg on the default hierarchy, memcg association is guaranteed to be
642  * more specific (equal or descendant to the associated blkcg) and thus can
643  * identify both the memcg and blkcg associations.
644  *
645  * Because the blkcg associated with a memcg may change as blkcg is enabled
646  * and disabled closer to root in the hierarchy, each wb keeps track of
647  * both the memcg and blkcg associated with it and verifies the blkcg on
648  * each lookup.  On mismatch, the existing wb is discarded and a new one is
649  * created.
650  */
651 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
652                                     struct cgroup_subsys_state *memcg_css,
653                                     gfp_t gfp)
654 {
655         struct bdi_writeback *wb;
656
657         might_sleep_if(gfpflags_allow_blocking(gfp));
658
659         if (!memcg_css->parent)
660                 return &bdi->wb;
661
662         do {
663                 rcu_read_lock();
664                 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
665                 if (wb) {
666                         struct cgroup_subsys_state *blkcg_css;
667
668                         /* see whether the blkcg association has changed */
669                         blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
670                                                      &io_cgrp_subsys);
671                         if (unlikely(wb->blkcg_css != blkcg_css ||
672                                      !wb_tryget(wb)))
673                                 wb = NULL;
674                         css_put(blkcg_css);
675                 }
676                 rcu_read_unlock();
677         } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
678
679         return wb;
680 }
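/*
 * Illustrative caller sketch (editorial, not part of the original file),
 * roughly following the pattern of the inline helpers in
 * <linux/backing-dev.h> such as wb_get_create_current(): pin the memcg
 * css, look up or create the wb, drop the css reference, and eventually
 * release the wb with wb_put():
 *
 *	struct cgroup_subsys_state *memcg_css;
 *	struct bdi_writeback *wb;
 *
 *	memcg_css = task_get_css(current, memory_cgrp_id);
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	css_put(memcg_css);
 *	if (wb) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */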
681
682 static int cgwb_bdi_init(struct backing_dev_info *bdi)
683 {
684         int ret;
685
686         INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
687         bdi->cgwb_congested_tree = RB_ROOT;
688         atomic_set(&bdi->usage_cnt, 1);
689
690         ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
691         if (!ret) {
692                 bdi->wb.memcg_css = &root_mem_cgroup->css;
693                 bdi->wb.blkcg_css = blkcg_root_css;
694         }
695         return ret;
696 }
697
698 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
699 {
700         struct radix_tree_iter iter;
701         void **slot;
702
703         WARN_ON(test_bit(WB_registered, &bdi->wb.state));
704
705         spin_lock_irq(&cgwb_lock);
706         radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
707                 cgwb_kill(*slot);
708         spin_unlock_irq(&cgwb_lock);
709
710         /*
711          * All cgwb's must be shutdown and released before returning.  Drain
712          * the usage counter to wait for all cgwb's ever created on @bdi.
713          */
714         atomic_dec(&bdi->usage_cnt);
715         wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
716         /*
717          * Grab back our reference so that we hold it when @bdi gets
718          * re-registered.
719          */
720         atomic_inc(&bdi->usage_cnt);
721 }
722
723 /**
724  * wb_memcg_offline - kill all wb's associated with a memcg being offlined
725  * @memcg: memcg being offlined
726  *
727  * Also prevents creation of any new wb's associated with @memcg.
728  */
729 void wb_memcg_offline(struct mem_cgroup *memcg)
730 {
731         LIST_HEAD(to_destroy);
732         struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
733         struct bdi_writeback *wb, *next;
734
735         spin_lock_irq(&cgwb_lock);
736         list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
737                 cgwb_kill(wb);
738         memcg_cgwb_list->next = NULL;   /* prevent new wb's */
739         spin_unlock_irq(&cgwb_lock);
740 }
741
742 /**
743  * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
744  * @blkcg: blkcg being offlined
745  *
746  * Also prevents creation of any new wb's associated with @blkcg.
747  */
748 void wb_blkcg_offline(struct blkcg *blkcg)
749 {
750         LIST_HEAD(to_destroy);
751         struct bdi_writeback *wb, *next;
752
753         spin_lock_irq(&cgwb_lock);
754         list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
755                 cgwb_kill(wb);
756         blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
757         spin_unlock_irq(&cgwb_lock);
758 }
759
760 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
761 {
762         struct rb_node *rbn;
763
764         spin_lock_irq(&cgwb_lock);
765         while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
766                 struct bdi_writeback_congested *congested =
767                         rb_entry(rbn, struct bdi_writeback_congested, rb_node);
768
769                 rb_erase(rbn, &bdi->cgwb_congested_tree);
770                 congested->__bdi = NULL;        /* mark @congested unlinked */
771         }
772         spin_unlock_irq(&cgwb_lock);
773 }
774
775 static void cgwb_bdi_register(struct backing_dev_info *bdi)
776 {
777         spin_lock_irq(&cgwb_lock);
778         list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
779         spin_unlock_irq(&cgwb_lock);
780 }
781
782 #else   /* CONFIG_CGROUP_WRITEBACK */
783
784 static int cgwb_bdi_init(struct backing_dev_info *bdi)
785 {
786         int err;
787
788         bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
789         if (!bdi->wb_congested)
790                 return -ENOMEM;
791
792         atomic_set(&bdi->wb_congested->refcnt, 1);
793
794         err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
795         if (err) {
796                 wb_congested_put(bdi->wb_congested);
797                 return err;
798         }
799         return 0;
800 }
801
802 static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
803
804 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
805 {
806         wb_congested_put(bdi->wb_congested);
807 }
808
809 static void cgwb_bdi_register(struct backing_dev_info *bdi)
810 {
811         list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
812 }
813
814 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
815 {
816         list_del_rcu(&wb->bdi_node);
817 }
818
819 #endif  /* CONFIG_CGROUP_WRITEBACK */
820
821 int bdi_init(struct backing_dev_info *bdi)
822 {
823         int ret;
824
825         bdi->dev = NULL;
826
827         kref_init(&bdi->refcnt);
828         bdi->min_ratio = 0;
829         bdi->max_ratio = 100;
830         bdi->max_prop_frac = FPROP_FRAC_BASE;
831         INIT_LIST_HEAD(&bdi->bdi_list);
832         INIT_LIST_HEAD(&bdi->wb_list);
833         init_waitqueue_head(&bdi->wb_waitq);
834
835         ret = cgwb_bdi_init(bdi);
836
837         return ret;
838 }
839 EXPORT_SYMBOL(bdi_init);
840
841 struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
842 {
843         struct backing_dev_info *bdi;
844
845         bdi = kmalloc_node(sizeof(struct backing_dev_info),
846                            gfp_mask | __GFP_ZERO, node_id);
847         if (!bdi)
848                 return NULL;
849
850         if (bdi_init(bdi)) {
851                 kfree(bdi);
852                 return NULL;
853         }
854         return bdi;
855 }
856
857 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
858                 const char *fmt, ...)
859 {
860         va_list args;
861         struct device *dev;
862
863         if (bdi->dev)   /* The driver needs to use separate queues per device */
864                 return 0;
865
866         va_start(args, fmt);
867         dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
868         va_end(args);
869         if (IS_ERR(dev))
870                 return PTR_ERR(dev);
871
872         cgwb_bdi_register(bdi);
873         bdi->dev = dev;
874
875         bdi_debug_register(bdi, dev_name(dev));
876         set_bit(WB_registered, &bdi->wb.state);
877
878         spin_lock_bh(&bdi_lock);
879         list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
880         spin_unlock_bh(&bdi_lock);
881
882         trace_writeback_bdi_register(bdi);
883         return 0;
884 }
885 EXPORT_SYMBOL(bdi_register);
886
887 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
888 {
889         return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
890 }
891 EXPORT_SYMBOL(bdi_register_dev);
892
893 int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
894 {
895         int rc;
896
897         rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
898                         MINOR(owner->devt));
899         if (rc)
900                 return rc;
901         /* Leaking owner reference... */
902         WARN_ON(bdi->owner);
903         bdi->owner = owner;
904         get_device(owner);
905         return 0;
906 }
907 EXPORT_SYMBOL(bdi_register_owner);
908
909 /*
910  * Remove bdi from bdi_list, and ensure that it is no longer visible
911  */
912 static void bdi_remove_from_list(struct backing_dev_info *bdi)
913 {
914         spin_lock_bh(&bdi_lock);
915         list_del_rcu(&bdi->bdi_list);
916         spin_unlock_bh(&bdi_lock);
917
918         synchronize_rcu_expedited();
919 }
920
921 void bdi_unregister(struct backing_dev_info *bdi)
922 {
923         /* make sure nobody finds us on the bdi_list anymore */
924         bdi_remove_from_list(bdi);
925         wb_shutdown(&bdi->wb);
926         cgwb_bdi_destroy(bdi);
927
928         if (bdi->dev) {
929                 bdi_debug_unregister(bdi);
930                 device_unregister(bdi->dev);
931                 bdi->dev = NULL;
932         }
933
934         if (bdi->owner) {
935                 put_device(bdi->owner);
936                 bdi->owner = NULL;
937         }
938 }
939
940 static void bdi_exit(struct backing_dev_info *bdi)
941 {
942         WARN_ON_ONCE(bdi->dev);
943         wb_exit(&bdi->wb);
944         cgwb_bdi_exit(bdi);
945 }
946
947 static void release_bdi(struct kref *ref)
948 {
949         struct backing_dev_info *bdi =
950                         container_of(ref, struct backing_dev_info, refcnt);
951
952         bdi_exit(bdi);
953         kfree(bdi);
954 }
955
956 void bdi_put(struct backing_dev_info *bdi)
957 {
958         kref_put(&bdi->refcnt, release_bdi);
959 }
960
961 void bdi_destroy(struct backing_dev_info *bdi)
962 {
963         bdi_unregister(bdi);
964         bdi_exit(bdi);
965 }
966 EXPORT_SYMBOL(bdi_destroy);
967
968 /*
969  * For use from filesystems to quickly init and register a bdi associated
970  * with dirty writeback
971  */
972 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
973 {
974         int err;
975
976         bdi->name = name;
977         bdi->capabilities = 0;
978         err = bdi_init(bdi);
979         if (err)
980                 return err;
981
982         err = bdi_register(bdi, NULL, "%.28s-%ld", name,
983                            atomic_long_inc_return(&bdi_seq));
984         if (err) {
985                 bdi_destroy(bdi);
986                 return err;
987         }
988
989         return 0;
990 }
991 EXPORT_SYMBOL(bdi_setup_and_register);
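/*
 * Illustrative sketch (editorial, using a hypothetical "examplefs" with
 * sbi/sb naming): a filesystem embedding a backing_dev_info in its
 * per-superblock info would typically do the following while filling the
 * superblock, and call bdi_destroy(&sbi->bdi) at unmount time:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "examplefs");
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 */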
992
993 static wait_queue_head_t congestion_wqh[2] = {
994                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
995                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
996         };
997 static atomic_t nr_wb_congested[2];
998
999 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
1000 {
1001         wait_queue_head_t *wqh = &congestion_wqh[sync];
1002         enum wb_congested_state bit;
1003
1004         bit = sync ? WB_sync_congested : WB_async_congested;
1005         if (test_and_clear_bit(bit, &congested->state))
1006                 atomic_dec(&nr_wb_congested[sync]);
1007         smp_mb__after_atomic();
1008         if (waitqueue_active(wqh))
1009                 wake_up(wqh);
1010 }
1011 EXPORT_SYMBOL(clear_wb_congested);
1012
1013 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
1014 {
1015         enum wb_congested_state bit;
1016
1017         bit = sync ? WB_sync_congested : WB_async_congested;
1018         if (!test_and_set_bit(bit, &congested->state))
1019                 atomic_inc(&nr_wb_congested[sync]);
1020 }
1021 EXPORT_SYMBOL(set_wb_congested);
1022
1023 /**
1024  * congestion_wait - wait for a backing_dev to become uncongested
1025  * @sync: SYNC or ASYNC IO
1026  * @timeout: timeout in jiffies
1027  *
1028  * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1029  * write congestion.  If no backing_devs are congested then it just waits for
1030  * the next write to be completed.
1031  */
1032 long congestion_wait(int sync, long timeout)
1033 {
1034         long ret;
1035         unsigned long start = jiffies;
1036         DEFINE_WAIT(wait);
1037         wait_queue_head_t *wqh = &congestion_wqh[sync];
1038
1039         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1040         ret = io_schedule_timeout(timeout);
1041         finish_wait(wqh, &wait);
1042
1043         trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1044                                         jiffies_to_usecs(jiffies - start));
1045
1046         return ret;
1047 }
1048 EXPORT_SYMBOL(congestion_wait);
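/*
 * Illustrative sketch (editorial note): reclaim and allocation paths
 * typically throttle with a short timeout rather than waiting for the full
 * congestion period, e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * which sleeps for at most ~100ms and returns early if congestion on the
 * async queue is cleared in the meantime.
 */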
1049
1050 /**
1051  * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1052  * @pgdat: A pgdat to check if it is heavily congested
1053  * @sync: SYNC or ASYNC IO
1054  * @timeout: timeout in jiffies
1055  *
1056  * If any backing_dev is congested and the given @pgdat has experienced
1057  * recent congestion, this waits for up to @timeout jiffies for either a
1058  * BDI to exit congestion of the given @sync queue or a write to
1059  * complete.
1060  *
1061  * In the absence of pgdat congestion, cond_resched() is called to yield
1062  * the processor if necessary; otherwise the function does not sleep.
1063  *
1064  * The return value is 0 if the sleep is for the full timeout. Otherwise,
1065  * it is the number of jiffies that were still remaining when the function
1066  * returned. return_value == timeout implies the function did not sleep.
1067  */
1068 long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
1069 {
1070         long ret;
1071         unsigned long start = jiffies;
1072         DEFINE_WAIT(wait);
1073         wait_queue_head_t *wqh = &congestion_wqh[sync];
1074
1075         /*
1076          * If there is no congestion, or heavy congestion is not being
1077          * encountered in the current pgdat, yield if necessary instead
1078          * of sleeping on the congestion queue
1079          */
1080         if (atomic_read(&nr_wb_congested[sync]) == 0 ||
1081             !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
1082                 cond_resched();
1083
1084                 /* In case we scheduled, work out time remaining */
1085                 ret = timeout - (jiffies - start);
1086                 if (ret < 0)
1087                         ret = 0;
1088
1089                 goto out;
1090         }
1091
1092         /* Sleep until uncongested or a write happens */
1093         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1094         ret = io_schedule_timeout(timeout);
1095         finish_wait(wqh, &wait);
1096
1097 out:
1098         trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1099                                         jiffies_to_usecs(jiffies - start));
1100
1101         return ret;
1102 }
1103 EXPORT_SYMBOL(wait_iff_congested);
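/*
 * Illustrative sketch (editorial note): a typical reclaim caller passes the
 * node it is scanning so the sleep only happens when that node is marked
 * congested, e.g.:
 *
 *	wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
 */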
1104
1105 int pdflush_proc_obsolete(struct ctl_table *table, int write,
1106                         void __user *buffer, size_t *lenp, loff_t *ppos)
1107 {
1108         char kbuf[] = "0\n";
1109
1110         if (*ppos || *lenp < sizeof(kbuf)) {
1111                 *lenp = 0;
1112                 return 0;
1113         }
1114
1115         if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
1116                 return -EFAULT;
1117         pr_warn_once("%s exported in /proc is scheduled for removal\n",
1118                      table->procname);
1119
1120         *lenp = 2;
1121         *ppos += *lenp;
1122         return 2;
1123 }