mm/backing-dev.c

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
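
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * with the directories created above, the stats file for a bdi lands in
 * debugfs, conventionally mounted at /sys/kernel/debug.  For a hypothetical
 * device named "8:0":
 *
 *      # cat /sys/kernel/debug/bdi/8:0/stats
 *
 * which prints the fields formatted by bdi_debug_stats_show() above.
 */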

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}
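
/*
 * Worked example (editorial illustration): with 4 KiB pages, PAGE_SHIFT is
 * 12, so PAGE_SHIFT - 10 == 2 and kilobytes convert to pages by a right
 * shift of two, e.g.
 *
 *      echo 512 > read_ahead_kb      =>  bdi->ra_pages = 512 >> 2 = 128
 *
 * The K() macro below performs the inverse pages-to-kilobytes conversion
 * for the _show side.
 */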

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
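
/*
 * Expansion note (editorial illustration): the invocation above generates
 * read_ahead_kb_show() and, through DEVICE_ATTR_RW(read_ahead_kb), a
 * dev_attr_read_ahead_kb attribute pairing that _show with the
 * read_ahead_kb_store() defined earlier; DEVICE_ATTR_RW(name) requires
 * both name##_show() and name##_store() to exist.
 */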

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
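
/*
 * Resulting sysfs layout (editorial sketch; the device name is chosen by
 * the registering driver):
 *
 *      /sys/class/bdi/<name>/read_ahead_kb
 *      /sys/class/bdi/<name>/min_ratio
 *      /sys/class/bdi/<name>/max_ratio
 *      /sys/class/bdi/<name>/stable_pages_required
 *
 * ATTRIBUTE_GROUPS(bdi_dev) emits the bdi_dev_groups array that
 * bdi_class_init() below hangs off bdi_class->dev_groups.
 */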

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();

        return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}
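
/*
 * Worked example (editorial illustration): dirty_writeback_interval
 * defaults to 500 centisecs, so the timeout above is normally
 * msecs_to_jiffies(500 * 10), i.e. the flusher wakes roughly five seconds
 * after the first inode is dirtied, unless work was already queued to run
 * sooner.
 */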

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
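
/*
 * Worked example (editorial illustration): INIT_BW is in pages per second.
 * With 4 KiB pages (PAGE_SHIFT == 12), 100 << (20 - 12) == 25600 pages/s,
 * and 25600 pages * 4 KiB == 100 MiB/s.
 */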

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   int blkcg_id, gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        if (wb != &bdi->wb)
                bdi_get(bdi);
        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        wb->dirty_sleep = jiffies;

        wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
        if (!wb->congested) {
                err = -ENOMEM;
                goto out_put_bdi;
        }

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_cong;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
        wb_congested_put(wb->congested);
out_put_bdi:
        if (wb != &bdi->wb)
                bdi_put(bdi);
        return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                /*
                 * Wait for wb shutdown to finish if someone else is just
                 * running wb_shutdown(). Otherwise we could proceed to wb /
                 * bdi destruction before wb_shutdown() is finished.
                 */
                wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
        set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
         * Drain the work list and shut down the delayed_work.  !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
        /*
         * Make sure bit gets cleared after shutdown is finished. Matches with
         * the barrier provided by test_and_clear_bit() above.
         */
        smp_wmb();
        clear_bit(WB_shutting_down, &wb->state);
}

static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        wb_congested_put(wb->congested);
        if (wb != &wb->bdi->wb)
                bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
retry:
        spin_lock_irqsave(&cgwb_lock, flags);

        node = &bdi->cgwb_congested_tree.rb_node;
        parent = NULL;

        while (*node != NULL) {
                parent = *node;
                congested = rb_entry(parent, struct bdi_writeback_congested,
                                     rb_node);
                if (congested->blkcg_id < blkcg_id)
                        node = &parent->rb_left;
                else if (congested->blkcg_id > blkcg_id)
                        node = &parent->rb_right;
                else
                        goto found;
        }

        if (new_congested) {
                /* !found and storage for new one already allocated, insert */
                congested = new_congested;
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
                goto found;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);

        /* allocate storage for new one and retry */
        new_congested = kzalloc(sizeof(*new_congested), gfp);
        if (!new_congested)
                return NULL;

        atomic_set(&new_congested->refcnt, 0);
        new_congested->__bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;

found:
        atomic_inc(&congested->refcnt);
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(new_congested);
        return congested;
}
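
/*
 * Design note (editorial): the retry loop above is the usual "look up under
 * the lock, allocate unlocked, look up again" pattern.  The allocation may
 * sleep depending on @gfp, so it cannot be done under the irq-safe
 * cgwb_lock; the lock is dropped, storage is allocated, and the walk is
 * redone from scratch.  If another CPU inserted a matching node in the
 * meantime, the lookup hits "found" and the unused preallocation is
 * released by kfree(new_congested), which is a no-op when nothing was
 * allocated.
 */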

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }

        /* bdi might already have been destroyed leaving @congested unlinked */
        if (congested->__bdi) {
                rb_erase(&congested->rb_node,
                         &congested->__bdi->cgwb_congested_tree);
                congested->__bdi = NULL;
        }

        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);

        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);

        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb) {
                ret = -ENOMEM;
                goto out_put;
        }

        ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp)
{
        struct bdi_writeback *wb;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        if (!memcg_css->parent)
                return &bdi->wb;

        do {
                rcu_read_lock();
                wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
                if (wb) {
                        struct cgroup_subsys_state *blkcg_css;

                        /* see whether the blkcg association has changed */
                        blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
                                                     &io_cgrp_subsys);
                        if (unlikely(wb->blkcg_css != blkcg_css ||
                                     !wb_tryget(wb)))
                                wb = NULL;
                        css_put(blkcg_css);
                }
                rcu_read_unlock();
        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));

        return wb;
}
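
/*
 * Usage sketch (editorial illustration, not lifted from an actual caller):
 * per the kernel-doc above, the caller must already hold a reference on
 * @memcg_css and drops the returned wb with wb_put() when done:
 *
 *      css_get(memcg_css);
 *      wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *      if (wb) {
 *              ... issue writeback against wb ...
 *              wb_put(wb);
 *      }
 *      css_put(memcg_css);
 */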

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;

        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        void **slot;
        struct bdi_writeback *wb;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);

        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
                spin_unlock_irq(&cgwb_lock);
                wb_shutdown(wb);
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        LIST_HEAD(to_destroy);
        struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        LIST_HEAD(to_destroy);
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        struct rb_node *rbn;

        spin_lock_irq(&cgwb_lock);
        while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
                struct bdi_writeback_congested *congested =
                        rb_entry(rbn, struct bdi_writeback_congested, rb_node);

                rb_erase(rbn, &bdi->cgwb_congested_tree);
                congested->__bdi = NULL;        /* mark @congested unlinked */
        }
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        spin_lock_irq(&cgwb_lock);
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
        spin_unlock_irq(&cgwb_lock);
}

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int err;

        bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
        if (!bdi->wb_congested)
                return -ENOMEM;

        atomic_set(&bdi->wb_congested->refcnt, 1);

        err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (err) {
                wb_congested_put(bdi->wb_congested);
                return err;
        }
        return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
        wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        list_del_rcu(&wb->bdi_node);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        kref_init(&bdi->refcnt);
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
        struct backing_dev_info *bdi;

        bdi = kmalloc_node(sizeof(struct backing_dev_info),
                           gfp_mask | __GFP_ZERO, node_id);
        if (!bdi)
                return NULL;

        if (bdi_init(bdi)) {
                kfree(bdi);
                return NULL;
        }
        return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        cgwb_bdi_register(bdi);
        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = bdi_register_va(bdi, fmt, args);
        va_end(args);
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
        int rc;

        rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
        if (rc)
                return rc;
        /* Leaking owner reference... */
        WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
}
EXPORT_SYMBOL(bdi_register_owner);
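
/*
 * Lifecycle sketch (editorial illustration): a typical user of this file
 * pairs the helpers as
 *
 *      bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *      if (!bdi)
 *              return -ENOMEM;
 *      err = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *      ...
 *      bdi_unregister(bdi);
 *      bdi_put(bdi);
 *
 * bdi_put() drops the reference taken by kref_init() in bdi_init(); the
 * final put frees the structure through release_bdi() below.
 */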

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

static void release_bdi(struct kref *ref)
{
        struct backing_dev_info *bdi =
                        container_of(ref, struct backing_dev_info, refcnt);

        if (test_bit(WB_registered, &bdi->wb.state))
                bdi_unregister(bdi);
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
        cgwb_bdi_exit(bdi);
        kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
        kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &congested->state))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &congested->state))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
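
/*
 * Usage sketch (editorial illustration): memory reclaim throttles itself
 * with calls along the lines of
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * which sleeps for up to a tenth of a second, or until some wb clears its
 * async congested bit, whichever comes first.
 */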

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * When any backing_dev is congested and the given @pgdat has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of pgdat congestion, this function calls cond_resched()
 * to yield the processor if necessary, but otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current pgdat, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0 ||
            !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        pr_warn_once("%s exported in /proc is scheduled for removal\n",
                     table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}
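
/*
 * Behaviour sketch (editorial illustration): this handler backs obsolete
 * knobs such as /proc/sys/vm/nr_pdflush_threads.  Reading one always
 * yields "0" plus a one-time removal warning in the kernel log:
 *
 *      $ cat /proc/sys/vm/nr_pdflush_threads
 *      0
 */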