drivers/md/dm-bufio.c
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *      Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *      dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS            8

#define DM_BUFIO_MEMORY_PERCENT         2
#define DM_BUFIO_VMALLOC_PERCENT        25
#define DM_BUFIO_WRITEBACK_PERCENT      75
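
/*
 * Illustrative arithmetic on an assumed machine (not from this file): with
 * 4 GiB of main memory and a 128 MiB vmalloc arena, the default cache size
 * would be min(2% of 4 GiB, 25% of 128 MiB) = min(~82 MiB, 32 MiB) = 32 MiB;
 * background writeback then starts once dirty buffers exceed 75% of a
 * client's buffer limit.
 */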
37
38 /*
39  * Check buffer ages in this interval (seconds)
40  */
41 #define DM_BUFIO_WORK_TIMER_SECS        30
42
43 /*
44  * Free buffers when they are older than this (seconds)
45  */
46 #define DM_BUFIO_DEFAULT_AGE_SECS       300
47
48 /*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS            16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT  (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT   (PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN      0
#define LIST_DIRTY      1
#define LIST_SIZE       2

/*
 * Linking of buffers:
 *      All buffers are linked to the buffer_tree with their node field.
 *
 *      Clean buffers that are not being written (B_WRITING not set)
 *      are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *      Dirty and clean buffers that are being written are linked to
 *      lru[LIST_DIRTY] with their lru_list field. When the write
 *      finishes, the buffer cannot be relinked immediately (because we
 *      are in an interrupt context and relinking requires process
 *      context), so some clean-not-writing buffers can be held on
 *      dirty_lru too.  They are later added to lru in the process
 *      context.
 */
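
/*
 * Illustratively, the list an existing buffer belongs on can be derived
 * from its state bits (this mirrors the relink done in __bufio_new below):
 *
 *      int list_mode = (test_bit(B_DIRTY, &b->state) ||
 *                       test_bit(B_WRITING, &b->state))
 *                      ? LIST_DIRTY : LIST_CLEAN;
 */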
struct dm_bufio_client {
        struct mutex lock;

        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];

        struct block_device *bdev;
        unsigned block_size;
        unsigned char sectors_per_block_bits;
        unsigned char pages_per_block_bits;
        unsigned char blocks_per_page_bits;
        unsigned aux_size;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);

        struct dm_io_client *dm_io;

        struct list_head reserved_buffers;
        unsigned need_reserved_buffers;

        unsigned minimum_buffers;

        struct rb_root buffer_tree;
        wait_queue_head_t free_buffer_wait;

        sector_t start;

        int async_write_error;

        struct list_head client_list;
        struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING       0
#define B_WRITING       1
#define B_DIRTY         2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
        DATA_MODE_SLAB = 0,
        DATA_MODE_GET_FREE_PAGES = 1,
        DATA_MODE_VMALLOC = 2,
        DATA_MODE_LIMIT = 3
};

struct dm_buffer {
        struct rb_node node;
        struct list_head lru_list;
        sector_t block;
        void *data;
        enum data_mode data_mode;
        unsigned char list_mode;                /* LIST_* */
        unsigned hold_count;
        blk_status_t read_error;
        blk_status_t write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
        struct list_head write_list;
        struct bio bio;
        struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
        struct stack_trace stack_trace;
        unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
        unsigned ret = c->blocks_per_page_bits - 1;

        BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

        return ret;
}

#define DM_BUFIO_CACHE(c)       (dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)  (dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()   (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
        mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
        return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
        mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
        b->stack_trace.nr_entries = 0;
        b->stack_trace.max_entries = MAX_STACK;
        b->stack_trace.entries = b->stack_entries;
        b->stack_trace.skip = 2;
        save_stack_trace(&b->stack_trace);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
        struct rb_node *n = c->buffer_tree.rb_node;
        struct dm_buffer *b;

        while (n) {
                b = container_of(n, struct dm_buffer, node);

                if (b->block == block)
                        return b;

                n = (b->block < block) ? n->rb_left : n->rb_right;
        }

        return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
        struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
        struct dm_buffer *found;

        while (*new) {
                found = container_of(*new, struct dm_buffer, node);

                if (found->block == b->block) {
                        BUG_ON(found != b);
                        return;
                }

                parent = *new;
                new = (found->block < b->block) ?
                        &((*new)->rb_left) : &((*new)->rb_right);
        }

        rb_link_node(&b->node, parent, new);
        rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
        rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
                &dm_bufio_allocated_kmem_cache,
                &dm_bufio_allocated_get_free_pages,
                &dm_bufio_allocated_vmalloc,
        };

        spin_lock(&param_spinlock);

        *class_ptr[data_mode] += diff;

        dm_bufio_current_allocated += diff;

        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
                dm_bufio_peak_allocated = dm_bufio_current_allocated;

        spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);

        dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

        /*
         * Use default if set to 0 and report the actual cache size used.
         */
        if (!dm_bufio_cache_size_latch) {
                (void)cmpxchg(&dm_bufio_cache_size, 0,
                              dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }

        dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
                                         (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
{
        unsigned noio_flag;
        void *ptr;

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
        }

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
            gfp_mask & __GFP_NORETRY) {
                *data_mode = DATA_MODE_GET_FREE_PAGES;
                return (void *)__get_free_pages(gfp_mask,
                                                c->pages_per_block_bits);
        }

        *data_mode = DATA_MODE_VMALLOC;

        /*
         * __vmalloc allocates the data pages and auxiliary structures with
         * gfp_flags that were specified, but pagetables are always allocated
         * with GFP_KERNEL, no matter what was specified as gfp_mask.
         *
         * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
         * all allocations done by this process (including pagetables) are done
         * as if GFP_NOIO was specified.
         */

        if (gfp_mask & __GFP_NORETRY)
                noio_flag = memalloc_noio_save();

        ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

        if (gfp_mask & __GFP_NORETRY)
                memalloc_noio_restore(noio_flag);

        return ptr;
}
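
/*
 * A worked example of the fallback chain above, assuming 4 KiB pages: a
 * 512 B block is served by the per-size kmem_cache; a 64 KiB block
 * requested with __GFP_NORETRY comes from __get_free_pages(gfp_mask, 4);
 * the same 64 KiB block allocated for the reserve with GFP_KERNEL (which
 * must not fail) falls through to __vmalloc.
 */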

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
                             void *data, enum data_mode data_mode)
{
        switch (data_mode) {
        case DATA_MODE_SLAB:
                kmem_cache_free(DM_BUFIO_CACHE(c), data);
                break;

        case DATA_MODE_GET_FREE_PAGES:
                free_pages((unsigned long)data, c->pages_per_block_bits);
                break;

        case DATA_MODE_VMALLOC:
                vfree(data);
                break;

        default:
                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
                       data_mode);
                BUG();
        }
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
        struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
                                      gfp_mask);

        if (!b)
                return NULL;

        b->c = c;

        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
        if (!b->data) {
                kfree(b);
                return NULL;
        }

        adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        memset(&b->stack_trace, 0, sizeof(b->stack_trace));
#endif
        return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        adjust_total_allocated(b->data_mode, -(long)c->block_size);

        free_buffer_data(c, b->data, b->data_mode);
        kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
        struct dm_bufio_client *c = b->c;

        c->n_buffers[dirty]++;
        b->block = block;
        b->list_mode = dirty;
        list_add(&b->lru_list, &c->lru[dirty]);
        __insert(b->c, b);
        b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        __remove(b->c, b);
        list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_move(&b->lru_list, &c->lru[dirty]);
        b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *      the vector list is limited (increasing this limit increases
 *      memory-consumption per buffer, so it is not viable);
 *
 *      the memory must be direct-mapped, not vmalloced;
 *
 *      the I/O driver can reject requests spuriously if it thinks that
 *      the requests are too big for the device or if they cross a
 *      controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
        struct dm_buffer *b = context;

        b->bio.bi_status = error ? BLK_STS_IOERR : 0;
        b->bio.bi_end_io(&b->bio);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
                     unsigned n_sectors, bio_end_io_t *end_io)
{
        int r;
        struct dm_io_request io_req = {
                .bi_op = rw,
                .bi_op_flags = 0,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
        };
        struct dm_io_region region = {
                .bdev = b->c->bdev,
                .sector = sector,
                .count = n_sectors,
        };

        if (b->data_mode != DATA_MODE_VMALLOC) {
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = b->data;
        } else {
                io_req.mem.type = DM_IO_VMA;
                io_req.mem.ptr.vma = b->data;
        }

        b->bio.bi_end_io = end_io;

        r = dm_io(&io_req, 1, &region, NULL);
        if (r) {
                b->bio.bi_status = errno_to_blk_status(r);
                end_io(&b->bio);
        }
}

static void inline_endio(struct bio *bio)
{
        bio_end_io_t *end_fn = bio->bi_private;
        blk_status_t status = bio->bi_status;

        /*
         * Reset the bio to free any attached resources
         * (e.g. bio integrity profiles).
         */
        bio_reset(bio);

        bio->bi_status = status;
        end_fn(bio);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
                           unsigned n_sectors, bio_end_io_t *end_io)
{
        char *ptr;
        int len;

        bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
        b->bio.bi_iter.bi_sector = sector;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = inline_endio;
        /*
         * Use of .bi_private isn't a problem here because
         * the dm_buffer's inline bio is local to bufio.
         */
        b->bio.bi_private = end_io;
        bio_set_op_attrs(&b->bio, rw, 0);

        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
         * If len < PAGE_SIZE the buffer doesn't cross page boundary.
         */
        ptr = b->data;
        len = n_sectors << SECTOR_SHIFT;

        if (len >= PAGE_SIZE)
                BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
        else
                BUG_ON((unsigned long)ptr & (len - 1));

        do {
                if (!bio_add_page(&b->bio, virt_to_page(ptr),
                                  len < PAGE_SIZE ? len : PAGE_SIZE,
                                  offset_in_page(ptr))) {
                        BUG_ON(b->c->block_size <= PAGE_SIZE);
                        use_dmio(b, rw, sector, n_sectors, end_io);
                        return;
                }

                len -= PAGE_SIZE;
                ptr += PAGE_SIZE;
        } while (len > 0);

        submit_bio(&b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
{
        unsigned n_sectors;
        sector_t sector;

        if (rw == WRITE && b->c->write_callback)
                b->c->write_callback(b);

        sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
        n_sectors = 1 << b->c->sectors_per_block_bits;

        if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
            b->data_mode != DATA_MODE_VMALLOC)
                use_inline_bio(b, rw, sector, n_sectors, end_io);
        else
                use_dmio(b, rw, sector, n_sectors, end_io);
}
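
/*
 * Concretely, with 4 KiB pages the inline-bio path handles buffers of up
 * to DM_BUFIO_INLINE_VECS * PAGE_SIZE = 16 * 4 KiB = 64 KiB (128 sectors);
 * bigger or vmalloc-backed buffers go through dm-io.
 */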

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->write_error = bio->bi_status;
        if (unlikely(bio->bi_status)) {
                struct dm_bufio_client *c = b->c;

                (void)cmpxchg(&c->async_write_error, 0,
                                blk_status_to_errno(bio->bi_status));
        }

        BUG_ON(!test_bit(B_WRITING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_WRITING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
                                 struct list_head *write_list)
{
        if (!test_bit(B_DIRTY, &b->state))
                return;

        clear_bit(B_DIRTY, &b->state);
        wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

        if (!write_list)
                submit_io(b, WRITE, write_endio);
        else
                list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
        struct blk_plug plug;
        blk_start_plug(&plug);
        while (!list_empty(write_list)) {
                struct dm_buffer *b =
                        list_entry(write_list->next, struct dm_buffer, write_list);
                list_del(&b->write_list);
                submit_io(b, WRITE, write_endio);
                cond_resched();
        }
        blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
        BUG_ON(b->hold_count);

        if (!b->state)  /* fast case */
                return;

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b, NULL);
        wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                cond_resched();
        }

        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                cond_resched();
        }

        return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&c->free_buffer_wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);

        io_schedule();

        remove_wait_queue(&c->free_buffer_wait, &wait);

        dm_bufio_lock(c);
}

enum new_flag {
        NF_FRESH = 0,
        NF_READ = 1,
        NF_GET = 2,
        NF_PREFETCH = 3
};
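
/*
 * These flags map onto the public entry points below: NF_FRESH is used by
 * dm_bufio_new, NF_READ by dm_bufio_read, NF_GET by dm_bufio_get and
 * NF_PREFETCH by dm_bufio_prefetch.
 */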

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b;
        bool tried_noio_alloc = false;

        /*
         * dm-bufio is resistant to allocation failures (it just keeps
         * one buffer reserved in case all the allocations fail).
         * So set flags to not try too hard:
         *      GFP_NOWAIT: don't wait; if we need to sleep we'll release our
         *                  mutex and wait ourselves.
         *      __GFP_NORETRY: don't retry and rather return failure
         *      __GFP_NOMEMALLOC: don't use emergency reserves
         *      __GFP_NOWARN: don't print a warning in case of failure
         *
         * For debugging, if we set the cache size to 1, no new buffers will
         * be allocated.
         */
        while (1) {
                if (dm_bufio_cache_size_latch != 1) {
                        b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (b)
                                return b;
                }

                if (nf == NF_PREFETCH)
                        return NULL;

                if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
                        dm_bufio_unlock(c);
                        b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        dm_bufio_lock(c);
                        if (b)
                                return b;
                        tried_noio_alloc = true;
                }

                if (!list_empty(&c->reserved_buffers)) {
                        b = list_entry(c->reserved_buffers.next,
                                       struct dm_buffer, lru_list);
                        list_del(&b->lru_list);
                        c->need_reserved_buffers++;

                        return b;
                }

                b = __get_unclaimed_buffer(c);
                if (b)
                        return b;

                __wait_for_free_buffer(c);
        }
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

        if (!b)
                return NULL;

        if (c->alloc_callback)
                c->alloc_callback(b);

        return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        if (!c->need_reserved_buffers)
                free_buffer(b);
        else {
                list_add(&b->lru_list, &c->reserved_buffers);
                c->need_reserved_buffers--;
        }

        wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
                                        struct list_head *write_list)
{
        struct dm_buffer *b, *tmp;

        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state)) {
                        __relink_lru(b, LIST_CLEAN);
                        continue;
                }

                if (no_wait && test_bit(B_WRITING, &b->state))
                        return;

                __write_dirty_buffer(b, write_list);
                cond_resched();
        }
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
                               unsigned long *threshold_buffers,
                               unsigned long *limit_buffers)
{
        unsigned long buffers;

        if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
                if (mutex_trylock(&dm_bufio_clients_lock)) {
                        __cache_size_refresh();
                        mutex_unlock(&dm_bufio_clients_lock);
                }
        }

        buffers = dm_bufio_cache_size_per_client >>
                  (c->sectors_per_block_bits + SECTOR_SHIFT);

        if (buffers < c->minimum_buffers)
                buffers = c->minimum_buffers;

        *limit_buffers = buffers;
        *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
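
/*
 * Worked example with assumed numbers: with a 100 MiB per-client allowance
 * and 4 KiB blocks (sectors_per_block_bits == 3), the limit is
 * 100 MiB >> (3 + SECTOR_SHIFT) = 25600 buffers and the writeback threshold
 * is 25600 * 75 / 100 = 19200 dirty buffers.
 */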

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
                              struct list_head *write_list)
{
        unsigned long threshold_buffers, limit_buffers;

        __get_memory_limit(c, &threshold_buffers, &limit_buffers);

        while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
               limit_buffers) {

                struct dm_buffer *b = __get_unclaimed_buffer(c);

                if (!b)
                        return;

                __free_buffer_wake(b);
                cond_resched();
        }

        if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
                __write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
                                     enum new_flag nf, int *need_submit,
                                     struct list_head *write_list)
{
        struct dm_buffer *b, *new_b = NULL;

        *need_submit = 0;

        b = __find(c, block);
        if (b)
                goto found_buffer;

        if (nf == NF_GET)
                return NULL;

        new_b = __alloc_buffer_wait(c, nf);
        if (!new_b)
                return NULL;

        /*
         * We've had a period where the mutex was unlocked, so need to
         * recheck the hash table.
         */
        b = __find(c, block);
        if (b) {
                __free_buffer_wake(new_b);
                goto found_buffer;
        }

        __check_watermark(c, write_list);

        b = new_b;
        b->hold_count = 1;
        b->read_error = 0;
        b->write_error = 0;
        __link_buffer(b, block, LIST_CLEAN);

        if (nf == NF_FRESH) {
                b->state = 0;
                return b;
        }

        b->state = 1 << B_READING;
        *need_submit = 1;

        return b;

found_buffer:
        if (nf == NF_PREFETCH)
                return NULL;
        /*
         * Note: it is essential that we don't wait for the buffer to be
         * read if dm_bufio_get function is used. Both dm_bufio_get and
         * dm_bufio_prefetch can be used in the driver request routine.
         * If the user called both dm_bufio_prefetch and dm_bufio_get on
         * the same buffer, it would deadlock if we waited.
         */
        if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
                return NULL;

        b->hold_count++;
        __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                     test_bit(B_WRITING, &b->state));
        return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->read_error = bio->bi_status;

        BUG_ON(!test_bit(B_READING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_READING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
                      enum new_flag nf, struct dm_buffer **bp)
{
        int need_submit;
        struct dm_buffer *b;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        if (b && b->hold_count == 1)
                buffer_record_stack(b);
#endif
        dm_bufio_unlock(c);

        __flush_write_list(&write_list);

        if (!b)
                return NULL;

        if (need_submit)
                submit_io(b, READ, read_endio);

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

        if (b->read_error) {
                int error = blk_status_to_errno(b->read_error);

                dm_bufio_release(b);

                return ERR_PTR(error);
        }

        *bp = b;

        return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
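
/*
 * A minimal read-side usage sketch from a hypothetical caller ("client"
 * and "block_nr" are assumed names; error handling abbreviated):
 *
 *      struct dm_buffer *bp;
 *      void *data = dm_bufio_read(client, block_nr, &bp);
 *
 *      if (IS_ERR(data))
 *              return PTR_ERR(data);
 *      ... read or modify up to block_size bytes at "data" ...
 *      dm_bufio_mark_buffer_dirty(bp);         (only if modified)
 *      dm_bufio_release(bp);
 */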

void dm_bufio_prefetch(struct dm_bufio_client *c,
                       sector_t block, unsigned n_blocks)
{
        struct blk_plug plug;

        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        blk_start_plug(&plug);
        dm_bufio_lock(c);

        for (; n_blocks--; block++) {
                int need_submit;
                struct dm_buffer *b;
                b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
                                &write_list);
                if (unlikely(!list_empty(&write_list))) {
                        dm_bufio_unlock(c);
                        blk_finish_plug(&plug);
                        __flush_write_list(&write_list);
                        blk_start_plug(&plug);
                        dm_bufio_lock(c);
                }
                if (unlikely(b != NULL)) {
                        dm_bufio_unlock(c);

                        if (need_submit)
                                submit_io(b, READ, read_endio);
                        dm_bufio_release(b);

                        cond_resched();

                        if (!n_blocks)
                                goto flush_plug;
                        dm_bufio_lock(c);
                }
        }

        dm_bufio_unlock(c);

flush_plug:
        blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(!b->hold_count);

        b->hold_count--;
        if (!b->hold_count) {
                wake_up(&c->free_buffer_wait);

                /*
                 * If there were errors on the buffer, and the buffer is not
                 * to be written, free the buffer. There is no point in caching
                 * an invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
                    !test_bit(B_READING, &b->state) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
                        __free_buffer_wake(b);
                }
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(test_bit(B_READING, &b->state));

        if (!test_and_set_bit(B_DIRTY, &b->state))
                __relink_lru(b, LIST_DIRTY);

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
        int a, f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
        dm_bufio_lock(c);

again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                int dropped_lock = 0;

                if (buffers_processed < c->n_buffers[LIST_DIRTY])
                        buffers_processed++;

                BUG_ON(test_bit(B_READING, &b->state));

                if (test_bit(B_WRITING, &b->state)) {
                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                }

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state))
                        __relink_lru(b, LIST_CLEAN);

                cond_resched();

                /*
                 * If we dropped the lock, the list is no longer consistent,
                 * so we must restart the search.
                 *
                 * In the most common case, the buffer just processed is
                 * relinked to the clean list, so we won't loop scanning the
                 * same buffer again and again.
                 *
                 * This may livelock if there is another thread simultaneously
                 * dirtying buffers, so we count the number of buffers walked
                 * and if it exceeds the total number of buffers, it means that
                 * someone is doing some writes simultaneously with us.  In
                 * this case, stop, dropping the lock.
                 */
                if (dropped_lock)
                        goto again;
        }
        wake_up(&c->free_buffer_wait);
        dm_bufio_unlock(c);

        a = xchg(&c->async_write_error, 0);
        f = dm_bufio_issue_flush(c);
        if (a)
                return a;

        return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
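
/*
 * Illustrative commit sequence for a hypothetical caller: mark the buffer
 * dirty, release it, then write everything back and flush:
 *
 *      dm_bufio_mark_buffer_dirty(bp);
 *      dm_bufio_release(bp);
 *      r = dm_bufio_write_dirty_buffers(client);
 *
 * A non-zero r is the first recorded asynchronous write error, or else the
 * error from the cache flush.
 */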

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = 0,
                .count = 0,
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
        struct dm_bufio_client *c = b->c;
        struct dm_buffer *new;

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);

retry:
        new = __find(c, new_block);
        if (new) {
                if (new->hold_count) {
                        __wait_for_free_buffer(c);
                        goto retry;
                }

                /*
                 * FIXME: Is there any point waiting for a write that's going
                 * to be overwritten in a bit?
                 */
                __make_buffer_clean(new);
                __unlink_buffer(new);
                __free_buffer_wake(new);
        }

        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));

        __write_dirty_buffer(b, NULL);
        if (b->hold_count == 1) {
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
                wait_on_bit_lock_io(&b->state, B_WRITING,
                                    TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as a block number.
                 * After the write, link the buffer back to old_block.
                 * All this must be done in bufio lock, so that block number
                 * change isn't visible to other threads.
                 */
                old_block = b->block;
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, WRITE, write_endio);
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }

        dm_bufio_unlock(c);
        dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
        struct dm_buffer *b;

        dm_bufio_lock(c);

        b = __find(c, block);
        if (b && likely(!b->hold_count) && likely(!b->state)) {
                __unlink_buffer(b);
                __free_buffer_wake(b);
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
        c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
        return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
        return i_size_read(c->bdev->bd_inode) >>
                           (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
        return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
        return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
        return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
        return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
        struct dm_buffer *b;
        int i;
        bool warned = false;

        BUG_ON(dm_bufio_in_request());

        /*
         * An optimization so that the buffers are not written one-by-one.
         */
        dm_bufio_write_dirty_buffers_async(c);

        dm_bufio_lock(c);

        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);

        for (i = 0; i < LIST_SIZE; i++)
                list_for_each_entry(b, &c->lru[i], lru_list) {
                        WARN_ON(!warned);
                        warned = true;
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
                        print_stack_trace(&b->stack_trace, 1);
                        b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
#endif
                }

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);
#endif

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(!list_empty(&c->lru[i]));

        dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO is pending or the client
1538  * is still using it.  Caller is expected to know buffer is too old.
1539  *
1540  * And if GFP_NOFS is used, we must not do any I/O because we hold
1541  * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1542  * rerouted to different bufio client.
1543  */
1544 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1545 {
1546         if (!(gfp & __GFP_FS)) {
1547                 if (test_bit(B_READING, &b->state) ||
1548                     test_bit(B_WRITING, &b->state) ||
1549                     test_bit(B_DIRTY, &b->state))
1550                         return false;
1551         }
1552
1553         if (b->hold_count)
1554                 return false;
1555
1556         __make_buffer_clean(b);
1557         __unlink_buffer(b);
1558         __free_buffer_wake(b);
1559
1560         return true;
1561 }
1562
1563 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1564 {
1565         unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1566         return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
1567 }
1568
1569 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1570                             gfp_t gfp_mask)
1571 {
1572         int l;
1573         struct dm_buffer *b, *tmp;
1574         unsigned long freed = 0;
1575         unsigned long count = nr_to_scan;
1576         unsigned long retain_target = get_retain_buffers(c);
1577
1578         for (l = 0; l < LIST_SIZE; l++) {
1579                 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1580                         if (__try_evict_buffer(b, gfp_mask))
1581                                 freed++;
1582                         if (!--nr_to_scan || ((count - freed) <= retain_target))
1583                                 return freed;
1584                         cond_resched();
1585                 }
1586         }
1587         return freed;
1588 }
1589
1590 static unsigned long
1591 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1592 {
1593         struct dm_bufio_client *c;
1594         unsigned long freed;
1595
1596         c = container_of(shrink, struct dm_bufio_client, shrinker);
1597         if (sc->gfp_mask & __GFP_FS)
1598                 dm_bufio_lock(c);
1599         else if (!dm_bufio_trylock(c))
1600                 return SHRINK_STOP;
1601
1602         freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1603         dm_bufio_unlock(c);
1604         return freed;
1605 }
1606
1607 static unsigned long
1608 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1609 {
1610         struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1611
1612         return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
1613 }
1614
1615 /*
1616  * Create the buffering interface
1617  */
1618 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1619                                                unsigned reserved_buffers, unsigned aux_size,
1620                                                void (*alloc_callback)(struct dm_buffer *),
1621                                                void (*write_callback)(struct dm_buffer *))
1622 {
1623         int r;
1624         struct dm_bufio_client *c;
1625         unsigned i;
1626
        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
        }
        c->buffer_tree = RB_ROOT;

        c->bdev = bdev;
        c->block_size = block_size;
        c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
        c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
                                  __ffs(block_size) - PAGE_SHIFT : 0;
        c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
                                  PAGE_SHIFT - __ffs(block_size) : 0);

        c->aux_size = aux_size;
        c->alloc_callback = alloc_callback;
        c->write_callback = write_callback;

        for (i = 0; i < LIST_SIZE; i++) {
                INIT_LIST_HEAD(&c->lru[i]);
                c->n_buffers[i] = 0;
        }

        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->reserved_buffers);
        c->need_reserved_buffers = reserved_buffers;

        c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

        init_waitqueue_head(&c->free_buffer_wait);
        c->async_write_error = 0;

        c->dm_io = dm_io_client_create();
        if (IS_ERR(c->dm_io)) {
                r = PTR_ERR(c->dm_io);
                goto bad_dm_io;
        }

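        /*
         * Blocks smaller than a page come from a kmem cache shared by all
         * clients with the same block size; create the cache and its name
         * on first use, under dm_bufio_clients_lock.
         */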
        mutex_lock(&dm_bufio_clients_lock);
        if (c->blocks_per_page_bits) {
                if (!DM_BUFIO_CACHE_NAME(c)) {
                        DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
                        if (!DM_BUFIO_CACHE_NAME(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }

                if (!DM_BUFIO_CACHE(c)) {
                        DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
                                                              c->block_size,
                                                              c->block_size, 0, NULL);
                        if (!DM_BUFIO_CACHE(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }
        }
        mutex_unlock(&dm_bufio_clients_lock);

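        /*
         * Pre-allocate the reserved buffers now so that the client can
         * make forward progress later even if memory allocation fails.
         */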
        while (c->need_reserved_buffers) {
                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

                if (!b) {
                        r = -ENOMEM;
                        goto bad_buffer;
                }
                __free_buffer_wake(b);
        }

        mutex_lock(&dm_bufio_clients_lock);
        dm_bufio_client_count++;
        list_add(&c->client_list, &dm_bufio_all_clients);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

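        /* Register with the VM so the cache shrinks under memory pressure. */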
        c->shrinker.count_objects = dm_bufio_shrink_count;
        c->shrinker.scan_objects = dm_bufio_shrink_scan;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        register_shrinker(&c->shrinker);

        return c;

bad_buffer:
bad_cache:
        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }
        dm_io_client_destroy(c->dm_io);
bad_dm_io:
        kfree(c);
bad_client:
        return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
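
/*
 * Example (a sketch, not from this file): a device-mapper target that
 * caches 4KiB metadata blocks might create and destroy a client like
 * this:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */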

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
        unsigned i;

        drop_buffers(c);

        unregister_shrinker(&c->shrinker);

        mutex_lock(&dm_bufio_clients_lock);

        list_del(&c->client_list);
        dm_bufio_client_count--;
        __cache_size_refresh();

        mutex_unlock(&dm_bufio_clients_lock);

        BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
        BUG_ON(c->need_reserved_buffers);

        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }

        /* Any nonzero count here means the client leaked buffers. */
        for (i = 0; i < LIST_SIZE; i++)
                if (c->n_buffers[i])
                        DMERR("leaked buffer count %d: %lu", i, c->n_buffers[i]);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(c->n_buffers[i]);

        dm_io_client_destroy(c->dm_io);
        kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
        c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

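/*
 * Convert dm_bufio_max_age (seconds) to jiffies, clamped so that the
 * multiplication by HZ cannot overflow an unsigned int.
 */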
static unsigned get_max_age_hz(void)
{
        unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

        if (max_age > UINT_MAX / HZ)
                max_age = UINT_MAX / HZ;

        return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
        return time_after_eq(jiffies, b->last_accessed + age_hz);
}

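/*
 * Walk the clean LRU from oldest to newest, evicting buffers that have
 * not been accessed for age_hz jiffies while keeping at least
 * retain_target buffers cached.  The LRU is ordered by access time, so
 * the walk can stop at the first buffer that is still young enough.
 */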
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
        struct dm_buffer *b, *tmp;
        unsigned long retain_target = get_retain_buffers(c);
        unsigned long count;
        LIST_HEAD(write_list);

        dm_bufio_lock(c);

        __check_watermark(c, &write_list);
        if (unlikely(!list_empty(&write_list))) {
                dm_bufio_unlock(c);
                __flush_write_list(&write_list);
                dm_bufio_lock(c);
        }

        count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
                if (count <= retain_target)
                        break;

                if (!older_than(b, age_hz))
                        break;

                if (__try_evict_buffer(b, 0))
                        count--;

                cond_resched();
        }

        dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
        unsigned long max_age_hz = get_max_age_hz();
        struct dm_bufio_client *c;

        mutex_lock(&dm_bufio_clients_lock);

        __cache_size_refresh();

        list_for_each_entry(c, &dm_bufio_all_clients, client_list)
                __evict_old_buffers(c, max_age_hz);

        mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

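/* Periodic work: age out old buffers, then re-arm the timer. */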
static void work_fn(struct work_struct *w)
{
        cleanup_old_buffers();

        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
        __u64 mem;

        dm_bufio_allocated_kmem_cache = 0;
        dm_bufio_allocated_get_free_pages = 0;
        dm_bufio_allocated_vmalloc = 0;
        dm_bufio_current_allocated = 0;

        memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
        memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

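        /*
         * Limit the default cache size to DM_BUFIO_MEMORY_PERCENT of low
         * memory.  mult_frac() keeps the intermediate product from
         * overflowing an unsigned long on 32-bit systems.
         */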
        mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
                               DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

        if (mem > ULONG_MAX)
                mem = ULONG_MAX;

#ifdef CONFIG_MMU
        /*
         * Get the size of vmalloc space the same way as VMALLOC_TOTAL
         * in fs/proc/internal.h
         */
        if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
                mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

        dm_bufio_default_cache_size = mem;

        mutex_lock(&dm_bufio_clients_lock);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
        if (!dm_bufio_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);

        return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
        int bug = 0;
        int i;

        cancel_delayed_work_sync(&dm_bufio_work);
        destroy_workqueue(dm_bufio_wq);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
                kmem_cache_destroy(dm_bufio_caches[i]);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
                kfree(dm_bufio_cache_names[i]);

        if (dm_bufio_client_count) {
                DMCRIT("%s: dm_bufio_client_count leaked: %d",
                        __func__, dm_bufio_client_count);
                bug = 1;
        }

        if (dm_bufio_current_allocated) {
                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
                        __func__, dm_bufio_current_allocated);
                bug = 1;
        }

        if (dm_bufio_allocated_get_free_pages) {
                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
                       __func__, dm_bufio_allocated_get_free_pages);
                bug = 1;
        }

        if (dm_bufio_allocated_vmalloc) {
                DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
                       __func__, dm_bufio_allocated_vmalloc);
                bug = 1;
        }

        BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");