drivers/md/dm-era-target.c
#include "dm.h"
#include "persistent-data/dm-transaction-manager.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "era"

#define SUPERBLOCK_LOCATION 0
#define SUPERBLOCK_MAGIC 2126579579
#define SUPERBLOCK_CSUM_XOR 146538381
#define MIN_ERA_VERSION 1
#define MAX_ERA_VERSION 1
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
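/* The minimum data block size, in 512-byte sectors (8 sectors = 4KiB). */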
#define MIN_BLOCK_SIZE 8

/*----------------------------------------------------------------
 * Writeset
 *--------------------------------------------------------------*/
struct writeset_metadata {
	uint32_t nr_bits;
	dm_block_t root;
};

struct writeset {
	struct writeset_metadata md;

	/*
	 * An in-core copy of the bits, to avoid constantly looking them
	 * up on disk.
	 */
	unsigned long *bits;
};

/*
 * This does not free the on-disk bitset, since that is normally done
 * after the writeset has been digested into the era array.
 */
static void writeset_free(struct writeset *ws)
{
	vfree(ws->bits);
	ws->bits = NULL;
}

static int setup_on_disk_bitset(struct dm_disk_bitset *info,
				unsigned nr_bits, dm_block_t *root)
{
	int r;

	r = dm_bitset_empty(info, root);
	if (r)
		return r;

	return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}

static size_t bitset_size(unsigned nr_bits)
{
	return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
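
/*
 * For example, with 64-bit longs, nr_bits = 1000 rounds up to
 * dm_div_up(1000, 64) = 16 longs, i.e. 128 bytes for the in-core bitset.
 */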

/*
 * Allocates memory for the in-core bitset.
 */
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
	ws->md.nr_bits = nr_blocks;
	ws->md.root = INVALID_WRITESET_ROOT;
	ws->bits = vzalloc(bitset_size(nr_blocks));
	if (!ws->bits) {
		DMERR("%s: couldn't allocate in memory bitset", __func__);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Wipes the in-core bitset, and creates a new on-disk bitset.
 */
static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
{
	int r;

	memset(ws->bits, 0, bitset_size(ws->md.nr_bits));

	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
	if (r) {
		DMERR("%s: setup_on_disk_bitset failed", __func__);
		return r;
	}

	return 0;
}

static bool writeset_marked(struct writeset *ws, dm_block_t block)
{
	return test_bit(block, ws->bits);
}

static int writeset_marked_on_disk(struct dm_disk_bitset *info,
				   struct writeset_metadata *m, dm_block_t block,
				   bool *result)
{
	int r;
	dm_block_t old = m->root;

	/*
	 * The bitset was flushed when it was archived, so we know there'll
	 * be no change to the root.
	 */
	r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
	if (r) {
		DMERR("%s: dm_bitset_test_bit failed", __func__);
		return r;
	}

	BUG_ON(m->root != old);

	return r;
}

/*
 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
 */
static int writeset_test_and_set(struct dm_disk_bitset *info,
				 struct writeset *ws, uint32_t block)
{
	int r;

	if (!test_and_set_bit(block, ws->bits)) {
		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
		if (r) {
			/* FIXME: fail mode */
			return r;
		}

		return 0;
	}

	return 1;
}
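
/*
 * Note that a return of 0 means the on-disk bitset changed, so the caller
 * must commit the metadata before issuing the corresponding write bio
 * (see process_deferred_bios()).
 */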

/*----------------------------------------------------------------
 * On disk metadata layout
 *--------------------------------------------------------------*/
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16

struct writeset_disk {
	__le32 nr_bits;
	__le64 root;
} __packed;

struct superblock_disk {
	__le32 csum;
	__le32 flags;
	__le64 blocknr;

	__u8 uuid[UUID_LEN];
	__le64 magic;
	__le32 version;

	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	__le32 data_block_size;
	__le32 metadata_block_size;
	__le32 nr_blocks;

	__le32 current_era;
	struct writeset_disk current_writeset;

	/*
	 * Only these two fields are valid within the metadata snapshot.
	 */
	__le64 writeset_tree_root;
	__le64 era_array_root;

	__le64 metadata_snap;
} __packed;
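
/*
 * The csum field covers everything from 'flags' to the end of the
 * metadata block (see sb_prepare_for_write() and sb_check() below).
 */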

/*----------------------------------------------------------------
 * Superblock validation
 *--------------------------------------------------------------*/
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t sb_block_size)
{
	struct superblock_disk *disk = dm_block_data(b);

	disk->blocknr = cpu_to_le64(dm_block_location(b));
	disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
						sb_block_size - sizeof(__le32),
						SUPERBLOCK_CSUM_XOR));
}

static int check_metadata_version(struct superblock_disk *disk)
{
	uint32_t metadata_version = le32_to_cpu(disk->version);

	if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
		DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
		      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
		return -EINVAL;
	}

	return 0;
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t sb_block_size)
{
	struct superblock_disk *disk = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk->magic),
		      (unsigned long long) SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
					     sb_block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
		return -EILSEQ;
	}

	return check_metadata_version(disk);
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};

/*----------------------------------------------------------------
 * Low level metadata handling
 *--------------------------------------------------------------*/
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5

struct era_metadata {
	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *sm;
	struct dm_transaction_manager *tm;

	dm_block_t block_size;
	uint32_t nr_blocks;

	uint32_t current_era;

	/*
	 * We preallocate 2 writesets.  When an era rolls over we
	 * switch between them. This means the allocation is done at
	 * preresume time, rather than on the io path.
	 */
	struct writeset writesets[2];
	struct writeset *current_writeset;

	dm_block_t writeset_tree_root;
	dm_block_t era_array_root;

	struct dm_disk_bitset bitset_info;
	struct dm_btree_info writeset_tree_info;
	struct dm_array_info era_array_info;

	dm_block_t metadata_snap;

	/*
	 * A flag that is set whenever a writeset has been archived.
	 */
	bool archived_writesets;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

static int superblock_read_lock(struct era_metadata *md,
				struct dm_block **sblock)
{
	return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
			       &sb_validator, sblock);
}

static int superblock_lock_zero(struct era_metadata *md,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct era_metadata *md,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

/* FIXME: duplication with cache and thin */
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = true;
	for (i = 0; i < sb_block_size; i++) {
		if (data_le[i] != zero) {
			*result = false;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}

/*----------------------------------------------------------------*/

static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
{
	disk->nr_bits = cpu_to_le32(core->nr_bits);
	disk->root = cpu_to_le64(core->root);
}

static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
{
	core->nr_bits = le32_to_cpu(disk->nr_bits);
	core->root = le64_to_cpu(disk->root);
}

static void ws_inc(void *context, const void *value)
{
	struct era_metadata *md = context;
	struct writeset_disk ws_d;
	dm_block_t b;

	memcpy(&ws_d, value, sizeof(ws_d));
	b = le64_to_cpu(ws_d.root);

	dm_tm_inc(md->tm, b);
}

static void ws_dec(void *context, const void *value)
{
	struct era_metadata *md = context;
	struct writeset_disk ws_d;
	dm_block_t b;

	memcpy(&ws_d, value, sizeof(ws_d));
	b = le64_to_cpu(ws_d.root);

	dm_bitset_del(&md->bitset_info, b);
}

static int ws_eq(void *context, const void *value1, const void *value2)
{
	return !memcmp(value1, value2, sizeof(struct writeset_disk));
}

/*----------------------------------------------------------------*/

static void setup_writeset_tree_info(struct era_metadata *md)
{
	struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;

	md->writeset_tree_info.tm = md->tm;
	md->writeset_tree_info.levels = 1;
	vt->context = md;
	vt->size = sizeof(struct writeset_disk);
	vt->inc = ws_inc;
	vt->dec = ws_dec;
	vt->equal = ws_eq;
}

static void setup_era_array_info(struct era_metadata *md)
{
	struct dm_btree_value_type vt;

	vt.context = NULL;
	vt.size = sizeof(__le32);
	vt.inc = NULL;
	vt.dec = NULL;
	vt.equal = NULL;

	dm_array_info_init(&md->era_array_info, md->tm, &vt);
}

static void setup_infos(struct era_metadata *md)
{
	dm_disk_bitset_init(md->tm, &md->bitset_info);
	setup_writeset_tree_info(md);
	setup_era_array_info(md);
}

/*----------------------------------------------------------------*/

static int create_fresh_metadata(struct era_metadata *md)
{
	int r;

	r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
				 &md->tm, &md->sm);
	if (r < 0) {
		DMERR("dm_tm_create_with_sm failed");
		return r;
	}

	setup_infos(md);

	r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
	if (r) {
		DMERR("couldn't create new writeset tree");
		goto bad;
	}

	r = dm_array_empty(&md->era_array_info, &md->era_array_root);
	if (r) {
		DMERR("couldn't create era array");
		goto bad;
	}

	return 0;

bad:
	dm_sm_destroy(md->sm);
	dm_tm_destroy(md->tm);

	return r;
}

static int save_sm_root(struct era_metadata *md)
{
	int r;
	size_t metadata_len;

	r = dm_sm_root_size(md->sm, &metadata_len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
			       metadata_len);
}

static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &md->metadata_space_map_root,
	       sizeof(md->metadata_space_map_root));
}

/*
 * Fills in a superblock, including the static fields that don't get updated
 * with every commit (a possible optimisation).  'md' should be fully
 * constructed when this is called.
 */
static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
{
	disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
	disk->flags = cpu_to_le32(0ul);

	/* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
	memset(disk->uuid, 0, sizeof(disk->uuid));
	disk->version = cpu_to_le32(MAX_ERA_VERSION);

	copy_sm_root(md, disk);

	disk->data_block_size = cpu_to_le32(md->block_size);
	disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
	disk->nr_blocks = cpu_to_le32(md->nr_blocks);
	disk->current_era = cpu_to_le32(md->current_era);

	ws_pack(&md->current_writeset->md, &disk->current_writeset);
	disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
	disk->era_array_root = cpu_to_le64(md->era_array_root);
	disk->metadata_snap = cpu_to_le64(md->metadata_snap);
}

static int write_superblock(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *disk;

	r = save_sm_root(md);
	if (r) {
		DMERR("%s: save_sm_root failed", __func__);
		return r;
	}

	r = superblock_lock_zero(md, &sblock);
	if (r)
		return r;

	disk = dm_block_data(sblock);
	prepare_superblock(md, disk);

	return dm_tm_commit(md->tm, sblock);
}

/*
 * Assumes block_size and the infos are set.
 */
static int format_metadata(struct era_metadata *md)
{
	int r;

	r = create_fresh_metadata(md);
	if (r)
		return r;

	r = write_superblock(md);
	if (r) {
		dm_sm_destroy(md->sm);
		dm_tm_destroy(md->tm);
		return r;
	}

	return 0;
}

static int open_metadata(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *disk;

	r = superblock_read_lock(md, &sblock);
	if (r) {
		DMERR("couldn't read_lock superblock");
		return r;
	}

	disk = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk->data_block_size) != md->block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk->data_block_size),
		      (unsigned long long) md->block_size);
		r = -EINVAL;
		goto bad;
	}

	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
			       disk->metadata_space_map_root,
			       sizeof(disk->metadata_space_map_root),
			       &md->tm, &md->sm);
	if (r) {
		DMERR("dm_tm_open_with_sm failed");
		goto bad;
	}

	setup_infos(md);

	md->block_size = le32_to_cpu(disk->data_block_size);
	md->nr_blocks = le32_to_cpu(disk->nr_blocks);
	md->current_era = le32_to_cpu(disk->current_era);

	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
	md->era_array_root = le64_to_cpu(disk->era_array_root);
	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
	md->archived_writesets = true;

	dm_bm_unlock(sblock);

	return 0;

bad:
	dm_bm_unlock(sblock);
	return r;
}

static int open_or_format_metadata(struct era_metadata *md,
				   bool may_format)
{
	int r;
	bool unformatted = false;

	r = superblock_all_zeroes(md->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return may_format ? format_metadata(md) : -EPERM;

	return open_metadata(md);
}

static int create_persistent_data_objects(struct era_metadata *md,
					  bool may_format)
{
	int r;

	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
					 ERA_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(md->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(md->bm);
	}

	r = open_or_format_metadata(md, may_format);
	if (r)
		dm_block_manager_destroy(md->bm);

	return r;
}

static void destroy_persistent_data_objects(struct era_metadata *md)
{
	dm_sm_destroy(md->sm);
	dm_tm_destroy(md->tm);
	dm_block_manager_destroy(md->bm);
}

/*
 * This waits until all era_map threads have picked up the new writeset.
 */
static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
{
	rcu_assign_pointer(md->current_writeset, new_writeset);
	synchronize_rcu();
}
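
/*
 * Readers (see metadata_current_marked()) dereference current_writeset
 * under rcu_read_lock(), so once synchronize_rcu() returns nobody can
 * still be using the old writeset.
 */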

/*----------------------------------------------------------------
 * Writesets get 'digested' into the main era array.
 *
 * We're using a coroutine here so the worker thread can do the digestion,
 * thus avoiding synchronisation of the metadata.  Digesting a whole
 * writeset in one go would cause too much latency.
 *--------------------------------------------------------------*/
struct digest {
	uint32_t era;
	unsigned nr_bits, current_bit;
	struct writeset_metadata writeset;
	__le32 value;
	struct dm_disk_bitset info;

	int (*step)(struct era_metadata *, struct digest *);
};
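
/*
 * The step function cycles through three states:
 * metadata_digest_lookup_writeset finds the oldest archived writeset,
 * metadata_digest_transcribe_writeset copies up to INSERTS_PER_STEP bits
 * of it into the era array per step, and metadata_digest_remove_writeset
 * deletes it from the writeset tree before looking up the next one.
 */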

static int metadata_digest_lookup_writeset(struct era_metadata *md,
					   struct digest *d);

static int metadata_digest_remove_writeset(struct era_metadata *md,
					   struct digest *d)
{
	int r;
	uint64_t key = d->era;

	r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
			    &key, &md->writeset_tree_root);
	if (r) {
		DMERR("%s: dm_btree_remove failed", __func__);
		return r;
	}

	d->step = metadata_digest_lookup_writeset;
	return 0;
}

#define INSERTS_PER_STEP 100

static int metadata_digest_transcribe_writeset(struct era_metadata *md,
					       struct digest *d)
{
	int r;
	bool marked;
	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);

	for (b = d->current_bit; b < e; b++) {
		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
		if (r) {
			DMERR("%s: writeset_marked_on_disk failed", __func__);
			return r;
		}

		if (!marked)
			continue;

		__dm_bless_for_disk(&d->value);
		r = dm_array_set_value(&md->era_array_info, md->era_array_root,
				       b, &d->value, &md->era_array_root);
		if (r) {
			DMERR("%s: dm_array_set_value failed", __func__);
			return r;
		}
	}

	if (b == d->nr_bits)
		d->step = metadata_digest_remove_writeset;
	else
		d->current_bit = b;

	return 0;
}

static int metadata_digest_lookup_writeset(struct era_metadata *md,
					   struct digest *d)
{
	int r;
	uint64_t key;
	struct writeset_disk disk;

	r = dm_btree_find_lowest_key(&md->writeset_tree_info,
				     md->writeset_tree_root, &key);
	if (r < 0)
		return r;

	d->era = key;

	r = dm_btree_lookup(&md->writeset_tree_info,
			    md->writeset_tree_root, &key, &disk);
	if (r) {
		if (r == -ENODATA) {
			d->step = NULL;
			return 0;
		}

		DMERR("%s: dm_btree_lookup failed", __func__);
		return r;
	}

	ws_unpack(&disk, &d->writeset);
	d->value = cpu_to_le32(key);

	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
	d->current_bit = 0;
	d->step = metadata_digest_transcribe_writeset;

	return 0;
}

static int metadata_digest_start(struct era_metadata *md, struct digest *d)
{
	if (d->step)
		return 0;

	memset(d, 0, sizeof(*d));

	/*
	 * We initialise another bitset info to avoid any caching side
	 * effects with the previous one.
	 */
	dm_disk_bitset_init(md->tm, &d->info);
	d->step = metadata_digest_lookup_writeset;

	return 0;
}

/*----------------------------------------------------------------
 * High level metadata interface.  Target methods should use these, and not
 * the lower level ones.
 *--------------------------------------------------------------*/
static struct era_metadata *metadata_open(struct block_device *bdev,
					  sector_t block_size,
					  bool may_format)
{
	int r;
	struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return ERR_PTR(-ENOMEM);

	md->bdev = bdev;
	md->block_size = block_size;

	md->writesets[0].md.root = INVALID_WRITESET_ROOT;
	md->writesets[1].md.root = INVALID_WRITESET_ROOT;
	md->current_writeset = &md->writesets[0];

	r = create_persistent_data_objects(md, may_format);
	if (r) {
		kfree(md);
		return ERR_PTR(r);
	}

	return md;
}

static void metadata_close(struct era_metadata *md)
{
	writeset_free(&md->writesets[0]);
	writeset_free(&md->writesets[1]);
	destroy_persistent_data_objects(md);
	kfree(md);
}

static bool valid_nr_blocks(dm_block_t n)
{
	/*
	 * dm_bitset restricts us to 2^32.  test_bit & co. restrict us
	 * further to 2^31 - 1.
	 */
	return n < (1ull << 31);
}

static int metadata_resize(struct era_metadata *md, void *arg)
{
	int r;
	dm_block_t *new_size = arg;
	__le32 value;

	if (!valid_nr_blocks(*new_size)) {
		DMERR("Invalid number of origin blocks %llu",
		      (unsigned long long) *new_size);
		return -EINVAL;
	}

	writeset_free(&md->writesets[0]);
	writeset_free(&md->writesets[1]);

	r = writeset_alloc(&md->writesets[0], *new_size);
	if (r) {
		DMERR("%s: writeset_alloc failed for writeset 0", __func__);
		return r;
	}

	r = writeset_alloc(&md->writesets[1], *new_size);
	if (r) {
		DMERR("%s: writeset_alloc failed for writeset 1", __func__);
		return r;
	}

	value = cpu_to_le32(0u);
	__dm_bless_for_disk(&value);
	r = dm_array_resize(&md->era_array_info, md->era_array_root,
			    md->nr_blocks, *new_size,
			    &value, &md->era_array_root);
	if (r) {
		DMERR("%s: dm_array_resize failed", __func__);
		return r;
	}

	md->nr_blocks = *new_size;
	return 0;
}

static int metadata_era_archive(struct era_metadata *md)
{
	int r;
	uint64_t keys[1];
	struct writeset_disk value;

	r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
			    &md->current_writeset->md.root);
	if (r) {
		DMERR("%s: dm_bitset_flush failed", __func__);
		return r;
	}

	ws_pack(&md->current_writeset->md, &value);
	md->current_writeset->md.root = INVALID_WRITESET_ROOT;

	keys[0] = md->current_era;
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
			    keys, &value, &md->writeset_tree_root);
	if (r) {
		DMERR("%s: couldn't insert writeset into btree", __func__);
		/* FIXME: fail mode */
		return r;
	}

	md->archived_writesets = true;

	return 0;
}

static struct writeset *next_writeset(struct era_metadata *md)
{
	return (md->current_writeset == &md->writesets[0]) ?
		&md->writesets[1] : &md->writesets[0];
}

static int metadata_new_era(struct era_metadata *md)
{
	int r;
	struct writeset *new_writeset = next_writeset(md);

	r = writeset_init(&md->bitset_info, new_writeset);
	if (r) {
		DMERR("%s: writeset_init failed", __func__);
		return r;
	}

	swap_writeset(md, new_writeset);
	md->current_era++;

	return 0;
}

static int metadata_era_rollover(struct era_metadata *md)
{
	int r;

	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
		r = metadata_era_archive(md);
		if (r) {
			DMERR("%s: metadata_era_archive failed", __func__);
			/* FIXME: fail mode? */
			return r;
		}
	}

	r = metadata_new_era(md);
	if (r) {
		DMERR("%s: new era failed", __func__);
		/* FIXME: fail mode */
		return r;
	}

	return 0;
}

static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
{
	bool r;
	struct writeset *ws;

	rcu_read_lock();
	ws = rcu_dereference(md->current_writeset);
	r = writeset_marked(ws, block);
	rcu_read_unlock();

	return r;
}

static int metadata_commit(struct era_metadata *md)
{
	int r;
	struct dm_block *sblock;

	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
				    &md->current_writeset->md.root);
		if (r) {
			DMERR("%s: bitset flush failed", __func__);
			return r;
		}
	}

	r = dm_tm_pre_commit(md->tm);
	if (r) {
		DMERR("%s: pre commit failed", __func__);
		return r;
	}

	r = save_sm_root(md);
	if (r) {
		DMERR("%s: save_sm_root failed", __func__);
		return r;
	}

	r = superblock_lock(md, &sblock);
	if (r) {
		DMERR("%s: superblock lock failed", __func__);
		return r;
	}

	prepare_superblock(md, dm_block_data(sblock));

	return dm_tm_commit(md->tm, sblock);
}

static int metadata_checkpoint(struct era_metadata *md)
{
	/*
	 * For now we just roll over, but later I want to put a check in to
	 * avoid this if the current writeset is still fresh.
	 */
	return metadata_era_rollover(md);
}

/*
 * Metadata snapshots allow userland to access era data.
 */
static int metadata_take_snap(struct era_metadata *md)
{
	int r, inc;
	struct dm_block *clone;

	if (md->metadata_snap != SUPERBLOCK_LOCATION) {
		DMERR("%s: metadata snapshot already exists", __func__);
		return -EINVAL;
	}

	r = metadata_era_rollover(md);
	if (r) {
		DMERR("%s: era rollover failed", __func__);
		return r;
	}

	r = metadata_commit(md);
	if (r) {
		DMERR("%s: metadata_commit failed", __func__);
		return r;
	}

	r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
	if (r) {
		DMERR("%s: couldn't increment superblock", __func__);
		return r;
	}

	r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
			       &sb_validator, &clone, &inc);
	if (r) {
		DMERR("%s: couldn't shadow superblock", __func__);
		dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
		return r;
	}
	BUG_ON(!inc);

	r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
	if (r) {
		DMERR("%s: couldn't inc writeset tree root", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	r = dm_sm_inc_block(md->sm, md->era_array_root);
	if (r) {
		DMERR("%s: couldn't inc era array root", __func__);
		dm_sm_dec_block(md->sm, md->writeset_tree_root);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	md->metadata_snap = dm_block_location(clone);

	dm_tm_unlock(md->tm, clone);

	return 0;
}
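
/*
 * The snapshot's block location is reported by era_status() as the 'held
 * metadata root', so userland tools can read a stable copy of the metadata.
 */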

static int metadata_drop_snap(struct era_metadata *md)
{
	int r;
	dm_block_t location;
	struct dm_block *clone;
	struct superblock_disk *disk;

	if (md->metadata_snap == SUPERBLOCK_LOCATION) {
		DMERR("%s: no snap to drop", __func__);
		return -EINVAL;
	}

	r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
	if (r) {
		DMERR("%s: couldn't read lock superblock clone", __func__);
		return r;
	}

	/*
	 * Whatever happens now we'll commit with no record of the metadata
	 * snap.
	 */
	md->metadata_snap = SUPERBLOCK_LOCATION;

	disk = dm_block_data(clone);
	r = dm_btree_del(&md->writeset_tree_info,
			 le64_to_cpu(disk->writeset_tree_root));
	if (r) {
		DMERR("%s: error deleting writeset tree clone", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
	if (r) {
		DMERR("%s: error deleting era array clone", __func__);
		dm_tm_unlock(md->tm, clone);
		return r;
	}

	location = dm_block_location(clone);
	dm_tm_unlock(md->tm, clone);

	return dm_sm_dec_block(md->sm, location);
}

struct metadata_stats {
	dm_block_t used;
	dm_block_t total;
	dm_block_t snap;
	uint32_t era;
};

static int metadata_get_stats(struct era_metadata *md, void *ptr)
{
	int r;
	struct metadata_stats *s = ptr;
	dm_block_t nr_free, nr_total;

	r = dm_sm_get_nr_free(md->sm, &nr_free);
	if (r) {
		DMERR("dm_sm_get_nr_free returned %d", r);
		return r;
	}

	r = dm_sm_get_nr_blocks(md->sm, &nr_total);
	if (r) {
		DMERR("dm_sm_get_nr_blocks returned %d", r);
		return r;
	}

	s->used = nr_total - nr_free;
	s->total = nr_total;
	s->snap = md->metadata_snap;
	s->era = md->current_era;

	return 0;
}

/*----------------------------------------------------------------*/

struct era {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_dev *metadata_dev;
	struct dm_dev *origin_dev;

	dm_block_t nr_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;
	struct era_metadata *md;

	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t deferred_lock;
	struct bio_list deferred_bios;

	spinlock_t rpc_lock;
	struct list_head rpc_calls;

	struct digest digest;
	atomic_t suspended;
};

struct rpc {
	struct list_head list;

	int (*fn0)(struct era_metadata *);
	int (*fn1)(struct era_metadata *, void *);
	void *arg;
	int result;

	struct completion complete;
};

/*----------------------------------------------------------------
 * Remapping.
 *---------------------------------------------------------------*/
static bool block_size_is_power_of_two(struct era *era)
{
	return era->sectors_per_block_shift >= 0;
}

static dm_block_t get_block(struct era *era, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(era))
		(void) sector_div(block_nr, era->sectors_per_block);
	else
		block_nr >>= era->sectors_per_block_shift;

	return block_nr;
}
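
/*
 * For example, with sectors_per_block = 8, sectors_per_block_shift is 3,
 * so a bio starting at sector 4096 maps to origin block 512.
 */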

static void remap_to_origin(struct era *era, struct bio *bio)
{
	bio_set_dev(bio, era->origin_dev->bdev);
}

/*----------------------------------------------------------------
 * Worker thread
 *--------------------------------------------------------------*/
static void wake_worker(struct era *era)
{
	if (!atomic_read(&era->suspended))
		queue_work(era->wq, &era->worker);
}

static void process_old_eras(struct era *era)
{
	int r;

	if (!era->digest.step)
		return;

	r = era->digest.step(era->md, &era->digest);
	if (r < 0) {
		DMERR("%s: digest step failed, stopping digestion", __func__);
		era->digest.step = NULL;

	} else if (era->digest.step)
		wake_worker(era);
}

static void process_deferred_bios(struct era *era)
{
	int r;
	struct bio_list deferred_bios, marked_bios;
	struct bio *bio;
	bool commit_needed = false;
	bool failed = false;

	bio_list_init(&deferred_bios);
	bio_list_init(&marked_bios);

	spin_lock(&era->deferred_lock);
	bio_list_merge(&deferred_bios, &era->deferred_bios);
	bio_list_init(&era->deferred_bios);
	spin_unlock(&era->deferred_lock);

	while ((bio = bio_list_pop(&deferred_bios))) {
		r = writeset_test_and_set(&era->md->bitset_info,
					  era->md->current_writeset,
					  get_block(era, bio));
		if (r < 0) {
			/*
			 * This is bad news, we need to rollback.
			 * FIXME: finish.
			 */
			failed = true;

		} else if (r == 0)
			commit_needed = true;

		bio_list_add(&marked_bios, bio);
	}

	if (commit_needed) {
		r = metadata_commit(era->md);
		if (r)
			failed = true;
	}

	if (failed)
		while ((bio = bio_list_pop(&marked_bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&marked_bios)))
			generic_make_request(bio);
}

static void process_rpc_calls(struct era *era)
{
	int r;
	bool need_commit = false;
	struct list_head calls;
	struct rpc *rpc, *tmp;

	INIT_LIST_HEAD(&calls);
	spin_lock(&era->rpc_lock);
	list_splice_init(&era->rpc_calls, &calls);
	spin_unlock(&era->rpc_lock);

	list_for_each_entry_safe(rpc, tmp, &calls, list) {
		rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
		need_commit = true;
	}

	if (need_commit) {
		r = metadata_commit(era->md);
		if (r)
			list_for_each_entry_safe(rpc, tmp, &calls, list)
				rpc->result = r;
	}

	list_for_each_entry_safe(rpc, tmp, &calls, list)
		complete(&rpc->complete);
}

static void kick_off_digest(struct era *era)
{
	if (era->md->archived_writesets) {
		era->md->archived_writesets = false;
		metadata_digest_start(era->md, &era->digest);
	}
}

static void do_work(struct work_struct *ws)
{
	struct era *era = container_of(ws, struct era, worker);

	kick_off_digest(era);
	process_old_eras(era);
	process_deferred_bios(era);
	process_rpc_calls(era);
}

static void defer_bio(struct era *era, struct bio *bio)
{
	spin_lock(&era->deferred_lock);
	bio_list_add(&era->deferred_bios, bio);
	spin_unlock(&era->deferred_lock);

	wake_worker(era);
}

/*
 * Make an rpc call to the worker to change the metadata.
 */
static int perform_rpc(struct era *era, struct rpc *rpc)
{
	rpc->result = 0;
	init_completion(&rpc->complete);

	spin_lock(&era->rpc_lock);
	list_add(&rpc->list, &era->rpc_calls);
	spin_unlock(&era->rpc_lock);

	wake_worker(era);
	wait_for_completion(&rpc->complete);

	return rpc->result;
}

static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
{
	struct rpc rpc;

	rpc.fn0 = fn;
	rpc.fn1 = NULL;

	return perform_rpc(era, &rpc);
}

static int in_worker1(struct era *era,
		      int (*fn)(struct era_metadata *, void *), void *arg)
{
	struct rpc rpc;

	rpc.fn0 = NULL;
	rpc.fn1 = fn;
	rpc.arg = arg;

	return perform_rpc(era, &rpc);
}

static void start_worker(struct era *era)
{
	atomic_set(&era->suspended, 0);
}

static void stop_worker(struct era *era)
{
	atomic_set(&era->suspended, 1);
	flush_workqueue(era->wq);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return bdi_congested(q->backing_dev_info, bdi_bits);
}

static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct era *era = container_of(cb, struct era, callbacks);

	return dev_is_congested(era->origin_dev, bdi_bits);
}

static void era_destroy(struct era *era)
{
	if (era->md)
		metadata_close(era->md);

	if (era->wq)
		destroy_workqueue(era->wq);

	if (era->origin_dev)
		dm_put_device(era->ti, era->origin_dev);

	if (era->metadata_dev)
		dm_put_device(era->ti, era->metadata_dev);

	kfree(era);
}

static dm_block_t calc_nr_blocks(struct era *era)
{
	return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}

static bool valid_block_size(dm_block_t block_size)
{
	bool greater_than_zero = block_size > 0;
	bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;

	return greater_than_zero && multiple_of_min_block_size;
}

/*
 * <metadata dev> <data dev> <data block size (sectors)>
 */
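/*
 * For example, a dmsetup table line for a 200MiB origin with 4KiB blocks
 * might look like this (device names are illustrative):
 *   0 409600 era /dev/mapper/era_meta /dev/mapper/era_origin 8
 */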
static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	char dummy;
	struct era *era;
	struct era_metadata *md;

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	era = kzalloc(sizeof(*era), GFP_KERNEL);
	if (!era) {
		ti->error = "Error allocating era structure";
		return -ENOMEM;
	}

	era->ti = ti;

	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
	if (r) {
		ti->error = "Error opening metadata device";
		era_destroy(era);
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
	if (r) {
		ti->error = "Error opening data device";
		era_destroy(era);
		return -EINVAL;
	}

	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
	if (r != 1) {
		ti->error = "Error parsing block size";
		era_destroy(era);
		return -EINVAL;
	}

	r = dm_set_target_max_io_len(ti, era->sectors_per_block);
	if (r) {
		ti->error = "could not set max io len";
		era_destroy(era);
		return -EINVAL;
	}

	if (!valid_block_size(era->sectors_per_block)) {
		ti->error = "Invalid block size";
		era_destroy(era);
		return -EINVAL;
	}

	if (era->sectors_per_block & (era->sectors_per_block - 1))
		era->sectors_per_block_shift = -1;
	else
		era->sectors_per_block_shift = __ffs(era->sectors_per_block);

	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
	if (IS_ERR(md)) {
		ti->error = "Error reading metadata";
		era_destroy(era);
		return PTR_ERR(md);
	}
	era->md = md;

	era->nr_blocks = calc_nr_blocks(era);

	r = metadata_resize(era->md, &era->nr_blocks);
	if (r) {
		ti->error = "couldn't resize metadata";
		era_destroy(era);
		return r;
	}

	era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!era->wq) {
		ti->error = "could not create workqueue for metadata object";
		era_destroy(era);
		return -ENOMEM;
	}
	INIT_WORK(&era->worker, do_work);

	spin_lock_init(&era->deferred_lock);
	bio_list_init(&era->deferred_bios);

	spin_lock_init(&era->rpc_lock);
	INIT_LIST_HEAD(&era->rpc_calls);

	ti->private = era;
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	era->callbacks.congested_fn = era_is_congested;
	dm_table_add_target_callbacks(ti->table, &era->callbacks);

	return 0;
}

static void era_dtr(struct dm_target *ti)
{
	era_destroy(ti->private);
}

static int era_map(struct dm_target *ti, struct bio *bio)
{
	struct era *era = ti->private;
	dm_block_t block = get_block(era, bio);

	/*
	 * All bios get remapped to the origin device.  We do this now, but
	 * the bio may not be issued until later, depending on whether the
	 * block is marked in this era.
	 */
	remap_to_origin(era, bio);

	/*
	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
	 */
	if (!(bio->bi_opf & REQ_PREFLUSH) &&
	    (bio_data_dir(bio) == WRITE) &&
	    !metadata_current_marked(era->md, block)) {
		defer_bio(era, bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}

static void era_postsuspend(struct dm_target *ti)
{
	int r;
	struct era *era = ti->private;

	r = in_worker0(era, metadata_era_archive);
	if (r) {
		DMERR("%s: couldn't archive current era", __func__);
		/* FIXME: fail mode */
	}

	stop_worker(era);
}

static int era_preresume(struct dm_target *ti)
{
	int r;
	struct era *era = ti->private;
	dm_block_t new_size = calc_nr_blocks(era);

	if (era->nr_blocks != new_size) {
		r = in_worker1(era, metadata_resize, &new_size);
		if (r)
			return r;

		era->nr_blocks = new_size;
	}

	start_worker(era);

	r = in_worker0(era, metadata_era_rollover);
	if (r) {
		DMERR("%s: metadata_era_rollover failed", __func__);
		return r;
	}

	return 0;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <current era> <held metadata root | '-'>
 */
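/*
 * Example INFO line (the values are illustrative): "8 122/4096 3 -"
 */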
static void era_status(struct dm_target *ti, status_type_t type,
		       unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	struct era *era = ti->private;
	ssize_t sz = 0;
	struct metadata_stats stats;
	char buf[BDEVNAME_SIZE];

	switch (type) {
	case STATUSTYPE_INFO:
		r = in_worker1(era, metadata_get_stats, &stats);
		if (r)
			goto err;

		DMEMIT("%u %llu/%llu %u",
		       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
		       (unsigned long long) stats.used,
		       (unsigned long long) stats.total,
		       (unsigned) stats.era);

		if (stats.snap != SUPERBLOCK_LOCATION)
			DMEMIT(" %llu", stats.snap);
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, era->origin_dev->bdev->bd_dev);
		DMEMIT("%s %u", buf, era->sectors_per_block);
		break;
	}

	return;

err:
	DMEMIT("Error");
}

static int era_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct era *era = ti->private;

	if (argc != 1) {
		DMERR("incorrect number of message arguments");
		return -EINVAL;
	}

	if (!strcasecmp(argv[0], "checkpoint"))
		return in_worker0(era, metadata_checkpoint);

	if (!strcasecmp(argv[0], "take_metadata_snap"))
		return in_worker0(era, metadata_take_snap);

	if (!strcasecmp(argv[0], "drop_metadata_snap"))
		return in_worker0(era, metadata_drop_snap);

	DMERR("unsupported message '%s'", argv[0]);
	return -EINVAL;
}
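
/*
 * For example (the device name is illustrative):
 *   dmsetup message era_dev 0 take_metadata_snap
 */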

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int era_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct era *era = ti->private;

	return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}

static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct era *era = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * era device's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < era->sectors_per_block ||
	    do_div(io_opt_sectors, era->sectors_per_block)) {
		blk_limits_io_min(limits, 0);
		blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
	}
}

/*----------------------------------------------------------------*/

static struct target_type era_target = {
	.name = "era",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = era_ctr,
	.dtr = era_dtr,
	.map = era_map,
	.postsuspend = era_postsuspend,
	.preresume = era_preresume,
	.status = era_status,
	.message = era_message,
	.iterate_devices = era_iterate_devices,
	.io_hints = era_io_hints
};

static int __init dm_era_init(void)
{
	int r;

	r = dm_register_target(&era_target);
	if (r) {
		DMERR("era target registration failed: %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_era_exit(void)
{
	dm_unregister_target(&era_target);
}

module_init(dm_era_init);
module_exit(dm_era_exit);

MODULE_DESCRIPTION(DM_NAME " era target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");