1 /*
2  * dm-snapshot.c
3  *
4  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5  *
6  * This file is released under the GPL.
7  */
8
9 #include <linux/blkdev.h>
10 #include <linux/device-mapper.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/init.h>
14 #include <linux/kdev_t.h>
15 #include <linux/list.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
22 #include <linux/workqueue.h>
23
24 #include "dm-exception-store.h"
25
26 #define DM_MSG_PREFIX "snapshots"
27
28 static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
29
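/*
 * The snapshot-merge target type registers this exact string as its name,
 * so the macro below can identify a merge target by pointer comparison
 * alone.
 */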
30 #define dm_target_is_snapshot_merge(ti) \
31         ((ti)->type->name == dm_snapshot_merge_target_name)
32
/*
 * The percentage increment at which we wake up users
 */
#define WAKE_UP_PERCENT 5
37
38 /*
39  * kcopyd priority of snapshot operations
40  */
41 #define SNAPSHOT_COPY_PRIORITY 2
42
43 /*
44  * Reserve 1MB for each snapshot initially (with minimum of 1 page).
45  */
46 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
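/* e.g. with 4KB pages (PAGE_SHIFT == 12) this reserves (1 << 20) >> 12 = 256 pages */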
47
48 /*
49  * The size of the mempool used to track chunks in use.
50  */
51 #define MIN_IOS 256
52
53 #define DM_TRACKED_CHUNK_HASH_SIZE      16
54 #define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
55                                          (DM_TRACKED_CHUNK_HASH_SIZE - 1))
56
57 struct dm_exception_table {
58         uint32_t hash_mask;
59         unsigned hash_shift;
60         struct list_head *table;
61 };
62
63 struct dm_snapshot {
64         struct rw_semaphore lock;
65
66         struct dm_dev *origin;
67         struct dm_dev *cow;
68
69         struct dm_target *ti;
70
71         /* List of snapshots per Origin */
72         struct list_head list;
73
74         /* You can't use a snapshot if this is 0 (e.g. if full) */
75         int valid;
76
77         /* Origin writes don't trigger exceptions until this is set */
78         int active;
79
80         /* Whether or not owning mapped_device is suspended */
81         int suspended;
82
83         mempool_t *pending_pool;
84
85         atomic_t pending_exceptions_count;
86
87         struct dm_exception_table pending;
88         struct dm_exception_table complete;
89
90         /*
91          * pe_lock protects all pending_exception operations and access
92          * as well as the snapshot_bios list.
93          */
94         spinlock_t pe_lock;
95
96         /* The on disk metadata handler */
97         struct dm_exception_store *store;
98
99         struct dm_kcopyd_client *kcopyd_client;
100
101         /* Queue of snapshot writes for ksnapd to flush */
102         struct bio_list queued_bios;
103         struct work_struct queued_bios_work;
104
105         /* Chunks with outstanding reads */
106         mempool_t *tracked_chunk_pool;
107         spinlock_t tracked_chunk_lock;
108         struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
109
110         /* Wait for events based on state_bits */
111         unsigned long state_bits;
112
113         /* Range of chunks currently being merged. */
114         chunk_t first_merging_chunk;
115         int num_merging_chunks;
116
117         /*
118          * Incoming bios that overlap with chunks being merged must wait
119          * for them to be committed.
120          */
121         struct bio_list bios_queued_during_merge;
122 };
123
124 /*
125  * state_bits:
126  *   RUNNING_MERGE  - Merge operation is in progress.
127  *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
128  *                    cleared afterwards.
129  */
130 #define RUNNING_MERGE          0
131 #define SHUTDOWN_MERGE         1
132
133 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
134 {
135         return s->cow;
136 }
137 EXPORT_SYMBOL(dm_snap_cow);
138
139 static struct workqueue_struct *ksnapd;
140 static void flush_queued_bios(struct work_struct *work);
141
142 static sector_t chunk_to_sector(struct dm_exception_store *store,
143                                 chunk_t chunk)
144 {
145         return chunk << store->chunk_shift;
146 }
147
148 static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
149 {
150         /*
151          * There is only ever one instance of a particular block
152          * device so we can compare pointers safely.
153          */
154         return lhs == rhs;
155 }
156
157 struct dm_snap_pending_exception {
158         struct dm_exception e;
159
160         /*
161          * Origin buffers waiting for this to complete are held
162          * in a bio list
163          */
164         struct bio_list origin_bios;
165         struct bio_list snapshot_bios;
166
167         /* Pointer back to snapshot context */
168         struct dm_snapshot *snap;
169
170         /*
171          * 1 indicates the exception has already been sent to
172          * kcopyd.
173          */
174         int started;
175 };
176
/*
 * Slab caches for completed and pending exceptions
 */
181 static struct kmem_cache *exception_cache;
182 static struct kmem_cache *pending_cache;
183
184 struct dm_snap_tracked_chunk {
185         struct hlist_node node;
186         chunk_t chunk;
187 };
188
189 static struct kmem_cache *tracked_chunk_cache;
190
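/*
 * Note that a read of @chunk is in flight, so that writes relocating the
 * chunk can wait for it to drain (see __check_for_conflicting_io()).
 */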
191 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
192                                                  chunk_t chunk)
193 {
194         struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
195                                                         GFP_NOIO);
196         unsigned long flags;
197
198         c->chunk = chunk;
199
200         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
201         hlist_add_head(&c->node,
202                        &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
203         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
204
205         return c;
206 }
207
208 static void stop_tracking_chunk(struct dm_snapshot *s,
209                                 struct dm_snap_tracked_chunk *c)
210 {
211         unsigned long flags;
212
213         spin_lock_irqsave(&s->tracked_chunk_lock, flags);
214         hlist_del(&c->node);
215         spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
216
217         mempool_free(c, s->tracked_chunk_pool);
218 }
219
220 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
221 {
222         struct dm_snap_tracked_chunk *c;
223         struct hlist_node *hn;
224         int found = 0;
225
226         spin_lock_irq(&s->tracked_chunk_lock);
227
228         hlist_for_each_entry(c, hn,
229             &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
230                 if (c->chunk == chunk) {
231                         found = 1;
232                         break;
233                 }
234         }
235
236         spin_unlock_irq(&s->tracked_chunk_lock);
237
238         return found;
239 }
240
241 /*
242  * This conflicting I/O is extremely improbable in the caller,
243  * so msleep(1) is sufficient and there is no need for a wait queue.
244  */
245 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
246 {
247         while (__chunk_is_tracked(s, chunk))
248                 msleep(1);
249 }
250
251 /*
252  * One of these per registered origin, held in the snapshot_origins hash
253  */
254 struct origin {
255         /* The origin device */
256         struct block_device *bdev;
257
258         struct list_head hash_list;
259
260         /* List of snapshots for this origin */
261         struct list_head snapshots;
262 };
263
/*
 * Hash table mapping origin volumes to lists of snapshots, and a lock
 * to protect it.  If we make this the size of the minors list then it
 * should be nearly perfect.
 */
268 #define ORIGIN_HASH_SIZE 256
269 #define ORIGIN_MASK      0xFF
270 static struct list_head *_origins;
271 static struct rw_semaphore _origins_lock;
272
273 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
274 static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
275 static uint64_t _pending_exceptions_done_count;
276
277 static int init_origin_hash(void)
278 {
279         int i;
280
281         _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
282                            GFP_KERNEL);
283         if (!_origins) {
284                 DMERR("unable to allocate memory");
285                 return -ENOMEM;
286         }
287
288         for (i = 0; i < ORIGIN_HASH_SIZE; i++)
289                 INIT_LIST_HEAD(_origins + i);
290         init_rwsem(&_origins_lock);
291
292         return 0;
293 }
294
295 static void exit_origin_hash(void)
296 {
297         kfree(_origins);
298 }
299
300 static unsigned origin_hash(struct block_device *bdev)
301 {
302         return bdev->bd_dev & ORIGIN_MASK;
303 }
304
305 static struct origin *__lookup_origin(struct block_device *origin)
306 {
307         struct list_head *ol;
308         struct origin *o;
309
310         ol = &_origins[origin_hash(origin)];
311         list_for_each_entry (o, ol, hash_list)
312                 if (bdev_equal(o->bdev, origin))
313                         return o;
314
315         return NULL;
316 }
317
318 static void __insert_origin(struct origin *o)
319 {
320         struct list_head *sl = &_origins[origin_hash(o->bdev)];
321         list_add_tail(&o->hash_list, sl);
322 }
323
324 /*
325  * _origins_lock must be held when calling this function.
326  * Returns number of snapshots registered using the supplied cow device, plus:
327  * snap_src - a snapshot suitable for use as a source of exception handover
328  * snap_dest - a snapshot capable of receiving exception handover.
329  * snap_merge - an existing snapshot-merge target linked to the same origin.
330  *   There can be at most one snapshot-merge target. The parameter is optional.
331  *
332  * Possible return values and states of snap_src and snap_dest.
333  *   0: NULL, NULL  - first new snapshot
334  *   1: snap_src, NULL - normal snapshot
335  *   2: snap_src, snap_dest  - waiting for handover
336  *   2: snap_src, NULL - handed over, waiting for old to be deleted
337  *   1: NULL, snap_dest - source got destroyed without handover
338  */
339 static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
340                                         struct dm_snapshot **snap_src,
341                                         struct dm_snapshot **snap_dest,
342                                         struct dm_snapshot **snap_merge)
343 {
344         struct dm_snapshot *s;
345         struct origin *o;
346         int count = 0;
347         int active;
348
349         o = __lookup_origin(snap->origin->bdev);
350         if (!o)
351                 goto out;
352
353         list_for_each_entry(s, &o->snapshots, list) {
354                 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
355                         *snap_merge = s;
356                 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
357                         continue;
358
359                 down_read(&s->lock);
360                 active = s->active;
361                 up_read(&s->lock);
362
363                 if (active) {
364                         if (snap_src)
365                                 *snap_src = s;
366                 } else if (snap_dest)
367                         *snap_dest = s;
368
369                 count++;
370         }
371
372 out:
373         return count;
374 }
375
376 /*
377  * On success, returns 1 if this snapshot is a handover destination,
378  * otherwise returns 0.
379  */
380 static int __validate_exception_handover(struct dm_snapshot *snap)
381 {
382         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
383         struct dm_snapshot *snap_merge = NULL;
384
385         /* Does snapshot need exceptions handed over to it? */
386         if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
387                                           &snap_merge) == 2) ||
388             snap_dest) {
389                 snap->ti->error = "Snapshot cow pairing for exception "
390                                   "table handover failed";
391                 return -EINVAL;
392         }
393
394         /*
395          * If no snap_src was found, snap cannot become a handover
396          * destination.
397          */
398         if (!snap_src)
399                 return 0;
400
401         /*
402          * Non-snapshot-merge handover?
403          */
404         if (!dm_target_is_snapshot_merge(snap->ti))
405                 return 1;
406
407         /*
408          * Do not allow more than one merging snapshot.
409          */
410         if (snap_merge) {
411                 snap->ti->error = "A snapshot is already merging.";
412                 return -EINVAL;
413         }
414
415         if (!snap_src->store->type->prepare_merge ||
416             !snap_src->store->type->commit_merge) {
417                 snap->ti->error = "Snapshot exception store does not "
418                                   "support snapshot-merge.";
419                 return -EINVAL;
420         }
421
422         return 1;
423 }
424
425 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
426 {
427         struct dm_snapshot *l;
428
        /* Sort the list by chunk size, largest first */
430         list_for_each_entry(l, &o->snapshots, list)
431                 if (l->store->chunk_size < s->store->chunk_size)
432                         break;
433         list_add_tail(&s->list, &l->list);
434 }
435
436 /*
437  * Make a note of the snapshot and its origin so we can look it
438  * up when the origin has a write on it.
439  *
440  * Also validate snapshot exception store handovers.
441  * On success, returns 1 if this registration is a handover destination,
442  * otherwise returns 0.
443  */
444 static int register_snapshot(struct dm_snapshot *snap)
445 {
446         struct origin *o, *new_o = NULL;
447         struct block_device *bdev = snap->origin->bdev;
448         int r = 0;
449
450         new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
451         if (!new_o)
452                 return -ENOMEM;
453
454         down_write(&_origins_lock);
455
456         r = __validate_exception_handover(snap);
457         if (r < 0) {
458                 kfree(new_o);
459                 goto out;
460         }
461
462         o = __lookup_origin(bdev);
463         if (o)
464                 kfree(new_o);
465         else {
466                 /* New origin */
467                 o = new_o;
468
469                 /* Initialise the struct */
470                 INIT_LIST_HEAD(&o->snapshots);
471                 o->bdev = bdev;
472
473                 __insert_origin(o);
474         }
475
476         __insert_snapshot(o, snap);
477
478 out:
479         up_write(&_origins_lock);
480
481         return r;
482 }
483
484 /*
485  * Move snapshot to correct place in list according to chunk size.
486  */
487 static void reregister_snapshot(struct dm_snapshot *s)
488 {
489         struct block_device *bdev = s->origin->bdev;
490
491         down_write(&_origins_lock);
492
493         list_del(&s->list);
494         __insert_snapshot(__lookup_origin(bdev), s);
495
496         up_write(&_origins_lock);
497 }
498
499 static void unregister_snapshot(struct dm_snapshot *s)
500 {
501         struct origin *o;
502
503         down_write(&_origins_lock);
504         o = __lookup_origin(s->origin->bdev);
505
506         list_del(&s->list);
507         if (o && list_empty(&o->snapshots)) {
508                 list_del(&o->hash_list);
509                 kfree(o);
510         }
511
512         up_write(&_origins_lock);
513 }
514
515 /*
516  * Implementation of the exception hash tables.
517  * The lowest hash_shift bits of the chunk number are ignored, allowing
518  * some consecutive chunks to be grouped together.
519  */
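/*
 * For example, with hash_shift == 2, chunks 0-3 all land in bucket 0 and
 * chunks 4-7 in bucket 1, so a run of consecutive chunks shares a slot.
 */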
520 static int dm_exception_table_init(struct dm_exception_table *et,
521                                    uint32_t size, unsigned hash_shift)
522 {
523         unsigned int i;
524
525         et->hash_shift = hash_shift;
526         et->hash_mask = size - 1;
527         et->table = dm_vcalloc(size, sizeof(struct list_head));
528         if (!et->table)
529                 return -ENOMEM;
530
531         for (i = 0; i < size; i++)
532                 INIT_LIST_HEAD(et->table + i);
533
534         return 0;
535 }
536
537 static void dm_exception_table_exit(struct dm_exception_table *et,
538                                     struct kmem_cache *mem)
539 {
540         struct list_head *slot;
541         struct dm_exception *ex, *next;
542         int i, size;
543
544         size = et->hash_mask + 1;
545         for (i = 0; i < size; i++) {
546                 slot = et->table + i;
547
548                 list_for_each_entry_safe (ex, next, slot, hash_list)
549                         kmem_cache_free(mem, ex);
550         }
551
552         vfree(et->table);
553 }
554
555 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
556 {
557         return (chunk >> et->hash_shift) & et->hash_mask;
558 }
559
560 static void dm_remove_exception(struct dm_exception *e)
561 {
562         list_del(&e->hash_list);
563 }
564
565 /*
566  * Return the exception data for a sector, or NULL if not
567  * remapped.
568  */
569 static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
570                                                 chunk_t chunk)
571 {
572         struct list_head *slot;
573         struct dm_exception *e;
574
575         slot = &et->table[exception_hash(et, chunk)];
576         list_for_each_entry (e, slot, hash_list)
577                 if (chunk >= e->old_chunk &&
578                     chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
579                         return e;
580
581         return NULL;
582 }
583
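/*
 * Try a normal GFP_NOIO allocation first; if that fails, retry with
 * GFP_ATOMIC so the emergency reserves can be used.
 */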
584 static struct dm_exception *alloc_completed_exception(void)
585 {
586         struct dm_exception *e;
587
588         e = kmem_cache_alloc(exception_cache, GFP_NOIO);
589         if (!e)
590                 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
591
592         return e;
593 }
594
595 static void free_completed_exception(struct dm_exception *e)
596 {
597         kmem_cache_free(exception_cache, e);
598 }
599
600 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
601 {
602         struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
603                                                              GFP_NOIO);
604
605         atomic_inc(&s->pending_exceptions_count);
606         pe->snap = s;
607
608         return pe;
609 }
610
611 static void free_pending_exception(struct dm_snap_pending_exception *pe)
612 {
613         struct dm_snapshot *s = pe->snap;
614
615         mempool_free(pe, s->pending_pool);
616         smp_mb__before_atomic_dec();
617         atomic_dec(&s->pending_exceptions_count);
618 }
619
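/*
 * Insert a completed exception, coalescing it with an adjacent entry
 * where possible.  For example, if the table already maps old chunks
 * 10-12 to new chunks 20-22, inserting (old 13 -> new 23) just bumps
 * that entry's consecutive count rather than adding a new entry.
 */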
620 static void dm_insert_exception(struct dm_exception_table *eh,
621                                 struct dm_exception *new_e)
622 {
623         struct list_head *l;
624         struct dm_exception *e = NULL;
625
626         l = &eh->table[exception_hash(eh, new_e->old_chunk)];
627
628         /* Add immediately if this table doesn't support consecutive chunks */
629         if (!eh->hash_shift)
630                 goto out;
631
632         /* List is ordered by old_chunk */
633         list_for_each_entry_reverse(e, l, hash_list) {
634                 /* Insert after an existing chunk? */
635                 if (new_e->old_chunk == (e->old_chunk +
636                                          dm_consecutive_chunk_count(e) + 1) &&
637                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
638                                          dm_consecutive_chunk_count(e) + 1)) {
639                         dm_consecutive_chunk_count_inc(e);
640                         free_completed_exception(new_e);
641                         return;
642                 }
643
644                 /* Insert before an existing chunk? */
645                 if (new_e->old_chunk == (e->old_chunk - 1) &&
646                     new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
647                         dm_consecutive_chunk_count_inc(e);
648                         e->old_chunk--;
649                         e->new_chunk--;
650                         free_completed_exception(new_e);
651                         return;
652                 }
653
654                 if (new_e->old_chunk > e->old_chunk)
655                         break;
656         }
657
658 out:
659         list_add(&new_e->hash_list, e ? &e->hash_list : l);
660 }
661
662 /*
663  * Callback used by the exception stores to load exceptions when
664  * initialising.
665  */
666 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
667 {
668         struct dm_snapshot *s = context;
669         struct dm_exception *e;
670
671         e = alloc_completed_exception();
672         if (!e)
673                 return -ENOMEM;
674
675         e->old_chunk = old;
676
677         /* Consecutive_count is implicitly initialised to zero */
678         e->new_chunk = new;
679
680         dm_insert_exception(&s->complete, e);
681
682         return 0;
683 }
684
685 #define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
686
687 /*
688  * Return a minimum chunk size of all snapshots that have the specified origin.
689  * Return zero if the origin has no snapshots.
690  */
691 static sector_t __minimum_chunk_size(struct origin *o)
692 {
693         struct dm_snapshot *snap;
694         unsigned chunk_size = 0;
695
696         if (o)
697                 list_for_each_entry(snap, &o->snapshots, list)
698                         chunk_size = min_not_zero(chunk_size,
699                                                   snap->store->chunk_size);
700
701         return chunk_size;
702 }
703
/*
 * Hard-coded limit: let the hash table use at most 2MB of list heads.
 */
707 static int calc_max_buckets(void)
708 {
709         /* use a fixed size of 2MB */
710         unsigned long mem = 2 * 1024 * 1024;
711         mem /= sizeof(struct list_head);
712
713         return mem;
714 }
715
716 /*
717  * Allocate room for a suitable hash table.
718  */
719 static int init_hash_tables(struct dm_snapshot *s)
720 {
721         sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
722
723         /*
724          * Calculate based on the size of the original volume or
725          * the COW volume...
726          */
727         cow_dev_size = get_dev_size(s->cow->bdev);
728         origin_dev_size = get_dev_size(s->origin->bdev);
729         max_buckets = calc_max_buckets();
730
731         hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
732         hash_size = min(hash_size, max_buckets);
733
734         if (hash_size < 64)
735                 hash_size = 64;
736         hash_size = rounddown_pow_of_two(hash_size);
737         if (dm_exception_table_init(&s->complete, hash_size,
738                                     DM_CHUNK_CONSECUTIVE_BITS))
739                 return -ENOMEM;
740
741         /*
742          * Allocate hash table for in-flight exceptions
743          * Make this smaller than the real hash table
744          */
745         hash_size >>= 3;
746         if (hash_size < 64)
747                 hash_size = 64;
748
749         if (dm_exception_table_init(&s->pending, hash_size, 0)) {
750                 dm_exception_table_exit(&s->complete, exception_cache);
751                 return -ENOMEM;
752         }
753
754         return 0;
755 }
756
757 static void merge_shutdown(struct dm_snapshot *s)
758 {
759         clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
760         smp_mb__after_clear_bit();
761         wake_up_bit(&s->state_bits, RUNNING_MERGE);
762 }
763
764 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
765 {
766         s->first_merging_chunk = 0;
767         s->num_merging_chunks = 0;
768
769         return bio_list_get(&s->bios_queued_during_merge);
770 }
771
772 /*
773  * Remove one chunk from the index of completed exceptions.
774  */
775 static int __remove_single_exception_chunk(struct dm_snapshot *s,
776                                            chunk_t old_chunk)
777 {
778         struct dm_exception *e;
779
780         e = dm_lookup_exception(&s->complete, old_chunk);
781         if (!e) {
782                 DMERR("Corruption detected: exception for block %llu is "
783                       "on disk but not in memory",
784                       (unsigned long long)old_chunk);
785                 return -EINVAL;
786         }
787
788         /*
789          * If this is the only chunk using this exception, remove exception.
790          */
791         if (!dm_consecutive_chunk_count(e)) {
792                 dm_remove_exception(e);
793                 free_completed_exception(e);
794                 return 0;
795         }
796
797         /*
798          * The chunk may be either at the beginning or the end of a
799          * group of consecutive chunks - never in the middle.  We are
800          * removing chunks in the opposite order to that in which they
801          * were added, so this should always be true.
802          * Decrement the consecutive chunk counter and adjust the
803          * starting point if necessary.
804          */
805         if (old_chunk == e->old_chunk) {
806                 e->old_chunk++;
807                 e->new_chunk++;
808         } else if (old_chunk != e->old_chunk +
809                    dm_consecutive_chunk_count(e)) {
810                 DMERR("Attempt to merge block %llu from the "
811                       "middle of a chunk range [%llu - %llu]",
812                       (unsigned long long)old_chunk,
813                       (unsigned long long)e->old_chunk,
814                       (unsigned long long)
815                       e->old_chunk + dm_consecutive_chunk_count(e));
816                 return -EINVAL;
817         }
818
819         dm_consecutive_chunk_count_dec(e);
820
821         return 0;
822 }
823
824 static void flush_bios(struct bio *bio);
825
826 static int remove_single_exception_chunk(struct dm_snapshot *s)
827 {
828         struct bio *b = NULL;
829         int r;
830         chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
831
832         down_write(&s->lock);
833
834         /*
835          * Process chunks (and associated exceptions) in reverse order
836          * so that dm_consecutive_chunk_count_dec() accounting works.
837          */
838         do {
839                 r = __remove_single_exception_chunk(s, old_chunk);
840                 if (r)
841                         goto out;
842         } while (old_chunk-- > s->first_merging_chunk);
843
844         b = __release_queued_bios_after_merge(s);
845
846 out:
847         up_write(&s->lock);
848         if (b)
849                 flush_bios(b);
850
851         return r;
852 }
853
854 static int origin_write_extent(struct dm_snapshot *merging_snap,
855                                sector_t sector, unsigned chunk_size);
856
857 static void merge_callback(int read_err, unsigned long write_err,
858                            void *context);
859
860 static uint64_t read_pending_exceptions_done_count(void)
861 {
862         uint64_t pending_exceptions_done;
863
864         spin_lock(&_pending_exceptions_done_spinlock);
865         pending_exceptions_done = _pending_exceptions_done_count;
866         spin_unlock(&_pending_exceptions_done_spinlock);
867
868         return pending_exceptions_done;
869 }
870
871 static void increment_pending_exceptions_done_count(void)
872 {
873         spin_lock(&_pending_exceptions_done_spinlock);
874         _pending_exceptions_done_count++;
875         spin_unlock(&_pending_exceptions_done_spinlock);
876
877         wake_up_all(&_pending_exceptions_done);
878 }
879
880 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
881 {
882         int i, linear_chunks;
883         chunk_t old_chunk, new_chunk;
884         struct dm_io_region src, dest;
885         sector_t io_size;
886         uint64_t previous_count;
887
888         BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
889         if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
890                 goto shut;
891
892         /*
893          * valid flag never changes during merge, so no lock required.
894          */
895         if (!s->valid) {
896                 DMERR("Snapshot is invalid: can't merge");
897                 goto shut;
898         }
899
900         linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
901                                                       &new_chunk);
902         if (linear_chunks <= 0) {
903                 if (linear_chunks < 0)
904                         DMERR("Read error in exception store: "
905                               "shutting down merge");
906                 goto shut;
907         }
908
909         /* Adjust old_chunk and new_chunk to reflect start of linear region */
910         old_chunk = old_chunk + 1 - linear_chunks;
911         new_chunk = new_chunk + 1 - linear_chunks;
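        /*
         * prepare_merge() reports the *end* of the linear run, so e.g.
         * old_chunk == 25 with linear_chunks == 5 means the run covers
         * chunks 21-25 and copying starts at chunk 21.
         */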
912
913         /*
914          * Use one (potentially large) I/O to copy all 'linear_chunks'
915          * from the exception store to the origin
916          */
917         io_size = linear_chunks * s->store->chunk_size;
918
919         dest.bdev = s->origin->bdev;
920         dest.sector = chunk_to_sector(s->store, old_chunk);
921         dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
922
923         src.bdev = s->cow->bdev;
924         src.sector = chunk_to_sector(s->store, new_chunk);
925         src.count = dest.count;
926
927         /*
928          * Reallocate any exceptions needed in other snapshots then
929          * wait for the pending exceptions to complete.
930          * Each time any pending exception (globally on the system)
931          * completes we are woken and repeat the process to find out
932          * if we can proceed.  While this may not seem a particularly
933          * efficient algorithm, it is not expected to have any
934          * significant impact on performance.
935          */
936         previous_count = read_pending_exceptions_done_count();
937         while (origin_write_extent(s, dest.sector, io_size)) {
938                 wait_event(_pending_exceptions_done,
939                            (read_pending_exceptions_done_count() !=
940                             previous_count));
941                 /* Retry after the wait, until all exceptions are done. */
942                 previous_count = read_pending_exceptions_done_count();
943         }
944
945         down_write(&s->lock);
946         s->first_merging_chunk = old_chunk;
947         s->num_merging_chunks = linear_chunks;
948         up_write(&s->lock);
949
950         /* Wait until writes to all 'linear_chunks' drain */
951         for (i = 0; i < linear_chunks; i++)
952                 __check_for_conflicting_io(s, old_chunk + i);
953
954         dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
955         return;
956
957 shut:
958         merge_shutdown(s);
959 }
960
961 static void error_bios(struct bio *bio);
962
963 static void merge_callback(int read_err, unsigned long write_err, void *context)
964 {
965         struct dm_snapshot *s = context;
966         struct bio *b = NULL;
967
968         if (read_err || write_err) {
969                 if (read_err)
970                         DMERR("Read error: shutting down merge.");
971                 else
972                         DMERR("Write error: shutting down merge.");
973                 goto shut;
974         }
975
976         if (s->store->type->commit_merge(s->store,
977                                          s->num_merging_chunks) < 0) {
978                 DMERR("Write error in exception store: shutting down merge");
979                 goto shut;
980         }
981
982         if (remove_single_exception_chunk(s) < 0)
983                 goto shut;
984
985         snapshot_merge_next_chunks(s);
986
987         return;
988
989 shut:
990         down_write(&s->lock);
991         b = __release_queued_bios_after_merge(s);
992         up_write(&s->lock);
993         error_bios(b);
994
995         merge_shutdown(s);
996 }
997
998 static void start_merge(struct dm_snapshot *s)
999 {
1000         if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1001                 snapshot_merge_next_chunks(s);
1002 }
1003
1004 static int wait_schedule(void *ptr)
1005 {
1006         schedule();
1007
1008         return 0;
1009 }
1010
1011 /*
1012  * Stop the merging process and wait until it finishes.
1013  */
1014 static void stop_merge(struct dm_snapshot *s)
1015 {
1016         set_bit(SHUTDOWN_MERGE, &s->state_bits);
1017         wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
1018                     TASK_UNINTERRUPTIBLE);
1019         clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1020 }
1021
1022 /*
1023  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
1024  */
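/*
 * An illustrative table line (device paths are placeholders):
 *
 *     0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16
 *
 * creates a persistent ('P') snapshot of /dev/vg0/base, storing
 * exceptions on /dev/vg0/base-cow in 16-sector (8KB) chunks.
 */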
1025 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1026 {
1027         struct dm_snapshot *s;
1028         int i;
1029         int r = -EINVAL;
1030         char *origin_path, *cow_path;
1031         unsigned args_used, num_flush_requests = 1;
1032         fmode_t origin_mode = FMODE_READ;
1033
1034         if (argc != 4) {
1035                 ti->error = "requires exactly 4 arguments";
1036                 r = -EINVAL;
1037                 goto bad;
1038         }
1039
1040         if (dm_target_is_snapshot_merge(ti)) {
1041                 num_flush_requests = 2;
1042                 origin_mode = FMODE_WRITE;
1043         }
1044
1045         origin_path = argv[0];
1046         argv++;
1047         argc--;
1048
1049         s = kmalloc(sizeof(*s), GFP_KERNEL);
1050         if (!s) {
1051                 ti->error = "Cannot allocate snapshot context private "
1052                     "structure";
1053                 r = -ENOMEM;
1054                 goto bad;
1055         }
1056
1057         cow_path = argv[0];
1058         argv++;
1059         argc--;
1060
1061         r = dm_get_device(ti, cow_path, 0, 0,
1062                           FMODE_READ | FMODE_WRITE, &s->cow);
1063         if (r) {
1064                 ti->error = "Cannot get COW device";
1065                 goto bad_cow;
1066         }
1067
1068         r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1069         if (r) {
1070                 ti->error = "Couldn't create exception store";
1071                 r = -EINVAL;
1072                 goto bad_store;
1073         }
1074
1075         argv += args_used;
1076         argc -= args_used;
1077
1078         r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
1079         if (r) {
1080                 ti->error = "Cannot get origin device";
1081                 goto bad_origin;
1082         }
1083
1084         s->ti = ti;
1085         s->valid = 1;
1086         s->active = 0;
1087         s->suspended = 0;
1088         atomic_set(&s->pending_exceptions_count, 0);
1089         init_rwsem(&s->lock);
1090         INIT_LIST_HEAD(&s->list);
1091         spin_lock_init(&s->pe_lock);
1092         s->state_bits = 0;
1093         s->first_merging_chunk = 0;
1094         s->num_merging_chunks = 0;
1095         bio_list_init(&s->bios_queued_during_merge);
1096
1097         /* Allocate hash table for COW data */
1098         if (init_hash_tables(s)) {
1099                 ti->error = "Unable to allocate hash table space";
1100                 r = -ENOMEM;
1101                 goto bad_hash_tables;
1102         }
1103
1104         r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
1105         if (r) {
1106                 ti->error = "Could not create kcopyd client";
1107                 goto bad_kcopyd;
1108         }
1109
        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }
1115
        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                r = -ENOMEM;
                goto bad_tracked_chunk_pool;
        }
1123
1124         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1125                 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1126
1127         spin_lock_init(&s->tracked_chunk_lock);
1128
1129         bio_list_init(&s->queued_bios);
1130         INIT_WORK(&s->queued_bios_work, flush_queued_bios);
1131
1132         ti->private = s;
1133         ti->num_flush_requests = num_flush_requests;
1134
1135         /* Add snapshot to the list of snapshots for this origin */
1136         /* Exceptions aren't triggered till snapshot_resume() is called */
1137         r = register_snapshot(s);
1138         if (r == -ENOMEM) {
1139                 ti->error = "Snapshot origin struct allocation failed";
1140                 goto bad_load_and_register;
1141         } else if (r < 0) {
1142                 /* invalid handover, register_snapshot has set ti->error */
1143                 goto bad_load_and_register;
1144         }
1145
        /*
         * Metadata must only be loaded into one table at a time, so skip
         * this if the metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
1152         if (r > 0) {
1153                 s->store->chunk_size = 0;
1154                 return 0;
1155         }
1156
1157         r = s->store->type->read_metadata(s->store, dm_add_exception,
1158                                           (void *)s);
1159         if (r < 0) {
1160                 ti->error = "Failed to read snapshot metadata";
1161                 goto bad_read_metadata;
1162         } else if (r > 0) {
1163                 s->valid = 0;
1164                 DMWARN("Snapshot is marked invalid.");
1165         }
1166
        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                r = -EINVAL;
                goto bad_read_metadata;
        }
1171         ti->split_io = s->store->chunk_size;
1172
1173         return 0;
1174
1175 bad_read_metadata:
1176         unregister_snapshot(s);
1177
1178 bad_load_and_register:
1179         mempool_destroy(s->tracked_chunk_pool);
1180
1181 bad_tracked_chunk_pool:
1182         mempool_destroy(s->pending_pool);
1183
1184 bad_pending_pool:
1185         dm_kcopyd_client_destroy(s->kcopyd_client);
1186
1187 bad_kcopyd:
1188         dm_exception_table_exit(&s->pending, pending_cache);
1189         dm_exception_table_exit(&s->complete, exception_cache);
1190
1191 bad_hash_tables:
1192         dm_put_device(ti, s->origin);
1193
1194 bad_origin:
1195         dm_exception_store_destroy(s->store);
1196
1197 bad_store:
1198         dm_put_device(ti, s->cow);
1199
1200 bad_cow:
1201         kfree(s);
1202
1203 bad:
1204         return r;
1205 }
1206
1207 static void __free_exceptions(struct dm_snapshot *s)
1208 {
1209         dm_kcopyd_client_destroy(s->kcopyd_client);
1210         s->kcopyd_client = NULL;
1211
1212         dm_exception_table_exit(&s->pending, pending_cache);
1213         dm_exception_table_exit(&s->complete, exception_cache);
1214 }
1215
1216 static void __handover_exceptions(struct dm_snapshot *snap_src,
1217                                   struct dm_snapshot *snap_dest)
1218 {
1219         union {
1220                 struct dm_exception_table table_swap;
1221                 struct dm_exception_store *store_swap;
1222         } u;
1223
1224         /*
1225          * Swap all snapshot context information between the two instances.
1226          */
1227         u.table_swap = snap_dest->complete;
1228         snap_dest->complete = snap_src->complete;
1229         snap_src->complete = u.table_swap;
1230
1231         u.store_swap = snap_dest->store;
1232         snap_dest->store = snap_src->store;
1233         snap_src->store = u.store_swap;
1234
1235         snap_dest->store->snap = snap_dest;
1236         snap_src->store->snap = snap_src;
1237
1238         snap_dest->ti->split_io = snap_dest->store->chunk_size;
1239         snap_dest->valid = snap_src->valid;
1240
1241         /*
1242          * Set source invalid to ensure it receives no further I/O.
1243          */
1244         snap_src->valid = 0;
1245 }
1246
1247 static void snapshot_dtr(struct dm_target *ti)
1248 {
1249 #ifdef CONFIG_DM_DEBUG
1250         int i;
1251 #endif
1252         struct dm_snapshot *s = ti->private;
1253         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1254
1255         flush_workqueue(ksnapd);
1256
1257         down_read(&_origins_lock);
1258         /* Check whether exception handover must be cancelled */
1259         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1260         if (snap_src && snap_dest && (s == snap_src)) {
1261                 down_write(&snap_dest->lock);
1262                 snap_dest->valid = 0;
1263                 up_write(&snap_dest->lock);
1264                 DMERR("Cancelling snapshot handover.");
1265         }
1266         up_read(&_origins_lock);
1267
1268         if (dm_target_is_snapshot_merge(ti))
1269                 stop_merge(s);
1270
1271         /* Prevent further origin writes from using this snapshot. */
1272         /* After this returns there can be no new kcopyd jobs. */
1273         unregister_snapshot(s);
1274
1275         while (atomic_read(&s->pending_exceptions_count))
1276                 msleep(1);
1277         /*
1278          * Ensure instructions in mempool_destroy aren't reordered
1279          * before atomic_read.
1280          */
1281         smp_mb();
1282
1283 #ifdef CONFIG_DM_DEBUG
1284         for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1285                 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1286 #endif
1287
1288         mempool_destroy(s->tracked_chunk_pool);
1289
1290         __free_exceptions(s);
1291
1292         mempool_destroy(s->pending_pool);
1293
1294         dm_put_device(ti, s->origin);
1295
1296         dm_exception_store_destroy(s->store);
1297
1298         dm_put_device(ti, s->cow);
1299
1300         kfree(s);
1301 }
1302
1303 /*
1304  * Flush a list of buffers.
1305  */
1306 static void flush_bios(struct bio *bio)
1307 {
1308         struct bio *n;
1309
1310         while (bio) {
1311                 n = bio->bi_next;
1312                 bio->bi_next = NULL;
1313                 generic_make_request(bio);
1314                 bio = n;
1315         }
1316 }
1317
1318 static void flush_queued_bios(struct work_struct *work)
1319 {
1320         struct dm_snapshot *s =
1321                 container_of(work, struct dm_snapshot, queued_bios_work);
1322         struct bio *queued_bios;
1323         unsigned long flags;
1324
1325         spin_lock_irqsave(&s->pe_lock, flags);
1326         queued_bios = bio_list_get(&s->queued_bios);
1327         spin_unlock_irqrestore(&s->pe_lock, flags);
1328
1329         flush_bios(queued_bios);
1330 }
1331
1332 static int do_origin(struct dm_dev *origin, struct bio *bio);
1333
/*
 * Re-issue a list of origin bios, resubmitting any that do_origin()
 * remaps.
 */
1337 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1338 {
1339         struct bio *n;
1340         int r;
1341
1342         while (bio) {
1343                 n = bio->bi_next;
1344                 bio->bi_next = NULL;
1345                 r = do_origin(s->origin, bio);
1346                 if (r == DM_MAPIO_REMAPPED)
1347                         generic_make_request(bio);
1348                 bio = n;
1349         }
1350 }
1351
1352 /*
1353  * Error a list of buffers.
1354  */
1355 static void error_bios(struct bio *bio)
1356 {
1357         struct bio *n;
1358
1359         while (bio) {
1360                 n = bio->bi_next;
1361                 bio->bi_next = NULL;
1362                 bio_io_error(bio);
1363                 bio = n;
1364         }
1365 }
1366
1367 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1368 {
1369         if (!s->valid)
1370                 return;
1371
1372         if (err == -EIO)
1373                 DMERR("Invalidating snapshot: Error reading/writing.");
1374         else if (err == -ENOMEM)
1375                 DMERR("Invalidating snapshot: Unable to allocate exception.");
1376
1377         if (s->store->type->drop_snapshot)
1378                 s->store->type->drop_snapshot(s->store);
1379
1380         s->valid = 0;
1381
1382         dm_table_event(s->ti->table);
1383 }
1384
1385 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1386 {
1387         struct dm_exception *e;
1388         struct dm_snapshot *s = pe->snap;
1389         struct bio *origin_bios = NULL;
1390         struct bio *snapshot_bios = NULL;
1391         int error = 0;
1392
1393         if (!success) {
1394                 /* Read/write error - snapshot is unusable */
1395                 down_write(&s->lock);
1396                 __invalidate_snapshot(s, -EIO);
1397                 error = 1;
1398                 goto out;
1399         }
1400
1401         e = alloc_completed_exception();
1402         if (!e) {
1403                 down_write(&s->lock);
1404                 __invalidate_snapshot(s, -ENOMEM);
1405                 error = 1;
1406                 goto out;
1407         }
1408         *e = pe->e;
1409
1410         down_write(&s->lock);
1411         if (!s->valid) {
1412                 free_completed_exception(e);
1413                 error = 1;
1414                 goto out;
1415         }
1416
1417         /* Check for conflicting reads */
1418         __check_for_conflicting_io(s, pe->e.old_chunk);
1419
1420         /*
1421          * Add a proper exception, and remove the
1422          * in-flight exception from the list.
1423          */
1424         dm_insert_exception(&s->complete, e);
1425
1426  out:
1427         dm_remove_exception(&pe->e);
1428         snapshot_bios = bio_list_get(&pe->snapshot_bios);
1429         origin_bios = bio_list_get(&pe->origin_bios);
1430         free_pending_exception(pe);
1431
1432         increment_pending_exceptions_done_count();
1433
1434         up_write(&s->lock);
1435
1436         /* Submit any pending write bios */
1437         if (error)
1438                 error_bios(snapshot_bios);
1439         else
1440                 flush_bios(snapshot_bios);
1441
1442         retry_origin_bios(s, origin_bios);
1443 }
1444
1445 static void commit_callback(void *context, int success)
1446 {
1447         struct dm_snap_pending_exception *pe = context;
1448
1449         pending_complete(pe, success);
1450 }
1451
1452 /*
1453  * Called when the copy I/O has finished.  kcopyd actually runs
1454  * this code so don't block.
1455  */
1456 static void copy_callback(int read_err, unsigned long write_err, void *context)
1457 {
1458         struct dm_snap_pending_exception *pe = context;
1459         struct dm_snapshot *s = pe->snap;
1460
1461         if (read_err || write_err)
1462                 pending_complete(pe, 0);
1463
1464         else
1465                 /* Update the metadata if we are persistent */
1466                 s->store->type->commit_exception(s->store, &pe->e,
1467                                                  commit_callback, pe);
1468 }
1469
1470 /*
1471  * Dispatches the copy operation to kcopyd.
1472  */
1473 static void start_copy(struct dm_snap_pending_exception *pe)
1474 {
1475         struct dm_snapshot *s = pe->snap;
1476         struct dm_io_region src, dest;
1477         struct block_device *bdev = s->origin->bdev;
1478         sector_t dev_size;
1479
1480         dev_size = get_dev_size(bdev);
1481
1482         src.bdev = bdev;
1483         src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1484         src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1485
1486         dest.bdev = s->cow->bdev;
1487         dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1488         dest.count = src.count;
1489
1490         /* Hand over to kcopyd */
1491         dm_kcopyd_copy(s->kcopyd_client,
1492                     &src, 1, &dest, 0, copy_callback, pe);
1493 }
1494
1495 static struct dm_snap_pending_exception *
1496 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1497 {
1498         struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1499
1500         if (!e)
1501                 return NULL;
1502
1503         return container_of(e, struct dm_snap_pending_exception, e);
1504 }
1505
1506 /*
1507  * Looks to see if this snapshot already has a pending exception
1508  * for this chunk, otherwise it allocates a new one and inserts
1509  * it into the pending table.
1510  *
1511  * NOTE: a write lock must be held on snap->lock before calling
1512  * this.
1513  */
1514 static struct dm_snap_pending_exception *
1515 __find_pending_exception(struct dm_snapshot *s,
1516                          struct dm_snap_pending_exception *pe, chunk_t chunk)
1517 {
1518         struct dm_snap_pending_exception *pe2;
1519
1520         pe2 = __lookup_pending_exception(s, chunk);
1521         if (pe2) {
1522                 free_pending_exception(pe);
1523                 return pe2;
1524         }
1525
1526         pe->e.old_chunk = chunk;
1527         bio_list_init(&pe->origin_bios);
1528         bio_list_init(&pe->snapshot_bios);
1529         pe->started = 0;
1530
1531         if (s->store->type->prepare_exception(s->store, &pe->e)) {
1532                 free_pending_exception(pe);
1533                 return NULL;
1534         }
1535
1536         dm_insert_exception(&s->pending, &pe->e);
1537
1538         return pe;
1539 }
1540
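/*
 * Redirect a bio to the chunk's remapped location on the COW device,
 * preserving the bio's offset within the chunk.
 */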
1541 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1542                             struct bio *bio, chunk_t chunk)
1543 {
1544         bio->bi_bdev = s->cow->bdev;
1545         bio->bi_sector = chunk_to_sector(s->store,
1546                                          dm_chunk_number(e->new_chunk) +
1547                                          (chunk - e->old_chunk)) +
1548                                          (bio->bi_sector &
1549                                           s->store->chunk_mask);
1550 }
1551
1552 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1553                         union map_info *map_context)
1554 {
1555         struct dm_exception *e;
1556         struct dm_snapshot *s = ti->private;
1557         int r = DM_MAPIO_REMAPPED;
1558         chunk_t chunk;
1559         struct dm_snap_pending_exception *pe = NULL;
1560
1561         if (unlikely(bio_empty_barrier(bio))) {
1562                 bio->bi_bdev = s->cow->bdev;
1563                 return DM_MAPIO_REMAPPED;
1564         }
1565
1566         chunk = sector_to_chunk(s->store, bio->bi_sector);
1567
1568         /* Full snapshots are not usable */
1569         /* To get here the table must be live so s->active is always set. */
1570         if (!s->valid)
1571                 return -EIO;
1572
1573         /* FIXME: should only take write lock if we need
1574          * to copy an exception */
1575         down_write(&s->lock);
1576
1577         if (!s->valid) {
1578                 r = -EIO;
1579                 goto out_unlock;
1580         }
1581
1582         /* If the block is already remapped - use that, else remap it */
1583         e = dm_lookup_exception(&s->complete, chunk);
1584         if (e) {
1585                 remap_exception(s, e, bio, chunk);
1586                 goto out_unlock;
1587         }
1588
1589         /*
1590          * Write to snapshot - higher level takes care of RW/RO
1591          * flags so we should only get this if we are
1592          * writeable.
1593          */
1594         if (bio_rw(bio) == WRITE) {
1595                 pe = __lookup_pending_exception(s, chunk);
1596                 if (!pe) {
1597                         up_write(&s->lock);
1598                         pe = alloc_pending_exception(s);
1599                         down_write(&s->lock);
1600
1601                         if (!s->valid) {
1602                                 free_pending_exception(pe);
1603                                 r = -EIO;
1604                                 goto out_unlock;
1605                         }
1606
1607                         e = dm_lookup_exception(&s->complete, chunk);
1608                         if (e) {
1609                                 free_pending_exception(pe);
1610                                 remap_exception(s, e, bio, chunk);
1611                                 goto out_unlock;
1612                         }
1613
1614                         pe = __find_pending_exception(s, pe, chunk);
1615                         if (!pe) {
1616                                 __invalidate_snapshot(s, -ENOMEM);
1617                                 r = -EIO;
1618                                 goto out_unlock;
1619                         }
1620                 }
1621
1622                 remap_exception(s, &pe->e, bio, chunk);
1623                 bio_list_add(&pe->snapshot_bios, bio);
1624
1625                 r = DM_MAPIO_SUBMITTED;
1626
1627                 if (!pe->started) {
1628                         /* this is protected by snap->lock */
1629                         pe->started = 1;
1630                         up_write(&s->lock);
1631                         start_copy(pe);
1632                         goto out;
1633                 }
1634         } else {
1635                 bio->bi_bdev = s->origin->bdev;
1636                 map_context->ptr = track_chunk(s, chunk);
1637         }
1638
1639  out_unlock:
1640         up_write(&s->lock);
1641  out:
1642         return r;
1643 }
1644
1645 /*
1646  * A snapshot-merge target behaves like a combination of a snapshot
1647  * target and a snapshot-origin target.  It only generates new
1648  * exceptions in other snapshots and not in the one that is being
1649  * merged.
1650  *
1651  * For each chunk, if there is an existing exception, it is used to
1652  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1653  * which in turn might generate exceptions in other snapshots.
1654  * If merging is currently taking place on the chunk in question, the
1655  * I/O is deferred by adding it to s->bios_queued_during_merge.
1656  */
1657 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1658                               union map_info *map_context)
1659 {
1660         struct dm_exception *e;
1661         struct dm_snapshot *s = ti->private;
1662         int r = DM_MAPIO_REMAPPED;
1663         chunk_t chunk;
1664
1665         if (unlikely(bio_empty_barrier(bio))) {
1666                 if (!map_context->flush_request)
1667                         bio->bi_bdev = s->origin->bdev;
1668                 else
1669                         bio->bi_bdev = s->cow->bdev;
1670                 map_context->ptr = NULL;
1671                 return DM_MAPIO_REMAPPED;
1672         }
1673
1674         chunk = sector_to_chunk(s->store, bio->bi_sector);
1675
1676         down_write(&s->lock);
1677
1678         /* Full snapshots are not usable */
1679         if (!s->valid) {
1680                 r = -EIO;
1681                 goto out_unlock;
1682         }
1683
1684         /* If the block is already remapped - use that */
1685         e = dm_lookup_exception(&s->complete, chunk);
1686         if (e) {
1687                 /* Queue writes overlapping with chunks being merged */
1688                 if (bio_rw(bio) == WRITE &&
1689                     chunk >= s->first_merging_chunk &&
1690                     chunk < (s->first_merging_chunk +
1691                              s->num_merging_chunks)) {
1692                         bio->bi_bdev = s->origin->bdev;
1693                         bio_list_add(&s->bios_queued_during_merge, bio);
1694                         r = DM_MAPIO_SUBMITTED;
1695                         goto out_unlock;
1696                 }
1697
1698                 remap_exception(s, e, bio, chunk);
1699
1700                 if (bio_rw(bio) == WRITE)
1701                         map_context->ptr = track_chunk(s, chunk);
1702                 goto out_unlock;
1703         }
1704
1705         bio->bi_bdev = s->origin->bdev;
1706
1707         if (bio_rw(bio) == WRITE) {
1708                 up_write(&s->lock);
1709                 return do_origin(s->origin, bio);
1710         }
1711
1712 out_unlock:
1713         up_write(&s->lock);
1714
1715         return r;
1716 }
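
/*
 * Illustrative sketch of the merge-window test above, using a
 * hypothetical 16-sector (8KiB) chunk size.  Chunk sizes are powers of
 * two, so sector_to_chunk() reduces to a shift:
 *
 *	chunk = bio->bi_sector >> 4;	(sector 12345 -> chunk 771)
 *
 * Assuming first_merging_chunk = 768 and num_merging_chunks = 8, a
 * write to chunk 771 falls inside [768, 776) and is queued on
 * bios_queued_during_merge, while a write to chunk 800 is remapped
 * straight to the COW device through its completed exception.
 */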
1717
1718 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1719                            int error, union map_info *map_context)
1720 {
1721         struct dm_snapshot *s = ti->private;
1722         struct dm_snap_tracked_chunk *c = map_context->ptr;
1723
1724         if (c)
1725                 stop_tracking_chunk(s, c);
1726
1727         return 0;
1728 }
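
/*
 * The pairing above is symmetric (commentary, not original source):
 * snapshot_map() and snapshot_merge_map() stash the cookie returned by
 * track_chunk() in map_context->ptr, and snapshot_end_io() releases it
 * when the bio completes.  This is what lets __chunk_is_tracked() spot
 * reads still in flight against a chunk before that chunk is
 * overwritten during a merge.
 */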
1729
1730 static void snapshot_merge_presuspend(struct dm_target *ti)
1731 {
1732         struct dm_snapshot *s = ti->private;
1733
1734         stop_merge(s);
1735 }
1736
1737 static void snapshot_postsuspend(struct dm_target *ti)
1738 {
1739         struct dm_snapshot *s = ti->private;
1740
1741         down_write(&s->lock);
1742         s->suspended = 1;
1743         up_write(&s->lock);
1744 }
1745
1746 static int snapshot_preresume(struct dm_target *ti)
1747 {
1748         int r = 0;
1749         struct dm_snapshot *s = ti->private;
1750         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1751
1752         down_read(&_origins_lock);
1753         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1754         if (snap_src && snap_dest) {
1755                 down_read(&snap_src->lock);
1756                 if (s == snap_src) {
1757                         DMERR("Unable to resume snapshot source until "
1758                               "handover completes.");
1759                         r = -EINVAL;
1760                 } else if (!snap_src->suspended) {
1761                         DMERR("Unable to perform snapshot handover until "
1762                               "source is suspended.");
1763                         r = -EINVAL;
1764                 }
1765                 up_read(&snap_src->lock);
1766         }
1767         up_read(&_origins_lock);
1768
1769         return r;
1770 }
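
/*
 * A plausible handover sequence as driven from userspace (purely
 * illustrative; the device names are hypothetical):
 *
 *	dmsetup suspend snap		(source: s->suspended = 1)
 *	dmsetup resume snap-merge	(preresume checks pass, resume
 *					 performs the exception handover)
 *
 * Resuming the source itself while a handover is pending, or resuming
 * the destination before the source is suspended, takes the -EINVAL
 * paths above.
 */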
1771
1772 static void snapshot_resume(struct dm_target *ti)
1773 {
1774         struct dm_snapshot *s = ti->private;
1775         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1776
1777         down_read(&_origins_lock);
1778         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1779         if (snap_src && snap_dest) {
1780                 down_write(&snap_src->lock);
1781                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1782                 __handover_exceptions(snap_src, snap_dest);
1783                 up_write(&snap_dest->lock);
1784                 up_write(&snap_src->lock);
1785         }
1786         up_read(&_origins_lock);
1787
1788         /* Now that we have the correct chunk size, reregister */
1789         reregister_snapshot(s);
1790
1791         down_write(&s->lock);
1792         s->active = 1;
1793         s->suspended = 0;
1794         up_write(&s->lock);
1795 }
1796
1797 static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1798 {
1799         sector_t min_chunksize;
1800
1801         down_read(&_origins_lock);
1802         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1803         up_read(&_origins_lock);
1804
1805         return min_chunksize;
1806 }
1807
1808 static void snapshot_merge_resume(struct dm_target *ti)
1809 {
1810         struct dm_snapshot *s = ti->private;
1811
1812         /*
1813          * Hand over exceptions from the existing snapshot.
1814          */
1815         snapshot_resume(ti);
1816
1817         /*
1818          * snapshot-merge acts as an origin, so set ti->split_io
1819          */
1820         ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1821
1822         start_merge(s);
1823 }
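
/*
 * Worked example with hypothetical sizes: if the origin also has two
 * ordinary snapshots with chunk sizes of 16 and 64 sectors,
 * get_origin_minimum_chunksize() returns 16 and ti->split_io is set to
 * 16, so no bio handed to snapshot_merge_map() ever straddles a chunk
 * boundary of any snapshot on this origin (all chunk sizes are powers
 * of two).
 */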
1824
1825 static int snapshot_status(struct dm_target *ti, status_type_t type,
1826                            char *result, unsigned int maxlen)
1827 {
1828         unsigned sz = 0;
1829         struct dm_snapshot *snap = ti->private;
1830
1831         switch (type) {
1832         case STATUSTYPE_INFO:
1833
1834                 down_write(&snap->lock);
1835
1836                 if (!snap->valid)
1837                         DMEMIT("Invalid");
1838                 else {
1839                         if (snap->store->type->usage) {
1840                                 sector_t total_sectors, sectors_allocated,
1841                                          metadata_sectors;
1842                                 snap->store->type->usage(snap->store,
1843                                                          &total_sectors,
1844                                                          &sectors_allocated,
1845                                                          &metadata_sectors);
1846                                 DMEMIT("%llu/%llu %llu",
1847                                        (unsigned long long)sectors_allocated,
1848                                        (unsigned long long)total_sectors,
1849                                        (unsigned long long)metadata_sectors);
1850                         } else
1852                                 DMEMIT("Unknown");
1853                 }
1854
1855                 up_write(&snap->lock);
1856
1857                 break;
1858
1859         case STATUSTYPE_TABLE:
1860                 /*
1861                  * The origin and COW device names are private
1862                  * copies held in their dm_dev structures, so it
1863                  * is safe to emit them directly here.
1864                  */
1865                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1866                 snap->store->type->status(snap->store, type, result + sz,
1867                                           maxlen - sz);
1868                 break;
1869         }
1870
1871         return 0;
1872 }
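
/*
 * Example status output (values hypothetical):
 *
 *	# dmsetup status snap
 *	0 204800 snapshot 10240/204800 40
 *
 * i.e. sectors_allocated/total_sectors followed by metadata_sectors;
 * the word "Invalid" replaces the numbers once the snapshot has been
 * invalidated.
 */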
1873
1874 static int snapshot_iterate_devices(struct dm_target *ti,
1875                                     iterate_devices_callout_fn fn, void *data)
1876 {
1877         struct dm_snapshot *snap = ti->private;
1878
1879         return fn(ti, snap->origin, 0, ti->len, data);
1880 }
1881
1882
1883 /*-----------------------------------------------------------------
1884  * Origin methods
1885  *---------------------------------------------------------------*/
1886
1887 /*
1888  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1889  * supplied bio is ignored.  The caller may submit it immediately.
1890  * (No remapping actually occurs as the origin is always a direct linear
1891  * map.)
1892  *
1893  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1894  * and any supplied bio is added to a list to be submitted once all
1895  * the necessary exceptions exist.
1896  */
1897 static int __origin_write(struct list_head *snapshots, sector_t sector,
1898                           struct bio *bio)
1899 {
1900         int r = DM_MAPIO_REMAPPED;
1901         struct dm_snapshot *snap;
1902         struct dm_exception *e;
1903         struct dm_snap_pending_exception *pe;
1904         struct dm_snap_pending_exception *pe_to_start_now = NULL;
1905         struct dm_snap_pending_exception *pe_to_start_last = NULL;
1906         chunk_t chunk;
1907
1908         /* Do all the snapshots on this origin */
1909         list_for_each_entry (snap, snapshots, list) {
1910                 /*
1911                  * Don't make new exceptions in a merging snapshot
1912                  * because it has effectively been deleted
1913                  */
1914                 if (dm_target_is_snapshot_merge(snap->ti))
1915                         continue;
1916
1917                 down_write(&snap->lock);
1918
1919                 /* Only deal with valid and active snapshots */
1920                 if (!snap->valid || !snap->active)
1921                         goto next_snapshot;
1922
1923                 /* Nothing to do if writing beyond end of snapshot */
1924                 if (sector >= dm_table_get_size(snap->ti->table))
1925                         goto next_snapshot;
1926
1927                 /*
1928                  * Remember, different snapshots can have
1929                  * different chunk sizes.
1930                  */
1931                 chunk = sector_to_chunk(snap->store, sector);
1932
1933                 /*
1934                  * Check exception table to see if block
1935                  * is already remapped in this snapshot
1936                  * and trigger an exception if not.
1937                  */
1938                 e = dm_lookup_exception(&snap->complete, chunk);
1939                 if (e)
1940                         goto next_snapshot;
1941
1942                 pe = __lookup_pending_exception(snap, chunk);
1943                 if (!pe) {
1944                         up_write(&snap->lock);
1945                         pe = alloc_pending_exception(snap);
1946                         down_write(&snap->lock);
1947
1948                         if (!snap->valid) {
1949                                 free_pending_exception(pe);
1950                                 goto next_snapshot;
1951                         }
1952
1953                         e = dm_lookup_exception(&snap->complete, chunk);
1954                         if (e) {
1955                                 free_pending_exception(pe);
1956                                 goto next_snapshot;
1957                         }
1958
1959                         pe = __find_pending_exception(snap, pe, chunk);
1960                         if (!pe) {
1961                                 __invalidate_snapshot(snap, -ENOMEM);
1962                                 goto next_snapshot;
1963                         }
1964                 }
1965
1966                 r = DM_MAPIO_SUBMITTED;
1967
1968                 /*
1969                  * If an origin bio was supplied, queue it to wait for the
1970                  * completion of this exception, and start this one last,
1971                  * at the end of the function.
1972                  */
1973                 if (bio) {
1974                         bio_list_add(&pe->origin_bios, bio);
1975                         bio = NULL;
1976
1977                         if (!pe->started) {
1978                                 pe->started = 1;
1979                                 pe_to_start_last = pe;
1980                         }
1981                 }
1982
1983                 if (!pe->started) {
1984                         pe->started = 1;
1985                         pe_to_start_now = pe;
1986                 }
1987
1988  next_snapshot:
1989                 up_write(&snap->lock);
1990
1991                 if (pe_to_start_now) {
1992                         start_copy(pe_to_start_now);
1993                         pe_to_start_now = NULL;
1994                 }
1995         }
1996
1997         /*
1998          * Submit the exception against which the bio is queued last,
1999          * to give the other exceptions a head start.
2000          */
2001         if (pe_to_start_last)
2002                 start_copy(pe_to_start_last);
2003
2004         return r;
2005 }
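
/*
 * Ordering sketch (commentary): suppose an origin write hits a chunk
 * shared by snapshots A, B and C, none of which has an exception yet.
 * The bio is queued on the first snapshot's pending exception (A),
 * which is remembered in pe_to_start_last; B's and C's copies are
 * kicked off via pe_to_start_now as the loop visits them.  Starting
 * A's copy last of all gives the other copies a head start over the
 * one that gates resubmission of the queued bio.
 */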
2006
2007 /*
2008  * Called on a write from the origin driver.
2009  */
2010 static int do_origin(struct dm_dev *origin, struct bio *bio)
2011 {
2012         struct origin *o;
2013         int r = DM_MAPIO_REMAPPED;
2014
2015         down_read(&_origins_lock);
2016         o = __lookup_origin(origin->bdev);
2017         if (o)
2018                 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2019         up_read(&_origins_lock);
2020
2021         return r;
2022 }
2023
2024 /*
2025  * Trigger exceptions in all non-merging snapshots.
2026  *
2027  * The chunk size of the merging snapshot may be larger than the chunk
2028  * size of some other snapshot so we may need to reallocate multiple
2029  * chunks in other snapshots.
2030  *
2031  * We scan all the overlapping exceptions in the other snapshots.
2032  * Returns 1 if anything was reallocated and must be waited for,
2033  * otherwise returns 0.
2034  *
2035  * size must be a multiple of merging_snap's chunk_size.
2036  */
2037 static int origin_write_extent(struct dm_snapshot *merging_snap,
2038                                sector_t sector, unsigned size)
2039 {
2040         int must_wait = 0;
2041         sector_t n;
2042         struct origin *o;
2043
2044         /*
2045          * The origin's __minimum_chunk_size() was stored in split_io
2046          * by snapshot_merge_resume().
2047          */
2048         down_read(&_origins_lock);
2049         o = __lookup_origin(merging_snap->origin->bdev);
2050         for (n = 0; n < size; n += merging_snap->ti->split_io)
2051                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2052                     DM_MAPIO_SUBMITTED)
2053                         must_wait = 1;
2054         up_read(&_origins_lock);
2055
2056         return must_wait;
2057 }
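
/*
 * Worked example with hypothetical sizes: merging snapshot chunk size
 * of 64 sectors, another snapshot on the same origin with 16-sector
 * chunks, hence ti->split_io = 16.  A call with size = 64 makes four
 * __origin_write() passes at sector offsets 0, 16, 32 and 48, each of
 * which may reallocate a 16-sector chunk in the other snapshot; the
 * caller must wait iff any pass returned DM_MAPIO_SUBMITTED.
 */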
2058
2059 /*
2060  * Origin: maps a linear range of a device, with hooks for snapshotting.
2061  */
2062
2063 /*
2064  * Construct an origin mapping: <dev_path>
2065  * The context for an origin is merely a 'struct dm_dev *'
2066  * pointing to the real device.
2067  */
2068 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2069 {
2070         int r;
2071         struct dm_dev *dev;
2072
2073         if (argc != 1) {
2074                 ti->error = "origin: incorrect number of arguments";
2075                 return -EINVAL;
2076         }
2077
2078         r = dm_get_device(ti, argv[0], 0, ti->len,
2079                           dm_table_get_mode(ti->table), &dev);
2080         if (r) {
2081                 ti->error = "Cannot get target device";
2082                 return r;
2083         }
2084
2085         ti->private = dev;
2086         ti->num_flush_requests = 1;
2087
2088         return 0;
2089 }
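
/*
 * Example table line (device path and size hypothetical):
 *
 *	# blockdev --getsz /dev/sdb1
 *	2097152
 *	# echo "0 2097152 snapshot-origin /dev/sdb1" | dmsetup create origin
 *
 * The single <dev_path> argument ends up as ti->private above.
 */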
2090
2091 static void origin_dtr(struct dm_target *ti)
2092 {
2093         struct dm_dev *dev = ti->private;
2094         dm_put_device(ti, dev);
2095 }
2096
2097 static int origin_map(struct dm_target *ti, struct bio *bio,
2098                       union map_info *map_context)
2099 {
2100         struct dm_dev *dev = ti->private;
2101         bio->bi_bdev = dev->bdev;
2102
2103         if (unlikely(bio_empty_barrier(bio)))
2104                 return DM_MAPIO_REMAPPED;
2105
2106         /* Only tell snapshots if this is a write */
2107         return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2108 }
2109
2110 /*
2111  * Set the target "split_io" field to the minimum of all the snapshots'
2112  * chunk sizes.
2113  */
2114 static void origin_resume(struct dm_target *ti)
2115 {
2116         struct dm_dev *dev = ti->private;
2117
2118         ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2119 }
2120
2121 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
2122                          unsigned int maxlen)
2123 {
2124         struct dm_dev *dev = ti->private;
2125
2126         switch (type) {
2127         case STATUSTYPE_INFO:
2128                 result[0] = '\0';
2129                 break;
2130
2131         case STATUSTYPE_TABLE:
2132                 snprintf(result, maxlen, "%s", dev->name);
2133                 break;
2134         }
2135
2136         return 0;
2137 }
2138
2139 static int origin_iterate_devices(struct dm_target *ti,
2140                                   iterate_devices_callout_fn fn, void *data)
2141 {
2142         struct dm_dev *dev = ti->private;
2143
2144         return fn(ti, dev, 0, ti->len, data);
2145 }
2146
2147 static struct target_type origin_target = {
2148         .name    = "snapshot-origin",
2149         .version = {1, 7, 0},
2150         .module  = THIS_MODULE,
2151         .ctr     = origin_ctr,
2152         .dtr     = origin_dtr,
2153         .map     = origin_map,
2154         .resume  = origin_resume,
2155         .status  = origin_status,
2156         .iterate_devices = origin_iterate_devices,
2157 };
2158
2159 static struct target_type snapshot_target = {
2160         .name    = "snapshot",
2161         .version = {1, 9, 0},
2162         .module  = THIS_MODULE,
2163         .ctr     = snapshot_ctr,
2164         .dtr     = snapshot_dtr,
2165         .map     = snapshot_map,
2166         .end_io  = snapshot_end_io,
2167         .postsuspend = snapshot_postsuspend,
2168         .preresume  = snapshot_preresume,
2169         .resume  = snapshot_resume,
2170         .status  = snapshot_status,
2171         .iterate_devices = snapshot_iterate_devices,
2172 };
2173
2174 static struct target_type merge_target = {
2175         .name    = dm_snapshot_merge_target_name,
2176         .version = {1, 0, 0},
2177         .module  = THIS_MODULE,
2178         .ctr     = snapshot_ctr,
2179         .dtr     = snapshot_dtr,
2180         .map     = snapshot_merge_map,
2181         .end_io  = snapshot_end_io,
2182         .presuspend = snapshot_merge_presuspend,
2183         .postsuspend = snapshot_postsuspend,
2184         .preresume  = snapshot_preresume,
2185         .resume  = snapshot_merge_resume,
2186         .status  = snapshot_status,
2187         .iterate_devices = snapshot_iterate_devices,
2188 };
2189
2190 static int __init dm_snapshot_init(void)
2191 {
2192         int r;
2193
2194         r = dm_exception_store_init();
2195         if (r) {
2196                 DMERR("Failed to initialize exception stores");
2197                 return r;
2198         }
2199
2200         r = dm_register_target(&snapshot_target);
2201         if (r < 0) {
2202                 DMERR("snapshot target register failed %d", r);
2203                 goto bad_register_snapshot_target;
2204         }
2205
2206         r = dm_register_target(&origin_target);
2207         if (r < 0) {
2208                 DMERR("Origin target register failed %d", r);
2209                 goto bad_register_origin_target;
2210         }
2211
2212         r = dm_register_target(&merge_target);
2213         if (r < 0) {
2214                 DMERR("Merge target register failed %d", r);
2215                 goto bad_register_merge_target;
2216         }
2217
2218         r = init_origin_hash();
2219         if (r) {
2220                 DMERR("init_origin_hash failed.");
2221                 goto bad_origin_hash;
2222         }
2223
2224         exception_cache = KMEM_CACHE(dm_exception, 0);
2225         if (!exception_cache) {
2226                 DMERR("Couldn't create exception cache.");
2227                 r = -ENOMEM;
2228                 goto bad_exception_cache;
2229         }
2230
2231         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2232         if (!pending_cache) {
2233                 DMERR("Couldn't create pending cache.");
2234                 r = -ENOMEM;
2235                 goto bad_pending_cache;
2236         }
2237
2238         tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2239         if (!tracked_chunk_cache) {
2240                 DMERR("Couldn't create cache to track chunks in use.");
2241                 r = -ENOMEM;
2242                 goto bad_tracked_chunk_cache;
2243         }
2244
2245         ksnapd = create_singlethread_workqueue("ksnapd");
2246         if (!ksnapd) {
2247                 DMERR("Failed to create ksnapd workqueue.");
2248                 r = -ENOMEM;
2249                 goto bad_pending_pool;
2250         }
2251
2252         return 0;
2253
2254 bad_pending_pool:
2255         kmem_cache_destroy(tracked_chunk_cache);
2256 bad_tracked_chunk_cache:
2257         kmem_cache_destroy(pending_cache);
2258 bad_pending_cache:
2259         kmem_cache_destroy(exception_cache);
2260 bad_exception_cache:
2261         exit_origin_hash();
2262 bad_origin_hash:
2263         dm_unregister_target(&merge_target);
2264 bad_register_merge_target:
2265         dm_unregister_target(&origin_target);
2266 bad_register_origin_target:
2267         dm_unregister_target(&snapshot_target);
2268 bad_register_snapshot_target:
2269         dm_exception_store_exit();
2270
2271         return r;
2272 }
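
/*
 * Note (commentary): the error labels above unwind in strict reverse
 * order of setup, so each label undoes exactly the steps that had
 * succeeded before the failure point and no cleanup code is duplicated
 * between the failure path and dm_snapshot_exit().
 */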
2273
2274 static void __exit dm_snapshot_exit(void)
2275 {
2276         destroy_workqueue(ksnapd);
2277
2278         dm_unregister_target(&snapshot_target);
2279         dm_unregister_target(&origin_target);
2280         dm_unregister_target(&merge_target);
2281
2282         exit_origin_hash();
2283         kmem_cache_destroy(pending_cache);
2284         kmem_cache_destroy(exception_cache);
2285         kmem_cache_destroy(tracked_chunk_cache);
2286
2287         dm_exception_store_exit();
2288 }
2289
2290 /* Module hooks */
2291 module_init(dm_snapshot_init);
2292 module_exit(dm_snapshot_exit);
2293
2294 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2295 MODULE_AUTHOR("Joe Thornber");
2296 MODULE_LICENSE("GPL");