drivers/md/md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    This program is free software; you can redistribute it and/or modify
23    it under the terms of the GNU General Public License as published by
24    the Free Software Foundation; either version 2, or (at your option)
25    any later version.
26
27    You should have received a copy of the GNU General Public License
28    (for example /usr/src/linux/COPYING); if not, write to the Free
29    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */
31
32 #include <linux/module.h>
33 #include <linux/config.h>
34 #include <linux/linkage.h>
35 #include <linux/raid/md.h>
36 #include <linux/sysctl.h>
37 #include <linux/devfs_fs_kernel.h>
38 #include <linux/buffer_head.h> /* for invalidate_bdev */
39 #include <linux/suspend.h>
40
41 #include <linux/init.h>
42
43 #ifdef CONFIG_KMOD
44 #include <linux/kmod.h>
45 #endif
46
47 #include <asm/unaligned.h>
48
49 #define MAJOR_NR MD_MAJOR
50 #define MD_DRIVER
51
52 /* 63 partitions with the alternate major number (mdp) */
53 #define MdpMinorShift 6
54
55 #define DEBUG 0
56 #define dprintk(x...) ((void)(DEBUG && printk(x)))
57
58
59 #ifndef MODULE
60 static void autostart_arrays (int part);
61 #endif
62
63 static mdk_personality_t *pers[MAX_PERSONALITY];
64 static DEFINE_SPINLOCK(pers_lock);
65
66 /*
67  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
68  * is 1000 KB/sec, so the extra system load does not show up that much.
69  * Increase it if you want to have more _guaranteed_ speed. Note that
70  * the RAID driver will use the maximum available bandwidth if the IO
71  * subsystem is idle. There is also an 'absolute maximum' reconstruction
72  * speed limit - in case reconstruction slows down your system despite
73  * idle IO detection.
74  *
75  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
76  */
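/*
 * For example, to raise the guaranteed minimum resync speed to roughly
 * 5 MB/sec (the value is purely illustrative):
 *
 *	echo 5000 > /proc/sys/dev/raid/speed_limit_min
 */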
77
78 static int sysctl_speed_limit_min = 1000;
79 static int sysctl_speed_limit_max = 200000;
80
81 static struct ctl_table_header *raid_table_header;
82
83 static ctl_table raid_table[] = {
84         {
85                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
86                 .procname       = "speed_limit_min",
87                 .data           = &sysctl_speed_limit_min,
88                 .maxlen         = sizeof(int),
89                 .mode           = 0644,
90                 .proc_handler   = &proc_dointvec,
91         },
92         {
93                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
94                 .procname       = "speed_limit_max",
95                 .data           = &sysctl_speed_limit_max,
96                 .maxlen         = sizeof(int),
97                 .mode           = 0644,
98                 .proc_handler   = &proc_dointvec,
99         },
100         { .ctl_name = 0 }
101 };
102
103 static ctl_table raid_dir_table[] = {
104         {
105                 .ctl_name       = DEV_RAID,
106                 .procname       = "raid",
107                 .maxlen         = 0,
108                 .mode           = 0555,
109                 .child          = raid_table,
110         },
111         { .ctl_name = 0 }
112 };
113
114 static ctl_table raid_root_table[] = {
115         {
116                 .ctl_name       = CTL_DEV,
117                 .procname       = "dev",
118                 .maxlen         = 0,
119                 .mode           = 0555,
120                 .child          = raid_dir_table,
121         },
122         { .ctl_name = 0 }
123 };
124
125 static struct block_device_operations md_fops;
126
127 /*
128  * Enables iteration over all existing md arrays;
129  * all_mddevs_lock protects this list.
130  */
131 static LIST_HEAD(all_mddevs);
132 static DEFINE_SPINLOCK(all_mddevs_lock);
133
134
135 /*
136  * iterates through all used mddevs in the system.
137  * We take care to grab the all_mddevs_lock whenever navigating
138  * the list, and to always hold a refcount when unlocked.
139  * Any code which breaks out of this loop still owns a reference to
140  * the current mddev and must mddev_put it (see the sketch below).
141  */
142 #define ITERATE_MDDEV(mddev,tmp)                                        \
143                                                                         \
144         for (({ spin_lock(&all_mddevs_lock);                            \
145                 tmp = all_mddevs.next;                                  \
146                 mddev = NULL;});                                        \
147              ({ if (tmp != &all_mddevs)                                 \
148                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
149                 spin_unlock(&all_mddevs_lock);                          \
150                 if (mddev) mddev_put(mddev);                            \
151                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
152                 tmp != &all_mddevs;});                                  \
153              ({ spin_lock(&all_mddevs_lock);                            \
154                 tmp = tmp->next;})                                      \
155                 )
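/*
 * Usage sketch (the 'unit' test is purely illustrative): inside the loop
 * body a reference to the current mddev is held, and breaking out of the
 * loop keeps that reference, so the caller must drop it itself:
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		if (mddev->unit == unit) {
 *			mddev_put(mddev);	(drop the reference the macro took)
 *			break;
 *		}
 *	}
 */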
156
157
158 static int md_fail_request (request_queue_t *q, struct bio *bio)
159 {
160         bio_io_error(bio, bio->bi_size);
161         return 0;
162 }
163
164 static inline mddev_t *mddev_get(mddev_t *mddev)
165 {
166         atomic_inc(&mddev->active);
167         return mddev;
168 }
169
170 static void mddev_put(mddev_t *mddev)
171 {
172         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
173                 return;
174         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
175                 list_del(&mddev->all_mddevs);
176                 blk_put_queue(mddev->queue);
177                 kfree(mddev);
178         }
179         spin_unlock(&all_mddevs_lock);
180 }
181
182 static mddev_t * mddev_find(dev_t unit)
183 {
184         mddev_t *mddev, *new = NULL;
185
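	/*
	 * The list walk below runs under all_mddevs_lock.  If no existing
	 * mddev matches and nothing has been allocated yet, the lock is
	 * dropped, a new mddev is set up, and we jump back here to re-check
	 * for a racing creator before inserting it into all_mddevs.
	 */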
186  retry:
187         spin_lock(&all_mddevs_lock);
188         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
189                 if (mddev->unit == unit) {
190                         mddev_get(mddev);
191                         spin_unlock(&all_mddevs_lock);
192                         if (new)
193                                 kfree(new);
194                         return mddev;
195                 }
196
197         if (new) {
198                 list_add(&new->all_mddevs, &all_mddevs);
199                 spin_unlock(&all_mddevs_lock);
200                 return new;
201         }
202         spin_unlock(&all_mddevs_lock);
203
204         new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
205         if (!new)
206                 return NULL;
207
208         memset(new, 0, sizeof(*new));
209
210         new->unit = unit;
211         if (MAJOR(unit) == MD_MAJOR)
212                 new->md_minor = MINOR(unit);
213         else
214                 new->md_minor = MINOR(unit) >> MdpMinorShift;
215
216         init_MUTEX(&new->reconfig_sem);
217         INIT_LIST_HEAD(&new->disks);
218         INIT_LIST_HEAD(&new->all_mddevs);
219         init_timer(&new->safemode_timer);
220         atomic_set(&new->active, 1);
221
222         new->queue = blk_alloc_queue(GFP_KERNEL);
223         if (!new->queue) {
224                 kfree(new);
225                 return NULL;
226         }
227
228         blk_queue_make_request(new->queue, md_fail_request);
229
230         goto retry;
231 }
232
233 static inline int mddev_lock(mddev_t * mddev)
234 {
235         return down_interruptible(&mddev->reconfig_sem);
236 }
237
238 static inline void mddev_lock_uninterruptible(mddev_t * mddev)
239 {
240         down(&mddev->reconfig_sem);
241 }
242
243 static inline int mddev_trylock(mddev_t * mddev)
244 {
245         return down_trylock(&mddev->reconfig_sem);
246 }
247
248 static inline void mddev_unlock(mddev_t * mddev)
249 {
250         up(&mddev->reconfig_sem);
251
252         if (mddev->thread)
253                 md_wakeup_thread(mddev->thread);
254 }
255
256 mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
257 {
258         mdk_rdev_t * rdev;
259         struct list_head *tmp;
260
261         ITERATE_RDEV(mddev,rdev,tmp) {
262                 if (rdev->desc_nr == nr)
263                         return rdev;
264         }
265         return NULL;
266 }
267
268 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
269 {
270         struct list_head *tmp;
271         mdk_rdev_t *rdev;
272
273         ITERATE_RDEV(mddev,rdev,tmp) {
274                 if (rdev->bdev->bd_dev == dev)
275                         return rdev;
276         }
277         return NULL;
278 }
279
280 inline static sector_t calc_dev_sboffset(struct block_device *bdev)
281 {
282         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
283         return MD_NEW_SIZE_BLOCKS(size);
284 }
285
286 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
287 {
288         sector_t size;
289
290         size = rdev->sb_offset;
291
292         if (chunk_size)
293                 size &= ~((sector_t)chunk_size/1024 - 1);
294         return size;
295 }
296
297 static int alloc_disk_sb(mdk_rdev_t * rdev)
298 {
299         if (rdev->sb_page)
300                 MD_BUG();
301
302         rdev->sb_page = alloc_page(GFP_KERNEL);
303         if (!rdev->sb_page) {
304                 printk(KERN_ALERT "md: out of memory.\n");
305                 return -EINVAL;
306         }
307
308         return 0;
309 }
310
311 static void free_disk_sb(mdk_rdev_t * rdev)
312 {
313         if (rdev->sb_page) {
314                 page_cache_release(rdev->sb_page);
315                 rdev->sb_loaded = 0;
316                 rdev->sb_page = NULL;
317                 rdev->sb_offset = 0;
318                 rdev->size = 0;
319         }
320 }
321
322
323 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
324 {
325         if (bio->bi_size)
326                 return 1;
327
328         complete((struct completion*)bio->bi_private);
329         return 0;
330 }
331
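/*
 * Synchronously read or write 'size' bytes of 'page' at 'sector' on 'bdev',
 * sleeping until the bio completes; returns 1 if the bio finished up to
 * date, 0 otherwise.
 */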
332 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
333                    struct page *page, int rw)
334 {
335         struct bio *bio = bio_alloc(GFP_NOIO, 1);
336         struct completion event;
337         int ret;
338
339         rw |= (1 << BIO_RW_SYNC);
340
341         bio->bi_bdev = bdev;
342         bio->bi_sector = sector;
343         bio_add_page(bio, page, size, 0);
344         init_completion(&event);
345         bio->bi_private = &event;
346         bio->bi_end_io = bi_complete;
347         submit_bio(rw, bio);
348         wait_for_completion(&event);
349
350         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
351         bio_put(bio);
352         return ret;
353 }
354
355 static int read_disk_sb(mdk_rdev_t * rdev)
356 {
357         char b[BDEVNAME_SIZE];
358         if (!rdev->sb_page) {
359                 MD_BUG();
360                 return -EINVAL;
361         }
362         if (rdev->sb_loaded)
363                 return 0;
364
365
366         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
367                 goto fail;
368         rdev->sb_loaded = 1;
369         return 0;
370
371 fail:
372         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
373                 bdevname(rdev->bdev,b));
374         return -EINVAL;
375 }
376
377 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
378 {
379         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
380                 (sb1->set_uuid1 == sb2->set_uuid1) &&
381                 (sb1->set_uuid2 == sb2->set_uuid2) &&
382                 (sb1->set_uuid3 == sb2->set_uuid3))
383
384                 return 1;
385
386         return 0;
387 }
388
389
390 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
391 {
392         int ret;
393         mdp_super_t *tmp1, *tmp2;
394
395         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
396         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
397
398         if (!tmp1 || !tmp2) {
399                 ret = 0;
400                 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
401                 goto abort;
402         }
403
404         *tmp1 = *sb1;
405         *tmp2 = *sb2;
406
407         /*
408          * nr_disks is not constant
409          */
410         tmp1->nr_disks = 0;
411         tmp2->nr_disks = 0;
412
413         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
414                 ret = 0;
415         else
416                 ret = 1;
417
418 abort:
419         if (tmp1)
420                 kfree(tmp1);
421         if (tmp2)
422                 kfree(tmp2);
423
424         return ret;
425 }
426
427 static unsigned int calc_sb_csum(mdp_super_t * sb)
428 {
429         unsigned int disk_csum, csum;
430
431         disk_csum = sb->sb_csum;
432         sb->sb_csum = 0;
433         csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
434         sb->sb_csum = disk_csum;
435         return csum;
436 }
437
438
439 /*
440  * Handle superblock details.
441  * We want to be able to handle multiple superblock formats
442  * so we have a common interface to them all, and an array of
443  * different handlers.
444  * We rely on user-space to write the initial superblock, and support
445  * reading and updating of superblocks.
446  * Interface methods are:
447  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
448  *      loads and validates a superblock on dev.
449  *      if refdev != NULL, compare superblocks on both devices
450  *    Return:
451  *      0 - dev has a superblock that is compatible with refdev
452  *      1 - dev has a superblock that is compatible and newer than refdev
453  *          so dev should be used as the refdev in future
454  *     -EINVAL superblock incompatible or invalid
455  *     -othererror e.g. -EIO
456  *
457  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
458  *      Verify that dev is acceptable into mddev.
459  *       The first time, mddev->raid_disks will be 0, and data from
460  *       dev should be merged in.  Subsequent calls check that dev
461  *       is new enough.  Return 0 or -EINVAL
462  *
463  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
464  *     Update the superblock for rdev with data in mddev
465  *     This does not write to disc.
466  *
467  */
468
469 struct super_type  {
470         char            *name;
471         struct module   *owner;
472         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
473         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
474         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
475 };
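/*
 * Sketch of how the load_super() return convention is typically consumed
 * when scanning candidate devices ('ver' and 'minor' are illustrative;
 * analyze_sbs() below does essentially this):
 *
 *	switch (super_types[ver].load_super(rdev, freshest, minor)) {
 *	case 1:			(newer: becomes the new refdev)
 *		freshest = rdev;
 *		break;
 *	case 0:			(compatible with the current refdev)
 *		break;
 *	default:		(invalid or incompatible)
 *		kick_rdev_from_array(rdev);
 *	}
 */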
476
477 /*
478  * load_super for 0.90.0 
479  */
480 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
481 {
482         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
483         mdp_super_t *sb;
484         int ret;
485         sector_t sb_offset;
486
487         /*
488          * Calculate the position of the superblock,
489          * it's at the end of the disk.
490          *
491          * It also happens to be a multiple of 4Kb.
492          */
493         sb_offset = calc_dev_sboffset(rdev->bdev);
494         rdev->sb_offset = sb_offset;
495
496         ret = read_disk_sb(rdev);
497         if (ret) return ret;
498
499         ret = -EINVAL;
500
501         bdevname(rdev->bdev, b);
502         sb = (mdp_super_t*)page_address(rdev->sb_page);
503
504         if (sb->md_magic != MD_SB_MAGIC) {
505                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
506                        b);
507                 goto abort;
508         }
509
510         if (sb->major_version != 0 ||
511             sb->minor_version != 90) {
512                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
513                         sb->major_version, sb->minor_version,
514                         b);
515                 goto abort;
516         }
517
518         if (sb->raid_disks <= 0)
519                 goto abort;
520
521         if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
522                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
523                         b);
524                 goto abort;
525         }
526
527         rdev->preferred_minor = sb->md_minor;
528         rdev->data_offset = 0;
529
530         if (sb->level == LEVEL_MULTIPATH)
531                 rdev->desc_nr = -1;
532         else
533                 rdev->desc_nr = sb->this_disk.number;
534
535         if (refdev == 0)
536                 ret = 1;
537         else {
538                 __u64 ev1, ev2;
539                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
540                 if (!uuid_equal(refsb, sb)) {
541                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
542                                 b, bdevname(refdev->bdev,b2));
543                         goto abort;
544                 }
545                 if (!sb_equal(refsb, sb)) {
546                         printk(KERN_WARNING "md: %s has same UUID"
547                                " but different superblock to %s\n",
548                                b, bdevname(refdev->bdev, b2));
549                         goto abort;
550                 }
551                 ev1 = md_event(sb);
552                 ev2 = md_event(refsb);
553                 if (ev1 > ev2)
554                         ret = 1;
555                 else 
556                         ret = 0;
557         }
558         rdev->size = calc_dev_size(rdev, sb->chunk_size);
559
560  abort:
561         return ret;
562 }
563
564 /*
565  * validate_super for 0.90.0
566  */
567 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
568 {
569         mdp_disk_t *desc;
570         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
571
572         if (mddev->raid_disks == 0) {
573                 mddev->major_version = 0;
574                 mddev->minor_version = sb->minor_version;
575                 mddev->patch_version = sb->patch_version;
576                 mddev->persistent = ! sb->not_persistent;
577                 mddev->chunk_size = sb->chunk_size;
578                 mddev->ctime = sb->ctime;
579                 mddev->utime = sb->utime;
580                 mddev->level = sb->level;
581                 mddev->layout = sb->layout;
582                 mddev->raid_disks = sb->raid_disks;
583                 mddev->size = sb->size;
584                 mddev->events = md_event(sb);
585
586                 if (sb->state & (1<<MD_SB_CLEAN))
587                         mddev->recovery_cp = MaxSector;
588                 else {
589                         if (sb->events_hi == sb->cp_events_hi && 
590                                 sb->events_lo == sb->cp_events_lo) {
591                                 mddev->recovery_cp = sb->recovery_cp;
592                         } else
593                                 mddev->recovery_cp = 0;
594                 }
595
596                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
597                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
598                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
599                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
600
601                 mddev->max_disks = MD_SB_DISKS;
602         } else {
603                 __u64 ev1;
604                 ev1 = md_event(sb);
605                 ++ev1;
606                 if (ev1 < mddev->events) 
607                         return -EINVAL;
608         }
609         if (mddev->level != LEVEL_MULTIPATH) {
610                 rdev->raid_disk = -1;
611                 rdev->in_sync = rdev->faulty = 0;
612                 desc = sb->disks + rdev->desc_nr;
613
614                 if (desc->state & (1<<MD_DISK_FAULTY))
615                         rdev->faulty = 1;
616                 else if (desc->state & (1<<MD_DISK_SYNC) &&
617                          desc->raid_disk < mddev->raid_disks) {
618                         rdev->in_sync = 1;
619                         rdev->raid_disk = desc->raid_disk;
620                 }
621         }
622         return 0;
623 }
624
625 /*
626  * sync_super for 0.90.0
627  */
628 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
629 {
630         mdp_super_t *sb;
631         struct list_head *tmp;
632         mdk_rdev_t *rdev2;
633         int next_spare = mddev->raid_disks;
634
635         /* make rdev->sb match mddev data..
636          *
637          * 1/ zero out disks
638          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
639          * 3/ any empty disks < next_spare become removed
640          *
641          * disks[0] gets initialised to REMOVED because
642          * we cannot be sure from other fields if it has
643          * been initialised or not.
644          */
645         int i;
646         int active=0, working=0,failed=0,spare=0,nr_disks=0;
647
648         sb = (mdp_super_t*)page_address(rdev->sb_page);
649
650         memset(sb, 0, sizeof(*sb));
651
652         sb->md_magic = MD_SB_MAGIC;
653         sb->major_version = mddev->major_version;
654         sb->minor_version = mddev->minor_version;
655         sb->patch_version = mddev->patch_version;
656         sb->gvalid_words  = 0; /* ignored */
657         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
658         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
659         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
660         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
661
662         sb->ctime = mddev->ctime;
663         sb->level = mddev->level;
664         sb->size  = mddev->size;
665         sb->raid_disks = mddev->raid_disks;
666         sb->md_minor = mddev->md_minor;
667         sb->not_persistent = !mddev->persistent;
668         sb->utime = mddev->utime;
669         sb->state = 0;
670         sb->events_hi = (mddev->events>>32);
671         sb->events_lo = (u32)mddev->events;
672
673         if (mddev->in_sync)
674         {
675                 sb->recovery_cp = mddev->recovery_cp;
676                 sb->cp_events_hi = (mddev->events>>32);
677                 sb->cp_events_lo = (u32)mddev->events;
678                 if (mddev->recovery_cp == MaxSector)
679                         sb->state = (1<< MD_SB_CLEAN);
680         } else
681                 sb->recovery_cp = 0;
682
683         sb->layout = mddev->layout;
684         sb->chunk_size = mddev->chunk_size;
685
686         sb->disks[0].state = (1<<MD_DISK_REMOVED);
687         ITERATE_RDEV(mddev,rdev2,tmp) {
688                 mdp_disk_t *d;
689                 if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
690                         rdev2->desc_nr = rdev2->raid_disk;
691                 else
692                         rdev2->desc_nr = next_spare++;
693                 d = &sb->disks[rdev2->desc_nr];
694                 nr_disks++;
695                 d->number = rdev2->desc_nr;
696                 d->major = MAJOR(rdev2->bdev->bd_dev);
697                 d->minor = MINOR(rdev2->bdev->bd_dev);
698                 if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
699                         d->raid_disk = rdev2->raid_disk;
700                 else
701                         d->raid_disk = rdev2->desc_nr; /* compatibility */
702                 if (rdev2->faulty) {
703                         d->state = (1<<MD_DISK_FAULTY);
704                         failed++;
705                 } else if (rdev2->in_sync) {
706                         d->state = (1<<MD_DISK_ACTIVE);
707                         d->state |= (1<<MD_DISK_SYNC);
708                         active++;
709                         working++;
710                 } else {
711                         d->state = 0;
712                         spare++;
713                         working++;
714                 }
715         }
716         
717         /* now set the "removed" and "faulty" bits on any missing devices */
718         for (i=0 ; i < mddev->raid_disks ; i++) {
719                 mdp_disk_t *d = &sb->disks[i];
720                 if (d->state == 0 && d->number == 0) {
721                         d->number = i;
722                         d->raid_disk = i;
723                         d->state = (1<<MD_DISK_REMOVED);
724                         d->state |= (1<<MD_DISK_FAULTY);
725                         failed++;
726                 }
727         }
728         sb->nr_disks = nr_disks;
729         sb->active_disks = active;
730         sb->working_disks = working;
731         sb->failed_disks = failed;
732         sb->spare_disks = spare;
733
734         sb->this_disk = sb->disks[rdev->desc_nr];
735         sb->sb_csum = calc_sb_csum(sb);
736 }
737
738 /*
739  * version 1 superblock
740  */
741
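/*
 * Checksum for a version-1 superblock: sum the superblock as little-endian
 * 32-bit words (256 bytes plus two bytes per device role), folding the
 * 64-bit running total down to 32 bits at the end.
 */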
742 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
743 {
744         unsigned int disk_csum, csum;
745         unsigned long long newcsum;
746         int size = 256 + le32_to_cpu(sb->max_dev)*2;
747         unsigned int *isuper = (unsigned int*)sb;
748         int i;
749
750         disk_csum = sb->sb_csum;
751         sb->sb_csum = 0;
752         newcsum = 0;
753         for (i=0; size>=4; size -= 4 )
754                 newcsum += le32_to_cpu(*isuper++);
755
756         if (size == 2)
757                 newcsum += le16_to_cpu(*(unsigned short*) isuper);
758
759         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
760         sb->sb_csum = disk_csum;
761         return cpu_to_le32(csum);
762 }
763
764 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
765 {
766         struct mdp_superblock_1 *sb;
767         int ret;
768         sector_t sb_offset;
769         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
770
771         /*
772          * Calculate the position of the superblock.
773          * It is always aligned to a 4K boundary and
774  * depending on minor_version, it can be:
775          * 0: At least 8K, but less than 12K, from end of device
776          * 1: At start of device
777          * 2: 4K from start of device.
778          */
779         switch(minor_version) {
780         case 0:
781                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
782                 sb_offset -= 8*2;
783                 sb_offset &= ~(4*2-1);
784                 /* convert from sectors to K */
785                 sb_offset /= 2;
786                 break;
787         case 1:
788                 sb_offset = 0;
789                 break;
790         case 2:
791                 sb_offset = 4;
792                 break;
793         default:
794                 return -EINVAL;
795         }
796         rdev->sb_offset = sb_offset;
797
798         ret = read_disk_sb(rdev);
799         if (ret) return ret;
800
801
802         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
803
804         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
805             sb->major_version != cpu_to_le32(1) ||
806             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
807             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
808             sb->feature_map != 0)
809                 return -EINVAL;
810
811         if (calc_sb_1_csum(sb) != sb->sb_csum) {
812                 printk("md: invalid superblock checksum on %s\n",
813                         bdevname(rdev->bdev,b));
814                 return -EINVAL;
815         }
816         if (le64_to_cpu(sb->data_size) < 10) {
817                 printk("md: data_size too small on %s\n",
818                        bdevname(rdev->bdev,b));
819                 return -EINVAL;
820         }
821         rdev->preferred_minor = 0xffff;
822         rdev->data_offset = le64_to_cpu(sb->data_offset);
823
824         if (refdev == 0)
825                 return 1;
826         else {
827                 __u64 ev1, ev2;
828                 struct mdp_superblock_1 *refsb = 
829                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
830
831                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
832                     sb->level != refsb->level ||
833                     sb->layout != refsb->layout ||
834                     sb->chunksize != refsb->chunksize) {
835                         printk(KERN_WARNING "md: %s has strangely different"
836                                 " superblock to %s\n",
837                                 bdevname(rdev->bdev,b),
838                                 bdevname(refdev->bdev,b2));
839                         return -EINVAL;
840                 }
841                 ev1 = le64_to_cpu(sb->events);
842                 ev2 = le64_to_cpu(refsb->events);
843
844                 if (ev1 > ev2)
845                         return 1;
846         }
847         if (minor_version) 
848                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
849         else
850                 rdev->size = rdev->sb_offset;
851         if (rdev->size < le64_to_cpu(sb->data_size)/2)
852                 return -EINVAL;
853         rdev->size = le64_to_cpu(sb->data_size)/2;
854         if (le32_to_cpu(sb->chunksize))
855                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
856         return 0;
857 }
858
859 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
860 {
861         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
862
863         if (mddev->raid_disks == 0) {
864                 mddev->major_version = 1;
865                 mddev->patch_version = 0;
866                 mddev->persistent = 1;
867                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
868                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
869                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
870                 mddev->level = le32_to_cpu(sb->level);
871                 mddev->layout = le32_to_cpu(sb->layout);
872                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
873                 mddev->size = le64_to_cpu(sb->size)/2;
874                 mddev->events = le64_to_cpu(sb->events);
875                 
876                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
877                 memcpy(mddev->uuid, sb->set_uuid, 16);
878
879                 mddev->max_disks =  (4096-256)/2;
880         } else {
881                 __u64 ev1;
882                 ev1 = le64_to_cpu(sb->events);
883                 ++ev1;
884                 if (ev1 < mddev->events)
885                         return -EINVAL;
886         }
887
888         if (mddev->level != LEVEL_MULTIPATH) {
889                 int role;
890                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
891                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
892                 switch(role) {
893                 case 0xffff: /* spare */
894                         rdev->in_sync = 0;
895                         rdev->faulty = 0;
896                         rdev->raid_disk = -1;
897                         break;
898                 case 0xfffe: /* faulty */
899                         rdev->in_sync = 0;
900                         rdev->faulty = 1;
901                         rdev->raid_disk = -1;
902                         break;
903                 default:
904                         rdev->in_sync = 1;
905                         rdev->faulty = 0;
906                         rdev->raid_disk = role;
907                         break;
908                 }
909         }
910         return 0;
911 }
912
913 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
914 {
915         struct mdp_superblock_1 *sb;
916         struct list_head *tmp;
917         mdk_rdev_t *rdev2;
918         int max_dev, i;
919         /* make rdev->sb match mddev and rdev data. */
920
921         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
922
923         sb->feature_map = 0;
924         sb->pad0 = 0;
925         memset(sb->pad1, 0, sizeof(sb->pad1));
926         memset(sb->pad2, 0, sizeof(sb->pad2));
927         memset(sb->pad3, 0, sizeof(sb->pad3));
928
929         sb->utime = cpu_to_le64((__u64)mddev->utime);
930         sb->events = cpu_to_le64(mddev->events);
931         if (mddev->in_sync)
932                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
933         else
934                 sb->resync_offset = cpu_to_le64(0);
935
936         max_dev = 0;
937         ITERATE_RDEV(mddev,rdev2,tmp)
938                 if (rdev2->desc_nr+1 > max_dev)
939                         max_dev = rdev2->desc_nr+1;
940         
941         sb->max_dev = cpu_to_le32(max_dev);
942         for (i=0; i<max_dev;i++)
943                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
944         
945         ITERATE_RDEV(mddev,rdev2,tmp) {
946                 i = rdev2->desc_nr;
947                 if (rdev2->faulty)
948                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
949                 else if (rdev2->in_sync)
950                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
951                 else
952                         sb->dev_roles[i] = cpu_to_le16(0xffff);
953         }
954
955         sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
956         sb->sb_csum = calc_sb_1_csum(sb);
957 }
958
959
960 struct super_type super_types[] = {
961         [0] = {
962                 .name   = "0.90.0",
963                 .owner  = THIS_MODULE,
964                 .load_super     = super_90_load,
965                 .validate_super = super_90_validate,
966                 .sync_super     = super_90_sync,
967         },
968         [1] = {
969                 .name   = "md-1",
970                 .owner  = THIS_MODULE,
971                 .load_super     = super_1_load,
972                 .validate_super = super_1_validate,
973                 .sync_super     = super_1_sync,
974         },
975 };
976         
977 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
978 {
979         struct list_head *tmp;
980         mdk_rdev_t *rdev;
981
982         ITERATE_RDEV(mddev,rdev,tmp)
983                 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
984                         return rdev;
985
986         return NULL;
987 }
988
989 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
990 {
991         struct list_head *tmp;
992         mdk_rdev_t *rdev;
993
994         ITERATE_RDEV(mddev1,rdev,tmp)
995                 if (match_dev_unit(mddev2, rdev))
996                         return 1;
997
998         return 0;
999 }
1000
1001 static LIST_HEAD(pending_raid_disks);
1002
1003 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1004 {
1005         mdk_rdev_t *same_pdev;
1006         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1007
1008         if (rdev->mddev) {
1009                 MD_BUG();
1010                 return -EINVAL;
1011         }
1012         same_pdev = match_dev_unit(mddev, rdev);
1013         if (same_pdev)
1014                 printk(KERN_WARNING
1015                         "%s: WARNING: %s appears to be on the same physical"
1016                         " disk as %s. True\n     protection against single-disk"
1017                         " failure might be compromised.\n",
1018                         mdname(mddev), bdevname(rdev->bdev,b),
1019                         bdevname(same_pdev->bdev,b2));
1020
1021         /* Verify rdev->desc_nr is unique.
1022          * If it is -1, assign a free number, else
1023          * check number is not in use
1024          */
1025         if (rdev->desc_nr < 0) {
1026                 int choice = 0;
1027                 if (mddev->pers) choice = mddev->raid_disks;
1028                 while (find_rdev_nr(mddev, choice))
1029                         choice++;
1030                 rdev->desc_nr = choice;
1031         } else {
1032                 if (find_rdev_nr(mddev, rdev->desc_nr))
1033                         return -EBUSY;
1034         }
1035                         
1036         list_add(&rdev->same_set, &mddev->disks);
1037         rdev->mddev = mddev;
1038         printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
1039         return 0;
1040 }
1041
1042 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1043 {
1044         char b[BDEVNAME_SIZE];
1045         if (!rdev->mddev) {
1046                 MD_BUG();
1047                 return;
1048         }
1049         list_del_init(&rdev->same_set);
1050         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1051         rdev->mddev = NULL;
1052 }
1053
1054 /*
1055  * prevent the device from being mounted, repartitioned or
1056  * otherwise reused by a RAID array (or any other kernel
1057  * subsystem), by bd_claiming the device.
1058  */
1059 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1060 {
1061         int err = 0;
1062         struct block_device *bdev;
1063         char b[BDEVNAME_SIZE];
1064
1065         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1066         if (IS_ERR(bdev)) {
1067                 printk(KERN_ERR "md: could not open %s.\n",
1068                         __bdevname(dev, b));
1069                 return PTR_ERR(bdev);
1070         }
1071         err = bd_claim(bdev, rdev);
1072         if (err) {
1073                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1074                         bdevname(bdev, b));
1075                 blkdev_put(bdev);
1076                 return err;
1077         }
1078         rdev->bdev = bdev;
1079         return err;
1080 }
1081
1082 static void unlock_rdev(mdk_rdev_t *rdev)
1083 {
1084         struct block_device *bdev = rdev->bdev;
1085         rdev->bdev = NULL;
1086         if (!bdev)
1087                 MD_BUG();
1088         bd_release(bdev);
1089         blkdev_put(bdev);
1090 }
1091
1092 void md_autodetect_dev(dev_t dev);
1093
1094 static void export_rdev(mdk_rdev_t * rdev)
1095 {
1096         char b[BDEVNAME_SIZE];
1097         printk(KERN_INFO "md: export_rdev(%s)\n",
1098                 bdevname(rdev->bdev,b));
1099         if (rdev->mddev)
1100                 MD_BUG();
1101         free_disk_sb(rdev);
1102         list_del_init(&rdev->same_set);
1103 #ifndef MODULE
1104         md_autodetect_dev(rdev->bdev->bd_dev);
1105 #endif
1106         unlock_rdev(rdev);
1107         kfree(rdev);
1108 }
1109
1110 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1111 {
1112         unbind_rdev_from_array(rdev);
1113         export_rdev(rdev);
1114 }
1115
1116 static void export_array(mddev_t *mddev)
1117 {
1118         struct list_head *tmp;
1119         mdk_rdev_t *rdev;
1120
1121         ITERATE_RDEV(mddev,rdev,tmp) {
1122                 if (!rdev->mddev) {
1123                         MD_BUG();
1124                         continue;
1125                 }
1126                 kick_rdev_from_array(rdev);
1127         }
1128         if (!list_empty(&mddev->disks))
1129                 MD_BUG();
1130         mddev->raid_disks = 0;
1131         mddev->major_version = 0;
1132 }
1133
1134 static void print_desc(mdp_disk_t *desc)
1135 {
1136         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1137                 desc->major,desc->minor,desc->raid_disk,desc->state);
1138 }
1139
1140 static void print_sb(mdp_super_t *sb)
1141 {
1142         int i;
1143
1144         printk(KERN_INFO 
1145                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1146                 sb->major_version, sb->minor_version, sb->patch_version,
1147                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1148                 sb->ctime);
1149         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1150                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1151                 sb->md_minor, sb->layout, sb->chunk_size);
1152         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1153                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1154                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1155                 sb->failed_disks, sb->spare_disks,
1156                 sb->sb_csum, (unsigned long)sb->events_lo);
1157
1158         printk(KERN_INFO);
1159         for (i = 0; i < MD_SB_DISKS; i++) {
1160                 mdp_disk_t *desc;
1161
1162                 desc = sb->disks + i;
1163                 if (desc->number || desc->major || desc->minor ||
1164                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1165                         printk("     D %2d: ", i);
1166                         print_desc(desc);
1167                 }
1168         }
1169         printk(KERN_INFO "md:     THIS: ");
1170         print_desc(&sb->this_disk);
1171
1172 }
1173
1174 static void print_rdev(mdk_rdev_t *rdev)
1175 {
1176         char b[BDEVNAME_SIZE];
1177         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1178                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1179                 rdev->faulty, rdev->in_sync, rdev->desc_nr);
1180         if (rdev->sb_loaded) {
1181                 printk(KERN_INFO "md: rdev superblock:\n");
1182                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1183         } else
1184                 printk(KERN_INFO "md: no rdev superblock!\n");
1185 }
1186
1187 void md_print_devices(void)
1188 {
1189         struct list_head *tmp, *tmp2;
1190         mdk_rdev_t *rdev;
1191         mddev_t *mddev;
1192         char b[BDEVNAME_SIZE];
1193
1194         printk("\n");
1195         printk("md:     **********************************\n");
1196         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1197         printk("md:     **********************************\n");
1198         ITERATE_MDDEV(mddev,tmp) {
1199                 printk("%s: ", mdname(mddev));
1200
1201                 ITERATE_RDEV(mddev,rdev,tmp2)
1202                         printk("<%s>", bdevname(rdev->bdev,b));
1203                 printk("\n");
1204
1205                 ITERATE_RDEV(mddev,rdev,tmp2)
1206                         print_rdev(rdev);
1207         }
1208         printk("md:     **********************************\n");
1209         printk("\n");
1210 }
1211
1212
1213 static int write_disk_sb(mdk_rdev_t * rdev)
1214 {
1215         char b[BDEVNAME_SIZE];
1216         if (!rdev->sb_loaded) {
1217                 MD_BUG();
1218                 return 1;
1219         }
1220         if (rdev->faulty) {
1221                 MD_BUG();
1222                 return 1;
1223         }
1224
1225         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1226                 bdevname(rdev->bdev,b),
1227                (unsigned long long)rdev->sb_offset);
1228   
1229         if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
1230                 return 0;
1231
1232         printk("md: write_disk_sb failed for device %s\n", 
1233                 bdevname(rdev->bdev,b));
1234         return 1;
1235 }
1236
1237 static void sync_sbs(mddev_t * mddev)
1238 {
1239         mdk_rdev_t *rdev;
1240         struct list_head *tmp;
1241
1242         ITERATE_RDEV(mddev,rdev,tmp) {
1243                 super_types[mddev->major_version].
1244                         sync_super(mddev, rdev);
1245                 rdev->sb_loaded = 1;
1246         }
1247 }
1248
1249 static void md_update_sb(mddev_t * mddev)
1250 {
1251         int err, count = 100;
1252         struct list_head *tmp;
1253         mdk_rdev_t *rdev;
1254
1255         mddev->sb_dirty = 0;
1256 repeat:
1257         mddev->utime = get_seconds();
1258         mddev->events ++;
1259
1260         if (!mddev->events) {
1261                 /*
1262                  * oops, this 64-bit counter should never wrap.
1263                  * Either we are in around ~1 trillion A.C., assuming
1264                  * 1 reboot per second, or we have a bug:
1265                  */
1266                 MD_BUG();
1267                 mddev->events --;
1268         }
1269         sync_sbs(mddev);
1270
1271         /*
1272          * do not write anything to disk if using
1273          * nonpersistent superblocks
1274          */
1275         if (!mddev->persistent)
1276                 return;
1277
1278         dprintk(KERN_INFO 
1279                 "md: updating %s RAID superblock on device (in sync %d)\n",
1280                 mdname(mddev),mddev->in_sync);
1281
1282         err = 0;
1283         ITERATE_RDEV(mddev,rdev,tmp) {
1284                 char b[BDEVNAME_SIZE];
1285                 dprintk(KERN_INFO "md: ");
1286                 if (rdev->faulty)
1287                         dprintk("(skipping faulty ");
1288
1289                 dprintk("%s ", bdevname(rdev->bdev,b));
1290                 if (!rdev->faulty) {
1291                         err += write_disk_sb(rdev);
1292                 } else
1293                         dprintk(")\n");
1294                 if (!err && mddev->level == LEVEL_MULTIPATH)
1295                         /* only need to write one superblock... */
1296                         break;
1297         }
1298         if (err) {
1299                 if (--count) {
1300                         printk(KERN_ERR "md: errors occurred during superblock"
1301                                 " update, repeating\n");
1302                         goto repeat;
1303                 }
1304                 printk(KERN_ERR \
1305                         "md: excessive errors occurred during superblock update, exiting\n");
1306         }
1307 }
1308
1309 /*
1310  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1311  *
1312  * mark the device faulty if:
1313  *
1314  *   - the device is nonexistent (zero size)
1315  *   - the device has no valid superblock
1316  *
1317  * a faulty rdev _never_ has rdev->sb set.
1318  */
1319 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1320 {
1321         char b[BDEVNAME_SIZE];
1322         int err;
1323         mdk_rdev_t *rdev;
1324         sector_t size;
1325
1326         rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
1327         if (!rdev) {
1328                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1329                 return ERR_PTR(-ENOMEM);
1330         }
1331         memset(rdev, 0, sizeof(*rdev));
1332
1333         if ((err = alloc_disk_sb(rdev)))
1334                 goto abort_free;
1335
1336         err = lock_rdev(rdev, newdev);
1337         if (err)
1338                 goto abort_free;
1339
1340         rdev->desc_nr = -1;
1341         rdev->faulty = 0;
1342         rdev->in_sync = 0;
1343         rdev->data_offset = 0;
1344         atomic_set(&rdev->nr_pending, 0);
1345
1346         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1347         if (!size) {
1348                 printk(KERN_WARNING 
1349                         "md: %s has zero or unknown size, marking faulty!\n",
1350                         bdevname(rdev->bdev,b));
1351                 err = -EINVAL;
1352                 goto abort_free;
1353         }
1354
1355         if (super_format >= 0) {
1356                 err = super_types[super_format].
1357                         load_super(rdev, NULL, super_minor);
1358                 if (err == -EINVAL) {
1359                         printk(KERN_WARNING 
1360                                 "md: %s has invalid sb, not importing!\n",
1361                                 bdevname(rdev->bdev,b));
1362                         goto abort_free;
1363                 }
1364                 if (err < 0) {
1365                         printk(KERN_WARNING 
1366                                 "md: could not read %s's sb, not importing!\n",
1367                                 bdevname(rdev->bdev,b));
1368                         goto abort_free;
1369                 }
1370         }
1371         INIT_LIST_HEAD(&rdev->same_set);
1372
1373         return rdev;
1374
1375 abort_free:
1376         if (rdev->sb_page) {
1377                 if (rdev->bdev)
1378                         unlock_rdev(rdev);
1379                 free_disk_sb(rdev);
1380         }
1381         kfree(rdev);
1382         return ERR_PTR(err);
1383 }
1384
1385 /*
1386  * Check a full RAID array for plausibility
1387  */
1388
1389
1390 static int analyze_sbs(mddev_t * mddev)
1391 {
1392         int i;
1393         struct list_head *tmp;
1394         mdk_rdev_t *rdev, *freshest;
1395         char b[BDEVNAME_SIZE];
1396
1397         freshest = NULL;
1398         ITERATE_RDEV(mddev,rdev,tmp)
1399                 switch (super_types[mddev->major_version].
1400                         load_super(rdev, freshest, mddev->minor_version)) {
1401                 case 1:
1402                         freshest = rdev;
1403                         break;
1404                 case 0:
1405                         break;
1406                 default:
1407                         printk( KERN_ERR \
1408                                 "md: fatal superblock inconsistency in %s"
1409                                 " -- removing from array\n", 
1410                                 bdevname(rdev->bdev,b));
1411                         kick_rdev_from_array(rdev);
1412                 }
1413
1414
1415         super_types[mddev->major_version].
1416                 validate_super(mddev, freshest);
1417
1418         i = 0;
1419         ITERATE_RDEV(mddev,rdev,tmp) {
1420                 if (rdev != freshest)
1421                         if (super_types[mddev->major_version].
1422                             validate_super(mddev, rdev)) {
1423                                 printk(KERN_WARNING "md: kicking non-fresh %s"
1424                                         " from array!\n",
1425                                         bdevname(rdev->bdev,b));
1426                                 kick_rdev_from_array(rdev);
1427                                 continue;
1428                         }
1429                 if (mddev->level == LEVEL_MULTIPATH) {
1430                         rdev->desc_nr = i++;
1431                         rdev->raid_disk = rdev->desc_nr;
1432                         rdev->in_sync = 1;
1433                 }
1434         }
1435
1436
1437
1438         if (mddev->recovery_cp != MaxSector &&
1439             mddev->level >= 1)
1440                 printk(KERN_ERR "md: %s: raid array is not clean"
1441                        " -- starting background reconstruction\n",
1442                        mdname(mddev));
1443
1444         return 0;
1445 }
1446
1447 int mdp_major = 0;
1448
1449 static struct kobject *md_probe(dev_t dev, int *part, void *data)
1450 {
1451         static DECLARE_MUTEX(disks_sem);
1452         mddev_t *mddev = mddev_find(dev);
1453         struct gendisk *disk;
1454         int partitioned = (MAJOR(dev) != MD_MAJOR);
1455         int shift = partitioned ? MdpMinorShift : 0;
1456         int unit = MINOR(dev) >> shift;
1457
1458         if (!mddev)
1459                 return NULL;
1460
1461         down(&disks_sem);
1462         if (mddev->gendisk) {
1463                 up(&disks_sem);
1464                 mddev_put(mddev);
1465                 return NULL;
1466         }
1467         disk = alloc_disk(1 << shift);
1468         if (!disk) {
1469                 up(&disks_sem);
1470                 mddev_put(mddev);
1471                 return NULL;
1472         }
1473         disk->major = MAJOR(dev);
1474         disk->first_minor = unit << shift;
1475         if (partitioned) {
1476                 sprintf(disk->disk_name, "md_d%d", unit);
1477                 sprintf(disk->devfs_name, "md/d%d", unit);
1478         } else {
1479                 sprintf(disk->disk_name, "md%d", unit);
1480                 sprintf(disk->devfs_name, "md/%d", unit);
1481         }
1482         disk->fops = &md_fops;
1483         disk->private_data = mddev;
1484         disk->queue = mddev->queue;
1485         add_disk(disk);
1486         mddev->gendisk = disk;
1487         up(&disks_sem);
1488         return NULL;
1489 }
1490
1491 void md_wakeup_thread(mdk_thread_t *thread);
1492
1493 static void md_safemode_timeout(unsigned long data)
1494 {
1495         mddev_t *mddev = (mddev_t *) data;
1496
1497         mddev->safemode = 1;
1498         md_wakeup_thread(mddev->thread);
1499 }
1500
1501
1502 static int do_md_run(mddev_t * mddev)
1503 {
1504         int pnum, err;
1505         int chunk_size;
1506         struct list_head *tmp;
1507         mdk_rdev_t *rdev;
1508         struct gendisk *disk;
1509         char b[BDEVNAME_SIZE];
1510
1511         if (list_empty(&mddev->disks)) {
1512                 MD_BUG();
1513                 return -EINVAL;
1514         }
1515
1516         if (mddev->pers)
1517                 return -EBUSY;
1518
1519         /*
1520          * Analyze all RAID superblock(s)
1521          */
1522         if (!mddev->raid_disks && analyze_sbs(mddev)) {
1523                 MD_BUG();
1524                 return -EINVAL;
1525         }
1526
1527         chunk_size = mddev->chunk_size;
1528         pnum = level_to_pers(mddev->level);
1529
1530         if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1531                 if (!chunk_size) {
1532                         /*
1533                          * 'default chunksize' in the old md code used to
1534                          * be PAGE_SIZE, baaad.
1535                          * we abort here to be on the safe side. We don't
1536                          * want to continue the bad practice.
1537                          */
1538                         printk(KERN_ERR 
1539                                 "no chunksize specified, see 'man raidtab'\n");
1540                         return -EINVAL;
1541                 }
1542                 if (chunk_size > MAX_CHUNK_SIZE) {
1543                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
1544                                 chunk_size, MAX_CHUNK_SIZE);
1545                         return -EINVAL;
1546                 }
1547                 /*
1548          * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
1549                  */
1550                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1551                         MD_BUG();
1552                         return -EINVAL;
1553                 }
1554                 if (chunk_size < PAGE_SIZE) {
1555                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1556                                 chunk_size, PAGE_SIZE);
1557                         return -EINVAL;
1558                 }
1559
1560                 /* devices must have minimum size of one chunk */
1561                 ITERATE_RDEV(mddev,rdev,tmp) {
1562                         if (rdev->faulty)
1563                                 continue;
1564                         if (rdev->size < chunk_size / 1024) {
1565                                 printk(KERN_WARNING
1566                                         "md: Dev %s smaller than chunk_size:"
1567                                         " %lluk < %dk\n",
1568                                         bdevname(rdev->bdev,b),
1569                                         (unsigned long long)rdev->size,
1570                                         chunk_size / 1024);
1571                                 return -EINVAL;
1572                         }
1573                 }
1574         }
1575
1576         if (pnum >= MAX_PERSONALITY) {
1577                 MD_BUG();
1578                 return -EINVAL;
1579         }
1580
1581 #ifdef CONFIG_KMOD
1582         if (!pers[pnum])
1583         {
1584                 request_module("md-personality-%d", pnum);
1585         }
1586 #endif
1587
1588         /*
1589          * Drop all container device buffers; from now on
1590          * the only valid external interface is through the md
1591          * device.
1592          * (The loop below only syncs and invalidates each component device.)
1593          */
1594         ITERATE_RDEV(mddev,rdev,tmp) {
1595                 if (rdev->faulty)
1596                         continue;
1597                 sync_blockdev(rdev->bdev);
1598                 invalidate_bdev(rdev->bdev, 0);
1599         }
1600
1601         md_probe(mddev->unit, NULL, NULL);
1602         disk = mddev->gendisk;
1603         if (!disk)
1604                 return -ENOMEM;
1605
1606         spin_lock(&pers_lock);
1607         if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1608                 spin_unlock(&pers_lock);
1609                 printk(KERN_WARNING "md: personality %d is not loaded!\n",
1610                        pnum);
1611                 return -EINVAL;
1612         }
1613
1614         mddev->pers = pers[pnum];
1615         spin_unlock(&pers_lock);
1616
1617         mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
1618
1619         err = mddev->pers->run(mddev);
1620         if (err) {
1621                 printk(KERN_ERR "md: pers->run() failed ...\n");
1622                 module_put(mddev->pers->owner);
1623                 mddev->pers = NULL;
1624                 return -EINVAL;
1625         }
1626         atomic_set(&mddev->writes_pending,0);
1627         mddev->safemode = 0;
1628         mddev->safemode_timer.function = md_safemode_timeout;
1629         mddev->safemode_timer.data = (unsigned long) mddev;
1630         mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
1631         mddev->in_sync = 1;
1632         
1633         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1634         
1635         if (mddev->sb_dirty)
1636                 md_update_sb(mddev);
1637
1638         set_capacity(disk, mddev->array_size<<1);
1639
1640         /* If we call blk_queue_make_request here, it will
1641          * re-initialise max_sectors etc., which may have been
1642          * refined inside ->run().  So just set the bits we need to set.
1643          * Most initialisation happened when we called
1644          * blk_queue_make_request(..., md_fail_request)
1645          * earlier.
1646          */
1647         mddev->queue->queuedata = mddev;
1648         mddev->queue->make_request_fn = mddev->pers->make_request;
1649
1650         mddev->changed = 1;
1651         return 0;
1652 }
1653
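/*
 * restart_array(): switch an already-assembled, read-only array back to
 * read-write mode and kick any pending recovery/resync.
 */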
1654 static int restart_array(mddev_t *mddev)
1655 {
1656         struct gendisk *disk = mddev->gendisk;
1657         int err;
1658
1659         /*
1660          * Complain if it has no devices
1661          */
1662         err = -ENXIO;
1663         if (list_empty(&mddev->disks))
1664                 goto out;
1665
1666         if (mddev->pers) {
1667                 err = -EBUSY;
1668                 if (!mddev->ro)
1669                         goto out;
1670
1671                 mddev->safemode = 0;
1672                 mddev->ro = 0;
1673                 set_disk_ro(disk, 0);
1674
1675                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
1676                         mdname(mddev));
1677                 /*
1678                  * Kick recovery or resync if necessary
1679                  */
1680                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1681                 md_wakeup_thread(mddev->thread);
1682                 err = 0;
1683         } else {
1684                 printk(KERN_ERR "md: %s has no personality assigned.\n",
1685                         mdname(mddev));
1686                 err = -EINVAL;
1687         }
1688
1689 out:
1690         return err;
1691 }
1692
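/*
 * do_md_stop(): stop the array.  If 'ro' is non-zero the array is only
 * switched to read-only; otherwise the personality is shut down and all
 * resources are released.
 */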
1693 static int do_md_stop(mddev_t * mddev, int ro)
1694 {
1695         int err = 0;
1696         struct gendisk *disk = mddev->gendisk;
1697
1698         if (mddev->pers) {
1699                 if (atomic_read(&mddev->active)>2) {
1700                         printk("md: %s still in use.\n",mdname(mddev));
1701                         return -EBUSY;
1702                 }
1703
1704                 if (mddev->sync_thread) {
1705                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1706                         md_unregister_thread(mddev->sync_thread);
1707                         mddev->sync_thread = NULL;
1708                 }
1709
1710                 del_timer_sync(&mddev->safemode_timer);
1711
1712                 invalidate_partition(disk, 0);
1713
1714                 if (ro) {
1715                         err  = -ENXIO;
1716                         if (mddev->ro)
1717                                 goto out;
1718                         mddev->ro = 1;
1719                 } else {
1720                         if (mddev->ro)
1721                                 set_disk_ro(disk, 0);
1722                         blk_queue_make_request(mddev->queue, md_fail_request);
1723                         mddev->pers->stop(mddev);
1724                         module_put(mddev->pers->owner);
1725                         mddev->pers = NULL;
1726                         if (mddev->ro)
1727                                 mddev->ro = 0;
1728                 }
1729                 if (!mddev->in_sync) {
1730                         /* mark array as cleanly shut down */
1731                         mddev->in_sync = 1;
1732                         md_update_sb(mddev);
1733                 }
1734                 if (ro)
1735                         set_disk_ro(disk, 1);
1736         }
1737         /*
1738          * Free resources if final stop
1739          */
1740         if (!ro) {
1741                 struct gendisk *disk;
1742                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
1743
1744                 export_array(mddev);
1745
1746                 mddev->array_size = 0;
1747                 disk = mddev->gendisk;
1748                 if (disk)
1749                         set_capacity(disk, 0);
1750                 mddev->changed = 1;
1751         } else
1752                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
1753                         mdname(mddev));
1754         err = 0;
1755 out:
1756         return err;
1757 }
1758
1759 static void autorun_array(mddev_t *mddev)
1760 {
1761         mdk_rdev_t *rdev;
1762         struct list_head *tmp;
1763         int err;
1764
1765         if (list_empty(&mddev->disks)) {
1766                 MD_BUG();
1767                 return;
1768         }
1769
1770         printk(KERN_INFO "md: running: ");
1771
1772         ITERATE_RDEV(mddev,rdev,tmp) {
1773                 char b[BDEVNAME_SIZE];
1774                 printk("<%s>", bdevname(rdev->bdev,b));
1775         }
1776         printk("\n");
1777
1778         err = do_md_run (mddev);
1779         if (err) {
1780                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1781                 do_md_stop (mddev, 0);
1782         }
1783 }
1784
1785 /*
1786  * Let's try to run arrays based on all disks that have arrived
1787  * so far (those are in pending_raid_disks).
1788  *
1789  * the method: pick the first pending disk, collect all disks with
1790  * the same UUID, remove all from the pending list and put them into
1791  * the 'same_array' list. Then order this list based on superblock
1792  * update time (freshest comes first), kick out 'old' disks and
1793  * compare superblocks. If everything's fine then run it.
1794  *
1795  * If "unit" is allocated, then bump its reference count
1796  */
1797 static void autorun_devices(int part)
1798 {
1799         struct list_head candidates;
1800         struct list_head *tmp;
1801         mdk_rdev_t *rdev0, *rdev;
1802         mddev_t *mddev;
1803         char b[BDEVNAME_SIZE];
1804
1805         printk(KERN_INFO "md: autorun ...\n");
1806         while (!list_empty(&pending_raid_disks)) {
1807                 dev_t dev;
1808                 rdev0 = list_entry(pending_raid_disks.next,
1809                                          mdk_rdev_t, same_set);
1810
1811                 printk(KERN_INFO "md: considering %s ...\n",
1812                         bdevname(rdev0->bdev,b));
1813                 INIT_LIST_HEAD(&candidates);
1814                 ITERATE_RDEV_PENDING(rdev,tmp)
1815                         if (super_90_load(rdev, rdev0, 0) >= 0) {
1816                                 printk(KERN_INFO "md:  adding %s ...\n",
1817                                         bdevname(rdev->bdev,b));
1818                                 list_move(&rdev->same_set, &candidates);
1819                         }
1820                 /*
1821                  * now we have a set of devices, with all of them having
1822                  * mostly sane superblocks. It's time to allocate the
1823                  * mddev.
1824                  */
1825                 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
1826                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
1827                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
1828                         break;
1829                 }
1830                 if (part)
1831                         dev = MKDEV(mdp_major,
1832                                     rdev0->preferred_minor << MdpMinorShift);
1833                 else
1834                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
1835
1836                 md_probe(dev, NULL, NULL);
1837                 mddev = mddev_find(dev);
1838                 if (!mddev) {
1839                         printk(KERN_ERR 
1840                                 "md: cannot allocate memory for md drive.\n");
1841                         break;
1842                 }
1843                 if (mddev_lock(mddev)) 
1844                         printk(KERN_WARNING "md: %s locked, cannot run\n",
1845                                mdname(mddev));
1846                 else if (mddev->raid_disks || mddev->major_version
1847                          || !list_empty(&mddev->disks)) {
1848                         printk(KERN_WARNING 
1849                                 "md: %s already running, cannot run %s\n",
1850                                 mdname(mddev), bdevname(rdev0->bdev,b));
1851                         mddev_unlock(mddev);
1852                 } else {
1853                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
1854                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1855                                 list_del_init(&rdev->same_set);
1856                                 if (bind_rdev_to_array(rdev, mddev))
1857                                         export_rdev(rdev);
1858                         }
1859                         autorun_array(mddev);
1860                         mddev_unlock(mddev);
1861                 }
1862                 /* on success, candidates will be empty, on error
1863                  * it won't be...
1864                  */
1865                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1866                         export_rdev(rdev);
1867                 mddev_put(mddev);
1868         }
1869         printk(KERN_INFO "md: ... autorun DONE.\n");
1870 }
1871
1872 /*
1873  * Import RAID devices based on one partition;
1874  * if possible, the array gets run as well.
1875  */
1876
1877 static int autostart_array(dev_t startdev)
1878 {
1879         char b[BDEVNAME_SIZE];
1880         int err = -EINVAL, i;
1881         mdp_super_t *sb = NULL;
1882         mdk_rdev_t *start_rdev = NULL, *rdev;
1883
1884         start_rdev = md_import_device(startdev, 0, 0);
1885         if (IS_ERR(start_rdev))
1886                 return err;
1887
1888
1889         /* NOTE: this can only work for 0.90.0 superblocks */
1890         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
1891         if (sb->major_version != 0 ||
1892             sb->minor_version != 90 ) {
1893                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
1894                 export_rdev(start_rdev);
1895                 return err;
1896         }
1897
1898         if (start_rdev->faulty) {
1899                 printk(KERN_WARNING 
1900                         "md: can not autostart based on faulty %s!\n",
1901                         bdevname(start_rdev->bdev,b));
1902                 export_rdev(start_rdev);
1903                 return err;
1904         }
1905         list_add(&start_rdev->same_set, &pending_raid_disks);
1906
1907         for (i = 0; i < MD_SB_DISKS; i++) {
1908                 mdp_disk_t *desc = sb->disks + i;
1909                 dev_t dev = MKDEV(desc->major, desc->minor);
1910
1911                 if (!dev)
1912                         continue;
1913                 if (dev == startdev)
1914                         continue;
1915                 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
1916                         continue;
1917                 rdev = md_import_device(dev, 0, 0);
1918                 if (IS_ERR(rdev))
1919                         continue;
1920
1921                 list_add(&rdev->same_set, &pending_raid_disks);
1922         }
1923
1924         /*
1925          * possibly return error codes from autorun_devices() here
1926          */
1927         autorun_devices(0);
1928         return 0;
1929
1930 }
1931
1932
1933 static int get_version(void __user * arg)
1934 {
1935         mdu_version_t ver;
1936
1937         ver.major = MD_MAJOR_VERSION;
1938         ver.minor = MD_MINOR_VERSION;
1939         ver.patchlevel = MD_PATCHLEVEL_VERSION;
1940
1941         if (copy_to_user(arg, &ver, sizeof(ver)))
1942                 return -EFAULT;
1943
1944         return 0;
1945 }
1946
1947 static int get_array_info(mddev_t * mddev, void __user * arg)
1948 {
1949         mdu_array_info_t info;
1950         int nr,working,active,failed,spare;
1951         mdk_rdev_t *rdev;
1952         struct list_head *tmp;
1953
1954         nr=working=active=failed=spare=0;
1955         ITERATE_RDEV(mddev,rdev,tmp) {
1956                 nr++;
1957                 if (rdev->faulty)
1958                         failed++;
1959                 else {
1960                         working++;
1961                         if (rdev->in_sync)
1962                                 active++;       
1963                         else
1964                                 spare++;
1965                 }
1966         }
1967
1968         info.major_version = mddev->major_version;
1969         info.minor_version = mddev->minor_version;
1970         info.patch_version = MD_PATCHLEVEL_VERSION;
1971         info.ctime         = mddev->ctime;
1972         info.level         = mddev->level;
1973         info.size          = mddev->size;
1974         info.nr_disks      = nr;
1975         info.raid_disks    = mddev->raid_disks;
1976         info.md_minor      = mddev->md_minor;
1977         info.not_persistent= !mddev->persistent;
1978
1979         info.utime         = mddev->utime;
1980         info.state         = 0;
1981         if (mddev->in_sync)
1982                 info.state = (1<<MD_SB_CLEAN);
1983         info.active_disks  = active;
1984         info.working_disks = working;
1985         info.failed_disks  = failed;
1986         info.spare_disks   = spare;
1987
1988         info.layout        = mddev->layout;
1989         info.chunk_size    = mddev->chunk_size;
1990
1991         if (copy_to_user(arg, &info, sizeof(info)))
1992                 return -EFAULT;
1993
1994         return 0;
1995 }
1996
1997 static int get_disk_info(mddev_t * mddev, void __user * arg)
1998 {
1999         mdu_disk_info_t info;
2000         unsigned int nr;
2001         mdk_rdev_t *rdev;
2002
2003         if (copy_from_user(&info, arg, sizeof(info)))
2004                 return -EFAULT;
2005
2006         nr = info.number;
2007
2008         rdev = find_rdev_nr(mddev, nr);
2009         if (rdev) {
2010                 info.major = MAJOR(rdev->bdev->bd_dev);
2011                 info.minor = MINOR(rdev->bdev->bd_dev);
2012                 info.raid_disk = rdev->raid_disk;
2013                 info.state = 0;
2014                 if (rdev->faulty)
2015                         info.state |= (1<<MD_DISK_FAULTY);
2016                 else if (rdev->in_sync) {
2017                         info.state |= (1<<MD_DISK_ACTIVE);
2018                         info.state |= (1<<MD_DISK_SYNC);
2019                 }
2020         } else {
2021                 info.major = info.minor = 0;
2022                 info.raid_disk = -1;
2023                 info.state = (1<<MD_DISK_REMOVED);
2024         }
2025
2026         if (copy_to_user(arg, &info, sizeof(info)))
2027                 return -EFAULT;
2028
2029         return 0;
2030 }
2031
2032 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2033 {
2034         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2035         mdk_rdev_t *rdev;
2036         dev_t dev = MKDEV(info->major,info->minor);
2037
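        /* reject major/minor numbers that do not survive the MKDEV()
         * round trip, i.e. values too large to fit in a dev_t
         */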
2038         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2039                 return -EOVERFLOW;
2040
2041         if (!mddev->raid_disks) {
2042                 int err;
2043                 /* expecting a device which has a superblock */
2044                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2045                 if (IS_ERR(rdev)) {
2046                         printk(KERN_WARNING 
2047                                 "md: md_import_device returned %ld\n",
2048                                 PTR_ERR(rdev));
2049                         return PTR_ERR(rdev);
2050                 }
2051                 if (!list_empty(&mddev->disks)) {
2052                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2053                                                         mdk_rdev_t, same_set);
2054                         int err = super_types[mddev->major_version]
2055                                 .load_super(rdev, rdev0, mddev->minor_version);
2056                         if (err < 0) {
2057                                 printk(KERN_WARNING 
2058                                         "md: %s has different UUID to %s\n",
2059                                         bdevname(rdev->bdev,b), 
2060                                         bdevname(rdev0->bdev,b2));
2061                                 export_rdev(rdev);
2062                                 return -EINVAL;
2063                         }
2064                 }
2065                 err = bind_rdev_to_array(rdev, mddev);
2066                 if (err)
2067                         export_rdev(rdev);
2068                 return err;
2069         }
2070
2071         /*
2072          * add_new_disk can be used once the array is assembled
2073          * to add "hot spares".  They must already have a superblock
2074          * written
2075          */
2076         if (mddev->pers) {
2077                 int err;
2078                 if (!mddev->pers->hot_add_disk) {
2079                         printk(KERN_WARNING 
2080                                 "%s: personality does not support diskops!\n",
2081                                mdname(mddev));
2082                         return -EINVAL;
2083                 }
2084                 rdev = md_import_device(dev, mddev->major_version,
2085                                         mddev->minor_version);
2086                 if (IS_ERR(rdev)) {
2087                         printk(KERN_WARNING 
2088                                 "md: md_import_device returned %ld\n",
2089                                 PTR_ERR(rdev));
2090                         return PTR_ERR(rdev);
2091                 }
2092                 rdev->in_sync = 0; /* just to be sure */
2093                 rdev->raid_disk = -1;
2094                 err = bind_rdev_to_array(rdev, mddev);
2095                 if (err)
2096                         export_rdev(rdev);
2097                 if (mddev->thread)
2098                         md_wakeup_thread(mddev->thread);
2099                 return err;
2100         }
2101
2102         /* otherwise, add_new_disk is only allowed
2103          * for major_version==0 superblocks
2104          */
2105         if (mddev->major_version != 0) {
2106                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2107                        mdname(mddev));
2108                 return -EINVAL;
2109         }
2110
2111         if (!(info->state & (1<<MD_DISK_FAULTY))) {
2112                 int err;
2113                 rdev = md_import_device (dev, -1, 0);
2114                 if (IS_ERR(rdev)) {
2115                         printk(KERN_WARNING 
2116                                 "md: error, md_import_device() returned %ld\n",
2117                                 PTR_ERR(rdev));
2118                         return PTR_ERR(rdev);
2119                 }
2120                 rdev->desc_nr = info->number;
2121                 if (info->raid_disk < mddev->raid_disks)
2122                         rdev->raid_disk = info->raid_disk;
2123                 else
2124                         rdev->raid_disk = -1;
2125
2126                 rdev->faulty = 0;
2127                 if (rdev->raid_disk < mddev->raid_disks)
2128                         rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2129                 else
2130                         rdev->in_sync = 0;
2131
2132                 err = bind_rdev_to_array(rdev, mddev);
2133                 if (err) {
2134                         export_rdev(rdev);
2135                         return err;
2136                 }
2137
2138                 if (!mddev->persistent) {
2139                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
2140                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2141                 } else 
2142                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2143                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2144
2145                 if (!mddev->size || (mddev->size > rdev->size))
2146                         mddev->size = rdev->size;
2147         }
2148
2149         return 0;
2150 }
2151
2152 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2153 {
2154         char b[BDEVNAME_SIZE];
2155         mdk_rdev_t *rdev;
2156
2157         if (!mddev->pers)
2158                 return -ENODEV;
2159
2160         rdev = find_rdev(mddev, dev);
2161         if (!rdev)
2162                 return -ENXIO;
2163
2164         if (rdev->raid_disk >= 0)
2165                 goto busy;
2166
2167         kick_rdev_from_array(rdev);
2168         md_update_sb(mddev);
2169
2170         return 0;
2171 busy:
2172         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
2173                 bdevname(rdev->bdev,b), mdname(mddev));
2174         return -EBUSY;
2175 }
2176
2177 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2178 {
2179         char b[BDEVNAME_SIZE];
2180         int err;
2181         unsigned int size;
2182         mdk_rdev_t *rdev;
2183
2184         if (!mddev->pers)
2185                 return -ENODEV;
2186
2187         if (mddev->major_version != 0) {
2188                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2189                         " version-0 superblocks.\n",
2190                         mdname(mddev));
2191                 return -EINVAL;
2192         }
2193         if (!mddev->pers->hot_add_disk) {
2194                 printk(KERN_WARNING 
2195                         "%s: personality does not support diskops!\n",
2196                         mdname(mddev));
2197                 return -EINVAL;
2198         }
2199
2200         rdev = md_import_device (dev, -1, 0);
2201         if (IS_ERR(rdev)) {
2202                 printk(KERN_WARNING 
2203                         "md: error, md_import_device() returned %ld\n",
2204                         PTR_ERR(rdev));
2205                 return -EINVAL;
2206         }
2207
2208         if (mddev->persistent)
2209                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2210         else
2211                 rdev->sb_offset =
2212                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2213
2214         size = calc_dev_size(rdev, mddev->chunk_size);
2215         rdev->size = size;
2216
2217         if (size < mddev->size) {
2218                 printk(KERN_WARNING 
2219                         "%s: disk size %llu blocks < array size %llu\n",
2220                         mdname(mddev), (unsigned long long)size,
2221                         (unsigned long long)mddev->size);
2222                 err = -ENOSPC;
2223                 goto abort_export;
2224         }
2225
2226         if (rdev->faulty) {
2227                 printk(KERN_WARNING 
2228                         "md: can not hot-add faulty %s disk to %s!\n",
2229                         bdevname(rdev->bdev,b), mdname(mddev));
2230                 err = -EINVAL;
2231                 goto abort_export;
2232         }
2233         rdev->in_sync = 0;
2234         rdev->desc_nr = -1;
2235         bind_rdev_to_array(rdev, mddev);
2236
2237         /*
2238          * The rest had better be atomic; disk failures can be
2239          * noticed in interrupt context ...
2240          */
2241
2242         if (rdev->desc_nr == mddev->max_disks) {
2243                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2244                         mdname(mddev));
2245                 err = -EBUSY;
2246                 goto abort_unbind_export;
2247         }
2248
2249         rdev->raid_disk = -1;
2250
2251         md_update_sb(mddev);
2252
2253         /*
2254          * Kick recovery, maybe this spare has to be added to the
2255          * array immediately.
2256          */
2257         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2258         md_wakeup_thread(mddev->thread);
2259
2260         return 0;
2261
2262 abort_unbind_export:
2263         unbind_rdev_from_array(rdev);
2264
2265 abort_export:
2266         export_rdev(rdev);
2267         return err;
2268 }
2269
2270 /*
2271  * set_array_info is used in two different ways.
2272  * The original usage is when creating a new array.
2273  * In this usage, raid_disks is > 0 and it, together with
2274  *  level, size, not_persistent, layout and chunksize, determines the
2275  *  shape of the array.
2276  *  This will always create an array with a type-0.90.0 superblock.
2277  * The newer usage is when assembling an array.
2278  *  In this case raid_disks will be 0, and the major_version field is
2279  *  used to determine which style super-blocks are to be found on the devices.
2280  *  The minor and patch _version numbers are also kept in case the
2281  *  super_block handler wishes to interpret them.
2282  */
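/*
 * Illustrative userspace sketch (documentation only, not part of this
 * driver; 'fd' is assumed to be an open descriptor on the md device node):
 *
 *      mdu_array_info_t info = { 0 };
 *      info.major_version = 0;
 *      info.minor_version = 90;
 *      ioctl(fd, SET_ARRAY_INFO, &info);   // assemble: raid_disks == 0
 *
 * Creating a new array instead passes raid_disks > 0 together with level,
 * size, layout and chunk_size, and always yields a 0.90.0 superblock.
 */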
2283 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2284 {
2285
2286         if (info->raid_disks == 0) {
2287                 /* just setting version number for superblock loading */
2288                 if (info->major_version < 0 ||
2289                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2290                     super_types[info->major_version].name == NULL) {
2291                         /* maybe try to auto-load a module? */
2292                         printk(KERN_INFO 
2293                                 "md: superblock version %d not known\n",
2294                                 info->major_version);
2295                         return -EINVAL;
2296                 }
2297                 mddev->major_version = info->major_version;
2298                 mddev->minor_version = info->minor_version;
2299                 mddev->patch_version = info->patch_version;
2300                 return 0;
2301         }
2302         mddev->major_version = MD_MAJOR_VERSION;
2303         mddev->minor_version = MD_MINOR_VERSION;
2304         mddev->patch_version = MD_PATCHLEVEL_VERSION;
2305         mddev->ctime         = get_seconds();
2306
2307         mddev->level         = info->level;
2308         mddev->size          = info->size;
2309         mddev->raid_disks    = info->raid_disks;
2310         /* don't set md_minor, it is determined by which /dev/md* was
2311          * opened
2312          */
2313         if (info->state & (1<<MD_SB_CLEAN))
2314                 mddev->recovery_cp = MaxSector;
2315         else
2316                 mddev->recovery_cp = 0;
2317         mddev->persistent    = ! info->not_persistent;
2318
2319         mddev->layout        = info->layout;
2320         mddev->chunk_size    = info->chunk_size;
2321
2322         mddev->max_disks     = MD_SB_DISKS;
2323
2324         mddev->sb_dirty      = 1;
2325
2326         /*
2327          * Generate a 128 bit UUID
2328          */
2329         get_random_bytes(mddev->uuid, 16);
2330
2331         return 0;
2332 }
2333
2334 /*
2335  * update_array_info is used to change the configuration of an
2336  * on-line array.
2337  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
2338  * fields in the info are checked against the array.
2339  * Any differences that cannot be handled will cause an error.
2340  * Normally, only one change can be managed at a time.
2341  */
2342 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2343 {
2344         int rv = 0;
2345         int cnt = 0;
2346
2347         if (mddev->major_version != info->major_version ||
2348             mddev->minor_version != info->minor_version ||
2349 /*          mddev->patch_version != info->patch_version || */
2350             mddev->ctime         != info->ctime         ||
2351             mddev->level         != info->level         ||
2352 /*          mddev->layout        != info->layout        || */
2353             !mddev->persistent   != info->not_persistent||
2354             mddev->chunk_size    != info->chunk_size    )
2355                 return -EINVAL;
2356         /* Check there is only one change */
2357         if (mddev->size != info->size) cnt++;
2358         if (mddev->raid_disks != info->raid_disks) cnt++;
2359         if (mddev->layout != info->layout) cnt++;
2360         if (cnt == 0) return 0;
2361         if (cnt > 1) return -EINVAL;
2362
2363         if (mddev->layout != info->layout) {
2364                 /* Change layout
2365                  * we don't need to do anything at the md level, the
2366                  * personality will take care of it all.
2367                  */
2368                 if (mddev->pers->reconfig == NULL)
2369                         return -EINVAL;
2370                 else
2371                         return mddev->pers->reconfig(mddev, info->layout, -1);
2372         }
2373         if (mddev->size != info->size) {
2374                 mdk_rdev_t * rdev;
2375                 struct list_head *tmp;
2376                 if (mddev->pers->resize == NULL)
2377                         return -EINVAL;
2378                 /* The "size" is the amount of each device that is used.
2379                  * This can only make sense for arrays with redundancy.
2380                  * linear and raid0 always use whatever space is available.
2381                  * We can only consider changing the size if no resync
2382                  * or reconstruction is happening, and if the new size
2383                  * is acceptable. It must fit before the sb_offset or,
2384                  * if that is <data_offset, it must fit before the
2385                  * size of each device.
2386                  * If size is zero, we find the largest size that fits.
2387                  */
2388                 if (mddev->sync_thread)
2389                         return -EBUSY;
2390                 ITERATE_RDEV(mddev,rdev,tmp) {
2391                         sector_t avail;
2392                         int fit = (info->size == 0);
2393                         if (rdev->sb_offset > rdev->data_offset)
2394                                 avail = (rdev->sb_offset*2) - rdev->data_offset;
2395                         else
2396                                 avail = get_capacity(rdev->bdev->bd_disk)
2397                                         - rdev->data_offset;
2398                         if (fit && (info->size == 0 || info->size > avail/2))
2399                                 info->size = avail/2;
2400                         if (avail < ((sector_t)info->size << 1))
2401                                 return -ENOSPC;
2402                 }
2403                 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
2404                 if (!rv) {
2405                         struct block_device *bdev;
2406
2407                         bdev = bdget_disk(mddev->gendisk, 0);
2408                         if (bdev) {
2409                                 down(&bdev->bd_inode->i_sem);
2410                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2411                                 up(&bdev->bd_inode->i_sem);
2412                                 bdput(bdev);
2413                         }
2414                 }
2415         }
2416         if (mddev->raid_disks    != info->raid_disks) {
2417                 /* change the number of raid disks */
2418                 if (mddev->pers->reshape == NULL)
2419                         return -EINVAL;
2420                 if (info->raid_disks <= 0 ||
2421                     info->raid_disks >= mddev->max_disks)
2422                         return -EINVAL;
2423                 if (mddev->sync_thread)
2424                         return -EBUSY;
2425                 rv = mddev->pers->reshape(mddev, info->raid_disks);
2426                 if (!rv) {
2427                         struct block_device *bdev;
2428
2429                         bdev = bdget_disk(mddev->gendisk, 0);
2430                         if (bdev) {
2431                                 down(&bdev->bd_inode->i_sem);
2432                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2433                                 up(&bdev->bd_inode->i_sem);
2434                                 bdput(bdev);
2435                         }
2436                 }
2437         }
2438         md_update_sb(mddev);
2439         return rv;
2440 }
2441
2442 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2443 {
2444         mdk_rdev_t *rdev;
2445
2446         if (mddev->pers == NULL)
2447                 return -ENODEV;
2448
2449         rdev = find_rdev(mddev, dev);
2450         if (!rdev)
2451                 return -ENODEV;
2452
2453         md_error(mddev, rdev);
2454         return 0;
2455 }
2456
2457 static int md_ioctl(struct inode *inode, struct file *file,
2458                         unsigned int cmd, unsigned long arg)
2459 {
2460         int err = 0;
2461         void __user *argp = (void __user *)arg;
2462         struct hd_geometry __user *loc = argp;
2463         mddev_t *mddev = NULL;
2464
2465         if (!capable(CAP_SYS_ADMIN))
2466                 return -EACCES;
2467
2468         /*
2469          * Commands dealing with the RAID driver but not any
2470          * particular array:
2471          */
2472         switch (cmd)
2473         {
2474                 case RAID_VERSION:
2475                         err = get_version(argp);
2476                         goto done;
2477
2478                 case PRINT_RAID_DEBUG:
2479                         err = 0;
2480                         md_print_devices();
2481                         goto done;
2482
2483 #ifndef MODULE
2484                 case RAID_AUTORUN:
2485                         err = 0;
2486                         autostart_arrays(arg);
2487                         goto done;
2488 #endif
2489                 default:;
2490         }
2491
2492         /*
2493          * Commands creating/starting a new array:
2494          */
2495
2496         mddev = inode->i_bdev->bd_disk->private_data;
2497
2498         if (!mddev) {
2499                 BUG();
2500                 goto abort;
2501         }
2502
2503
2504         if (cmd == START_ARRAY) {
2505                 /* START_ARRAY doesn't need to lock the array as autostart_array
2506                  * does the locking, and it could even be a different array
2507                  */
2508                 static int cnt = 3;
2509                 if (cnt > 0 ) {
2510                         printk(KERN_WARNING
2511                                "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
2512                                "This will not be supported beyond 2.6\n",
2513                                current->comm, current->pid);
2514                         cnt--;
2515                 }
2516                 err = autostart_array(new_decode_dev(arg));
2517                 if (err) {
2518                         printk(KERN_WARNING "md: autostart failed!\n");
2519                         goto abort;
2520                 }
2521                 goto done;
2522         }
2523
2524         err = mddev_lock(mddev);
2525         if (err) {
2526                 printk(KERN_INFO 
2527                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
2528                         err, cmd);
2529                 goto abort;
2530         }
2531
2532         switch (cmd)
2533         {
2534                 case SET_ARRAY_INFO:
2535                         {
2536                                 mdu_array_info_t info;
2537                                 if (!arg)
2538                                         memset(&info, 0, sizeof(info));
2539                                 else if (copy_from_user(&info, argp, sizeof(info))) {
2540                                         err = -EFAULT;
2541                                         goto abort_unlock;
2542                                 }
2543                                 if (mddev->pers) {
2544                                         err = update_array_info(mddev, &info);
2545                                         if (err) {
2546                                                 printk(KERN_WARNING "md: couldn't update"
2547                                                        " array info. %d\n", err);
2548                                                 goto abort_unlock;
2549                                         }
2550                                         goto done_unlock;
2551                                 }
2552                                 if (!list_empty(&mddev->disks)) {
2553                                         printk(KERN_WARNING
2554                                                "md: array %s already has disks!\n",
2555                                                mdname(mddev));
2556                                         err = -EBUSY;
2557                                         goto abort_unlock;
2558                                 }
2559                                 if (mddev->raid_disks) {
2560                                         printk(KERN_WARNING
2561                                                "md: array %s already initialised!\n",
2562                                                mdname(mddev));
2563                                         err = -EBUSY;
2564                                         goto abort_unlock;
2565                                 }
2566                                 err = set_array_info(mddev, &info);
2567                                 if (err) {
2568                                         printk(KERN_WARNING "md: couldn't set"
2569                                                " array info. %d\n", err);
2570                                         goto abort_unlock;
2571                                 }
2572                         }
2573                         goto done_unlock;
2574
2575                 default:;
2576         }
2577
2578         /*
2579          * Commands querying/configuring an existing array:
2580          */
2581         /* if we are not yet initialised, only ADD_NEW_DISK, STOP_ARRAY or RUN_ARRAY is allowed */
2582         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
2583                 err = -ENODEV;
2584                 goto abort_unlock;
2585         }
2586
2587         /*
2588          * Commands even a read-only array can execute:
2589          */
2590         switch (cmd)
2591         {
2592                 case GET_ARRAY_INFO:
2593                         err = get_array_info(mddev, argp);
2594                         goto done_unlock;
2595
2596                 case GET_DISK_INFO:
2597                         err = get_disk_info(mddev, argp);
2598                         goto done_unlock;
2599
2600                 case RESTART_ARRAY_RW:
2601                         err = restart_array(mddev);
2602                         goto done_unlock;
2603
2604                 case STOP_ARRAY:
2605                         err = do_md_stop (mddev, 0);
2606                         goto done_unlock;
2607
2608                 case STOP_ARRAY_RO:
2609                         err = do_md_stop (mddev, 1);
2610                         goto done_unlock;
2611
2612         /*
2613          * We have a problem here: there is no easy way to give a CHS
2614          * virtual geometry. We currently pretend that we have 2 heads and
2615          * 4 sectors (with a BIG number of cylinders...). This drives
2616          * dosfs just mad... ;-)
2617          */
2618                 case HDIO_GETGEO:
2619                         if (!loc) {
2620                                 err = -EINVAL;
2621                                 goto abort_unlock;
2622                         }
2623                         err = put_user (2, (char __user *) &loc->heads);
2624                         if (err)
2625                                 goto abort_unlock;
2626                         err = put_user (4, (char __user *) &loc->sectors);
2627                         if (err)
2628                                 goto abort_unlock;
2629                         err = put_user(get_capacity(mddev->gendisk)/8,
2630                                         (short __user *) &loc->cylinders);
2631                         if (err)
2632                                 goto abort_unlock;
2633                         err = put_user (get_start_sect(inode->i_bdev),
2634                                                 (long __user *) &loc->start);
2635                         goto done_unlock;
2636         }
2637
2638         /*
2639          * The remaining ioctls are changing the state of the
2640          * superblock, so we do not allow read-only arrays
2641          * here:
2642          */
2643         if (mddev->ro) {
2644                 err = -EROFS;
2645                 goto abort_unlock;
2646         }
2647
2648         switch (cmd)
2649         {
2650                 case ADD_NEW_DISK:
2651                 {
2652                         mdu_disk_info_t info;
2653                         if (copy_from_user(&info, argp, sizeof(info)))
2654                                 err = -EFAULT;
2655                         else
2656                                 err = add_new_disk(mddev, &info);
2657                         goto done_unlock;
2658                 }
2659
2660                 case HOT_REMOVE_DISK:
2661                         err = hot_remove_disk(mddev, new_decode_dev(arg));
2662                         goto done_unlock;
2663
2664                 case HOT_ADD_DISK:
2665                         err = hot_add_disk(mddev, new_decode_dev(arg));
2666                         goto done_unlock;
2667
2668                 case SET_DISK_FAULTY:
2669                         err = set_disk_faulty(mddev, new_decode_dev(arg));
2670                         goto done_unlock;
2671
2672                 case RUN_ARRAY:
2673                         err = do_md_run (mddev);
2674                         goto done_unlock;
2675
2676                 default:
2677                         if (_IOC_TYPE(cmd) == MD_MAJOR)
2678                                 printk(KERN_WARNING "md: %s(pid %d) used"
2679                                         " obsolete MD ioctl, upgrade your"
2680                                         " software to use new ioctls.\n",
2681                                         current->comm, current->pid);
2682                         err = -EINVAL;
2683                         goto abort_unlock;
2684         }
2685
2686 done_unlock:
2687 abort_unlock:
2688         mddev_unlock(mddev);
2689
2690         return err;
2691 done:
2692         if (err)
2693                 MD_BUG();
2694 abort:
2695         return err;
2696 }
2697
2698 static int md_open(struct inode *inode, struct file *file)
2699 {
2700         /*
2701          * Succeed if we can lock the mddev, which confirms that
2702          * it isn't being stopped right now.
2703          */
2704         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2705         int err;
2706
2707         if ((err = mddev_lock(mddev)))
2708                 goto out;
2709
2710         err = 0;
2711         mddev_get(mddev);
2712         mddev_unlock(mddev);
2713
2714         check_disk_change(inode->i_bdev);
2715  out:
2716         return err;
2717 }
2718
2719 static int md_release(struct inode *inode, struct file * file)
2720 {
2721         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2722
2723         if (!mddev)
2724                 BUG();
2725         mddev_put(mddev);
2726
2727         return 0;
2728 }
2729
2730 static int md_media_changed(struct gendisk *disk)
2731 {
2732         mddev_t *mddev = disk->private_data;
2733
2734         return mddev->changed;
2735 }
2736
2737 static int md_revalidate(struct gendisk *disk)
2738 {
2739         mddev_t *mddev = disk->private_data;
2740
2741         mddev->changed = 0;
2742         return 0;
2743 }
2744 static struct block_device_operations md_fops =
2745 {
2746         .owner          = THIS_MODULE,
2747         .open           = md_open,
2748         .release        = md_release,
2749         .ioctl          = md_ioctl,
2750         .media_changed  = md_media_changed,
2751         .revalidate_disk= md_revalidate,
2752 };
2753
2754 int md_thread(void * arg)
2755 {
2756         mdk_thread_t *thread = arg;
2757
2758         lock_kernel();
2759
2760         /*
2761          * Detach thread
2762          */
2763
2764         daemonize(thread->name, mdname(thread->mddev));
2765
2766         current->exit_signal = SIGCHLD;
2767         allow_signal(SIGKILL);
2768         thread->tsk = current;
2769
2770         /*
2771          * md_thread is a 'system-thread'; its priority should be very
2772          * high. We avoid resource deadlocks individually in each
2773          * raid personality. (RAID5 does preallocation.) We also use RR and
2774          * the very same RT priority as kswapd, thus we will never get
2775          * into a priority inversion deadlock.
2776          *
2777          * We definitely have to have equal or higher priority than
2778          * bdflush, otherwise bdflush will deadlock if there are too
2779          * many dirty RAID5 blocks.
2780          */
2781         unlock_kernel();
2782
2783         complete(thread->event);
2784         while (thread->run) {
2785                 void (*run)(mddev_t *);
2786
2787                 wait_event_interruptible(thread->wqueue,
2788                                          test_bit(THREAD_WAKEUP, &thread->flags));
2789                 if (current->flags & PF_FREEZE)
2790                         refrigerator(PF_FREEZE);
2791
2792                 clear_bit(THREAD_WAKEUP, &thread->flags);
2793
2794                 run = thread->run;
2795                 if (run)
2796                         run(thread->mddev);
2797
2798                 if (signal_pending(current))
2799                         flush_signals(current);
2800         }
2801         complete(thread->event);
2802         return 0;
2803 }
2804
2805 void md_wakeup_thread(mdk_thread_t *thread)
2806 {
2807         if (thread) {
2808                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
2809                 set_bit(THREAD_WAKEUP, &thread->flags);
2810                 wake_up(&thread->wqueue);
2811         }
2812 }
2813
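/*
 * md_register_thread() forks md_thread() via kernel_thread() and then waits
 * on the 'event' completion until the new thread has finished its setup;
 * md_unregister_thread() later installs its own completion in the same
 * field to wait for the thread to exit.
 */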
2814 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
2815                                  const char *name)
2816 {
2817         mdk_thread_t *thread;
2818         int ret;
2819         struct completion event;
2820
2821         thread = (mdk_thread_t *) kmalloc
2822                                 (sizeof(mdk_thread_t), GFP_KERNEL);
2823         if (!thread)
2824                 return NULL;
2825
2826         memset(thread, 0, sizeof(mdk_thread_t));
2827         init_waitqueue_head(&thread->wqueue);
2828
2829         init_completion(&event);
2830         thread->event = &event;
2831         thread->run = run;
2832         thread->mddev = mddev;
2833         thread->name = name;
2834         ret = kernel_thread(md_thread, thread, 0);
2835         if (ret < 0) {
2836                 kfree(thread);
2837                 return NULL;
2838         }
2839         wait_for_completion(&event);
2840         return thread;
2841 }
2842
2843 void md_unregister_thread(mdk_thread_t *thread)
2844 {
2845         struct completion event;
2846
2847         init_completion(&event);
2848
2849         thread->event = &event;
2850
2851         /* As soon as ->run is set to NULL, the task could disappear,
2852          * so we need to hold tasklist_lock until we have sent the signal
2853          */
2854         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
2855         read_lock(&tasklist_lock);
2856         thread->run = NULL;
2857         send_sig(SIGKILL, thread->tsk, 1);
2858         read_unlock(&tasklist_lock);
2859         wait_for_completion(&event);
2860         kfree(thread);
2861 }
2862
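/*
 * md_error(): report a device failure.  The personality's error_handler
 * marks the rdev faulty, then recovery is flagged and the per-array thread
 * is woken to act on it.
 */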
2863 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
2864 {
2865         if (!mddev) {
2866                 MD_BUG();
2867                 return;
2868         }
2869
2870         if (!rdev || rdev->faulty)
2871                 return;
2872
2873         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
2874                 mdname(mddev),
2875                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
2876                 __builtin_return_address(0),__builtin_return_address(1),
2877                 __builtin_return_address(2),__builtin_return_address(3));
2878
2879         if (!mddev->pers->error_handler)
2880                 return;
2881         mddev->pers->error_handler(mddev,rdev);
2882         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2883         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2884         md_wakeup_thread(mddev->thread);
2885 }
2886
2887 /* seq_file implementation for /proc/mdstat */
2888
2889 static void status_unused(struct seq_file *seq)
2890 {
2891         int i = 0;
2892         mdk_rdev_t *rdev;
2893         struct list_head *tmp;
2894
2895         seq_printf(seq, "unused devices: ");
2896
2897         ITERATE_RDEV_PENDING(rdev,tmp) {
2898                 char b[BDEVNAME_SIZE];
2899                 i++;
2900                 seq_printf(seq, "%s ",
2901                               bdevname(rdev->bdev,b));
2902         }
2903         if (!i)
2904                 seq_printf(seq, "<none>");
2905
2906         seq_printf(seq, "\n");
2907 }
2908
2909
2910 static void status_resync(struct seq_file *seq, mddev_t * mddev)
2911 {
2912         unsigned long max_blocks, resync, res, dt, db, rt;
2913
2914         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
2915
2916         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2917                 max_blocks = mddev->resync_max_sectors >> 1;
2918         else
2919                 max_blocks = mddev->size;
2920
2921         /*
2922          * Should not happen.
2923          */
2924         if (!max_blocks) {
2925                 MD_BUG();
2926                 return;
2927         }
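        /*
         * res is the resync progress in tenths of a percent (0..1000);
         * the 20-character bar below advances one '=' per 5% completed.
         */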
2928         res = (resync/1024)*1000/(max_blocks/1024 + 1);
2929         {
2930                 int i, x = res/50, y = 20-x;
2931                 seq_printf(seq, "[");
2932                 for (i = 0; i < x; i++)
2933                         seq_printf(seq, "=");
2934                 seq_printf(seq, ">");
2935                 for (i = 0; i < y; i++)
2936                         seq_printf(seq, ".");
2937                 seq_printf(seq, "] ");
2938         }
2939         seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
2940                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
2941                        "resync" : "recovery"),
2942                       res/10, res % 10, resync, max_blocks);
2943
2944         /*
2945          * We do not want to overflow, so the order of operands and
2946          * the * 100 / 100 trick are important. We do a +1 to be
2947          * safe against division by zero. We only estimate anyway.
2948          *
2949          * dt: time from mark until now
2950          * db: blocks written from mark until now
2951          * rt: remaining time
2952          */
2953         dt = ((jiffies - mddev->resync_mark) / HZ);
2954         if (!dt) dt++;
2955         db = resync - (mddev->resync_mark_cnt/2);
2956         rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
2957
2958         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
2959
2960         seq_printf(seq, " speed=%ldK/sec", db/dt);
2961 }
2962
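/*
 * The /proc/mdstat iterator uses two magic cursor values: (void *)1 stands
 * for the "Personalities :" header line and (void *)2 for the trailing
 * "unused devices:" line; every other position is an mddev taken (with a
 * reference) from the all_mddevs list.
 */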
2963 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
2964 {
2965         struct list_head *tmp;
2966         loff_t l = *pos;
2967         mddev_t *mddev;
2968
2969         if (l >= 0x10000)
2970                 return NULL;
2971         if (!l--)
2972                 /* header */
2973                 return (void*)1;
2974
2975         spin_lock(&all_mddevs_lock);
2976         list_for_each(tmp,&all_mddevs)
2977                 if (!l--) {
2978                         mddev = list_entry(tmp, mddev_t, all_mddevs);
2979                         mddev_get(mddev);
2980                         spin_unlock(&all_mddevs_lock);
2981                         return mddev;
2982                 }
2983         spin_unlock(&all_mddevs_lock);
2984         if (!l--)
2985                 return (void*)2;/* tail */
2986         return NULL;
2987 }
2988
2989 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2990 {
2991         struct list_head *tmp;
2992         mddev_t *next_mddev, *mddev = v;
2993         
2994         ++*pos;
2995         if (v == (void*)2)
2996                 return NULL;
2997
2998         spin_lock(&all_mddevs_lock);
2999         if (v == (void*)1)
3000                 tmp = all_mddevs.next;
3001         else
3002                 tmp = mddev->all_mddevs.next;
3003         if (tmp != &all_mddevs)
3004                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
3005         else {
3006                 next_mddev = (void*)2;
3007                 *pos = 0x10000;
3008         }               
3009         spin_unlock(&all_mddevs_lock);
3010
3011         if (v != (void*)1)
3012                 mddev_put(mddev);
3013         return next_mddev;
3014
3015 }
3016
3017 static void md_seq_stop(struct seq_file *seq, void *v)
3018 {
3019         mddev_t *mddev = v;
3020
3021         if (mddev && v != (void*)1 && v != (void*)2)
3022                 mddev_put(mddev);
3023 }
3024
3025 static int md_seq_show(struct seq_file *seq, void *v)
3026 {
3027         mddev_t *mddev = v;
3028         sector_t size;
3029         struct list_head *tmp2;
3030         mdk_rdev_t *rdev;
3031         int i;
3032
3033         if (v == (void*)1) {
3034                 seq_printf(seq, "Personalities : ");
3035                 spin_lock(&pers_lock);
3036                 for (i = 0; i < MAX_PERSONALITY; i++)
3037                         if (pers[i])
3038                                 seq_printf(seq, "[%s] ", pers[i]->name);
3039
3040                 spin_unlock(&pers_lock);
3041                 seq_printf(seq, "\n");
3042                 return 0;
3043         }
3044         if (v == (void*)2) {
3045                 status_unused(seq);
3046                 return 0;
3047         }
3048
3049         if (mddev_lock(mddev)!=0) 
3050                 return -EINTR;
3051         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
3052                 seq_printf(seq, "%s : %sactive", mdname(mddev),
3053                                                 mddev->pers ? "" : "in");
3054                 if (mddev->pers) {
3055                         if (mddev->ro)
3056                                 seq_printf(seq, " (read-only)");
3057                         seq_printf(seq, " %s", mddev->pers->name);
3058                 }
3059
3060                 size = 0;
3061                 ITERATE_RDEV(mddev,rdev,tmp2) {
3062                         char b[BDEVNAME_SIZE];
3063                         seq_printf(seq, " %s[%d]",
3064                                 bdevname(rdev->bdev,b), rdev->desc_nr);
3065                         if (rdev->faulty) {
3066                                 seq_printf(seq, "(F)");
3067                                 continue;
3068                         }
3069                         size += rdev->size;
3070                 }
3071
3072                 if (!list_empty(&mddev->disks)) {
3073                         if (mddev->pers)
3074                                 seq_printf(seq, "\n      %llu blocks",
3075                                         (unsigned long long)mddev->array_size);
3076                         else
3077                                 seq_printf(seq, "\n      %llu blocks",
3078                                         (unsigned long long)size);
3079                 }
3080
3081                 if (mddev->pers) {
3082                         mddev->pers->status (seq, mddev);
3083                         seq_printf(seq, "\n      ");
3084                         if (mddev->curr_resync > 2)
3085                                 status_resync (seq, mddev);
3086                         else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
3087                                 seq_printf(seq, "       resync=DELAYED");
3088                 }
3089
3090                 seq_printf(seq, "\n");
3091         }
3092         mddev_unlock(mddev);
3093         
3094         return 0;
3095 }
3096
3097 static struct seq_operations md_seq_ops = {
3098         .start  = md_seq_start,
3099         .next   = md_seq_next,
3100         .stop   = md_seq_stop,
3101         .show   = md_seq_show,
3102 };
3103
3104 static int md_seq_open(struct inode *inode, struct file *file)
3105 {
3106         int error;
3107
3108         error = seq_open(file, &md_seq_ops);
3109         return error;
3110 }
3111
3112 static struct file_operations md_seq_fops = {
3113         .open           = md_seq_open,
3114         .read           = seq_read,
3115         .llseek         = seq_lseek,
3116         .release        = seq_release,
3117 };
3118
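/*
 * Personality modules (raid0, raid1, raid5, ...) call these from their
 * module init/exit code with their fixed personality number; pers_lock
 * only serialises updates to the pers[] table.
 */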
3119 int register_md_personality(int pnum, mdk_personality_t *p)
3120 {
3121         if (pnum >= MAX_PERSONALITY) {
3122                 printk(KERN_ERR
3123                        "md: tried to install personality %s as nr %d, but max is %lu\n",
3124                        p->name, pnum, MAX_PERSONALITY-1);
3125                 return -EINVAL;
3126         }
3127
3128         spin_lock(&pers_lock);
3129         if (pers[pnum]) {
3130                 spin_unlock(&pers_lock);
3131                 MD_BUG();
3132                 return -EBUSY;
3133         }
3134
3135         pers[pnum] = p;
3136         printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3137         spin_unlock(&pers_lock);
3138         return 0;
3139 }
3140
3141 int unregister_md_personality(int pnum)
3142 {
3143         if (pnum >= MAX_PERSONALITY) {
3144                 MD_BUG();
3145                 return -EINVAL;
3146         }
3147
3148         printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3149         spin_lock(&pers_lock);
3150         pers[pnum] = NULL;
3151         spin_unlock(&pers_lock);
3152         return 0;
3153 }
3154
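/*
 * An array counts as idle when none of its member disks has seen more
 * than a small amount of non-resync I/O since the last check; I/O done
 * for the resync itself is subtracted out via disk->sync_io.
 */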
3155 static int is_mddev_idle(mddev_t *mddev)
3156 {
3157         mdk_rdev_t * rdev;
3158         struct list_head *tmp;
3159         int idle;
3160         unsigned long curr_events;
3161
3162         idle = 1;
3163         ITERATE_RDEV(mddev,rdev,tmp) {
3164                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
3165                 curr_events = disk_stat_read(disk, read_sectors) + 
3166                                 disk_stat_read(disk, write_sectors) - 
3167                                 atomic_read(&disk->sync_io);
3168                 /* Allow some slack between the values of curr_events and last_events,
3169                  * as there are some uninteresting races.
3170                  * Note: the following is an unsigned comparison.
3171                  */
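                /* i.e. a drift of up to 32 sectors in either direction since
                 * the last check is tolerated; anything larger marks the
                 * array as non-idle and resets last_events.
                 */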
3172                 if ((curr_events - rdev->last_events + 32) > 64) {
3173                         rdev->last_events = curr_events;
3174                         idle = 0;
3175                 }
3176         }
3177         return idle;
3178 }
3179
3180 void md_done_sync(mddev_t *mddev, int blocks, int ok)
3181 {
3182         /* another "blocks" (512byte) blocks have been synced */
3183         atomic_sub(blocks, &mddev->recovery_active);
3184         wake_up(&mddev->recovery_wait);
3185         if (!ok) {
3186                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3187                 md_wakeup_thread(mddev->thread);
3188                 /* stop recovery, signal do_sync ... */
3189         }
3190 }
3191
3192
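/*
 * Called by a personality before it submits a normal write request.
 * The first writer clears in_sync, cancels the safemode timer and
 * writes the superblock out, so an unclean shutdown while writes are
 * in flight can be detected.
 */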
3193 void md_write_start(mddev_t *mddev)
3194 {
3195         if (!atomic_read(&mddev->writes_pending)) {
3196                 mddev_lock_uninterruptible(mddev);
3197                 if (mddev->in_sync) {
3198                         mddev->in_sync = 0;
3199                         del_timer(&mddev->safemode_timer);
3200                         md_update_sb(mddev);
3201                 }
3202                 atomic_inc(&mddev->writes_pending);
3203                 mddev_unlock(mddev);
3204         } else
3205                 atomic_inc(&mddev->writes_pending);
3206 }
3207
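/*
 * Called when such a write has completed.  The last writer either wakes
 * the md thread at once (safemode == 2) or arms the safemode timer so
 * the array can be marked in_sync again after a quiet period.
 */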
3208 void md_write_end(mddev_t *mddev)
3209 {
3210         if (atomic_dec_and_test(&mddev->writes_pending)) {
3211                 if (mddev->safemode == 2)
3212                         md_wakeup_thread(mddev->thread);
3213                 else
3214                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
3215         }
3216 }
3217
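/*
 * Mark the array in_sync again and update the superblock once all
 * writes have drained and recovery is complete; the safemode == 2
 * checks up front avoid taking the lock when there is clearly nothing
 * to do.  safemode == 1 is a one-shot request and is cleared here.
 */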
3218 static inline void md_enter_safemode(mddev_t *mddev)
3219 {
3220         if (!mddev->safemode) return;
3221         if (mddev->safemode == 2 &&
3222             (atomic_read(&mddev->writes_pending) || mddev->in_sync ||
3223                     mddev->recovery_cp != MaxSector))
3224                 return; /* avoid the lock */
3225         mddev_lock_uninterruptible(mddev);
3226         if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
3227             !mddev->in_sync && mddev->recovery_cp == MaxSector) {
3228                 mddev->in_sync = 1;
3229                 md_update_sb(mddev);
3230         }
3231         mddev_unlock(mddev);
3232
3233         if (mddev->safemode == 1)
3234                 mddev->safemode = 0;
3235 }
3236
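/*
 * Called from the personality's thread: a pending signal is treated as
 * a request for immediate safe mode (safemode = 2), after which we try
 * to enter safe mode.
 */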
3237 void md_handle_safemode(mddev_t *mddev)
3238 {
3239         if (signal_pending(current)) {
3240                 printk(KERN_INFO "md: %s in immediate safe mode\n",
3241                         mdname(mddev));
3242                 mddev->safemode = 2;
3243                 flush_signals(current);
3244         }
3245         md_enter_safemode(mddev);
3246 }
3247
3248
3249 DECLARE_WAIT_QUEUE_HEAD(resync_wait);
3250
3251 #define SYNC_MARKS      10
3252 #define SYNC_MARK_STEP  (3*HZ)
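/*
 * the resync speed is averaged over a sliding window of SYNC_MARKS
 * marks taken every SYNC_MARK_STEP jiffies, i.e. roughly the last 30
 * seconds with the values above.
 */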
3253 static void md_do_sync(mddev_t *mddev)
3254 {
3255         mddev_t *mddev2;
3256         unsigned int currspeed = 0,
3257                  window;
3258         sector_t max_sectors,j;
3259         unsigned long mark[SYNC_MARKS];
3260         sector_t mark_cnt[SYNC_MARKS];
3261         int last_mark,m;
3262         struct list_head *tmp;
3263         sector_t last_check;
3264
3265         /* just in case the thread restarts... */
3266         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
3267                 return;
3268
3269         /* we overload curr_resync somewhat here.
3270          * 0 == not engaged in resync at all
3271          * 2 == checking that there is no conflict with another sync
3272          * 1 == like 2, but we have yielded to allow a conflicting resync to
3273          *              commence
3274          * other == active in resync - this many blocks
3275          *
3276          * Before starting a resync we must have set curr_resync to
3277          * 2, and then checked that every "conflicting" array has curr_resync
3278          * less than ours.  When we find one that is the same or higher
3279          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
3280          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
3281          * This will mean we have to start checking from the beginning again.
3282          *
3283          */
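        /*
         * For example, if md0 and md1 share a physical disk and both reach
         * curr_resync == 2 at the same time, the one with the lower mddev
         * address yields (curr_resync = 1) and sleeps on resync_wait until
         * it is woken to re-check.
         */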
3284
3285         do {
3286                 mddev->curr_resync = 2;
3287
3288         try_again:
3289                 if (signal_pending(current)) {
3290                         flush_signals(current);
3291                         goto skip;
3292                 }
3293                 ITERATE_MDDEV(mddev2,tmp) {
3294                         printk(".");
3295                         if (mddev2 == mddev)
3296                                 continue;
3297                         if (mddev2->curr_resync && 
3298                             match_mddev_units(mddev,mddev2)) {
3299                                 DEFINE_WAIT(wq);
3300                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
3301                                         /* arbitrarily yield */
3302                                         mddev->curr_resync = 1;
3303                                         wake_up(&resync_wait);
3304                                 }
3305                                 if (mddev > mddev2 && mddev->curr_resync == 1)
3306                                         /* no need to wait here, we can wait the next
3307                                          * time 'round when curr_resync == 2
3308                                          */
3309                                         continue;
3310                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
3311                                 if (!signal_pending(current)
3312                                     && mddev2->curr_resync >= mddev->curr_resync) {
3313                                         printk(KERN_INFO "md: delaying resync of %s"
3314                                                " until %s has finished resync (they"
3315                                                " share one or more physical units)\n",
3316                                                mdname(mddev), mdname(mddev2));
3317                                         mddev_put(mddev2);
3318                                         schedule();
3319                                         finish_wait(&resync_wait, &wq);
3320                                         goto try_again;
3321                                 }
3322                                 finish_wait(&resync_wait, &wq);
3323                         }
3324                 }
3325         } while (mddev->curr_resync < 2);
3326
3327         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3328                 /* resync follows the size requested by the personality,
3329                  * which defaults to the physical size, but can be a virtual size
3330                  */
3331                 max_sectors = mddev->resync_max_sectors;
3332         else
3333                 /* recovery follows the physical size of devices */
3334                 max_sectors = mddev->size << 1;
3335
3336         printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
3337         printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
3338                 " %d KB/sec/disc.\n", sysctl_speed_limit_min);
3339         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
3340                "(but not more than %d KB/sec) for reconstruction.\n",
3341                sysctl_speed_limit_max);
3342
3343         is_mddev_idle(mddev); /* this also initializes IO event counters */
3344         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3345                 j = mddev->recovery_cp;
3346         else
3347                 j = 0;
3348         for (m = 0; m < SYNC_MARKS; m++) {
3349                 mark[m] = jiffies;
3350                 mark_cnt[m] = j;
3351         }
3352         last_mark = 0;
3353         mddev->resync_mark = mark[last_mark];
3354         mddev->resync_mark_cnt = mark_cnt[last_mark];
3355
3356         /*
3357          * Tune reconstruction:
3358          */
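        /*
         * the window is 32 pages' worth of 512-byte sectors, i.e. 256
         * sectors (a 128k window) with 4k pages.
         */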
3359         window = 32*(PAGE_SIZE/512);
3360         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
3361                 window/2,(unsigned long long) max_sectors/2);
3362
3363         atomic_set(&mddev->recovery_active, 0);
3364         init_waitqueue_head(&mddev->recovery_wait);
3365         last_check = 0;
3366
3367         if (j>2) {
3368                 printk(KERN_INFO 
3369                         "md: resuming recovery of %s from checkpoint.\n",
3370                         mdname(mddev));
3371                 mddev->curr_resync = j;
3372         }
3373
3374         while (j < max_sectors) {
3375                 int sectors;
3376
3377                 sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
3378                 if (sectors < 0) {
3379                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3380                         goto out;
3381                 }
3382                 atomic_add(sectors, &mddev->recovery_active);
3383                 j += sectors;
3384                 if (j>1) mddev->curr_resync = j;
3385
3386                 if (last_check + window > j || j == max_sectors)
3387                         continue;
3388
3389                 last_check = j;
3390
3391                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
3392                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
3393                         break;
3394
3395         repeat:
3396                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
3397                         /* step marks */
3398                         int next = (last_mark+1) % SYNC_MARKS;
3399
3400                         mddev->resync_mark = mark[next];
3401                         mddev->resync_mark_cnt = mark_cnt[next];
3402                         mark[next] = jiffies;
3403                         mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
3404                         last_mark = next;
3405                 }
3406
3407
3408                 if (signal_pending(current)) {
3409                         /*
3410                          * got a signal, exit.
3411                          */
3412                         printk(KERN_INFO 
3413                                 "md: md_do_sync() got signal ... exiting\n");
3414                         flush_signals(current);
3415                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3416                         goto out;
3417                 }
3418
3419                 /*
3420                  * this loop exits only when we are slower than
3421                  * the 'hard' speed limit, or the system was IO-idle for
3422                  * a jiffy.
3423                  * the system might be non-idle CPU-wise, but we only care
3424                  * about not overloading the IO subsystem. (things like an
3425                  * e2fsck being done on the RAID array should execute fast)
3426                  */
3427                 mddev->queue->unplug_fn(mddev->queue);
3428                 cond_resched();
3429
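                /*
                 * currspeed is in KB/sec: sectors completed since the oldest
                 * mark, halved to get KiB, divided by the elapsed whole
                 * seconds (+1 avoids dividing by zero); the final +1 keeps
                 * the result non-zero.
                 */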
3430                 currspeed = ((unsigned long)(j-mddev->resync_mark_cnt))/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
3431
3432                 if (currspeed > sysctl_speed_limit_min) {
3433                         if ((currspeed > sysctl_speed_limit_max) ||
3434                                         !is_mddev_idle(mddev)) {
3435                                 msleep_interruptible(250);
3436                                 goto repeat;
3437                         }
3438                 }
3439         }
3440         printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
3441         /*
3442          * this also signals 'finished resyncing' to md_stop
3443          */
3444  out:
3445         mddev->queue->unplug_fn(mddev->queue);
3446
3447         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
3448
3449         /* tell personality that we are finished */
3450         mddev->pers->sync_request(mddev, max_sectors, 1);
3451
3452         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3453             mddev->curr_resync > 2 &&
3454             mddev->curr_resync >= mddev->recovery_cp) {
3455                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3456                         printk(KERN_INFO 
3457                                 "md: checkpointing recovery of %s.\n",
3458                                 mdname(mddev));
3459                         mddev->recovery_cp = mddev->curr_resync;
3460                 } else
3461                         mddev->recovery_cp = MaxSector;
3462         }
3463
3464         md_enter_safemode(mddev);
3465  skip:
3466         mddev->curr_resync = 0;
3467         wake_up(&resync_wait);
3468         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
3469         md_wakeup_thread(mddev->thread);
3470 }
3471
3472
3473 /*
3474  * This routine is regularly called by all per-raid-array threads to
3475  * deal with generic issues like resync and super-block update.
3476  * Raid personalities that don't have a thread (linear/raid0) do not
3477  * need this as they never do any recovery or update the superblock.
3478  *
3479  * It does not do any resync itself, but rather "forks" off other threads
3480  * to do that as needed.
3481  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
3482  * "->recovery" and create a thread at ->sync_thread.
3483  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
3484  * and wakes up this thread which will reap the sync thread and finish up.
3485  * This thread also removes any faulty devices (with nr_pending == 0).
3486  *
3487  * The overall approach is:
3488  *  1/ if the superblock needs updating, update it.
3489  *  2/ If a recovery thread is running, don't do anything else.
3490  *  3/ If recovery has finished, clean up, possibly marking spares active.
3491  *  4/ If there are any faulty devices, remove them.
3492  *  5/ If the array is degraded, try to add spare devices
3493  *  6/ If array has spares or is not in-sync, start a resync thread.
3494  */
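/*
 * Note that mddev_trylock() is used below, so this never blocks the
 * calling thread; if the array is locked by someone else we simply
 * return and try again on the next wakeup.
 */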
3495 void md_check_recovery(mddev_t *mddev)
3496 {
3497         mdk_rdev_t *rdev;
3498         struct list_head *rtmp;
3499
3500
3501         dprintk(KERN_INFO "md: recovery thread got woken up ...\n");
3502
3503         if (mddev->ro)
3504                 return;
3505         if ( ! (
3506                 mddev->sb_dirty ||
3507                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
3508                 test_bit(MD_RECOVERY_DONE, &mddev->recovery)
3509                 ))
3510                 return;
3511         if (mddev_trylock(mddev)==0) {
3512                 int spares =0;
3513                 if (mddev->sb_dirty)
3514                         md_update_sb(mddev);
3515                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
3516                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
3517                         /* resync/recovery still happening */
3518                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3519                         goto unlock;
3520                 }
3521                 if (mddev->sync_thread) {
3522                         /* resync has finished, collect result */
3523                         md_unregister_thread(mddev->sync_thread);
3524                         mddev->sync_thread = NULL;
3525                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3526                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3527                                 /* success...*/
3528                                 /* activate any spares */
3529                                 mddev->pers->spare_active(mddev);
3530                         }
3531                         md_update_sb(mddev);
3532                         mddev->recovery = 0;
3533                         /* flag recovery needed just to double check */
3534                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3535                         goto unlock;
3536                 }
3537                 if (mddev->recovery)
3538                         /* probably just the RECOVERY_NEEDED flag */
3539                         mddev->recovery = 0;
3540
3541                 /* no recovery is running.
3542                  * remove any failed drives, then
3543                  * add spares if possible.
3544                  * Spares are also removed and re-added, to allow
3545                  * the personality to fail the re-add.
3546                  */
3547                 ITERATE_RDEV(mddev,rdev,rtmp)
3548                         if (rdev->raid_disk >= 0 &&
3549                             (rdev->faulty || ! rdev->in_sync) &&
3550                             atomic_read(&rdev->nr_pending)==0) {
3551                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
3552                                         rdev->raid_disk = -1;
3553                         }
3554
3555                 if (mddev->degraded) {
3556                         ITERATE_RDEV(mddev,rdev,rtmp)
3557                                 if (rdev->raid_disk < 0
3558                                     && !rdev->faulty) {
3559                                         if (mddev->pers->hot_add_disk(mddev,rdev))
3560                                                 spares++;
3561                                         else
3562                                                 break;
3563                                 }
3564                 }
3565
3566                 if (!spares && (mddev->recovery_cp == MaxSector )) {
3567                         /* nothing we can do ... */
3568                         goto unlock;
3569                 }
3570                 if (mddev->pers->sync_request) {
3571                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3572                         if (!spares)
3573                                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3574                         mddev->sync_thread = md_register_thread(md_do_sync,
3575                                                                 mddev,
3576                                                                 "%s_resync");
3577                         if (!mddev->sync_thread) {
3578                                 printk(KERN_ERR "%s: could not start resync"
3579                                         " thread...\n", 
3580                                         mdname(mddev));
3581                                 /* leave the spares where they are, it shouldn't hurt */
3582                                 mddev->recovery = 0;
3583                         } else {
3584                                 md_wakeup_thread(mddev->sync_thread);
3585                         }
3586                 }
3587         unlock:
3588                 mddev_unlock(mddev);
3589         }
3590 }
3591
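/*
 * Reboot notifier: on shutdown, halt or power-off, stop every array we
 * can get the lock on before the machine goes down.
 */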
3592 int md_notify_reboot(struct notifier_block *this,
3593                                         unsigned long code, void *x)
3594 {
3595         struct list_head *tmp;
3596         mddev_t *mddev;
3597
3598         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
3599
3600                 printk(KERN_INFO "md: stopping all md devices.\n");
3601
3602                 ITERATE_MDDEV(mddev,tmp)
3603                         if (mddev_trylock(mddev)==0)
3604                                 do_md_stop (mddev, 1);
3605                 /*
3606                  * certain more exotic SCSI devices are known to be
3607                  * volatile with respect to overly early system reboots. While the
3608                  * right place to handle this issue is the given
3609                  * driver, we do want to have a safe RAID driver ...
3610                  */
3611                 mdelay(1000*1);
3612         }
3613         return NOTIFY_DONE;
3614 }
3615
3616 struct notifier_block md_notifier = {
3617         .notifier_call  = md_notify_reboot,
3618         .next           = NULL,
3619         .priority       = INT_MAX, /* before any real devices */
3620 };
3621
3622 static void md_geninit(void)
3623 {
3624         struct proc_dir_entry *p;
3625
3626         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
3627
3628         p = create_proc_entry("mdstat", S_IRUGO, NULL);
3629         if (p)
3630                 p->proc_fops = &md_seq_fops;
3631 }
3632
3633 int __init md_init(void)
3634 {
3635         int minor;
3636
3637         printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
3638                         " MD_SB_DISKS=%d\n",
3639                         MD_MAJOR_VERSION, MD_MINOR_VERSION,
3640                         MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
3641
3642         if (register_blkdev(MAJOR_NR, "md"))
3643                 return -1;
3644         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
3645                 unregister_blkdev(MAJOR_NR, "md");
3646                 return -1;
3647         }
3648         devfs_mk_dir("md");
3649         blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
3650                                 md_probe, NULL, NULL);
3651         blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
3652                             md_probe, NULL, NULL);
3653
3654         for (minor=0; minor < MAX_MD_DEVS; ++minor)
3655                 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
3656                                 S_IFBLK|S_IRUSR|S_IWUSR,
3657                                 "md/%d", minor);
3658
3659         for (minor=0; minor < MAX_MD_DEVS; ++minor)
3660                 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
3661                               S_IFBLK|S_IRUSR|S_IWUSR,
3662                               "md/mdp%d", minor);
3663
3664
3665         register_reboot_notifier(&md_notifier);
3666         raid_table_header = register_sysctl_table(raid_root_table, 1);
3667
3668         md_geninit();
3669         return (0);
3670 }
3671
3672
3673 #ifndef MODULE
3674
3675 /*
3676  * Searches all registered partitions for autorun RAID arrays
3677  * at boot time.
3678  */
3679 static dev_t detected_devices[128];
3680 static int dev_cnt;
3681
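/*
 * Called by the partition code for partitions whose type marks them as
 * Linux raid autodetect (0xfd); the devices are only remembered here
 * and assembled later by autostart_arrays().
 */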
3682 void md_autodetect_dev(dev_t dev)
3683 {
3684         if (dev_cnt >= 0 && dev_cnt < 127)
3685                 detected_devices[dev_cnt++] = dev;
3686 }
3687
3688
3689 static void autostart_arrays(int part)
3690 {
3691         mdk_rdev_t *rdev;
3692         int i;
3693
3694         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
3695
3696         for (i = 0; i < dev_cnt; i++) {
3697                 dev_t dev = detected_devices[i];
3698
3699                 rdev = md_import_device(dev,0, 0);
3700                 if (IS_ERR(rdev))
3701                         continue;
3702
3703                 if (rdev->faulty) {
3704                         MD_BUG();
3705                         continue;
3706                 }
3707                 list_add(&rdev->same_set, &pending_raid_disks);
3708         }
3709         dev_cnt = 0;
3710
3711         autorun_devices(part);
3712 }
3713
3714 #endif
3715
3716 static __exit void md_exit(void)
3717 {
3718         mddev_t *mddev;
3719         struct list_head *tmp;
3720         int i;
3721         blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
3722         blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
3723         for (i=0; i < MAX_MD_DEVS; i++)
3724                 devfs_remove("md/%d", i);
3725         for (i=0; i < MAX_MD_DEVS; i++)
3726                 devfs_remove("md/d%d", i);
3727
3728         devfs_remove("md");
3729
3730         unregister_blkdev(MAJOR_NR,"md");
3731         unregister_blkdev(mdp_major, "mdp");
3732         unregister_reboot_notifier(&md_notifier);
3733         unregister_sysctl_table(raid_table_header);
3734         remove_proc_entry("mdstat", NULL);
3735         ITERATE_MDDEV(mddev,tmp) {
3736                 struct gendisk *disk = mddev->gendisk;
3737                 if (!disk)
3738                         continue;
3739                 export_array(mddev);
3740                 del_gendisk(disk);
3741                 put_disk(disk);
3742                 mddev->gendisk = NULL;
3743                 mddev_put(mddev);
3744         }
3745 }
3746
3747 module_init(md_init)
3748 module_exit(md_exit)
3749
3750 EXPORT_SYMBOL(register_md_personality);
3751 EXPORT_SYMBOL(unregister_md_personality);
3752 EXPORT_SYMBOL(md_error);
3753 EXPORT_SYMBOL(md_done_sync);
3754 EXPORT_SYMBOL(md_write_start);
3755 EXPORT_SYMBOL(md_write_end);
3756 EXPORT_SYMBOL(md_handle_safemode);
3757 EXPORT_SYMBOL(md_register_thread);
3758 EXPORT_SYMBOL(md_unregister_thread);
3759 EXPORT_SYMBOL(md_wakeup_thread);
3760 EXPORT_SYMBOL(md_print_devices);
3761 EXPORT_SYMBOL(md_check_recovery);
3762 MODULE_LICENSE("GPL");