1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
39 #include <linux/fs.h>
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/string.h>
43 #include <linux/hdreg.h>
44 #include <linux/proc_fs.h>
45 #include <linux/random.h>
46 #include <linux/module.h>
47 #include <linux/reboot.h>
48 #include <linux/file.h>
49 #include <linux/compat.h>
50 #include <linux/delay.h>
51 #include <linux/raid/md_p.h>
52 #include <linux/raid/md_u.h>
53 #include <linux/slab.h>
54 #include "md.h"
55 #include "bitmap.h"
56
57 #ifndef MODULE
58 static void autostart_arrays(int part);
59 #endif
60
61 /* pers_list is a list of registered personalities protected
62  * by pers_lock.
63  * pers_lock additionally protects accesses to
64  * mddev->thread when the mutex cannot be held.
65  */
66 static LIST_HEAD(pers_list);
67 static DEFINE_SPINLOCK(pers_lock);
68
69 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
70 static struct workqueue_struct *md_wq;
71 static struct workqueue_struct *md_misc_wq;
72
73 static int remove_and_add_spares(struct mddev *mddev,
74                                  struct md_rdev *this);
75 static void mddev_detach(struct mddev *mddev);
76
77 /*
78  * Default number of read corrections we'll attempt on an rdev
79  * before ejecting it from the array. We divide the read error
80  * count by 2 for every hour elapsed between read errors.
81  */
82 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
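/*
 * Illustrative sketch (not driver code): the decay policy described
 * above.  Each full hour since the last read error halves the stored
 * count; the real logic lives in the raid personalities (see
 * check_decay_read_errors() in raid10.c).  Names here are hypothetical.
 */
static inline unsigned int example_decay_read_errors(unsigned int errors,
                                                     unsigned int hours)
{
        /* shifting a 32-bit value by >= 32 is undefined; saturate to 0 */
        if (hours >= 32)
                return 0;
        return errors >> hours;
}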
83 /*
84  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
85  * is 1000 KB/sec, so the extra system load does not show up that much.
86  * Increase it if you want to have more _guaranteed_ speed. Note that
87  * the RAID driver will use the maximum available bandwidth if the IO
88  * subsystem is idle. There is also an 'absolute maximum' reconstruction
89  * speed limit - in case reconstruction slows down your system despite
90  * idle IO detection.
91  *
92  * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
93  * or /sys/block/mdX/md/sync_speed_{min,max}.
94  */
95
96 static int sysctl_speed_limit_min = 1000;
97 static int sysctl_speed_limit_max = 200000;
98 static inline int speed_min(struct mddev *mddev)
99 {
100         return mddev->sync_speed_min ?
101                 mddev->sync_speed_min : sysctl_speed_limit_min;
102 }
103
104 static inline int speed_max(struct mddev *mddev)
105 {
106         return mddev->sync_speed_max ?
107                 mddev->sync_speed_max : sysctl_speed_limit_max;
108 }
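/*
 * Usage example (userspace, hedged): the limits above can be tuned at
 * run time through the files named in the comment, e.g. for a
 * hypothetical array md0:
 *
 *      echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *      echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * The per-array sysfs value, when non-zero, overrides the sysctl, as
 * speed_min()/speed_max() above show.
 */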
109
110 static struct ctl_table_header *raid_table_header;
111
112 static struct ctl_table raid_table[] = {
113         {
114                 .procname       = "speed_limit_min",
115                 .data           = &sysctl_speed_limit_min,
116                 .maxlen         = sizeof(int),
117                 .mode           = S_IRUGO|S_IWUSR,
118                 .proc_handler   = proc_dointvec,
119         },
120         {
121                 .procname       = "speed_limit_max",
122                 .data           = &sysctl_speed_limit_max,
123                 .maxlen         = sizeof(int),
124                 .mode           = S_IRUGO|S_IWUSR,
125                 .proc_handler   = proc_dointvec,
126         },
127         { }
128 };
129
130 static struct ctl_table raid_dir_table[] = {
131         {
132                 .procname       = "raid",
133                 .maxlen         = 0,
134                 .mode           = S_IRUGO|S_IXUGO,
135                 .child          = raid_table,
136         },
137         { }
138 };
139
140 static struct ctl_table raid_root_table[] = {
141         {
142                 .procname       = "dev",
143                 .maxlen         = 0,
144                 .mode           = 0555,
145                 .child          = raid_dir_table,
146         },
147         {  }
148 };
149
150 static const struct block_device_operations md_fops;
151
152 static int start_readonly;
153
154 /* bio_alloc_mddev / bio_clone_mddev:
155  * like bio_alloc / bio_clone, but using the mddev-local bio set when available
156  */
157
158 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
159                             struct mddev *mddev)
160 {
161         struct bio *b;
162
163         if (!mddev || !mddev->bio_set)
164                 return bio_alloc(gfp_mask, nr_iovecs);
165
166         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
167         if (!b)
168                 return NULL;
169         return b;
170 }
171 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
172
173 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
174                             struct mddev *mddev)
175 {
176         if (!mddev || !mddev->bio_set)
177                 return bio_clone(bio, gfp_mask);
178
179         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
180 }
181 EXPORT_SYMBOL_GPL(bio_clone_mddev);
182
183 /*
184  * We have a system wide 'event count' that is incremented
185  * on any 'interesting' event, and readers of /proc/mdstat
186  * can use 'poll' or 'select' to find out when the event
187  * count increases.
188  *
189  * Events are:
190  *  start array, stop array, error, add device, remove device,
191  *  start build, activate spare
192  */
193 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
194 static atomic_t md_event_count;
195 void md_new_event(struct mddev *mddev)
196 {
197         atomic_inc(&md_event_count);
198         wake_up(&md_event_waiters);
199 }
200 EXPORT_SYMBOL_GPL(md_new_event);
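/*
 * Userspace sketch (assumption, not driver code): waiting on the event
 * count through /proc/mdstat as described above.  The file reports
 * POLLERR|POLLPRI once md_event_count moves past the value seen at the
 * last read, so consume the current contents first, then poll.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int example_wait_for_md_event(void)
{
        char buf[4096];
        struct pollfd pfd = { .events = POLLPRI };

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        if (pfd.fd < 0)
                return -1;
        while (read(pfd.fd, buf, sizeof(buf)) > 0)
                ;                       /* note the current state */
        if (poll(&pfd, 1, -1) < 0) {    /* sleep until the next event */
                close(pfd.fd);
                return -1;
        }
        close(pfd.fd);
        return 0;
}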
201
202 /* Alternate version that can be called from interrupts
203  * when calling sysfs_notify isn't needed.
204  */
205 static void md_new_event_inintr(struct mddev *mddev)
206 {
207         atomic_inc(&md_event_count);
208         wake_up(&md_event_waiters);
209 }
210
211 /*
212  * Enables iteration over all existing md arrays;
213  * all_mddevs_lock protects this list.
214  */
215 static LIST_HEAD(all_mddevs);
216 static DEFINE_SPINLOCK(all_mddevs_lock);
217
218 /*
219  * iterates through all used mddevs in the system.
220  * We take care to grab the all_mddevs_lock whenever navigating
221  * the list, and to always hold a refcount when unlocked.
222  * Any code which breaks out of this loop while owning
223  * a reference to the current mddev must mddev_put it.
224  */
225 #define for_each_mddev(_mddev,_tmp)                                     \
226                                                                         \
227         for (({ spin_lock(&all_mddevs_lock);                            \
228                 _tmp = all_mddevs.next;                                 \
229                 _mddev = NULL;});                                       \
230              ({ if (_tmp != &all_mddevs)                                \
231                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
232                 spin_unlock(&all_mddevs_lock);                          \
233                 if (_mddev) mddev_put(_mddev);                          \
234                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
235                 _tmp != &all_mddevs;});                                 \
236              ({ spin_lock(&all_mddevs_lock);                            \
237                 _tmp = _tmp->next;})                                    \
238                 )
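/*
 * Illustrative sketch (assumption): typical use of for_each_mddev().
 * The iterator handles the locking and refcounting itself; a caller
 * that breaks out early still owns a reference on the current mddev
 * and must drop it with mddev_put().
 */
static void example_count_arrays(void)
{
        struct mddev *mddev;
        struct list_head *tmp;
        int n = 0;

        for_each_mddev(mddev, tmp)
                n++;
        pr_info("md: %d arrays registered\n", n);
}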
239
240 /* Rather than calling directly into the personality make_request function,
241  * IO requests come here first so that we can check if the device is
242  * being suspended pending a reconfiguration.
243  * We hold a refcount over the call to ->make_request.  By the time that
244  * call has finished, the bio has been linked into some internal structure
245  * and so is visible to ->quiesce(), so we don't need the refcount any more.
246  */
247 static void md_make_request(struct request_queue *q, struct bio *bio)
248 {
249         const int rw = bio_data_dir(bio);
250         struct mddev *mddev = q->queuedata;
251         unsigned int sectors;
252
253         if (mddev == NULL || mddev->pers == NULL
254             || !mddev->ready) {
255                 bio_io_error(bio);
256                 return;
257         }
258         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
259                 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
260                 return;
261         }
262         smp_rmb(); /* Ensure implications of 'active' are visible */
263         rcu_read_lock();
264         if (mddev->suspended) {
265                 DEFINE_WAIT(__wait);
266                 for (;;) {
267                         prepare_to_wait(&mddev->sb_wait, &__wait,
268                                         TASK_UNINTERRUPTIBLE);
269                         if (!mddev->suspended)
270                                 break;
271                         rcu_read_unlock();
272                         schedule();
273                         rcu_read_lock();
274                 }
275                 finish_wait(&mddev->sb_wait, &__wait);
276         }
277         atomic_inc(&mddev->active_io);
278         rcu_read_unlock();
279
280         /*
281          * save the sectors now since our bio can
282          * go away inside make_request
283          */
284         sectors = bio_sectors(bio);
285         mddev->pers->make_request(mddev, bio);
286
287         generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
288
289         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
290                 wake_up(&mddev->sb_wait);
291 }
292
293 /* mddev_suspend makes sure no new requests are submitted
294  * to the device, and that any requests that have been submitted
295  * are completely handled.
296  * Once mddev_detach() is called and completes, the module will be
297  * completely unused.
298  */
299 void mddev_suspend(struct mddev *mddev)
300 {
301         BUG_ON(mddev->suspended);
302         mddev->suspended = 1;
303         synchronize_rcu();
304         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
305         mddev->pers->quiesce(mddev, 1);
306
307         del_timer_sync(&mddev->safemode_timer);
308 }
309 EXPORT_SYMBOL_GPL(mddev_suspend);
310
311 void mddev_resume(struct mddev *mddev)
312 {
313         mddev->suspended = 0;
314         wake_up(&mddev->sb_wait);
315         mddev->pers->quiesce(mddev, 0);
316
317         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
318         md_wakeup_thread(mddev->thread);
319         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
320 }
321 EXPORT_SYMBOL_GPL(mddev_resume);
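/*
 * Illustrative sketch (assumption): the usual pairing of the two
 * functions above when internal state must change while no IO is in
 * flight.
 */
static void example_quiesced_update(struct mddev *mddev)
{
        mddev_suspend(mddev);   /* block new IO, drain active_io to 0 */
        /* ... safely modify personality data structures here ... */
        mddev_resume(mddev);    /* restart IO and wake the md thread */
}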
322
323 int mddev_congested(struct mddev *mddev, int bits)
324 {
325         struct md_personality *pers = mddev->pers;
326         int ret = 0;
327
328         rcu_read_lock();
329         if (mddev->suspended)
330                 ret = 1;
331         else if (pers && pers->congested)
332                 ret = pers->congested(mddev, bits);
333         rcu_read_unlock();
334         return ret;
335 }
336 EXPORT_SYMBOL_GPL(mddev_congested);
337 static int md_congested(void *data, int bits)
338 {
339         struct mddev *mddev = data;
340         return mddev_congested(mddev, bits);
341 }
342
343 static int md_mergeable_bvec(struct request_queue *q,
344                              struct bvec_merge_data *bvm,
345                              struct bio_vec *biovec)
346 {
347         struct mddev *mddev = q->queuedata;
348         int ret;
349         rcu_read_lock();
350         if (mddev->suspended) {
351                 /* Must always allow one vec */
352                 if (bvm->bi_size == 0)
353                         ret = biovec->bv_len;
354                 else
355                         ret = 0;
356         } else {
357                 struct md_personality *pers = mddev->pers;
358                 if (pers && pers->mergeable_bvec)
359                         ret = pers->mergeable_bvec(mddev, bvm, biovec);
360                 else
361                         ret = biovec->bv_len;
362         }
363         rcu_read_unlock();
364         return ret;
365 }
366 /*
367  * Generic flush handling for md
368  */
369
370 static void md_end_flush(struct bio *bio, int err)
371 {
372         struct md_rdev *rdev = bio->bi_private;
373         struct mddev *mddev = rdev->mddev;
374
375         rdev_dec_pending(rdev, mddev);
376
377         if (atomic_dec_and_test(&mddev->flush_pending)) {
378                 /* The pre-request flush has finished */
379                 queue_work(md_wq, &mddev->flush_work);
380         }
381         bio_put(bio);
382 }
383
384 static void md_submit_flush_data(struct work_struct *ws);
385
386 static void submit_flushes(struct work_struct *ws)
387 {
388         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
389         struct md_rdev *rdev;
390
391         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
392         atomic_set(&mddev->flush_pending, 1);
393         rcu_read_lock();
394         rdev_for_each_rcu(rdev, mddev)
395                 if (rdev->raid_disk >= 0 &&
396                     !test_bit(Faulty, &rdev->flags)) {
397                         /* Take two references, one is dropped
398                          * when request finishes, one after
399                          * we re-take the rcu_read_lock
400                          */
401                         struct bio *bi;
402                         atomic_inc(&rdev->nr_pending);
403                         atomic_inc(&rdev->nr_pending);
404                         rcu_read_unlock();
405                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
406                         bi->bi_end_io = md_end_flush;
407                         bi->bi_private = rdev;
408                         bi->bi_bdev = rdev->bdev;
409                         atomic_inc(&mddev->flush_pending);
410                         submit_bio(WRITE_FLUSH, bi);
411                         rcu_read_lock();
412                         rdev_dec_pending(rdev, mddev);
413                 }
414         rcu_read_unlock();
415         if (atomic_dec_and_test(&mddev->flush_pending))
416                 queue_work(md_wq, &mddev->flush_work);
417 }
418
419 static void md_submit_flush_data(struct work_struct *ws)
420 {
421         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
422         struct bio *bio = mddev->flush_bio;
423
424         if (bio->bi_iter.bi_size == 0)
425                 /* an empty barrier - all done */
426                 bio_endio(bio, 0);
427         else {
428                 bio->bi_rw &= ~REQ_FLUSH;
429                 mddev->pers->make_request(mddev, bio);
430         }
431
432         mddev->flush_bio = NULL;
433         wake_up(&mddev->sb_wait);
434 }
435
436 void md_flush_request(struct mddev *mddev, struct bio *bio)
437 {
438         spin_lock_irq(&mddev->lock);
439         wait_event_lock_irq(mddev->sb_wait,
440                             !mddev->flush_bio,
441                             mddev->lock);
442         mddev->flush_bio = bio;
443         spin_unlock_irq(&mddev->lock);
444
445         INIT_WORK(&mddev->flush_work, submit_flushes);
446         queue_work(md_wq, &mddev->flush_work);
447 }
448 EXPORT_SYMBOL(md_flush_request);
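/*
 * Illustrative sketch (assumption): how a personality's ->make_request
 * routes flush requests into the generic machinery above before
 * handling ordinary IO itself; raid0 follows this exact pattern.
 */
static void example_personality_make_request(struct mddev *mddev,
                                             struct bio *bio)
{
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
        /* ... map and submit the bio normally ... */
}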
449
450 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
451 {
452         struct mddev *mddev = cb->data;
453         md_wakeup_thread(mddev->thread);
454         kfree(cb);
455 }
456 EXPORT_SYMBOL(md_unplug);
457
458 static inline struct mddev *mddev_get(struct mddev *mddev)
459 {
460         atomic_inc(&mddev->active);
461         return mddev;
462 }
463
464 static void mddev_delayed_delete(struct work_struct *ws);
465
466 static void mddev_put(struct mddev *mddev)
467 {
468         struct bio_set *bs = NULL;
469
470         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
471                 return;
472         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
473             mddev->ctime == 0 && !mddev->hold_active) {
474                 /* Array is not configured at all, and not held active,
475                  * so destroy it */
476                 list_del_init(&mddev->all_mddevs);
477                 bs = mddev->bio_set;
478                 mddev->bio_set = NULL;
479                 if (mddev->gendisk) {
480                         /* We did a probe, so we need to clean up.  Call
481                          * queue_work inside the spinlock so that
482                          * flush_workqueue() after mddev_find will
483                          * succeed in waiting for the work to be done.
484                          */
485                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
486                         queue_work(md_misc_wq, &mddev->del_work);
487                 } else
488                         kfree(mddev);
489         }
490         spin_unlock(&all_mddevs_lock);
491         if (bs)
492                 bioset_free(bs);
493 }
494
495 void mddev_init(struct mddev *mddev)
496 {
497         mutex_init(&mddev->open_mutex);
498         mutex_init(&mddev->reconfig_mutex);
499         mutex_init(&mddev->bitmap_info.mutex);
500         INIT_LIST_HEAD(&mddev->disks);
501         INIT_LIST_HEAD(&mddev->all_mddevs);
502         init_timer(&mddev->safemode_timer);
503         atomic_set(&mddev->active, 1);
504         atomic_set(&mddev->openers, 0);
505         atomic_set(&mddev->active_io, 0);
506         spin_lock_init(&mddev->lock);
507         atomic_set(&mddev->flush_pending, 0);
508         init_waitqueue_head(&mddev->sb_wait);
509         init_waitqueue_head(&mddev->recovery_wait);
510         mddev->reshape_position = MaxSector;
511         mddev->reshape_backwards = 0;
512         mddev->last_sync_action = "none";
513         mddev->resync_min = 0;
514         mddev->resync_max = MaxSector;
515         mddev->level = LEVEL_NONE;
516 }
517 EXPORT_SYMBOL_GPL(mddev_init);
518
519 static struct mddev *mddev_find(dev_t unit)
520 {
521         struct mddev *mddev, *new = NULL;
522
523         if (unit && MAJOR(unit) != MD_MAJOR)
524                 unit &= ~((1<<MdpMinorShift)-1);
525
526  retry:
527         spin_lock(&all_mddevs_lock);
528
529         if (unit) {
530                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
531                         if (mddev->unit == unit) {
532                                 mddev_get(mddev);
533                                 spin_unlock(&all_mddevs_lock);
534                                 kfree(new);
535                                 return mddev;
536                         }
537
538                 if (new) {
539                         list_add(&new->all_mddevs, &all_mddevs);
540                         spin_unlock(&all_mddevs_lock);
541                         new->hold_active = UNTIL_IOCTL;
542                         return new;
543                 }
544         } else if (new) {
545                 /* find an unused unit number */
546                 static int next_minor = 512;
547                 int start = next_minor;
548                 int is_free = 0;
549                 int dev = 0;
550                 while (!is_free) {
551                         dev = MKDEV(MD_MAJOR, next_minor);
552                         next_minor++;
553                         if (next_minor > MINORMASK)
554                                 next_minor = 0;
555                         if (next_minor == start) {
556                                 /* Oh dear, all in use. */
557                                 spin_unlock(&all_mddevs_lock);
558                                 kfree(new);
559                                 return NULL;
560                         }
561
562                         is_free = 1;
563                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
564                                 if (mddev->unit == dev) {
565                                         is_free = 0;
566                                         break;
567                                 }
568                 }
569                 new->unit = dev;
570                 new->md_minor = MINOR(dev);
571                 new->hold_active = UNTIL_STOP;
572                 list_add(&new->all_mddevs, &all_mddevs);
573                 spin_unlock(&all_mddevs_lock);
574                 return new;
575         }
576         spin_unlock(&all_mddevs_lock);
577
578         new = kzalloc(sizeof(*new), GFP_KERNEL);
579         if (!new)
580                 return NULL;
581
582         new->unit = unit;
583         if (MAJOR(unit) == MD_MAJOR)
584                 new->md_minor = MINOR(unit);
585         else
586                 new->md_minor = MINOR(unit) >> MdpMinorShift;
587
588         mddev_init(new);
589
590         goto retry;
591 }
592
593 static struct attribute_group md_redundancy_group;
594
595 void mddev_unlock(struct mddev *mddev)
596 {
597         if (mddev->to_remove) {
598                 /* These cannot be removed under reconfig_mutex as
599                  * an access to the files will try to take reconfig_mutex
600                  * while holding the file unremovable, which leads to
601                  * a deadlock.
602          * So set sysfs_active while the removal is happening,
603          * and anything else which might set ->to_remove or might
604                  * otherwise change the sysfs namespace will fail with
605                  * -EBUSY if sysfs_active is still set.
606                  * We set sysfs_active under reconfig_mutex and elsewhere
607                  * test it under the same mutex to ensure its correct value
608                  * is seen.
609                  */
610                 struct attribute_group *to_remove = mddev->to_remove;
611                 mddev->to_remove = NULL;
612                 mddev->sysfs_active = 1;
613                 mutex_unlock(&mddev->reconfig_mutex);
614
615                 if (mddev->kobj.sd) {
616                         if (to_remove != &md_redundancy_group)
617                                 sysfs_remove_group(&mddev->kobj, to_remove);
618                         if (mddev->pers == NULL ||
619                             mddev->pers->sync_request == NULL) {
620                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
621                                 if (mddev->sysfs_action)
622                                         sysfs_put(mddev->sysfs_action);
623                                 mddev->sysfs_action = NULL;
624                         }
625                 }
626                 mddev->sysfs_active = 0;
627         } else
628                 mutex_unlock(&mddev->reconfig_mutex);
629
630         /* As we've dropped the mutex we need a spinlock to
631          * make sure the thread doesn't disappear
632          */
633         spin_lock(&pers_lock);
634         md_wakeup_thread(mddev->thread);
635         spin_unlock(&pers_lock);
636 }
637 EXPORT_SYMBOL_GPL(mddev_unlock);
638
639 static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
640 {
641         struct md_rdev *rdev;
642
643         rdev_for_each_rcu(rdev, mddev)
644                 if (rdev->desc_nr == nr)
645                         return rdev;
646
647         return NULL;
648 }
649
650 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
651 {
652         struct md_rdev *rdev;
653
654         rdev_for_each(rdev, mddev)
655                 if (rdev->bdev->bd_dev == dev)
656                         return rdev;
657
658         return NULL;
659 }
660
661 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
662 {
663         struct md_rdev *rdev;
664
665         rdev_for_each_rcu(rdev, mddev)
666                 if (rdev->bdev->bd_dev == dev)
667                         return rdev;
668
669         return NULL;
670 }
671
672 static struct md_personality *find_pers(int level, char *clevel)
673 {
674         struct md_personality *pers;
675         list_for_each_entry(pers, &pers_list, list) {
676                 if (level != LEVEL_NONE && pers->level == level)
677                         return pers;
678                 if (strcmp(pers->name, clevel)==0)
679                         return pers;
680         }
681         return NULL;
682 }
683
684 /* return the offset of the super block in 512-byte sectors */
685 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
686 {
687         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
688         return MD_NEW_SIZE_SECTORS(num_sectors);
689 }
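/*
 * Worked example (assuming MD_RESERVED_SECTORS is 128, i.e. 64K): for
 * a 1000000-sector device, MD_NEW_SIZE_SECTORS(1000000) rounds down to
 * the previous 64K boundary and steps back one reserved block:
 *      1000000 & ~127 = 999936;  999936 - 128 = 999808
 * so the 0.90 superblock sits 64K-aligned near the end of the device.
 */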
690
691 static int alloc_disk_sb(struct md_rdev *rdev)
692 {
693         rdev->sb_page = alloc_page(GFP_KERNEL);
694         if (!rdev->sb_page) {
695                 printk(KERN_ALERT "md: out of memory.\n");
696                 return -ENOMEM;
697         }
698
699         return 0;
700 }
701
702 void md_rdev_clear(struct md_rdev *rdev)
703 {
704         if (rdev->sb_page) {
705                 put_page(rdev->sb_page);
706                 rdev->sb_loaded = 0;
707                 rdev->sb_page = NULL;
708                 rdev->sb_start = 0;
709                 rdev->sectors = 0;
710         }
711         if (rdev->bb_page) {
712                 put_page(rdev->bb_page);
713                 rdev->bb_page = NULL;
714         }
715         kfree(rdev->badblocks.page);
716         rdev->badblocks.page = NULL;
717 }
718 EXPORT_SYMBOL_GPL(md_rdev_clear);
719
720 static void super_written(struct bio *bio, int error)
721 {
722         struct md_rdev *rdev = bio->bi_private;
723         struct mddev *mddev = rdev->mddev;
724
725         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
726                 printk("md: super_written gets error=%d, uptodate=%d\n",
727                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
728                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
729                 md_error(mddev, rdev);
730         }
731
732         if (atomic_dec_and_test(&mddev->pending_writes))
733                 wake_up(&mddev->sb_wait);
734         bio_put(bio);
735 }
736
737 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
738                    sector_t sector, int size, struct page *page)
739 {
740         /* write the first 'size' bytes of 'page' to 'sector' of rdev.
741          * Increment mddev->pending_writes before returning
742          * and decrement it on completion, waking up sb_wait
743          * if zero is reached.
744          * If an error occurred, call md_error
745          */
746         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
747
748         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
749         bio->bi_iter.bi_sector = sector;
750         bio_add_page(bio, page, size, 0);
751         bio->bi_private = rdev;
752         bio->bi_end_io = super_written;
753
754         atomic_inc(&mddev->pending_writes);
755         submit_bio(WRITE_FLUSH_FUA, bio);
756 }
757
758 void md_super_wait(struct mddev *mddev)
759 {
760         /* wait for all superblock writes that were scheduled to complete */
761         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
762 }
763
764 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
765                  struct page *page, int rw, bool metadata_op)
766 {
767         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
768         int ret;
769
770         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
771                 rdev->meta_bdev : rdev->bdev;
772         if (metadata_op)
773                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
774         else if (rdev->mddev->reshape_position != MaxSector &&
775                  (rdev->mddev->reshape_backwards ==
776                   (sector >= rdev->mddev->reshape_position)))
777                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
778         else
779                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
780         bio_add_page(bio, page, size, 0);
781         submit_bio_wait(rw, bio);
782
783         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
784         bio_put(bio);
785         return ret;
786 }
787 EXPORT_SYMBOL_GPL(sync_page_io);
788
789 static int read_disk_sb(struct md_rdev *rdev, int size)
790 {
791         char b[BDEVNAME_SIZE];
792
793         if (rdev->sb_loaded)
794                 return 0;
795
796         if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
797                 goto fail;
798         rdev->sb_loaded = 1;
799         return 0;
800
801 fail:
802         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
803                 bdevname(rdev->bdev,b));
804         return -EINVAL;
805 }
806
807 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
808 {
809         return  sb1->set_uuid0 == sb2->set_uuid0 &&
810                 sb1->set_uuid1 == sb2->set_uuid1 &&
811                 sb1->set_uuid2 == sb2->set_uuid2 &&
812                 sb1->set_uuid3 == sb2->set_uuid3;
813 }
814
815 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
816 {
817         int ret;
818         mdp_super_t *tmp1, *tmp2;
819
820         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
821         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
822
823         if (!tmp1 || !tmp2) {
824                 ret = 0;
825                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
826                 goto abort;
827         }
828
829         *tmp1 = *sb1;
830         *tmp2 = *sb2;
831
832         /*
833          * nr_disks is not constant
834          */
835         tmp1->nr_disks = 0;
836         tmp2->nr_disks = 0;
837
838         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
839 abort:
840         kfree(tmp1);
841         kfree(tmp2);
842         return ret;
843 }
844
845 static u32 md_csum_fold(u32 csum)
846 {
847         csum = (csum & 0xffff) + (csum >> 16);
848         return (csum & 0xffff) + (csum >> 16);
849 }
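/*
 * Worked example: md_csum_fold(0x1234abcd) adds the 16-bit halves,
 * 0xabcd + 0x1234 = 0xbe01, and the second pass folds in any carry,
 * leaving the 16-bit result 0xbe01.
 */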
850
851 static unsigned int calc_sb_csum(mdp_super_t *sb)
852 {
853         u64 newcsum = 0;
854         u32 *sb32 = (u32*)sb;
855         int i;
856         unsigned int disk_csum, csum;
857
858         disk_csum = sb->sb_csum;
859         sb->sb_csum = 0;
860
861         for (i = 0; i < MD_SB_BYTES/4 ; i++)
862                 newcsum += sb32[i];
863         csum = (newcsum & 0xffffffff) + (newcsum>>32);
864
865 #ifdef CONFIG_ALPHA
866         /* This used to use csum_partial, which was wrong for several
867          * reasons including that different results are returned on
868          * different architectures.  It isn't critical that we get exactly
869          * the same return value as before (we always csum_fold before
870          * testing, and that removes any differences).  However as we
871          * know that csum_partial always returned a 16bit value on
872          * alphas, do a fold to maximise conformity to previous behaviour.
873          */
874         sb->sb_csum = md_csum_fold(disk_csum);
875 #else
876         sb->sb_csum = disk_csum;
877 #endif
878         return csum;
879 }
880
881 /*
882  * Handle superblock details.
883  * We want to be able to handle multiple superblock formats
884  * so we have a common interface to them all, and an array of
885  * different handlers.
886  * We rely on user-space to write the initial superblock, and support
887  * reading and updating of superblocks.
888  * Interface methods are:
889  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
890  *      loads and validates a superblock on dev.
891  *      if refdev != NULL, compare superblocks on both devices
892  *    Return:
893  *      0 - dev has a superblock that is compatible with refdev
894  *      1 - dev has a superblock that is compatible and newer than refdev
895  *          so dev should be used as the refdev in future
896  *     -EINVAL superblock incompatible or invalid
897  *     -othererror e.g. -EIO
898  *
899  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
900  *      Verify that dev is acceptable into mddev.
901  *       The first time, mddev->raid_disks will be 0, and data from
902  *       dev should be merged in.  Subsequent calls check that dev
903  *       is new enough.  Return 0 or -EINVAL
904  *
905  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
906  *     Update the superblock for rdev with data in mddev
907  *     This does not write to disc.
908  *
909  */
910
911 struct super_type  {
912         char                *name;
913         struct module       *owner;
914         int                 (*load_super)(struct md_rdev *rdev,
915                                           struct md_rdev *refdev,
916                                           int minor_version);
917         int                 (*validate_super)(struct mddev *mddev,
918                                               struct md_rdev *rdev);
919         void                (*sync_super)(struct mddev *mddev,
920                                           struct md_rdev *rdev);
921         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
922                                                 sector_t num_sectors);
923         int                 (*allow_new_offset)(struct md_rdev *rdev,
924                                                 unsigned long long new_offset);
925 };
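/*
 * Illustrative sketch (assumption): callers dispatch through this
 * method table, indexed by the array's major metadata version, e.g.:
 *
 *      err = super_types[mddev->major_version].
 *              load_super(rdev, refdev, mddev->minor_version);
 */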
926
927 /*
928  * Check that the given mddev has no bitmap.
929  *
930  * This function is called from the run method of all personalities that do not
931  * support bitmaps. It prints an error message and returns non-zero if mddev
932  * has a bitmap. Otherwise, it returns 0.
933  *
934  */
935 int md_check_no_bitmap(struct mddev *mddev)
936 {
937         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
938                 return 0;
939         printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
940                 mdname(mddev), mddev->pers->name);
941         return 1;
942 }
943 EXPORT_SYMBOL(md_check_no_bitmap);
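/*
 * Illustrative sketch (assumption): a personality without bitmap
 * support rejects such arrays early in its ->run() method:
 *
 *      if (md_check_no_bitmap(mddev))
 *              return -EINVAL;
 */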
944
945 /*
946  * load_super for 0.90.0
947  */
948 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
949 {
950         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
951         mdp_super_t *sb;
952         int ret;
953
954         /*
955          * Calculate the position of the superblock (512-byte sectors),
956          * it's at the end of the disk.
957          *
958          * It also happens to be a multiple of 4Kb.
959          */
960         rdev->sb_start = calc_dev_sboffset(rdev);
961
962         ret = read_disk_sb(rdev, MD_SB_BYTES);
963         if (ret) return ret;
964
965         ret = -EINVAL;
966
967         bdevname(rdev->bdev, b);
968         sb = page_address(rdev->sb_page);
969
970         if (sb->md_magic != MD_SB_MAGIC) {
971                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
972                        b);
973                 goto abort;
974         }
975
976         if (sb->major_version != 0 ||
977             sb->minor_version < 90 ||
978             sb->minor_version > 91) {
979                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
980                         sb->major_version, sb->minor_version,
981                         b);
982                 goto abort;
983         }
984
985         if (sb->raid_disks <= 0)
986                 goto abort;
987
988         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
989                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
990                         b);
991                 goto abort;
992         }
993
994         rdev->preferred_minor = sb->md_minor;
995         rdev->data_offset = 0;
996         rdev->new_data_offset = 0;
997         rdev->sb_size = MD_SB_BYTES;
998         rdev->badblocks.shift = -1;
999
1000         if (sb->level == LEVEL_MULTIPATH)
1001                 rdev->desc_nr = -1;
1002         else
1003                 rdev->desc_nr = sb->this_disk.number;
1004
1005         if (!refdev) {
1006                 ret = 1;
1007         } else {
1008                 __u64 ev1, ev2;
1009                 mdp_super_t *refsb = page_address(refdev->sb_page);
1010                 if (!uuid_equal(refsb, sb)) {
1011                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
1012                                 b, bdevname(refdev->bdev,b2));
1013                         goto abort;
1014                 }
1015                 if (!sb_equal(refsb, sb)) {
1016                         printk(KERN_WARNING "md: %s has same UUID"
1017                                " but different superblock to %s\n",
1018                                b, bdevname(refdev->bdev, b2));
1019                         goto abort;
1020                 }
1021                 ev1 = md_event(sb);
1022                 ev2 = md_event(refsb);
1023                 if (ev1 > ev2)
1024                         ret = 1;
1025                 else
1026                         ret = 0;
1027         }
1028         rdev->sectors = rdev->sb_start;
1029         /* Limit to 4TB as metadata cannot record more than that.
1030          * (not needed for Linear and RAID0 as metadata doesn't
1031          * record this size)
1032          */
1033         if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1034                 rdev->sectors = (2ULL << 32) - 2;
1035
1036         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1037                 /* "this cannot possibly happen" ... */
1038                 ret = -EINVAL;
1039
1040  abort:
1041         return ret;
1042 }
1043
1044 /*
1045  * validate_super for 0.90.0
1046  */
1047 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1048 {
1049         mdp_disk_t *desc;
1050         mdp_super_t *sb = page_address(rdev->sb_page);
1051         __u64 ev1 = md_event(sb);
1052
1053         rdev->raid_disk = -1;
1054         clear_bit(Faulty, &rdev->flags);
1055         clear_bit(In_sync, &rdev->flags);
1056         clear_bit(Bitmap_sync, &rdev->flags);
1057         clear_bit(WriteMostly, &rdev->flags);
1058
1059         if (mddev->raid_disks == 0) {
1060                 mddev->major_version = 0;
1061                 mddev->minor_version = sb->minor_version;
1062                 mddev->patch_version = sb->patch_version;
1063                 mddev->external = 0;
1064                 mddev->chunk_sectors = sb->chunk_size >> 9;
1065                 mddev->ctime = sb->ctime;
1066                 mddev->utime = sb->utime;
1067                 mddev->level = sb->level;
1068                 mddev->clevel[0] = 0;
1069                 mddev->layout = sb->layout;
1070                 mddev->raid_disks = sb->raid_disks;
1071                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1072                 mddev->events = ev1;
1073                 mddev->bitmap_info.offset = 0;
1074                 mddev->bitmap_info.space = 0;
1075                 /* bitmap can use 60 K after the 4K superblocks */
1076                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1077                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1078                 mddev->reshape_backwards = 0;
1079
1080                 if (mddev->minor_version >= 91) {
1081                         mddev->reshape_position = sb->reshape_position;
1082                         mddev->delta_disks = sb->delta_disks;
1083                         mddev->new_level = sb->new_level;
1084                         mddev->new_layout = sb->new_layout;
1085                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1086                         if (mddev->delta_disks < 0)
1087                                 mddev->reshape_backwards = 1;
1088                 } else {
1089                         mddev->reshape_position = MaxSector;
1090                         mddev->delta_disks = 0;
1091                         mddev->new_level = mddev->level;
1092                         mddev->new_layout = mddev->layout;
1093                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1094                 }
1095
1096                 if (sb->state & (1<<MD_SB_CLEAN))
1097                         mddev->recovery_cp = MaxSector;
1098                 else {
1099                         if (sb->events_hi == sb->cp_events_hi &&
1100                                 sb->events_lo == sb->cp_events_lo) {
1101                                 mddev->recovery_cp = sb->recovery_cp;
1102                         } else
1103                                 mddev->recovery_cp = 0;
1104                 }
1105
1106                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1107                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1108                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1109                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1110
1111                 mddev->max_disks = MD_SB_DISKS;
1112
1113                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1114                     mddev->bitmap_info.file == NULL) {
1115                         mddev->bitmap_info.offset =
1116                                 mddev->bitmap_info.default_offset;
1117                         mddev->bitmap_info.space =
1118                                 mddev->bitmap_info.default_space;
1119                 }
1120
1121         } else if (mddev->pers == NULL) {
1122                 /* Insist on good event counter while assembling, except
1123                  * for spares (which don't need an event count) */
1124                 ++ev1;
1125                 if (sb->disks[rdev->desc_nr].state & (
1126                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1127                         if (ev1 < mddev->events)
1128                                 return -EINVAL;
1129         } else if (mddev->bitmap) {
1130                 /* if adding to array with a bitmap, then we can accept an
1131                  * older device ... but not too old.
1132                  */
1133                 if (ev1 < mddev->bitmap->events_cleared)
1134                         return 0;
1135                 if (ev1 < mddev->events)
1136                         set_bit(Bitmap_sync, &rdev->flags);
1137         } else {
1138                 if (ev1 < mddev->events)
1139                         /* just a hot-add of a new device, leave raid_disk at -1 */
1140                         return 0;
1141         }
1142
1143         if (mddev->level != LEVEL_MULTIPATH) {
1144                 desc = sb->disks + rdev->desc_nr;
1145
1146                 if (desc->state & (1<<MD_DISK_FAULTY))
1147                         set_bit(Faulty, &rdev->flags);
1148                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1149                             desc->raid_disk < mddev->raid_disks */) {
1150                         set_bit(In_sync, &rdev->flags);
1151                         rdev->raid_disk = desc->raid_disk;
1152                         rdev->saved_raid_disk = desc->raid_disk;
1153                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1154                         /* active but not in sync implies recovery up to
1155                          * reshape position.  We don't know exactly where
1156                          * that is, so set to zero for now */
1157                         if (mddev->minor_version >= 91) {
1158                                 rdev->recovery_offset = 0;
1159                                 rdev->raid_disk = desc->raid_disk;
1160                         }
1161                 }
1162                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1163                         set_bit(WriteMostly, &rdev->flags);
1164         } else /* MULTIPATH are always insync */
1165                 set_bit(In_sync, &rdev->flags);
1166         return 0;
1167 }
1168
1169 /*
1170  * sync_super for 0.90.0
1171  */
1172 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1173 {
1174         mdp_super_t *sb;
1175         struct md_rdev *rdev2;
1176         int next_spare = mddev->raid_disks;
1177
1178         /* make rdev->sb match mddev data..
1179          *
1180          * 1/ zero out disks
1181          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1182          * 3/ any empty disks < next_spare become removed
1183          *
1184          * disks[0] gets initialised to REMOVED because
1185          * we cannot be sure from other fields if it has
1186          * been initialised or not.
1187          */
1188         int i;
1189         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1190
1191         rdev->sb_size = MD_SB_BYTES;
1192
1193         sb = page_address(rdev->sb_page);
1194
1195         memset(sb, 0, sizeof(*sb));
1196
1197         sb->md_magic = MD_SB_MAGIC;
1198         sb->major_version = mddev->major_version;
1199         sb->patch_version = mddev->patch_version;
1200         sb->gvalid_words  = 0; /* ignored */
1201         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1202         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1203         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1204         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1205
1206         sb->ctime = mddev->ctime;
1207         sb->level = mddev->level;
1208         sb->size = mddev->dev_sectors / 2;
1209         sb->raid_disks = mddev->raid_disks;
1210         sb->md_minor = mddev->md_minor;
1211         sb->not_persistent = 0;
1212         sb->utime = mddev->utime;
1213         sb->state = 0;
1214         sb->events_hi = (mddev->events>>32);
1215         sb->events_lo = (u32)mddev->events;
1216
1217         if (mddev->reshape_position == MaxSector)
1218                 sb->minor_version = 90;
1219         else {
1220                 sb->minor_version = 91;
1221                 sb->reshape_position = mddev->reshape_position;
1222                 sb->new_level = mddev->new_level;
1223                 sb->delta_disks = mddev->delta_disks;
1224                 sb->new_layout = mddev->new_layout;
1225                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1226         }
1227         mddev->minor_version = sb->minor_version;
1228         if (mddev->in_sync)
1229         {
1230                 sb->recovery_cp = mddev->recovery_cp;
1231                 sb->cp_events_hi = (mddev->events>>32);
1232                 sb->cp_events_lo = (u32)mddev->events;
1233                 if (mddev->recovery_cp == MaxSector)
1234                         sb->state = (1<< MD_SB_CLEAN);
1235         } else
1236                 sb->recovery_cp = 0;
1237
1238         sb->layout = mddev->layout;
1239         sb->chunk_size = mddev->chunk_sectors << 9;
1240
1241         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1242                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1243
1244         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1245         rdev_for_each(rdev2, mddev) {
1246                 mdp_disk_t *d;
1247                 int desc_nr;
1248                 int is_active = test_bit(In_sync, &rdev2->flags);
1249
1250                 if (rdev2->raid_disk >= 0 &&
1251                     sb->minor_version >= 91)
1252                         /* we have nowhere to store the recovery_offset,
1253                          * but if it is not below the reshape_position,
1254                          * we can piggy-back on that.
1255                          */
1256                         is_active = 1;
1257                 if (rdev2->raid_disk < 0 ||
1258                     test_bit(Faulty, &rdev2->flags))
1259                         is_active = 0;
1260                 if (is_active)
1261                         desc_nr = rdev2->raid_disk;
1262                 else
1263                         desc_nr = next_spare++;
1264                 rdev2->desc_nr = desc_nr;
1265                 d = &sb->disks[rdev2->desc_nr];
1266                 nr_disks++;
1267                 d->number = rdev2->desc_nr;
1268                 d->major = MAJOR(rdev2->bdev->bd_dev);
1269                 d->minor = MINOR(rdev2->bdev->bd_dev);
1270                 if (is_active)
1271                         d->raid_disk = rdev2->raid_disk;
1272                 else
1273                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1274                 if (test_bit(Faulty, &rdev2->flags))
1275                         d->state = (1<<MD_DISK_FAULTY);
1276                 else if (is_active) {
1277                         d->state = (1<<MD_DISK_ACTIVE);
1278                         if (test_bit(In_sync, &rdev2->flags))
1279                                 d->state |= (1<<MD_DISK_SYNC);
1280                         active++;
1281                         working++;
1282                 } else {
1283                         d->state = 0;
1284                         spare++;
1285                         working++;
1286                 }
1287                 if (test_bit(WriteMostly, &rdev2->flags))
1288                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1289         }
1290         /* now set the "removed" and "faulty" bits on any missing devices */
1291         for (i=0 ; i < mddev->raid_disks ; i++) {
1292                 mdp_disk_t *d = &sb->disks[i];
1293                 if (d->state == 0 && d->number == 0) {
1294                         d->number = i;
1295                         d->raid_disk = i;
1296                         d->state = (1<<MD_DISK_REMOVED);
1297                         d->state |= (1<<MD_DISK_FAULTY);
1298                         failed++;
1299                 }
1300         }
1301         sb->nr_disks = nr_disks;
1302         sb->active_disks = active;
1303         sb->working_disks = working;
1304         sb->failed_disks = failed;
1305         sb->spare_disks = spare;
1306
1307         sb->this_disk = sb->disks[rdev->desc_nr];
1308         sb->sb_csum = calc_sb_csum(sb);
1309 }
1310
1311 /*
1312  * rdev_size_change for 0.90.0
1313  */
1314 static unsigned long long
1315 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1316 {
1317         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1318                 return 0; /* component must fit device */
1319         if (rdev->mddev->bitmap_info.offset)
1320                 return 0; /* can't move bitmap */
1321         rdev->sb_start = calc_dev_sboffset(rdev);
1322         if (!num_sectors || num_sectors > rdev->sb_start)
1323                 num_sectors = rdev->sb_start;
1324         /* Limit to 4TB as metadata cannot record more than that.
1325          * 4TB == 2^32 KB, or 2*2^32 sectors.
1326          */
1327         if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1328                 num_sectors = (2ULL << 32) - 2;
1329         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1330                        rdev->sb_page);
1331         md_super_wait(rdev->mddev);
1332         return num_sectors;
1333 }
1334
1335 static int
1336 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1337 {
1338         /* non-zero offset changes not possible with v0.90 */
1339         return new_offset == 0;
1340 }
1341
1342 /*
1343  * version 1 superblock
1344  */
1345
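/* The v1 checksum covers the 256-byte fixed header plus two bytes per
 * device slot (max_dev); an odd trailing 2-byte remainder is folded in
 * as a __le16.
 */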
1346 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1347 {
1348         __le32 disk_csum;
1349         u32 csum;
1350         unsigned long long newcsum;
1351         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1352         __le32 *isuper = (__le32*)sb;
1353
1354         disk_csum = sb->sb_csum;
1355         sb->sb_csum = 0;
1356         newcsum = 0;
1357         for (; size >= 4; size -= 4)
1358                 newcsum += le32_to_cpu(*isuper++);
1359
1360         if (size == 2)
1361                 newcsum += le16_to_cpu(*(__le16*) isuper);
1362
1363         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1364         sb->sb_csum = disk_csum;
1365         return cpu_to_le32(csum);
1366 }
1367
1368 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1369                             int acknowledged);
1370 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1371 {
1372         struct mdp_superblock_1 *sb;
1373         int ret;
1374         sector_t sb_start;
1375         sector_t sectors;
1376         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1377         int bmask;
1378
1379         /*
1380          * Calculate the position of the superblock in 512-byte sectors.
1381          * It is always aligned to a 4K boundary and,
1382          * depending on minor_version, it can be:
1383          * 0: At least 8K, but less than 12K, from end of device
1384          * 1: At start of device
1385          * 2: 4K from start of device.
1386          */
1387         switch(minor_version) {
1388         case 0:
1389                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1390                 sb_start -= 8*2;
1391                 sb_start &= ~(sector_t)(4*2-1);
1392                 break;
1393         case 1:
1394                 sb_start = 0;
1395                 break;
1396         case 2:
1397                 sb_start = 8;
1398                 break;
1399         default:
1400                 return -EINVAL;
1401         }
1402         rdev->sb_start = sb_start;
1403
1404         /* superblock is rarely larger than 1K, but it can be larger,
1405          * and it is safe to read 4k, so we do that
1406          */
1407         ret = read_disk_sb(rdev, 4096);
1408         if (ret) return ret;
1409
1410         sb = page_address(rdev->sb_page);
1411
1412         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1413             sb->major_version != cpu_to_le32(1) ||
1414             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1415             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1416             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1417                 return -EINVAL;
1418
1419         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1420                 printk("md: invalid superblock checksum on %s\n",
1421                         bdevname(rdev->bdev,b));
1422                 return -EINVAL;
1423         }
1424         if (le64_to_cpu(sb->data_size) < 10) {
1425                 printk("md: data_size too small on %s\n",
1426                        bdevname(rdev->bdev,b));
1427                 return -EINVAL;
1428         }
1429         if (sb->pad0 ||
1430             sb->pad3[0] ||
1431             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1432                 /* Some padding is non-zero, might be a new feature */
1433                 return -EINVAL;
1434
1435         rdev->preferred_minor = 0xffff;
1436         rdev->data_offset = le64_to_cpu(sb->data_offset);
1437         rdev->new_data_offset = rdev->data_offset;
1438         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1439             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1440                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1441         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1442
1443         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1444         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1445         if (rdev->sb_size & bmask)
1446                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1447
1448         if (minor_version
1449             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1450                 return -EINVAL;
1451         if (minor_version
1452             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1453                 return -EINVAL;
1454
1455         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1456                 rdev->desc_nr = -1;
1457         else
1458                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1459
1460         if (!rdev->bb_page) {
1461                 rdev->bb_page = alloc_page(GFP_KERNEL);
1462                 if (!rdev->bb_page)
1463                         return -ENOMEM;
1464         }
1465         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1466             rdev->badblocks.count == 0) {
1467                 /* need to load the bad block list.
1468                  * Currently we limit it to one page.
1469                  */
1470                 s32 offset;
1471                 sector_t bb_sector;
1472                 u64 *bbp;
1473                 int i;
1474                 int sectors = le16_to_cpu(sb->bblog_size);
1475                 if (sectors > (PAGE_SIZE / 512))
1476                         return -EINVAL;
1477                 offset = le32_to_cpu(sb->bblog_offset);
1478                 if (offset == 0)
1479                         return -EINVAL;
1480                 bb_sector = (long long)offset;
1481                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1482                                   rdev->bb_page, READ, true))
1483                         return -EIO;
1484                 bbp = (u64 *)page_address(rdev->bb_page);
1485                 rdev->badblocks.shift = sb->bblog_shift;
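                /* Each on-disk entry is a little-endian u64 holding the
                 * start sector in the upper 54 bits and the length in the
                 * low 10 bits, both scaled by bblog_shift; an entry of all
                 * ones terminates the list (hence the bb + 1 == 0 test).
                 */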
1486                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1487                         u64 bb = le64_to_cpu(*bbp);
1488                         int count = bb & (0x3ff);
1489                         u64 sector = bb >> 10;
1490                         sector <<= sb->bblog_shift;
1491                         count <<= sb->bblog_shift;
1492                         if (bb + 1 == 0)
1493                                 break;
1494                         if (md_set_badblocks(&rdev->badblocks,
1495                                              sector, count, 1) == 0)
1496                                 return -EINVAL;
1497                 }
1498         } else if (sb->bblog_offset != 0)
1499                 rdev->badblocks.shift = 0;
1500
1501         if (!refdev) {
1502                 ret = 1;
1503         } else {
1504                 __u64 ev1, ev2;
1505                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1506
1507                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1508                     sb->level != refsb->level ||
1509                     sb->layout != refsb->layout ||
1510                     sb->chunksize != refsb->chunksize) {
1511                         printk(KERN_WARNING "md: %s has strangely different"
1512                                 " superblock to %s\n",
1513                                 bdevname(rdev->bdev,b),
1514                                 bdevname(refdev->bdev,b2));
1515                         return -EINVAL;
1516                 }
1517                 ev1 = le64_to_cpu(sb->events);
1518                 ev2 = le64_to_cpu(refsb->events);
1519
1520                 if (ev1 > ev2)
1521                         ret = 1;
1522                 else
1523                         ret = 0;
1524         }
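        /* Returning 1 tells the caller this superblock is newer than the
         * reference (or that there was no reference) and should become the
         * new reference; returning 0 keeps the current one.
         */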
1525         if (minor_version) {
1526                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1527                 sectors -= rdev->data_offset;
1528         } else
1529                 sectors = rdev->sb_start;
1530         if (sectors < le64_to_cpu(sb->data_size))
1531                 return -EINVAL;
1532         rdev->sectors = le64_to_cpu(sb->data_size);
1533         return ret;
1534 }
1535
1536 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1537 {
1538         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1539         __u64 ev1 = le64_to_cpu(sb->events);
1540
1541         rdev->raid_disk = -1;
1542         clear_bit(Faulty, &rdev->flags);
1543         clear_bit(In_sync, &rdev->flags);
1544         clear_bit(Bitmap_sync, &rdev->flags);
1545         clear_bit(WriteMostly, &rdev->flags);
1546
1547         if (mddev->raid_disks == 0) {
1548                 mddev->major_version = 1;
1549                 mddev->patch_version = 0;
1550                 mddev->external = 0;
1551                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1552                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1553                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1554                 mddev->level = le32_to_cpu(sb->level);
1555                 mddev->clevel[0] = 0;
1556                 mddev->layout = le32_to_cpu(sb->layout);
1557                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1558                 mddev->dev_sectors = le64_to_cpu(sb->size);
1559                 mddev->events = ev1;
1560                 mddev->bitmap_info.offset = 0;
1561                 mddev->bitmap_info.space = 0;
1562                 /* The default bitmap location is 1K after the superblock,
1563                  * using up to 3K - a 4K total.
1564                  */
1565                 mddev->bitmap_info.default_offset = 1024 >> 9;
1566                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1567                 mddev->reshape_backwards = 0;
1568
1569                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1570                 memcpy(mddev->uuid, sb->set_uuid, 16);
1571
1572                 mddev->max_disks =  (4096-256)/2;
1573
1574                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1575                     mddev->bitmap_info.file == NULL) {
1576                         mddev->bitmap_info.offset =
1577                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1578                         /* Metadata doesn't record how much space is available.
1579                          * For 1.0, assume the bitmap may extend up to the
1580                          * superblock if it sits before it, else 4K beyond it.
1581                          * For other versions, assume no change is possible.
1582                          */
1583                         if (mddev->minor_version > 0)
1584                                 mddev->bitmap_info.space = 0;
1585                         else if (mddev->bitmap_info.offset > 0)
1586                                 mddev->bitmap_info.space =
1587                                         8 - mddev->bitmap_info.offset;
1588                         else
1589                                 mddev->bitmap_info.space =
1590                                         -mddev->bitmap_info.offset;
1591                 }
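                /* e.g. with 1.0 metadata and bitmap_offset == 2 (the default
                 * 1K after the superblock), the bitmap may use 8 - 2 = 6
                 * sectors (3K) before reaching the end of the 4K superblock
                 * region (illustrative numbers).
                 */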
1592
1593                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1594                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1595                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1596                         mddev->new_level = le32_to_cpu(sb->new_level);
1597                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1598                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1599                         if (mddev->delta_disks < 0 ||
1600                             (mddev->delta_disks == 0 &&
1601                              (le32_to_cpu(sb->feature_map)
1602                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1603                                 mddev->reshape_backwards = 1;
1604                 } else {
1605                         mddev->reshape_position = MaxSector;
1606                         mddev->delta_disks = 0;
1607                         mddev->new_level = mddev->level;
1608                         mddev->new_layout = mddev->layout;
1609                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1610                 }
1611
1612         } else if (mddev->pers == NULL) {
1613                 /* Insist on a good event counter while assembling, except for
1614                  * spares (which don't need an event count) */
1615                 ++ev1;
1616                 if (rdev->desc_nr >= 0 &&
1617                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1618                     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1619                         if (ev1 < mddev->events)
1620                                 return -EINVAL;
1621         } else if (mddev->bitmap) {
1622                 /* If adding to array with a bitmap, then we can accept an
1623                  * older device, but not too old.
1624                  */
1625                 if (ev1 < mddev->bitmap->events_cleared)
1626                         return 0;
1627                 if (ev1 < mddev->events)
1628                         set_bit(Bitmap_sync, &rdev->flags);
1629         } else {
1630                 if (ev1 < mddev->events)
1631                         /* just a hot-add of a new device, leave raid_disk at -1 */
1632                         return 0;
1633         }
1634         if (mddev->level != LEVEL_MULTIPATH) {
1635                 int role;
1636                 if (rdev->desc_nr < 0 ||
1637                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1638                         role = 0xffff;
1639                         rdev->desc_nr = -1;
1640                 } else
1641                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1642                 switch (role) {
1643                 case 0xffff: /* spare */
1644                         break;
1645                 case 0xfffe: /* faulty */
1646                         set_bit(Faulty, &rdev->flags);
1647                         break;
1648                 default:
1649                         rdev->saved_raid_disk = role;
1650                         if ((le32_to_cpu(sb->feature_map) &
1651                              MD_FEATURE_RECOVERY_OFFSET)) {
1652                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1653                                 if (!(le32_to_cpu(sb->feature_map) &
1654                                       MD_FEATURE_RECOVERY_BITMAP))
1655                                         rdev->saved_raid_disk = -1;
1656                         } else
1657                                 set_bit(In_sync, &rdev->flags);
1658                         rdev->raid_disk = role;
1659                         break;
1660                 }
1661                 if (sb->devflags & WriteMostly1)
1662                         set_bit(WriteMostly, &rdev->flags);
1663                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1664                         set_bit(Replacement, &rdev->flags);
1665         } else /* MULTIPATH are always insync */
1666                 set_bit(In_sync, &rdev->flags);
1667
1668         return 0;
1669 }
1670
1671 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1672 {
1673         struct mdp_superblock_1 *sb;
1674         struct md_rdev *rdev2;
1675         int max_dev, i;
1676         /* make rdev->sb match mddev and rdev data. */
1677
1678         sb = page_address(rdev->sb_page);
1679
1680         sb->feature_map = 0;
1681         sb->pad0 = 0;
1682         sb->recovery_offset = cpu_to_le64(0);
1683         memset(sb->pad3, 0, sizeof(sb->pad3));
1684
1685         sb->utime = cpu_to_le64((__u64)mddev->utime);
1686         sb->events = cpu_to_le64(mddev->events);
1687         if (mddev->in_sync)
1688                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1689         else
1690                 sb->resync_offset = cpu_to_le64(0);
1691
1692         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1693
1694         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1695         sb->size = cpu_to_le64(mddev->dev_sectors);
1696         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1697         sb->level = cpu_to_le32(mddev->level);
1698         sb->layout = cpu_to_le32(mddev->layout);
1699
1700         if (test_bit(WriteMostly, &rdev->flags))
1701                 sb->devflags |= WriteMostly1;
1702         else
1703                 sb->devflags &= ~WriteMostly1;
1704         sb->data_offset = cpu_to_le64(rdev->data_offset);
1705         sb->data_size = cpu_to_le64(rdev->sectors);
1706
1707         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1708                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1709                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1710         }
1711
1712         if (rdev->raid_disk >= 0 &&
1713             !test_bit(In_sync, &rdev->flags)) {
1714                 sb->feature_map |=
1715                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1716                 sb->recovery_offset =
1717                         cpu_to_le64(rdev->recovery_offset);
1718                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1719                         sb->feature_map |=
1720                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1721         }
1722         if (test_bit(Replacement, &rdev->flags))
1723                 sb->feature_map |=
1724                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1725
1726         if (mddev->reshape_position != MaxSector) {
1727                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1728                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1729                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1730                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1731                 sb->new_level = cpu_to_le32(mddev->new_level);
1732                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1733                 if (mddev->delta_disks == 0 &&
1734                     mddev->reshape_backwards)
1735                         sb->feature_map
1736                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1737                 if (rdev->new_data_offset != rdev->data_offset) {
1738                         sb->feature_map
1739                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1740                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1741                                                              - rdev->data_offset));
1742                 }
1743         }
1744
1745         if (rdev->badblocks.count == 0)
1746                 /* Nothing to do for bad blocks */ ;
1747         else if (sb->bblog_offset == 0)
1748                 /* Cannot record bad blocks on this device */
1749                 md_error(mddev, rdev);
1750         else {
1751                 struct badblocks *bb = &rdev->badblocks;
1752                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1753                 u64 *p = bb->page;
1754                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1755                 if (bb->changed) {
1756                         unsigned seq;
1757
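                        /* Copy the list under a seqlock read section; if a
                         * writer raced with us, read_seqretry() fails below
                         * and the whole page is re-encoded.
                         */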
1758 retry:
1759                         seq = read_seqbegin(&bb->lock);
1760
1761                         memset(bbp, 0xff, PAGE_SIZE);
1762
1763                         for (i = 0 ; i < bb->count ; i++) {
1764                                 u64 internal_bb = p[i];
1765                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1766                                                 | BB_LEN(internal_bb));
1767                                 bbp[i] = cpu_to_le64(store_bb);
1768                         }
1769                         bb->changed = 0;
1770                         if (read_seqretry(&bb->lock, seq))
1771                                 goto retry;
1772
1773                         bb->sector = (rdev->sb_start +
1774                                       (int)le32_to_cpu(sb->bblog_offset));
1775                         bb->size = le16_to_cpu(sb->bblog_size);
1776                 }
1777         }
1778
1779         max_dev = 0;
1780         rdev_for_each(rdev2, mddev)
1781                 if (rdev2->desc_nr+1 > max_dev)
1782                         max_dev = rdev2->desc_nr+1;
1783
1784         if (max_dev > le32_to_cpu(sb->max_dev)) {
1785                 int bmask;
1786                 sb->max_dev = cpu_to_le32(max_dev);
1787                 rdev->sb_size = max_dev * 2 + 256;
1788                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1789                 if (rdev->sb_size & bmask)
1790                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1791         } else
1792                 max_dev = le32_to_cpu(sb->max_dev);
1793
1794         for (i = 0; i < max_dev; i++)
1795                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1796
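        /* Role encoding: 0xfffe marks a faulty/unused slot, 0xffff a spare,
         * and any other value is the device's raid slot number.
         */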
1797         rdev_for_each(rdev2, mddev) {
1798                 i = rdev2->desc_nr;
1799                 if (test_bit(Faulty, &rdev2->flags))
1800                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1801                 else if (test_bit(In_sync, &rdev2->flags))
1802                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1803                 else if (rdev2->raid_disk >= 0)
1804                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1805                 else
1806                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1807         }
1808
1809         sb->sb_csum = calc_sb_1_csum(sb);
1810 }
1811
1812 static unsigned long long
1813 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1814 {
1815         struct mdp_superblock_1 *sb;
1816         sector_t max_sectors;
1817         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1818                 return 0; /* component must fit device */
1819         if (rdev->data_offset != rdev->new_data_offset)
1820                 return 0; /* too confusing */
1821         if (rdev->sb_start < rdev->data_offset) {
1822                 /* minor versions 1 and 2; superblock before data */
1823                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1824                 max_sectors -= rdev->data_offset;
1825                 if (!num_sectors || num_sectors > max_sectors)
1826                         num_sectors = max_sectors;
1827         } else if (rdev->mddev->bitmap_info.offset) {
1828                 /* minor version 0 with bitmap we can't move */
1829                 return 0;
1830         } else {
1831                 /* minor version 0; superblock after data */
1832                 sector_t sb_start;
1833                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1834                 sb_start &= ~(sector_t)(4*2 - 1);
1835                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1836                 if (!num_sectors || num_sectors > max_sectors)
1837                         num_sectors = max_sectors;
1838                 rdev->sb_start = sb_start;
1839         }
1840         sb = page_address(rdev->sb_page);
1841         sb->data_size = cpu_to_le64(num_sectors);
1842         sb->super_offset = cpu_to_le64(rdev->sb_start);
1843         sb->sb_csum = calc_sb_1_csum(sb);
1844         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1845                        rdev->sb_page);
1846         md_super_wait(rdev->mddev);
1847         return num_sectors;
1849 }
1850
1851 static int
1852 super_1_allow_new_offset(struct md_rdev *rdev,
1853                          unsigned long long new_offset)
1854 {
1855         /* All necessary checks on new >= old have been done */
1856         struct bitmap *bitmap;
1857         if (new_offset >= rdev->data_offset)
1858                 return 1;
1859
1860         /* with 1.0 metadata, there is no metadata to tread on
1861          * so we can always move back */
1862         if (rdev->mddev->minor_version == 0)
1863                 return 1;
1864
1865         /* otherwise we must be sure not to step on
1866          * any metadata, so stay:
1867          * 36K beyond start of superblock
1868          * beyond end of badblocks
1869          * beyond write-intent bitmap
1870          */
1871         if (rdev->sb_start + (32+4)*2 > new_offset)
1872                 return 0;
1873         bitmap = rdev->mddev->bitmap;
1874         if (bitmap && !rdev->mddev->bitmap_info.file &&
1875             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1876             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1877                 return 0;
1878         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1879                 return 0;
1880
1881         return 1;
1882 }
1883
1884 static struct super_type super_types[] = {
1885         [0] = {
1886                 .name   = "0.90.0",
1887                 .owner  = THIS_MODULE,
1888                 .load_super         = super_90_load,
1889                 .validate_super     = super_90_validate,
1890                 .sync_super         = super_90_sync,
1891                 .rdev_size_change   = super_90_rdev_size_change,
1892                 .allow_new_offset   = super_90_allow_new_offset,
1893         },
1894         [1] = {
1895                 .name   = "md-1",
1896                 .owner  = THIS_MODULE,
1897                 .load_super         = super_1_load,
1898                 .validate_super     = super_1_validate,
1899                 .sync_super         = super_1_sync,
1900                 .rdev_size_change   = super_1_rdev_size_change,
1901                 .allow_new_offset   = super_1_allow_new_offset,
1902         },
1903 };
1904
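/* Dispatch a superblock update: mddev->sync_super lets an external user of
 * the md core (e.g. dm-raid) install its own handler; otherwise the handler
 * registered in super_types[] for the array's metadata version is used.
 */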
1905 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1906 {
1907         if (mddev->sync_super) {
1908                 mddev->sync_super(mddev, rdev);
1909                 return;
1910         }
1911
1912         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1913
1914         super_types[mddev->major_version].sync_super(mddev, rdev);
1915 }
1916
1917 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1918 {
1919         struct md_rdev *rdev, *rdev2;
1920
1921         rcu_read_lock();
1922         rdev_for_each_rcu(rdev, mddev1)
1923                 rdev_for_each_rcu(rdev2, mddev2)
1924                         if (rdev->bdev->bd_contains ==
1925                             rdev2->bdev->bd_contains) {
1926                                 rcu_read_unlock();
1927                                 return 1;
1928                         }
1929         rcu_read_unlock();
1930         return 0;
1931 }
1932
1933 static LIST_HEAD(pending_raid_disks);
1934
1935 /*
1936  * Try to register data integrity profile for an mddev
1937  *
1938  * This is called when an array is started and after a disk has been kicked
1939  * from the array. It only succeeds if all working and active component devices
1940  * are integrity capable with matching profiles.
1941  */
1942 int md_integrity_register(struct mddev *mddev)
1943 {
1944         struct md_rdev *rdev, *reference = NULL;
1945
1946         if (list_empty(&mddev->disks))
1947                 return 0; /* nothing to do */
1948         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1949                 return 0; /* shouldn't register, or already is */
1950         rdev_for_each(rdev, mddev) {
1951                 /* skip spares and non-functional disks */
1952                 if (test_bit(Faulty, &rdev->flags))
1953                         continue;
1954                 if (rdev->raid_disk < 0)
1955                         continue;
1956                 if (!reference) {
1957                         /* Use the first rdev as the reference */
1958                         reference = rdev;
1959                         continue;
1960                 }
1961                 /* does this rdev's profile match the reference profile? */
1962                 if (blk_integrity_compare(reference->bdev->bd_disk,
1963                                 rdev->bdev->bd_disk) < 0)
1964                         return -EINVAL;
1965         }
1966         if (!reference || !bdev_get_integrity(reference->bdev))
1967                 return 0;
1968         /*
1969          * All component devices are integrity capable and have matching
1970          * profiles, register the common profile for the md device.
1971          */
1972         if (blk_integrity_register(mddev->gendisk,
1973                         bdev_get_integrity(reference->bdev)) != 0) {
1974                 printk(KERN_ERR "md: failed to register integrity for %s\n",
1975                         mdname(mddev));
1976                 return -EINVAL;
1977         }
1978         printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1979         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1980                 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
1981                        mdname(mddev));
1982                 return -EINVAL;
1983         }
1984         return 0;
1985 }
1986 EXPORT_SYMBOL(md_integrity_register);
1987
1988 /* Disable data integrity if non-capable/non-matching disk is being added */
1989 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
1990 {
1991         struct blk_integrity *bi_rdev;
1992         struct blk_integrity *bi_mddev;
1993
1994         if (!mddev->gendisk)
1995                 return;
1996
1997         bi_rdev = bdev_get_integrity(rdev->bdev);
1998         bi_mddev = blk_get_integrity(mddev->gendisk);
1999
2000         if (!bi_mddev) /* nothing to do */
2001                 return;
2002         if (rdev->raid_disk < 0) /* skip spares */
2003                 return;
2004         if (bi_rdev && blk_integrity_compare(mddev->gendisk,
2005                                              rdev->bdev->bd_disk) >= 0)
2006                 return;
2007         printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
2008         blk_integrity_unregister(mddev->gendisk);
2009 }
2010 EXPORT_SYMBOL(md_integrity_add_rdev);
2011
2012 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2013 {
2014         char b[BDEVNAME_SIZE];
2015         struct kobject *ko;
2016         char *s;
2017         int err;
2018
2019         /* prevent duplicates */
2020         if (find_rdev(mddev, rdev->bdev->bd_dev))
2021                 return -EEXIST;
2022
2023         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2024         if (rdev->sectors && (mddev->dev_sectors == 0 ||
2025                         rdev->sectors < mddev->dev_sectors)) {
2026                 if (mddev->pers) {
2027                         /* Cannot change size, so fail
2028                          * If mddev->level <= 0, then we don't care
2029                          * about aligning sizes (e.g. linear)
2030                          */
2031                         if (mddev->level > 0)
2032                                 return -ENOSPC;
2033                 } else
2034                         mddev->dev_sectors = rdev->sectors;
2035         }
2036
2037         /* Verify rdev->desc_nr is unique.
2038          * If it is -1, assign a free number, else
2039          * check number is not in use
2040          */
2041         rcu_read_lock();
2042         if (rdev->desc_nr < 0) {
2043                 int choice = 0;
2044                 if (mddev->pers)
2045                         choice = mddev->raid_disks;
2046                 while (find_rdev_nr_rcu(mddev, choice))
2047                         choice++;
2048                 rdev->desc_nr = choice;
2049         } else {
2050                 if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2051                         rcu_read_unlock();
2052                         return -EBUSY;
2053                 }
2054         }
2055         rcu_read_unlock();
2056         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2057                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2058                        mdname(mddev), mddev->max_disks);
2059                 return -EBUSY;
2060         }
2061         bdevname(rdev->bdev,b);
2062         while ((s = strchr(b, '/')) != NULL)
2063                 *s = '!';
2064
2065         rdev->mddev = mddev;
2066         printk(KERN_INFO "md: bind<%s>\n", b);
2067
2068         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2069                 goto fail;
2070
2071         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2072         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2073                 /* failure here is OK */;
2074         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2075
2076         list_add_rcu(&rdev->same_set, &mddev->disks);
2077         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2078
2079         /* May as well allow recovery to be retried once */
2080         mddev->recovery_disabled++;
2081
2082         return 0;
2083
2084  fail:
2085         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2086                b, mdname(mddev));
2087         return err;
2088 }
2089
2090 static void md_delayed_delete(struct work_struct *ws)
2091 {
2092         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2093         kobject_del(&rdev->kobj);
2094         kobject_put(&rdev->kobj);
2095 }
2096
2097 static void unbind_rdev_from_array(struct md_rdev *rdev)
2098 {
2099         char b[BDEVNAME_SIZE];
2100
2101         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2102         list_del_rcu(&rdev->same_set);
2103         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2104         rdev->mddev = NULL;
2105         sysfs_remove_link(&rdev->kobj, "block");
2106         sysfs_put(rdev->sysfs_state);
2107         rdev->sysfs_state = NULL;
2108         rdev->badblocks.count = 0;
2109         /* We need to delay this, otherwise we can deadlock when
2110          * writing 'remove' to "dev/state".  We also need
2111          * to delay it due to rcu usage.
2112          */
2113         synchronize_rcu();
2114         INIT_WORK(&rdev->del_work, md_delayed_delete);
2115         kobject_get(&rdev->kobj);
2116         queue_work(md_misc_wq, &rdev->del_work);
2117 }
2118
2119 /*
2120  * prevent the device from being mounted, repartitioned or
2121  * otherwise reused by a RAID array (or any other kernel
2122  * subsystem), by bd_claiming the device.
2123  */
2124 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2125 {
2126         int err = 0;
2127         struct block_device *bdev;
2128         char b[BDEVNAME_SIZE];
2129
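        /* Claim the device with a holder that identifies md as the owner:
         * the rdev itself for an exclusive claim, or a global cookie (the
         * address of this function) when the device may be shared between
         * arrays.
         */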
2130         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2131                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2132         if (IS_ERR(bdev)) {
2133                 printk(KERN_ERR "md: could not open %s.\n",
2134                         __bdevname(dev, b));
2135                 return PTR_ERR(bdev);
2136         }
2137         rdev->bdev = bdev;
2138         return err;
2139 }
2140
2141 static void unlock_rdev(struct md_rdev *rdev)
2142 {
2143         struct block_device *bdev = rdev->bdev;
2144         rdev->bdev = NULL;
2145         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2146 }
2147
2148 void md_autodetect_dev(dev_t dev);
2149
2150 static void export_rdev(struct md_rdev *rdev)
2151 {
2152         char b[BDEVNAME_SIZE];
2153
2154         printk(KERN_INFO "md: export_rdev(%s)\n",
2155                 bdevname(rdev->bdev,b));
2156         md_rdev_clear(rdev);
2157 #ifndef MODULE
2158         if (test_bit(AutoDetected, &rdev->flags))
2159                 md_autodetect_dev(rdev->bdev->bd_dev);
2160 #endif
2161         unlock_rdev(rdev);
2162         kobject_put(&rdev->kobj);
2163 }
2164
2165 static void kick_rdev_from_array(struct md_rdev *rdev)
2166 {
2167         unbind_rdev_from_array(rdev);
2168         export_rdev(rdev);
2169 }
2170
2171 static void export_array(struct mddev *mddev)
2172 {
2173         struct md_rdev *rdev;
2174
2175         while (!list_empty(&mddev->disks)) {
2176                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2177                                         same_set);
2178                 kick_rdev_from_array(rdev);
2179         }
2180         mddev->raid_disks = 0;
2181         mddev->major_version = 0;
2182 }
2183
2184 static void sync_sbs(struct mddev *mddev, int nospares)
2185 {
2186         /* Update each superblock (in-memory image), but
2187          * if we are allowed to, skip spares which already
2188          * have the right event counter, or have one earlier
2189          * (which would mean they aren't being marked as dirty
2190          * with the rest of the array)
2191          */
2192         struct md_rdev *rdev;
2193         rdev_for_each(rdev, mddev) {
2194                 if (rdev->sb_events == mddev->events ||
2195                     (nospares &&
2196                      rdev->raid_disk < 0 &&
2197                      rdev->sb_events+1 == mddev->events)) {
2198                         /* Don't update this superblock */
2199                         rdev->sb_loaded = 2;
2200                 } else {
2201                         sync_super(mddev, rdev);
2202                         rdev->sb_loaded = 1;
2203                 }
2204         }
2205 }
2206
2207 static void md_update_sb(struct mddev *mddev, int force_change)
2208 {
2209         struct md_rdev *rdev;
2210         int sync_req;
2211         int nospares = 0;
2212         int any_badblocks_changed = 0;
2213
2214         if (mddev->ro) {
2215                 if (force_change)
2216                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2217                 return;
2218         }
2219 repeat:
2220         /* First make sure individual recovery_offsets are correct */
2221         rdev_for_each(rdev, mddev) {
2222                 if (rdev->raid_disk >= 0 &&
2223                     mddev->delta_disks >= 0 &&
2224                     !test_bit(In_sync, &rdev->flags) &&
2225                     mddev->curr_resync_completed > rdev->recovery_offset)
2226                         rdev->recovery_offset = mddev->curr_resync_completed;
2227
2228         }
2229         if (!mddev->persistent) {
2230                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2231                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2232                 if (!mddev->external) {
2233                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2234                         rdev_for_each(rdev, mddev) {
2235                                 if (rdev->badblocks.changed) {
2236                                         rdev->badblocks.changed = 0;
2237                                         md_ack_all_badblocks(&rdev->badblocks);
2238                                         md_error(mddev, rdev);
2239                                 }
2240                                 clear_bit(Blocked, &rdev->flags);
2241                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2242                                 wake_up(&rdev->blocked_wait);
2243                         }
2244                 }
2245                 wake_up(&mddev->sb_wait);
2246                 return;
2247         }
2248
2249         spin_lock(&mddev->lock);
2250
2251         mddev->utime = get_seconds();
2252
2253         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2254                 force_change = 1;
2255         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2256                 /* just a clean <-> dirty transition; possibly leave spares alone,
2257                  * though if 'events' isn't the right even/odd, we will have to
2258                  * update the spares after all
2259                  */
2260                 nospares = 1;
2261         if (force_change)
2262                 nospares = 0;
2263         if (mddev->degraded)
2264                 /* If the array is degraded, then skipping spares is both
2265                  * dangerous and fairly pointless.
2266                  * Dangerous because a device that was removed from the array
2267                  * might have an event count that still looks up-to-date,
2268                  * so it can be re-added without a resync.
2269                  * Pointless because if there are any spares to skip,
2270                  * then a recovery will happen, the array will soon no longer
2271                  * be degraded, and the spares can go back to sleep.
2272                  */
2273                 nospares = 0;
2274
2275         sync_req = mddev->in_sync;
2276
2277         /* If this is just a dirty<->clean transition, and the array is clean
2278          * and 'events' is odd, we can roll back to the previous clean state */
2279         if (nospares
2280             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2281             && mddev->can_decrease_events
2282             && mddev->events != 1) {
2283                 mddev->events--;
2284                 mddev->can_decrease_events = 0;
2285         } else {
2286                 /* otherwise we have to go forward and ... */
2287                 mddev->events++;
2288                 mddev->can_decrease_events = nospares;
2289         }
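        /* e.g. an array that went clean -> dirty -> clean with no other
         * change can step 'events' back from 43 to 42 rather than forcing
         * a superblock rewrite on every spare (illustrative numbers).
         */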
2290
2291         /*
2292          * This 64-bit counter should never wrap.
2293          * Either we are in around ~1 trillion A.C., assuming
2294          * 1 reboot per second, or we have a bug...
2295          */
2296         WARN_ON(mddev->events == 0);
2297
2298         rdev_for_each(rdev, mddev) {
2299                 if (rdev->badblocks.changed)
2300                         any_badblocks_changed++;
2301                 if (test_bit(Faulty, &rdev->flags))
2302                         set_bit(FaultRecorded, &rdev->flags);
2303         }
2304
2305         sync_sbs(mddev, nospares);
2306         spin_unlock(&mddev->lock);
2307
2308         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2309                  mdname(mddev), mddev->in_sync);
2310
2311         bitmap_update_sb(mddev->bitmap);
2312         rdev_for_each(rdev, mddev) {
2313                 char b[BDEVNAME_SIZE];
2314
2315                 if (rdev->sb_loaded != 1)
2316                         continue; /* no noise on spare devices */
2317
2318                 if (!test_bit(Faulty, &rdev->flags)) {
2319                         md_super_write(mddev, rdev,
2320                                        rdev->sb_start, rdev->sb_size,
2321                                        rdev->sb_page);
2322                         pr_debug("md: (write) %s's sb offset: %llu\n",
2323                                  bdevname(rdev->bdev, b),
2324                                  (unsigned long long)rdev->sb_start);
2325                         rdev->sb_events = mddev->events;
2326                         if (rdev->badblocks.size) {
2327                                 md_super_write(mddev, rdev,
2328                                                rdev->badblocks.sector,
2329                                                rdev->badblocks.size << 9,
2330                                                rdev->bb_page);
2331                                 rdev->badblocks.size = 0;
2332                         }
2333
2334                 } else
2335                         pr_debug("md: %s (skipping faulty)\n",
2336                                  bdevname(rdev->bdev, b));
2337
2338                 if (mddev->level == LEVEL_MULTIPATH)
2339                         /* only need to write one superblock... */
2340                         break;
2341         }
2342         md_super_wait(mddev);
2343         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2344
2345         spin_lock(&mddev->lock);
2346         if (mddev->in_sync != sync_req ||
2347             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2348                 /* have to write it out again */
2349                 spin_unlock(&mddev->lock);
2350                 goto repeat;
2351         }
2352         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2353         spin_unlock(&mddev->lock);
2354         wake_up(&mddev->sb_wait);
2355         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2356                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2357
2358         rdev_for_each(rdev, mddev) {
2359                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2360                         clear_bit(Blocked, &rdev->flags);
2361
2362                 if (any_badblocks_changed)
2363                         md_ack_all_badblocks(&rdev->badblocks);
2364                 clear_bit(BlockedBadBlocks, &rdev->flags);
2365                 wake_up(&rdev->blocked_wait);
2366         }
2367 }
2368
2369 /* Words written to sysfs files may, or may not, be \n terminated.
2370  * We want to accept either case. For this we use cmd_match.
2371  */
2372 static int cmd_match(const char *cmd, const char *str)
2373 {
2374         /* See if cmd, written into a sysfs file, matches
2375          * str.  They must either be the same, or cmd can
2376          * have a trailing newline
2377          */
2378         while (*cmd && *str && *cmd == *str) {
2379                 cmd++;
2380                 str++;
2381         }
2382         if (*cmd == '\n')
2383                 cmd++;
2384         if (*str || *cmd)
2385                 return 0;
2386         return 1;
2387 }
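/* e.g. cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both match, while cmd_match("faulty2", "faulty") does not.
 */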
2388
2389 struct rdev_sysfs_entry {
2390         struct attribute attr;
2391         ssize_t (*show)(struct md_rdev *, char *);
2392         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2393 };
2394
2395 static ssize_t
2396 state_show(struct md_rdev *rdev, char *page)
2397 {
2398         char *sep = "";
2399         size_t len = 0;
2400         unsigned long flags = ACCESS_ONCE(rdev->flags);
2401
2402         if (test_bit(Faulty, &flags) ||
2403             rdev->badblocks.unacked_exist) {
2404                 len += sprintf(page+len, "%sfaulty", sep);
2405                 sep = ",";
2406         }
2407         if (test_bit(In_sync, &flags)) {
2408                 len += sprintf(page+len, "%sin_sync", sep);
2409                 sep = ",";
2410         }
2411         if (test_bit(WriteMostly, &flags)) {
2412                 len += sprintf(page+len, "%swrite_mostly", sep);
2413                 sep = ",";
2414         }
2415         if (test_bit(Blocked, &flags) ||
2416             (rdev->badblocks.unacked_exist
2417              && !test_bit(Faulty, &flags))) {
2418                 len += sprintf(page+len, "%sblocked", sep);
2419                 sep = ",";
2420         }
2421         if (!test_bit(Faulty, &flags) &&
2422             !test_bit(In_sync, &flags)) {
2423                 len += sprintf(page+len, "%sspare", sep);
2424                 sep = ",";
2425         }
2426         if (test_bit(WriteErrorSeen, &flags)) {
2427                 len += sprintf(page+len, "%swrite_error", sep);
2428                 sep = ",";
2429         }
2430         if (test_bit(WantReplacement, &flags)) {
2431                 len += sprintf(page+len, "%swant_replacement", sep);
2432                 sep = ",";
2433         }
2434         if (test_bit(Replacement, &flags)) {
2435                 len += sprintf(page+len, "%sreplacement", sep);
2436                 sep = ",";
2437         }
2438
2439         return len+sprintf(page+len, "\n");
2440 }
2441
2442 static ssize_t
2443 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2444 {
2445         /* can write
2446          *  faulty  - simulates an error
2447          *  remove  - disconnects the device
2448          *  writemostly - sets write_mostly
2449          *  -writemostly - clears write_mostly
2450          *  blocked - sets the Blocked flag
2451          *  -blocked - clears the Blocked flag and possibly simulates an error
2452          *  insync - sets In_sync provided the device isn't active
2453          *  -insync - clears In_sync for a device with a slot assigned,
2454          *            so that it gets rebuilt based on the bitmap
2455          *  write_error - sets WriteErrorSeen
2456          *  -write_error - clears WriteErrorSeen
2457          */
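        /* Typical usage from userspace (device names are illustrative):
         *
         *   echo faulty > /sys/block/md0/md/dev-sdb1/state
         *   echo want_replacement > /sys/block/md0/md/dev-sdb1/state
         */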
2458         int err = -EINVAL;
2459         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2460                 md_error(rdev->mddev, rdev);
2461                 if (test_bit(Faulty, &rdev->flags))
2462                         err = 0;
2463                 else
2464                         err = -EBUSY;
2465         } else if (cmd_match(buf, "remove")) {
2466                 if (rdev->raid_disk >= 0)
2467                         err = -EBUSY;
2468                 else {
2469                         struct mddev *mddev = rdev->mddev;
2470                         kick_rdev_from_array(rdev);
2471                         if (mddev->pers)
2472                                 md_update_sb(mddev, 1);
2473                         md_new_event(mddev);
2474                         err = 0;
2475                 }
2476         } else if (cmd_match(buf, "writemostly")) {
2477                 set_bit(WriteMostly, &rdev->flags);
2478                 err = 0;
2479         } else if (cmd_match(buf, "-writemostly")) {
2480                 clear_bit(WriteMostly, &rdev->flags);
2481                 err = 0;
2482         } else if (cmd_match(buf, "blocked")) {
2483                 set_bit(Blocked, &rdev->flags);
2484                 err = 0;
2485         } else if (cmd_match(buf, "-blocked")) {
2486                 if (!test_bit(Faulty, &rdev->flags) &&
2487                     rdev->badblocks.unacked_exist) {
2488                         /* metadata handler doesn't understand badblocks,
2489                          * so we need to fail the device
2490                          */
2491                         md_error(rdev->mddev, rdev);
2492                 }
2493                 clear_bit(Blocked, &rdev->flags);
2494                 clear_bit(BlockedBadBlocks, &rdev->flags);
2495                 wake_up(&rdev->blocked_wait);
2496                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2497                 md_wakeup_thread(rdev->mddev->thread);
2498
2499                 err = 0;
2500         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2501                 set_bit(In_sync, &rdev->flags);
2502                 err = 0;
2503         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
2504                 if (rdev->mddev->pers == NULL) {
2505                         clear_bit(In_sync, &rdev->flags);
2506                         rdev->saved_raid_disk = rdev->raid_disk;
2507                         rdev->raid_disk = -1;
2508                         err = 0;
2509                 }
2510         } else if (cmd_match(buf, "write_error")) {
2511                 set_bit(WriteErrorSeen, &rdev->flags);
2512                 err = 0;
2513         } else if (cmd_match(buf, "-write_error")) {
2514                 clear_bit(WriteErrorSeen, &rdev->flags);
2515                 err = 0;
2516         } else if (cmd_match(buf, "want_replacement")) {
2517                 /* Any non-spare device that is not a replacement can
2518                  * become want_replacement at any time, but we then need to
2519                  * check if recovery is needed.
2520                  */
2521                 if (rdev->raid_disk >= 0 &&
2522                     !test_bit(Replacement, &rdev->flags))
2523                         set_bit(WantReplacement, &rdev->flags);
2524                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2525                 md_wakeup_thread(rdev->mddev->thread);
2526                 err = 0;
2527         } else if (cmd_match(buf, "-want_replacement")) {
2528                 /* Clearing 'want_replacement' is always allowed.
2529                  * Once replacement starts it is too late, though.
2530                  */
2531                 err = 0;
2532                 clear_bit(WantReplacement, &rdev->flags);
2533         } else if (cmd_match(buf, "replacement")) {
2534                 /* Can only set a device as a replacement when array has not
2535                  * yet been started.  Once running, replacement is automatic
2536                  * from spares, or by assigning 'slot'.
2537                  */
2538                 if (rdev->mddev->pers)
2539                         err = -EBUSY;
2540                 else {
2541                         set_bit(Replacement, &rdev->flags);
2542                         err = 0;
2543                 }
2544         } else if (cmd_match(buf, "-replacement")) {
2545                 /* Similarly, can only clear Replacement before start */
2546                 if (rdev->mddev->pers)
2547                         err = -EBUSY;
2548                 else {
2549                         clear_bit(Replacement, &rdev->flags);
2550                         err = 0;
2551                 }
2552         }
2553         if (!err)
2554                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2555         return err ? err : len;
2556 }
2557 static struct rdev_sysfs_entry rdev_state =
2558 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2559
2560 static ssize_t
2561 errors_show(struct md_rdev *rdev, char *page)
2562 {
2563         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2564 }
2565
2566 static ssize_t
2567 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2568 {
2569         char *e;
2570         unsigned long n = simple_strtoul(buf, &e, 10);
2571         if (*buf && (*e == 0 || *e == '\n')) {
2572                 atomic_set(&rdev->corrected_errors, n);
2573                 return len;
2574         }
2575         return -EINVAL;
2576 }
2577 static struct rdev_sysfs_entry rdev_errors =
2578 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2579
2580 static ssize_t
2581 slot_show(struct md_rdev *rdev, char *page)
2582 {
2583         if (rdev->raid_disk < 0)
2584                 return sprintf(page, "none\n");
2585         else
2586                 return sprintf(page, "%d\n", rdev->raid_disk);
2587 }
2588
2589 static ssize_t
2590 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2591 {
2592         char *e;
2593         int err;
2594         int slot = simple_strtoul(buf, &e, 10);
2595         if (strncmp(buf, "none", 4) == 0)
2596                 slot = -1;
2597         else if (e == buf || (*e && *e != '\n'))
2598                 return -EINVAL;
2599         if (rdev->mddev->pers && slot == -1) {
2600                 /* Setting 'slot' on an active array requires also
2601                  * updating the 'rd%d' link, and communicating
2602                  * with the personality with ->hot_*_disk.
2603                  * For now we only support removing
2604                  * failed/spare devices.  This normally happens automatically,
2605                  * but not when the metadata is externally managed.
2606                  */
2607                 if (rdev->raid_disk == -1)
2608                         return -EEXIST;
2609                 /* personality does all needed checks */
2610                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2611                         return -EINVAL;
2612                 clear_bit(Blocked, &rdev->flags);
2613                 remove_and_add_spares(rdev->mddev, rdev);
2614                 if (rdev->raid_disk >= 0)
2615                         return -EBUSY;
2616                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2617                 md_wakeup_thread(rdev->mddev->thread);
2618         } else if (rdev->mddev->pers) {
2619                 /* Activating a spare ... or possibly reactivating
2620                  * it, if we ever get bitmaps working here.
2621                  */
2622
2623                 if (rdev->raid_disk != -1)
2624                         return -EBUSY;
2625
2626                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2627                         return -EBUSY;
2628
2629                 if (rdev->mddev->pers->hot_add_disk == NULL)
2630                         return -EINVAL;
2631
2632                 if (slot >= rdev->mddev->raid_disks &&
2633                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2634                         return -ENOSPC;
2635
2636                 rdev->raid_disk = slot;
2637                 if (test_bit(In_sync, &rdev->flags))
2638                         rdev->saved_raid_disk = slot;
2639                 else
2640                         rdev->saved_raid_disk = -1;
2641                 clear_bit(In_sync, &rdev->flags);
2642                 clear_bit(Bitmap_sync, &rdev->flags);
2643                 err = rdev->mddev->pers->
2644                         hot_add_disk(rdev->mddev, rdev);
2645                 if (err) {
2646                         rdev->raid_disk = -1;
2647                         return err;
2648                 } else
2649                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2650                 if (sysfs_link_rdev(rdev->mddev, rdev))
2651                         /* failure here is OK */;
2652                 /* don't wakeup anyone, leave that to userspace. */
2653         } else {
2654                 if (slot >= rdev->mddev->raid_disks &&
2655                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2656                         return -ENOSPC;
2657                 rdev->raid_disk = slot;
2658                 /* assume it is working */
2659                 clear_bit(Faulty, &rdev->flags);
2660                 clear_bit(WriteMostly, &rdev->flags);
2661                 set_bit(In_sync, &rdev->flags);
2662                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2663         }
2664         return len;
2665 }
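/* Typical usage (device and slot numbers are illustrative):
 *
 *   echo 3 > /sys/block/md0/md/dev-sdc1/slot     (assign raid slot 3)
 *   echo none > /sys/block/md0/md/dev-sdc1/slot  (detach a failed/spare
 *                                                 device)
 */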
2666
2667 static struct rdev_sysfs_entry rdev_slot =
2668 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2669
2670 static ssize_t
2671 offset_show(struct md_rdev *rdev, char *page)
2672 {
2673         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2674 }
2675
2676 static ssize_t
2677 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2678 {
2679         unsigned long long offset;
2680         if (kstrtoull(buf, 10, &offset) < 0)
2681                 return -EINVAL;
2682         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2683                 return -EBUSY;
2684         if (rdev->sectors && rdev->mddev->external)
2685                 /* Must set offset before size, so overlap checks
2686                  * can be sane */
2687                 return -EBUSY;
2688         rdev->data_offset = offset;
2689         rdev->new_data_offset = offset;
2690         return len;
2691 }
2692
2693 static struct rdev_sysfs_entry rdev_offset =
2694 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2695
2696 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2697 {
2698         return sprintf(page, "%llu\n",
2699                        (unsigned long long)rdev->new_data_offset);
2700 }
2701
2702 static ssize_t new_offset_store(struct md_rdev *rdev,
2703                                 const char *buf, size_t len)
2704 {
2705         unsigned long long new_offset;
2706         struct mddev *mddev = rdev->mddev;
2707
2708         if (kstrtoull(buf, 10, &new_offset) < 0)
2709                 return -EINVAL;
2710
2711         if (mddev->sync_thread ||
2712             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2713                 return -EBUSY;
2714         if (new_offset == rdev->data_offset)
2715                 /* reset is always permitted */
2716                 ;
2717         else if (new_offset > rdev->data_offset) {
2718                 /* must not push array size beyond rdev_sectors */
2719                 if (new_offset - rdev->data_offset
2720                     + mddev->dev_sectors > rdev->sectors)
2721                         return -E2BIG;
2722         }
2723         /* Metadata worries about other space details. */
2724
2725         /* decreasing the offset is inconsistent with a backwards
2726          * reshape.
2727          */
2728         if (new_offset < rdev->data_offset &&
2729             mddev->reshape_backwards)
2730                 return -EINVAL;
2731         /* Increasing offset is inconsistent with forwards
2732          * reshape.  reshape_direction should be set to
2733          * 'backwards' first.
2734          */
2735         if (new_offset > rdev->data_offset &&
2736             !mddev->reshape_backwards)
2737                 return -EINVAL;
2738
2739         if (mddev->pers && mddev->persistent &&
2740             !super_types[mddev->major_version]
2741             .allow_new_offset(rdev, new_offset))
2742                 return -E2BIG;
2743         rdev->new_data_offset = new_offset;
2744         if (new_offset > rdev->data_offset)
2745                 mddev->reshape_backwards = 1;
2746         else if (new_offset < rdev->data_offset)
2747                 mddev->reshape_backwards = 0;
2748
2749         return len;
2750 }
2751 static struct rdev_sysfs_entry rdev_new_offset =
2752 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2753
2754 static ssize_t
2755 rdev_size_show(struct md_rdev *rdev, char *page)
2756 {
2757         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2758 }
2759
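/*
 * Helper for the overlap check in rdev_size_store() below: returns 1 when
 * the extents [s1, s1+l1) and [s2, s2+l2) intersect.  For example,
 * overlaps(0, 100, 50, 100) is 1, while overlaps(0, 50, 50, 100) is 0.
 */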
2760 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2761 {
2762         /* check if two start/length pairs overlap */
2763         if (s1+l1 <= s2)
2764                 return 0;
2765         if (s2+l2 <= s1)
2766                 return 0;
2767         return 1;
2768 }
2769
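/*
 * Parse a block count (1K blocks, as used throughout md) and convert it
 * to 512-byte sectors, rejecting values whose doubling would overflow
 * either unsigned long long or a (possibly 32-bit) sector_t.
 * For example, "1024" yields 2048 sectors.
 */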
2770 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2771 {
2772         unsigned long long blocks;
2773         sector_t new;
2774
2775         if (kstrtoull(buf, 10, &blocks) < 0)
2776                 return -EINVAL;
2777
2778         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2779                 return -EINVAL; /* sector conversion overflow */
2780
2781         new = blocks * 2;
2782         if (new != blocks * 2)
2783                 return -EINVAL; /* unsigned long long to sector_t overflow */
2784
2785         *sectors = new;
2786         return 0;
2787 }
2788
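/*
 * Set the size (in 1K blocks) of the data area of this device.  Writing
 * 0 to an active non-persistent array means "use everything after
 * data_offset"; for externally-managed arrays a best-effort scan guards
 * against two rdevs on the same bdev overlapping.
 */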
2789 static ssize_t
2790 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2791 {
2792         struct mddev *my_mddev = rdev->mddev;
2793         sector_t oldsectors = rdev->sectors;
2794         sector_t sectors;
2795
2796         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2797                 return -EINVAL;
2798         if (rdev->data_offset != rdev->new_data_offset)
2799                 return -EINVAL; /* too confusing */
2800         if (my_mddev->pers && rdev->raid_disk >= 0) {
2801                 if (my_mddev->persistent) {
2802                         sectors = super_types[my_mddev->major_version].
2803                                 rdev_size_change(rdev, sectors);
2804                         if (!sectors)
2805                                 return -EBUSY;
2806                 } else if (!sectors)
2807                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2808                                 rdev->data_offset;
2809                 if (!my_mddev->pers->resize)
2810                         /* Cannot change size for RAID0 or Linear etc */
2811                         return -EINVAL;
2812         }
2813         if (sectors < my_mddev->dev_sectors)
2814                 return -EINVAL; /* component must fit device */
2815
2816         rdev->sectors = sectors;
2817         if (sectors > oldsectors && my_mddev->external) {
2818                 /* Need to check that all other rdevs with the same
2819                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2820                  * the rdev lists safely.
2821                  * This check does not provide a hard guarantee, it
2822                  * just helps avoid dangerous mistakes.
2823                  */
2824                 struct mddev *mddev;
2825                 int overlap = 0;
2826                 struct list_head *tmp;
2827
2828                 rcu_read_lock();
2829                 for_each_mddev(mddev, tmp) {
2830                         struct md_rdev *rdev2;
2831
2832                         rdev_for_each(rdev2, mddev)
2833                                 if (rdev->bdev == rdev2->bdev &&
2834                                     rdev != rdev2 &&
2835                                     overlaps(rdev->data_offset, rdev->sectors,
2836                                              rdev2->data_offset,
2837                                              rdev2->sectors)) {
2838                                         overlap = 1;
2839                                         break;
2840                                 }
2841                         if (overlap) {
2842                                 mddev_put(mddev);
2843                                 break;
2844                         }
2845                 }
2846                 rcu_read_unlock();
2847                 if (overlap) {
2848                         /* Someone else could have slipped in a size
2849                          * change here, but doing so is just silly.
2850                          * We put oldsectors back because we *know* it is
2851                          * safe, and trust userspace not to race with
2852                          * itself
2853                          */
2854                         rdev->sectors = oldsectors;
2855                         return -EBUSY;
2856                 }
2857         }
2858         return len;
2859 }
2860
2861 static struct rdev_sysfs_entry rdev_size =
2862 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2863
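/*
 * recovery_start records how far recovery of this device has progressed.
 * It reads as "none" while the device is fully in-sync; writing "none"
 * marks the device In_sync, while any other value clears In_sync and
 * sets the sector from which recovery should resume.
 */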
2864 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
2865 {
2866         unsigned long long recovery_start = rdev->recovery_offset;
2867
2868         if (test_bit(In_sync, &rdev->flags) ||
2869             recovery_start == MaxSector)
2870                 return sprintf(page, "none\n");
2871
2872         return sprintf(page, "%llu\n", recovery_start);
2873 }
2874
2875 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
2876 {
2877         unsigned long long recovery_start;
2878
2879         if (cmd_match(buf, "none"))
2880                 recovery_start = MaxSector;
2881         else if (kstrtoull(buf, 10, &recovery_start))
2882                 return -EINVAL;
2883
2884         if (rdev->mddev->pers &&
2885             rdev->raid_disk >= 0)
2886                 return -EBUSY;
2887
2888         rdev->recovery_offset = recovery_start;
2889         if (recovery_start == MaxSector)
2890                 set_bit(In_sync, &rdev->flags);
2891         else
2892                 clear_bit(In_sync, &rdev->flags);
2893         return len;
2894 }
2895
2896 static struct rdev_sysfs_entry rdev_recovery_start =
2897 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2898
2899 static ssize_t
2900 badblocks_show(struct badblocks *bb, char *page, int unack);
2901 static ssize_t
2902 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
2903
2904 static ssize_t bb_show(struct md_rdev *rdev, char *page)
2905 {
2906         return badblocks_show(&rdev->badblocks, page, 0);
2907 }
2908 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
2909 {
2910         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
2911         /* Maybe that ack was all we needed */
2912         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
2913                 wake_up(&rdev->blocked_wait);
2914         return rv;
2915 }
2916 static struct rdev_sysfs_entry rdev_bad_blocks =
2917 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
2918
2919 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
2920 {
2921         return badblocks_show(&rdev->badblocks, page, 1);
2922 }
2923 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
2924 {
2925         return badblocks_store(&rdev->badblocks, page, len, 1);
2926 }
2927 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
2928 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
2929
2930 static struct attribute *rdev_default_attrs[] = {
2931         &rdev_state.attr,
2932         &rdev_errors.attr,
2933         &rdev_slot.attr,
2934         &rdev_offset.attr,
2935         &rdev_new_offset.attr,
2936         &rdev_size.attr,
2937         &rdev_recovery_start.attr,
2938         &rdev_bad_blocks.attr,
2939         &rdev_unack_bad_blocks.attr,
2940         NULL,
2941 };
2942 static ssize_t
2943 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2944 {
2945         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2946         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
2947
2948         if (!entry->show)
2949                 return -EIO;
2950         if (!rdev->mddev)
2951                 return -EBUSY;
2952         return entry->show(rdev, page);
2953 }
2954
2955 static ssize_t
2956 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2957               const char *page, size_t length)
2958 {
2959         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2960         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
2961         ssize_t rv;
2962         struct mddev *mddev = rdev->mddev;
2963
2964         if (!entry->store)
2965                 return -EIO;
2966         if (!capable(CAP_SYS_ADMIN))
2967                 return -EACCES;
2968         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2969         if (!rv) {
2970                 if (rdev->mddev == NULL)
2971                         rv = -EBUSY;
2972                 else
2973                         rv = entry->store(rdev, page, length);
2974                 mddev_unlock(mddev);
2975         }
2976         return rv;
2977 }
2978
2979 static void rdev_free(struct kobject *ko)
2980 {
2981         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
2982         kfree(rdev);
2983 }
2984 static const struct sysfs_ops rdev_sysfs_ops = {
2985         .show           = rdev_attr_show,
2986         .store          = rdev_attr_store,
2987 };
2988 static struct kobj_type rdev_ktype = {
2989         .release        = rdev_free,
2990         .sysfs_ops      = &rdev_sysfs_ops,
2991         .default_attrs  = rdev_default_attrs,
2992 };
2993
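/*
 * Initialise the generic fields of an md_rdev and allocate the page
 * used to cache its bad-block list.  Returns 0, or -ENOMEM if that
 * allocation fails.
 */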
2994 int md_rdev_init(struct md_rdev *rdev)
2995 {
2996         rdev->desc_nr = -1;
2997         rdev->saved_raid_disk = -1;
2998         rdev->raid_disk = -1;
2999         rdev->flags = 0;
3000         rdev->data_offset = 0;
3001         rdev->new_data_offset = 0;
3002         rdev->sb_events = 0;
3003         rdev->last_read_error.tv_sec  = 0;
3004         rdev->last_read_error.tv_nsec = 0;
3005         rdev->sb_loaded = 0;
3006         rdev->bb_page = NULL;
3007         atomic_set(&rdev->nr_pending, 0);
3008         atomic_set(&rdev->read_errors, 0);
3009         atomic_set(&rdev->corrected_errors, 0);
3010
3011         INIT_LIST_HEAD(&rdev->same_set);
3012         init_waitqueue_head(&rdev->blocked_wait);
3013
3014         /* Add space to store bad block list.
3015          * This reserves the space even on arrays where it cannot
3016          * be used - I wonder if that matters
3017          */
3018         rdev->badblocks.count = 0;
3019         rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
3020         rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
3021         seqlock_init(&rdev->badblocks.lock);
3022         if (rdev->badblocks.page == NULL)
3023                 return -ENOMEM;
3024
3025         return 0;
3026 }
3027 EXPORT_SYMBOL_GPL(md_rdev_init);
3028 /*
3029  * Import a device. If 'super_format' >= 0, then sanity check the superblock.
3030  *
3031  * Mark the device faulty if:
3032  *
3033  *   - the device is nonexistent (zero size)
3034  *   - the device has no valid superblock
3035  *
3036  * A faulty rdev _never_ has rdev->sb set.
3037  */
3038 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3039 {
3040         char b[BDEVNAME_SIZE];
3041         int err;
3042         struct md_rdev *rdev;
3043         sector_t size;
3044
3045         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3046         if (!rdev) {
3047                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3048                 return ERR_PTR(-ENOMEM);
3049         }
3050
3051         err = md_rdev_init(rdev);
3052         if (err)
3053                 goto abort_free;
3054         err = alloc_disk_sb(rdev);
3055         if (err)
3056                 goto abort_free;
3057
3058         err = lock_rdev(rdev, newdev, super_format == -2);
3059         if (err)
3060                 goto abort_free;
3061
3062         kobject_init(&rdev->kobj, &rdev_ktype);
3063
3064         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3065         if (!size) {
3066                 printk(KERN_WARNING
3067                         "md: %s has zero or unknown size, marking faulty!\n",
3068                         bdevname(rdev->bdev,b));
3069                 err = -EINVAL;
3070                 goto abort_free;
3071         }
3072
3073         if (super_format >= 0) {
3074                 err = super_types[super_format].
3075                         load_super(rdev, NULL, super_minor);
3076                 if (err == -EINVAL) {
3077                         printk(KERN_WARNING
3078                                 "md: %s does not have a valid v%d.%d "
3079                                "superblock, not importing!\n",
3080                                 bdevname(rdev->bdev,b),
3081                                super_format, super_minor);
3082                         goto abort_free;
3083                 }
3084                 if (err < 0) {
3085                         printk(KERN_WARNING
3086                                 "md: could not read %s's sb, not importing!\n",
3087                                 bdevname(rdev->bdev,b));
3088                         goto abort_free;
3089                 }
3090         }
3091
3092         return rdev;
3093
3094 abort_free:
3095         if (rdev->bdev)
3096                 unlock_rdev(rdev);
3097         md_rdev_clear(rdev);
3098         kfree(rdev);
3099         return ERR_PTR(err);
3100 }
3101
3102 /*
3103  * Check a full RAID array for plausibility
3104  */
3105
3106 static void analyze_sbs(struct mddev *mddev)
3107 {
3108         int i;
3109         struct md_rdev *rdev, *freshest, *tmp;
3110         char b[BDEVNAME_SIZE];
3111
3112         freshest = NULL;
3113         rdev_for_each_safe(rdev, tmp, mddev)
3114                 switch (super_types[mddev->major_version].
3115                         load_super(rdev, freshest, mddev->minor_version)) {
3116                 case 1:
3117                         freshest = rdev;
3118                         break;
3119                 case 0:
3120                         break;
3121                 default:
3122                         printk(KERN_ERR
3123                                 "md: fatal superblock inconsistency in %s"
3124                                 " -- removing from array\n",
3125                                 bdevname(rdev->bdev,b));
3126                         kick_rdev_from_array(rdev);
3127                 }
3128
3129         super_types[mddev->major_version].
3130                 validate_super(mddev, freshest);
3131
3132         i = 0;
3133         rdev_for_each_safe(rdev, tmp, mddev) {
3134                 if (mddev->max_disks &&
3135                     (rdev->desc_nr >= mddev->max_disks ||
3136                      i > mddev->max_disks)) {
3137                         printk(KERN_WARNING
3138                                "md: %s: %s: only %d devices permitted\n",
3139                                mdname(mddev), bdevname(rdev->bdev, b),
3140                                mddev->max_disks);
3141                         kick_rdev_from_array(rdev);
3142                         continue;
3143                 }
3144                 if (rdev != freshest)
3145                         if (super_types[mddev->major_version].
3146                             validate_super(mddev, rdev)) {
3147                                 printk(KERN_WARNING "md: kicking non-fresh %s"
3148                                         " from array!\n",
3149                                         bdevname(rdev->bdev,b));
3150                                 kick_rdev_from_array(rdev);
3151                                 continue;
3152                         }
3153                 if (mddev->level == LEVEL_MULTIPATH) {
3154                         rdev->desc_nr = i++;
3155                         rdev->raid_disk = rdev->desc_nr;
3156                         set_bit(In_sync, &rdev->flags);
3157                 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
3158                         rdev->raid_disk = -1;
3159                         clear_bit(In_sync, &rdev->flags);
3160                 }
3161         }
3162 }
3163
3164 /* Read a fixed-point number.
3165  * Numbers in sysfs attributes should be in "standard" units where
3166  * possible, so time should be in seconds.
3167  * However we internally use a much smaller unit such as
3168  * milliseconds or jiffies.
3169  * This function takes a decimal number with a possible fractional
3170  * component, and produces an integer which is the result of
3171  * multiplying that number by 10^'scale',
3172  * all without any floating-point arithmetic.
3173  */
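/*
 * For example, strict_strtoul_scaled("1.5", &res, 3) stores 1500 in
 * *res, and strict_strtoul_scaled("20", &res, 3) stores 20000.
 */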
3174 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3175 {
3176         unsigned long result = 0;
3177         long decimals = -1;
3178         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3179                 if (*cp == '.')
3180                         decimals = 0;
3181                 else if (decimals < scale) {
3182                         unsigned int value;
3183                         value = *cp - '0';
3184                         result = result * 10 + value;
3185                         if (decimals >= 0)
3186                                 decimals++;
3187                 }
3188                 cp++;
3189         }
3190         if (*cp == '\n')
3191                 cp++;
3192         if (*cp)
3193                 return -EINVAL;
3194         if (decimals < 0)
3195                 decimals = 0;
3196         while (decimals < scale) {
3197                 result *= 10;
3198                 decimals++;
3199         }
3200         *res = result;
3201         return 0;
3202 }
3203
3204 static void md_safemode_timeout(unsigned long data);
3205
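/*
 * safe_mode_delay is presented in seconds with millisecond resolution
 * but stored internally in jiffies.  For example, writing "0.200"
 * requests a 200ms delay before an idle array is marked clean.
 */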
3206 static ssize_t
3207 safe_delay_show(struct mddev *mddev, char *page)
3208 {
3209         int msec = (mddev->safemode_delay*1000)/HZ;
3210         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3211 }
3212 static ssize_t
3213 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3214 {
3215         unsigned long msec;
3216
3217         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3218                 return -EINVAL;
3219         if (msec == 0)
3220                 mddev->safemode_delay = 0;
3221         else {
3222                 unsigned long old_delay = mddev->safemode_delay;
3223                 unsigned long new_delay = (msec*HZ)/1000;
3224
3225                 if (new_delay == 0)
3226                         new_delay = 1;
3227                 mddev->safemode_delay = new_delay;
3228                 if (new_delay < old_delay || old_delay == 0)
3229                         mod_timer(&mddev->safemode_timer, jiffies+1);
3230         }
3231         return len;
3232 }
3233 static struct md_sysfs_entry md_safe_delay =
3234 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
3235
3236 static ssize_t
3237 level_show(struct mddev *mddev, char *page)
3238 {
3239         struct md_personality *p;
3240         int ret;
3241         spin_lock(&mddev->lock);
3242         p = mddev->pers;
3243         if (p)
3244                 ret = sprintf(page, "%s\n", p->name);
3245         else if (mddev->clevel[0])
3246                 ret = sprintf(page, "%s\n", mddev->clevel);
3247         else if (mddev->level != LEVEL_NONE)
3248                 ret = sprintf(page, "%d\n", mddev->level);
3249         else
3250                 ret = 0;
3251         spin_unlock(&mddev->lock);
3252         return ret;
3253 }
3254
3255 static ssize_t
3256 level_store(struct mddev *mddev, const char *buf, size_t len)
3257 {
3258         char clevel[16];
3259         ssize_t rv;
3260         size_t slen = len;
3261         struct md_personality *pers, *oldpers;
3262         long level;
3263         void *priv, *oldpriv;
3264         struct md_rdev *rdev;
3265
3266         if (slen == 0 || slen >= sizeof(clevel))
3267                 return -EINVAL;
3268
3269         rv = mddev_lock(mddev);
3270         if (rv)
3271                 return rv;
3272
3273         if (mddev->pers == NULL) {
3274                 strncpy(mddev->clevel, buf, slen);
3275                 if (mddev->clevel[slen-1] == '\n')
3276                         slen--;
3277                 mddev->clevel[slen] = 0;
3278                 mddev->level = LEVEL_NONE;
3279                 rv = len;
3280                 goto out_unlock;
3281         }
3282         rv = -EROFS;
3283         if (mddev->ro)
3284                 goto out_unlock;
3285
3286         /* request to change the personality.  Need to ensure:
3287          *  - array is not engaged in resync/recovery/reshape
3288          *  - old personality can be suspended
3289  *  - new personality can take over the array.
3290          */
3291
3292         rv = -EBUSY;
3293         if (mddev->sync_thread ||
3294             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3295             mddev->reshape_position != MaxSector ||
3296             mddev->sysfs_active)
3297                 goto out_unlock;
3298
3299         rv = -EINVAL;
3300         if (!mddev->pers->quiesce) {
3301                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3302                        mdname(mddev), mddev->pers->name);
3303                 goto out_unlock;
3304         }
3305
3306         /* Now find the new personality */
3307         strncpy(clevel, buf, slen);
3308         if (clevel[slen-1] == '\n')
3309                 slen--;
3310         clevel[slen] = 0;
3311         if (kstrtol(clevel, 10, &level))
3312                 level = LEVEL_NONE;
3313
3314         if (request_module("md-%s", clevel) != 0)
3315                 request_module("md-level-%s", clevel);
3316         spin_lock(&pers_lock);
3317         pers = find_pers(level, clevel);
3318         if (!pers || !try_module_get(pers->owner)) {
3319                 spin_unlock(&pers_lock);
3320                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3321                 rv = -EINVAL;
3322                 goto out_unlock;
3323         }
3324         spin_unlock(&pers_lock);
3325
3326         if (pers == mddev->pers) {
3327                 /* Nothing to do! */
3328                 module_put(pers->owner);
3329                 rv = len;
3330                 goto out_unlock;
3331         }
3332         if (!pers->takeover) {
3333                 module_put(pers->owner);
3334                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3335                        mdname(mddev), clevel);
3336                 rv = -EINVAL;
3337                 goto out_unlock;
3338         }
3339
3340         rdev_for_each(rdev, mddev)
3341                 rdev->new_raid_disk = rdev->raid_disk;
3342
3343         /* ->takeover must set new_* and/or delta_disks
3344          * if it succeeds, and may set them when it fails.
3345          */
3346         priv = pers->takeover(mddev);
3347         if (IS_ERR(priv)) {
3348                 mddev->new_level = mddev->level;
3349                 mddev->new_layout = mddev->layout;
3350                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3351                 mddev->raid_disks -= mddev->delta_disks;
3352                 mddev->delta_disks = 0;
3353                 mddev->reshape_backwards = 0;
3354                 module_put(pers->owner);
3355                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3356                        mdname(mddev), clevel);
3357                 rv = PTR_ERR(priv);
3358                 goto out_unlock;
3359         }
3360
3361         /* Looks like we have a winner */
3362         mddev_suspend(mddev);
3363         mddev_detach(mddev);
3364
3365         spin_lock(&mddev->lock);
3366         oldpers = mddev->pers;
3367         oldpriv = mddev->private;
3368         mddev->pers = pers;
3369         mddev->private = priv;
3370         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3371         mddev->level = mddev->new_level;
3372         mddev->layout = mddev->new_layout;
3373         mddev->chunk_sectors = mddev->new_chunk_sectors;
3374         mddev->delta_disks = 0;
3375         mddev->reshape_backwards = 0;
3376         mddev->degraded = 0;
3377         spin_unlock(&mddev->lock);
3378
3379         if (oldpers->sync_request == NULL &&
3380             mddev->external) {
3381                 /* We are converting from a no-redundancy array
3382                  * to a redundancy array and metadata is managed
3383                  * externally so we need to be sure that writes
3384                  * won't block due to a need to transition
3385                  *      clean->dirty
3386                  * until external management is started.
3387                  */
3388                 mddev->in_sync = 0;
3389                 mddev->safemode_delay = 0;
3390                 mddev->safemode = 0;
3391         }
3392
3393         oldpers->free(mddev, oldpriv);
3394
3395         if (oldpers->sync_request == NULL &&
3396             pers->sync_request != NULL) {
3397                 /* need to add the md_redundancy_group */
3398                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3399                         printk(KERN_WARNING
3400                                "md: cannot register extra attributes for %s\n",
3401                                mdname(mddev));
3402                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3403         }
3404         if (oldpers->sync_request != NULL &&
3405             pers->sync_request == NULL) {
3406                 /* need to remove the md_redundancy_group */
3407                 if (mddev->to_remove == NULL)
3408                         mddev->to_remove = &md_redundancy_group;
3409         }
3410
3411         rdev_for_each(rdev, mddev) {
3412                 if (rdev->raid_disk < 0)
3413                         continue;
3414                 if (rdev->new_raid_disk >= mddev->raid_disks)
3415                         rdev->new_raid_disk = -1;
3416                 if (rdev->new_raid_disk == rdev->raid_disk)
3417                         continue;
3418                 sysfs_unlink_rdev(mddev, rdev);
3419         }
3420         rdev_for_each(rdev, mddev) {
3421                 if (rdev->raid_disk < 0)
3422                         continue;
3423                 if (rdev->new_raid_disk == rdev->raid_disk)
3424                         continue;
3425                 rdev->raid_disk = rdev->new_raid_disk;
3426                 if (rdev->raid_disk < 0)
3427                         clear_bit(In_sync, &rdev->flags);
3428                 else {
3429                         if (sysfs_link_rdev(mddev, rdev))
3430                                 printk(KERN_WARNING "md: cannot register rd%d"
3431                                        " for %s after level change\n",
3432                                        rdev->raid_disk, mdname(mddev));
3433                 }
3434         }
3435
3436         if (pers->sync_request == NULL) {
3437                 /* this is now an array without redundancy, so
3438                  * it must always be in_sync
3439                  */
3440                 mddev->in_sync = 1;
3441                 del_timer_sync(&mddev->safemode_timer);
3442         }
3443         blk_set_stacking_limits(&mddev->queue->limits);
3444         pers->run(mddev);
3445         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3446         mddev_resume(mddev);
3447         if (!mddev->thread)
3448                 md_update_sb(mddev, 1);
3449         sysfs_notify(&mddev->kobj, NULL, "level");
3450         md_new_event(mddev);
3451         rv = len;
3452 out_unlock:
3453         mddev_unlock(mddev);
3454         return rv;
3455 }
3456
3457 static struct md_sysfs_entry md_level =
3458 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
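/*
 * For example (assuming the target personality supports takeover of
 * the current geometry):
 *     echo raid5 > /sys/block/md0/md/level
 * converts a running array to RAID5; the write fails if the new
 * personality's ->takeover() rejects the array.
 */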
3459
3460 static ssize_t
3461 layout_show(struct mddev *mddev, char *page)
3462 {
3463         /* just a number, not meaningful for all levels */
3464         if (mddev->reshape_position != MaxSector &&
3465             mddev->layout != mddev->new_layout)
3466                 return sprintf(page, "%d (%d)\n",
3467                                mddev->new_layout, mddev->layout);
3468         return sprintf(page, "%d\n", mddev->layout);
3469 }
3470
3471 static ssize_t
3472 layout_store(struct mddev *mddev, const char *buf, size_t len)
3473 {
3474         char *e;
3475         unsigned long n = simple_strtoul(buf, &e, 10);
3476         int err;
3477
3478         if (!*buf || (*e && *e != '\n'))
3479                 return -EINVAL;
3480         err = mddev_lock(mddev);
3481         if (err)
3482                 return err;
3483
3484         if (mddev->pers) {
3485                 if (mddev->pers->check_reshape == NULL)
3486                         err = -EBUSY;
3487                 else if (mddev->ro)
3488                         err = -EROFS;
3489                 else {
3490                         mddev->new_layout = n;
3491                         err = mddev->pers->check_reshape(mddev);
3492                         if (err)
3493                                 mddev->new_layout = mddev->layout;
3494                 }
3495         } else {
3496                 mddev->new_layout = n;
3497                 if (mddev->reshape_position == MaxSector)
3498                         mddev->layout = n;
3499         }
3500         mddev_unlock(mddev);
3501         return err ?: len;
3502 }
3503 static struct md_sysfs_entry md_layout =
3504 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3505
3506 static ssize_t
3507 raid_disks_show(struct mddev *mddev, char *page)
3508 {
3509         if (mddev->raid_disks == 0)
3510                 return 0;
3511         if (mddev->reshape_position != MaxSector &&
3512             mddev->delta_disks != 0)
3513                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3514                                mddev->raid_disks - mddev->delta_disks);
3515         return sprintf(page, "%d\n", mddev->raid_disks);
3516 }
3517
3518 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3519
3520 static ssize_t
3521 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3522 {
3523         char *e;
3524         int err;
3525         unsigned long n = simple_strtoul(buf, &e, 10);
3526
3527         if (!*buf || (*e && *e != '\n'))
3528                 return -EINVAL;
3529
3530         err = mddev_lock(mddev);
3531         if (err)
3532                 return err;
3533         if (mddev->pers)
3534                 err = update_raid_disks(mddev, n);
3535         else if (mddev->reshape_position != MaxSector) {
3536                 struct md_rdev *rdev;
3537                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3538
3539                 err = -EINVAL;
3540                 rdev_for_each(rdev, mddev) {
3541                         if (olddisks < n &&
3542                             rdev->data_offset < rdev->new_data_offset)
3543                                 goto out_unlock;
3544                         if (olddisks > n &&
3545                             rdev->data_offset > rdev->new_data_offset)
3546                                 goto out_unlock;
3547                 }
3548                 err = 0;
3549                 mddev->delta_disks = n - olddisks;
3550                 mddev->raid_disks = n;
3551                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3552         } else
3553                 mddev->raid_disks = n;
3554 out_unlock:
3555         mddev_unlock(mddev);
3556         return err ? err : len;
3557 }
3558 static struct md_sysfs_entry md_raid_disks =
3559 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3560
3561 static ssize_t
3562 chunk_size_show(struct mddev *mddev, char *page)
3563 {
3564         if (mddev->reshape_position != MaxSector &&
3565             mddev->chunk_sectors != mddev->new_chunk_sectors)
3566                 return sprintf(page, "%d (%d)\n",
3567                                mddev->new_chunk_sectors << 9,
3568                                mddev->chunk_sectors << 9);
3569         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3570 }
3571
3572 static ssize_t
3573 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3574 {
3575         int err;
3576         char *e;
3577         unsigned long n = simple_strtoul(buf, &e, 10);
3578
3579         if (!*buf || (*e && *e != '\n'))
3580                 return -EINVAL;
3581
3582         err = mddev_lock(mddev);
3583         if (err)
3584                 return err;
3585         if (mddev->pers) {
3586                 if (mddev->pers->check_reshape == NULL)
3587                         err = -EBUSY;
3588                 else if (mddev->ro)
3589                         err = -EROFS;
3590                 else {
3591                         mddev->new_chunk_sectors = n >> 9;
3592                         err = mddev->pers->check_reshape(mddev);
3593                         if (err)
3594                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3595                 }
3596         } else {
3597                 mddev->new_chunk_sectors = n >> 9;
3598                 if (mddev->reshape_position == MaxSector)
3599                         mddev->chunk_sectors = n >> 9;
3600         }
3601         mddev_unlock(mddev);
3602         return err ?: len;
3603 }
3604 static struct md_sysfs_entry md_chunk_size =
3605 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3606
3607 static ssize_t
3608 resync_start_show(struct mddev *mddev, char *page)
3609 {
3610         if (mddev->recovery_cp == MaxSector)
3611                 return sprintf(page, "none\n");
3612         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3613 }
3614
3615 static ssize_t
3616 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3617 {
3618         int err;
3619         char *e;
3620         unsigned long long n = simple_strtoull(buf, &e, 10);
3621
3622         err = mddev_lock(mddev);
3623         if (err)
3624                 return err;
3625         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3626                 err = -EBUSY;
3627         else if (cmd_match(buf, "none"))
3628                 n = MaxSector;
3629         else if (!*buf || (*e && *e != '\n'))
3630                 err = -EINVAL;
3631
3632         if (!err) {
3633                 mddev->recovery_cp = n;
3634                 if (mddev->pers)
3635                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3636         }
3637         mddev_unlock(mddev);
3638         return err ?: len;
3639 }
3640 static struct md_sysfs_entry md_resync_start =
3641 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3642                 resync_start_show, resync_start_store);
3643
3644 /*
3645  * The array state can be:
3646  *
3647  * clear
3648  *     No devices, no size, no level
3649  *     Equivalent to STOP_ARRAY ioctl
3650  * inactive
3651  *     May have some settings, but array is not active
3652  *     All IO results in error
3653  *     When written, doesn't tear down array, but just stops it
3654  * suspended (not supported yet)
3655  *     All IO requests will block. The array can be reconfigured.
3656  *     Writing this, if accepted, will block until array is quiescent
3657  * readonly
3658  *     no resync can happen.  no superblocks get written.
3659  *     write requests fail
3660  * read-auto
3661  *     like readonly, but behaves like 'clean' on a write request.
3662  *
3663  * clean - no pending writes, but otherwise active.
3664  *     When written to inactive array, starts without resync
3665  *     If a write request arrives then
3666  *       if metadata is known, mark 'dirty' and switch to 'active'.
3667  *       if not known, block and switch to write-pending
3668  *     If written to an active array that has pending writes, then fails.
3669  * active
3670  *     fully active: IO and resync can be happening.
3671  *     When written to inactive array, starts with resync
3672  *
3673  * write-pending
3674  *     clean, but writes are blocked waiting for 'active' to be written.
3675  *
3676  * active-idle
3677  *     like active, but no writes have been seen for a while (100msec).
3678  *
3679  */
3680 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3681                    write_pending, active_idle, bad_word};
3682 static char *array_states[] = {
3683         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3684         "write-pending", "active-idle", NULL };
3685
3686 static int match_word(const char *word, char **list)
3687 {
3688         int n;
3689         for (n=0; list[n]; n++)
3690                 if (cmd_match(word, list[n]))
3691                         break;
3692         return n;
3693 }
3694
3695 static ssize_t
3696 array_state_show(struct mddev *mddev, char *page)
3697 {
3698         enum array_state st = inactive;
3699
3700         if (mddev->pers)
3701                 switch(mddev->ro) {
3702                 case 1:
3703                         st = readonly;
3704                         break;
3705                 case 2:
3706                         st = read_auto;
3707                         break;
3708                 case 0:
3709                         if (mddev->in_sync)
3710                                 st = clean;
3711                         else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3712                                 st = write_pending;
3713                         else if (mddev->safemode)
3714                                 st = active_idle;
3715                         else
3716                                 st = active;
3717                 }
3718         else {
3719                 if (list_empty(&mddev->disks) &&
3720                     mddev->raid_disks == 0 &&
3721                     mddev->dev_sectors == 0)
3722                         st = clear;
3723                 else
3724                         st = inactive;
3725         }
3726         return sprintf(page, "%s\n", array_states[st]);
3727 }
3728
3729 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3730 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3731 static int do_md_run(struct mddev *mddev);
3732 static int restart_array(struct mddev *mddev);
3733
3734 static ssize_t
3735 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3736 {
3737         int err;
3738         enum array_state st = match_word(buf, array_states);
3739
3740         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3741                 /* don't take reconfig_mutex when toggling between
3742                  * clean and active
3743                  */
3744                 spin_lock(&mddev->lock);
3745                 if (st == active) {
3746                         restart_array(mddev);
3747                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3748                         wake_up(&mddev->sb_wait);
3749                         err = 0;
3750                 } else /* st == clean */ {
3751                         restart_array(mddev);
3752                         if (atomic_read(&mddev->writes_pending) == 0) {
3753                                 if (mddev->in_sync == 0) {
3754                                         mddev->in_sync = 1;
3755                                         if (mddev->safemode == 1)
3756                                                 mddev->safemode = 0;
3757                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3758                                 }
3759                                 err = 0;
3760                         } else
3761                                 err = -EBUSY;
3762                 }
3763                 spin_unlock(&mddev->lock);
3764                 return err;
3765         }
3766         err = mddev_lock(mddev);
3767         if (err)
3768                 return err;
3769         err = -EINVAL;
3770         switch(st) {
3771         case bad_word:
3772                 break;
3773         case clear:
3774                 /* stopping an active array */
3775                 err = do_md_stop(mddev, 0, NULL);
3776                 break;
3777         case inactive:
3778                 /* stopping an active array */
3779                 if (mddev->pers)
3780                         err = do_md_stop(mddev, 2, NULL);
3781                 else
3782                         err = 0; /* already inactive */
3783                 break;
3784         case suspended:
3785                 break; /* not supported yet */
3786         case readonly:
3787                 if (mddev->pers)
3788                         err = md_set_readonly(mddev, NULL);
3789                 else {
3790                         mddev->ro = 1;
3791                         set_disk_ro(mddev->gendisk, 1);
3792                         err = do_md_run(mddev);
3793                 }
3794                 break;
3795         case read_auto:
3796                 if (mddev->pers) {
3797                         if (mddev->ro == 0)
3798                                 err = md_set_readonly(mddev, NULL);
3799                         else if (mddev->ro == 1)
3800                                 err = restart_array(mddev);
3801                         if (err == 0) {
3802                                 mddev->ro = 2;
3803                                 set_disk_ro(mddev->gendisk, 0);
3804                         }
3805                 } else {
3806                         mddev->ro = 2;
3807                         err = do_md_run(mddev);
3808                 }
3809                 break;
3810         case clean:
3811                 if (mddev->pers) {
3812                         restart_array(mddev);
3813                         spin_lock(&mddev->lock);
3814                         if (atomic_read(&mddev->writes_pending) == 0) {
3815                                 if (mddev->in_sync == 0) {
3816                                         mddev->in_sync = 1;
3817                                         if (mddev->safemode == 1)
3818                                                 mddev->safemode = 0;
3819                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3820                                 }
3821                                 err = 0;
3822                         } else
3823                                 err = -EBUSY;
3824                         spin_unlock(&mddev->lock);
3825                 } else
3826                         err = -EINVAL;
3827                 break;
3828         case active:
3829                 if (mddev->pers) {
3830                         restart_array(mddev);
3831                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3832                         wake_up(&mddev->sb_wait);
3833                         err = 0;
3834                 } else {
3835                         mddev->ro = 0;
3836                         set_disk_ro(mddev->gendisk, 0);
3837                         err = do_md_run(mddev);
3838                 }
3839                 break;
3840         case write_pending:
3841         case active_idle:
3842                 /* these cannot be set */
3843                 break;
3844         }
3845
3846         if (!err) {
3847                 if (mddev->hold_active == UNTIL_IOCTL)
3848                         mddev->hold_active = 0;
3849                 sysfs_notify_dirent_safe(mddev->sysfs_state);
3850         }
3851         mddev_unlock(mddev);
3852         return err ?: len;
3853 }
3854 static struct md_sysfs_entry md_array_state =
3855 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
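/*
 * For example, "echo readonly > /sys/block/md0/md/array_state" makes a
 * running array read-only, and a later "echo active" lets writes
 * proceed again.
 */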
3856
3857 static ssize_t
3858 max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
3859         return sprintf(page, "%d\n",
3860                        atomic_read(&mddev->max_corr_read_errors));
3861 }
3862
3863 static ssize_t
3864 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
3865 {
3866         char *e;
3867         unsigned long n = simple_strtoul(buf, &e, 10);
3868
3869         if (*buf && (*e == 0 || *e == '\n')) {
3870                 atomic_set(&mddev->max_corr_read_errors, n);
3871                 return len;
3872         }
3873         return -EINVAL;
3874 }
3875
3876 static struct md_sysfs_entry max_corr_read_errors =
3877 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3878         max_corrected_read_errors_store);
3879
3880 static ssize_t
3881 null_show(struct mddev *mddev, char *page)
3882 {
3883         return -EINVAL;
3884 }
3885
3886 static ssize_t
3887 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
3888 {
3889         /* buf must be %d:%d with an optional trailing newline, giving major and minor numbers */
3890         /* The new device is added to the array.
3891          * If the array has a persistent superblock, we read the
3892          * superblock to initialise info and check validity.
3893          * Otherwise, only checking done is that in bind_rdev_to_array,
3894          * which mainly checks size.
3895          */
3896         char *e;
3897         int major = simple_strtoul(buf, &e, 10);
3898         int minor;
3899         dev_t dev;
3900         struct md_rdev *rdev;
3901         int err;
3902
3903         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3904                 return -EINVAL;
3905         minor = simple_strtoul(e+1, &e, 10);
3906         if (*e && *e != '\n')
3907                 return -EINVAL;
3908         dev = MKDEV(major, minor);
3909         if (major != MAJOR(dev) ||
3910             minor != MINOR(dev))
3911                 return -EOVERFLOW;
3912
3913         flush_workqueue(md_misc_wq);
3914
3915         err = mddev_lock(mddev);
3916         if (err)
3917                 return err;
3918         if (mddev->persistent) {
3919                 rdev = md_import_device(dev, mddev->major_version,
3920                                         mddev->minor_version);
3921                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3922                         struct md_rdev *rdev0
3923                                 = list_entry(mddev->disks.next,
3924                                              struct md_rdev, same_set);
3925                         err = super_types[mddev->major_version]
3926                                 .load_super(rdev, rdev0, mddev->minor_version);
3927                         if (err < 0)
3928                                 goto out;
3929                 }
3930         } else if (mddev->external)
3931                 rdev = md_import_device(dev, -2, -1);
3932         else
3933                 rdev = md_import_device(dev, -1, -1);
3934
3935         if (IS_ERR(rdev)) {
                     mddev_unlock(mddev);
3936                 return PTR_ERR(rdev);
             }
3937         err = bind_rdev_to_array(rdev, mddev);
3938  out:
3939         if (err)
3940                 export_rdev(rdev);
3941         mddev_unlock(mddev);
3942         return err ? err : len;
3943 }
3944
3945 static struct md_sysfs_entry md_new_device =
3946 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
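/*
 * For example, "echo 8:16 > /sys/block/md0/md/new_dev" binds the block
 * device with major 8, minor 16 (typically /dev/sdb) to the array.
 */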
3947
3948 static ssize_t
3949 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
3950 {
3951         char *end;
3952         unsigned long chunk, end_chunk;
3953         int err;
3954
3955         err = mddev_lock(mddev);
3956         if (err)
3957                 return err;
3958         if (!mddev->bitmap)
3959                 goto out;
3960         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3961         while (*buf) {
3962                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3963                 if (buf == end) break;
3964                 if (*end == '-') { /* range */
3965                         buf = end + 1;
3966                         end_chunk = simple_strtoul(buf, &end, 0);
3967                         if (buf == end) break;
3968                 }
3969                 if (*end && !isspace(*end)) break;
3970                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3971                 buf = skip_spaces(end);
3972         }
3973         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3974 out:
3975         mddev_unlock(mddev);
3976         return len;
3977 }
3978
3979 static struct md_sysfs_entry md_bitmap =
3980 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
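/*
 * For example, "echo 0-15 > bitmap_set_bits" marks bitmap chunks 0
 * through 15 dirty, so the corresponding regions will be resynced.
 */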
3981
3982 static ssize_t
3983 size_show(struct mddev *mddev, char *page)
3984 {
3985         return sprintf(page, "%llu\n",
3986                 (unsigned long long)mddev->dev_sectors / 2);
3987 }
3988
3989 static int update_size(struct mddev *mddev, sector_t num_sectors);
3990
3991 static ssize_t
3992 size_store(struct mddev *mddev, const char *buf, size_t len)
3993 {
3994         /* If array is inactive, we can reduce the component size, but
3995          * not increase it (except from 0).
3996          * If array is active, we can try an on-line resize
3997          */
3998         sector_t sectors;
3999         int err = strict_blocks_to_sectors(buf, &sectors);
4000
4001         if (err < 0)
4002                 return err;
4003         err = mddev_lock(mddev);
4004         if (err)
4005                 return err;
4006         if (mddev->pers) {
4007                 err = update_size(mddev, sectors);
4008                 md_update_sb(mddev, 1);
4009         } else {
4010                 if (mddev->dev_sectors == 0 ||
4011                     mddev->dev_sectors > sectors)
4012                         mddev->dev_sectors = sectors;
4013                 else
4014                         err = -ENOSPC;
4015         }
4016         mddev_unlock(mddev);
4017         return err ? err : len;
4018 }
4019
4020 static struct md_sysfs_entry md_size =
4021 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4022
4023 /* Metadata version.
4024  * This is one of
4025  *   'none' for arrays with no metadata (good luck...)
4026  *   'external' for arrays with externally managed metadata,
4027  * or N.M for internally known formats
4028  */
4029 static ssize_t
4030 metadata_show(struct mddev *mddev, char *page)
4031 {
4032         if (mddev->persistent)
4033                 return sprintf(page, "%d.%d\n",
4034                                mddev->major_version, mddev->minor_version);
4035         else if (mddev->external)
4036                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4037         else
4038                 return sprintf(page, "none\n");
4039 }
4040
4041 static ssize_t
4042 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4043 {
4044         int major, minor;
4045         char *e;
4046         int err;
4047         /* Changing the details of 'external' metadata is
4048          * always permitted.  Otherwise there must be
4049          * no devices attached to the array.
4050          */
4051
4052         err = mddev_lock(mddev);
4053         if (err)
4054                 return err;
4055         err = -EBUSY;
4056         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4057                 ;
4058         else if (!list_empty(&mddev->disks))
4059                 goto out_unlock;
4060
4061         err = 0;
4062         if (cmd_match(buf, "none")) {
4063                 mddev->persistent = 0;
4064                 mddev->external = 0;
4065                 mddev->major_version = 0;
4066                 mddev->minor_version = 90;
4067                 goto out_unlock;
4068         }
4069         if (strncmp(buf, "external:", 9) == 0) {
4070                 size_t namelen = len-9;
4071                 if (namelen >= sizeof(mddev->metadata_type))
4072                         namelen = sizeof(mddev->metadata_type)-1;
4073                 strncpy(mddev->metadata_type, buf+9, namelen);
4074                 mddev->metadata_type[namelen] = 0;
4075                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4076                         mddev->metadata_type[--namelen] = 0;
4077                 mddev->persistent = 0;
4078                 mddev->external = 1;
4079                 mddev->major_version = 0;
4080                 mddev->minor_version = 90;
4081                 goto out_unlock;
4082         }
4083         major = simple_strtoul(buf, &e, 10);
4084         err = -EINVAL;
4085         if (e==buf || *e != '.')
4086                 goto out_unlock;
4087         buf = e+1;
4088         minor = simple_strtoul(buf, &e, 10);
4089         if (e==buf || (*e && *e != '\n') )
4090                 goto out_unlock;
4091         err = -ENOENT;
4092         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4093                 goto out_unlock;
4094         mddev->major_version = major;
4095         mddev->minor_version = minor;
4096         mddev->persistent = 1;
4097         mddev->external = 0;
4098         err = 0;
4099 out_unlock:
4100         mddev_unlock(mddev);
4101         return err ?: len;
4102 }
4103
4104 static struct md_sysfs_entry md_metadata =
4105 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
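/*
 * For example, "echo 1.2 > metadata_version" selects v1.2 superblocks
 * for an array with no devices yet, while "echo external:imsm" marks
 * the metadata as externally managed.
 */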
4106
4107 static ssize_t
4108 action_show(struct mddev *mddev, char *page)
4109 {
4110         char *type = "idle";
4111         unsigned long recovery = mddev->recovery;
4112         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4113                 type = "frozen";
4114         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4115                  (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4116                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4117                         type = "reshape";
4118                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4119                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4120                                 type = "resync";
4121                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4122                                 type = "check";
4123                         else
4124                                 type = "repair";
4125                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4126                         type = "recover";
4127         }
4128         return sprintf(page, "%s\n", type);
4129 }
4130
4131 static ssize_t
4132 action_store(struct mddev *mddev, const char *page, size_t len)
4133 {
4134         if (!mddev->pers || !mddev->pers->sync_request)
4135                 return -EINVAL;
4136
4137         if (cmd_match(page, "frozen"))
4138                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4139         else
4140                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4141
4142         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4143                 flush_workqueue(md_misc_wq);
4144                 if (mddev->sync_thread) {
4145                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4146                         if (mddev_lock(mddev) == 0) {
4147                                 md_reap_sync_thread(mddev);
4148                                 mddev_unlock(mddev);
4149                         }
4150                 }
4151         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4152                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4153                 return -EBUSY;
4154         else if (cmd_match(page, "resync"))
4155                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4156         else if (cmd_match(page, "recover")) {
4157                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4158                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4159         } else if (cmd_match(page, "reshape")) {
4160                 int err;
4161                 if (mddev->pers->start_reshape == NULL)
4162                         return -EINVAL;
4163                 err = mddev_lock(mddev);
4164                 if (!err) {
4165                         err = mddev->pers->start_reshape(mddev);
4166                         mddev_unlock(mddev);
4167                 }
4168                 if (err)
4169                         return err;
4170                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4171         } else {
4172                 if (cmd_match(page, "check"))
4173                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4174                 else if (!cmd_match(page, "repair"))
4175                         return -EINVAL;
4176                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4177                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4178         }
4179         if (mddev->ro == 2) {
4180                 /* A write to sync_action is enough to justify
4181                  * canceling read-auto mode
4182                  */
4183                 mddev->ro = 0;
4184                 md_wakeup_thread(mddev->sync_thread);
4185         }
4186         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4187         md_wakeup_thread(mddev->thread);
4188         sysfs_notify_dirent_safe(mddev->sysfs_action);
4189         return len;
4190 }
4191
4192 static struct md_sysfs_entry md_scan_mode =
4193 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
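/*
 * Illustrative sysfs usage (the device name "md0" is only an example):
 *
 *     echo check > /sys/block/md0/md/sync_action    # start a scrub
 *     cat /sys/block/md0/md/sync_action             # -> "check"
 *     echo idle  > /sys/block/md0/md/sync_action    # interrupt it
 */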
4194
4195 static ssize_t
4196 last_sync_action_show(struct mddev *mddev, char *page)
4197 {
4198         return sprintf(page, "%s\n", mddev->last_sync_action);
4199 }
4200
4201 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4202
4203 static ssize_t
4204 mismatch_cnt_show(struct mddev *mddev, char *page)
4205 {
4206         return sprintf(page, "%llu\n",
4207                        (unsigned long long)
4208                        atomic64_read(&mddev->resync_mismatches));
4209 }
4210
4211 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4212
4213 static ssize_t
4214 sync_min_show(struct mddev *mddev, char *page)
4215 {
4216         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4217                        mddev->sync_speed_min ? "local": "system");
4218 }
4219
4220 static ssize_t
4221 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4222 {
4223         int min;
4224         char *e;
4225         if (strncmp(buf, "system", 6) == 0) {
4226                 mddev->sync_speed_min = 0;
4227                 return len;
4228         }
4229         min = simple_strtoul(buf, &e, 10);
4230         if (buf == e || (*e && *e != '\n') || min <= 0)
4231                 return -EINVAL;
4232         mddev->sync_speed_min = min;
4233         return len;
4234 }
4235
4236 static struct md_sysfs_entry md_sync_min =
4237 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4238
4239 static ssize_t
4240 sync_max_show(struct mddev *mddev, char *page)
4241 {
4242         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4243                        mddev->sync_speed_max ? "local": "system");
4244 }
4245
4246 static ssize_t
4247 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4248 {
4249         int max;
4250         char *e;
4251         if (strncmp(buf, "system", 6) == 0) {
4252                 mddev->sync_speed_max = 0;
4253                 return len;
4254         }
4255         max = simple_strtoul(buf, &e, 10);
4256         if (buf == e || (*e && *e != '\n') || max <= 0)
4257                 return -EINVAL;
4258         mddev->sync_speed_max = max;
4259         return len;
4260 }
4261
4262 static struct md_sysfs_entry md_sync_max =
4263 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
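/*
 * Illustrative: the per-array limits above override the system-wide
 * /proc/sys/dev/raid/speed_limit_{min,max} until "system" is written
 * back (paths again assume an array named md0):
 *
 *     echo 50000  > /sys/block/md0/md/sync_speed_min   # KB/sec, local
 *     echo system > /sys/block/md0/md/sync_speed_min   # revert to default
 */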
4264
4265 static ssize_t
4266 degraded_show(struct mddev *mddev, char *page)
4267 {
4268         return sprintf(page, "%d\n", mddev->degraded);
4269 }
4270 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4271
4272 static ssize_t
4273 sync_force_parallel_show(struct mddev *mddev, char *page)
4274 {
4275         return sprintf(page, "%d\n", mddev->parallel_resync);
4276 }
4277
4278 static ssize_t
4279 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4280 {
4281         long n;
4282
4283         if (kstrtol(buf, 10, &n))
4284                 return -EINVAL;
4285
4286         if (n != 0 && n != 1)
4287                 return -EINVAL;
4288
4289         mddev->parallel_resync = n;
4290
4291         if (mddev->sync_thread)
4292                 wake_up(&resync_wait);
4293
4294         return len;
4295 }
4296
4297 /* force parallel resync, even with shared block devices */
4298 static struct md_sysfs_entry md_sync_force_parallel =
4299 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4300        sync_force_parallel_show, sync_force_parallel_store);
4301
4302 static ssize_t
4303 sync_speed_show(struct mddev *mddev, char *page)
4304 {
4305         unsigned long resync, dt, db;
4306         if (mddev->curr_resync == 0)
4307                 return sprintf(page, "none\n");
4308         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4309         dt = (jiffies - mddev->resync_mark) / HZ;
4310         if (!dt) dt++;
4311         db = resync - mddev->resync_mark_cnt;
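        /* db is a delta in 512-byte sectors over dt seconds; /2 gives KB/sec */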
4312         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4313 }
4314
4315 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4316
4317 static ssize_t
4318 sync_completed_show(struct mddev *mddev, char *page)
4319 {
4320         unsigned long long max_sectors, resync;
4321
4322         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4323                 return sprintf(page, "none\n");
4324
4325         if (mddev->curr_resync == 1 ||
4326             mddev->curr_resync == 2)
4327                 return sprintf(page, "delayed\n");
4328
4329         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4330             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4331                 max_sectors = mddev->resync_max_sectors;
4332         else
4333                 max_sectors = mddev->dev_sectors;
4334
4335         resync = mddev->curr_resync_completed;
4336         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4337 }
4338
4339 static struct md_sysfs_entry md_sync_completed =
4340         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
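/*
 * Illustrative read (both values are sector counts; "md0" is an example):
 *
 *     cat /sys/block/md0/md/sync_completed   # e.g. "12345 / 488378368"
 */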
4341
4342 static ssize_t
4343 min_sync_show(struct mddev *mddev, char *page)
4344 {
4345         return sprintf(page, "%llu\n",
4346                        (unsigned long long)mddev->resync_min);
4347 }
4348 static ssize_t
4349 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4350 {
4351         unsigned long long min;
4352         int err;
4353         int chunk;
4354
4355         if (kstrtoull(buf, 10, &min))
4356                 return -EINVAL;
4357
4358         spin_lock(&mddev->lock);
4359         err = -EINVAL;
4360         if (min > mddev->resync_max)
4361                 goto out_unlock;
4362
4363         err = -EBUSY;
4364         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4365                 goto out_unlock;
4366
4367         /* Must be a multiple of chunk_size */
4368         chunk = mddev->chunk_sectors;
4369         if (chunk) {
4370                 sector_t temp = min;
4371
4372                 err = -EINVAL;
4373                 if (sector_div(temp, chunk))
4374                         goto out_unlock;
4375         }
4376         mddev->resync_min = min;
4377         err = 0;
4378
4379 out_unlock:
4380         spin_unlock(&mddev->lock);
4381         return err ?: len;
4382 }
4383
4384 static struct md_sysfs_entry md_min_sync =
4385 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4386
4387 static ssize_t
4388 max_sync_show(struct mddev *mddev, char *page)
4389 {
4390         if (mddev->resync_max == MaxSector)
4391                 return sprintf(page, "max\n");
4392         else
4393                 return sprintf(page, "%llu\n",
4394                                (unsigned long long)mddev->resync_max);
4395 }
4396 static ssize_t
4397 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4398 {
4399         int err;
4400         spin_lock(&mddev->lock);
4401         if (strncmp(buf, "max", 3) == 0)
4402                 mddev->resync_max = MaxSector;
4403         else {
4404                 unsigned long long max;
4405                 int chunk;
4406
4407                 err = -EINVAL;
4408                 if (kstrtoull(buf, 10, &max))
4409                         goto out_unlock;
4410                 if (max < mddev->resync_min)
4411                         goto out_unlock;
4412
4413                 err = -EBUSY;
4414                 if (max < mddev->resync_max &&
4415                     mddev->ro == 0 &&
4416                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4417                         goto out_unlock;
4418
4419                 /* Must be a multiple of chunk_size */
4420                 chunk = mddev->chunk_sectors;
4421                 if (chunk) {
4422                         sector_t temp = max;
4423
4424                         err = -EINVAL;
4425                         if (sector_div(temp, chunk))
4426                                 goto out_unlock;
4427                 }
4428                 mddev->resync_max = max;
4429         }
4430         wake_up(&mddev->recovery_wait);
4431         err = 0;
4432 out_unlock:
4433         spin_unlock(&mddev->lock);
4434         return err ?: len;
4435 }
4436
4437 static struct md_sysfs_entry md_max_sync =
4438 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
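/*
 * Illustrative: sync_min/sync_max bound the next resync or check to a
 * sector window (values must be chunk-aligned, as enforced above):
 *
 *     echo 0        > /sys/block/md0/md/sync_min
 *     echo 20971520 > /sys/block/md0/md/sync_max   # first 10 GiB
 *     echo check    > /sys/block/md0/md/sync_action
 */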
4439
4440 static ssize_t
4441 suspend_lo_show(struct mddev *mddev, char *page)
4442 {
4443         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4444 }
4445
4446 static ssize_t
4447 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4448 {
4449         char *e;
4450         unsigned long long new = simple_strtoull(buf, &e, 10);
4451         unsigned long long old;
4452         int err;
4453
4454         if (buf == e || (*e && *e != '\n'))
4455                 return -EINVAL;
4456
4457         err = mddev_lock(mddev);
4458         if (err)
4459                 return err;
4460         err = -EINVAL;
4461         if (mddev->pers == NULL ||
4462             mddev->pers->quiesce == NULL)
4463                 goto unlock;
4464         old = mddev->suspend_lo;
4465         mddev->suspend_lo = new;
4466         if (new >= old)
4467                 /* Shrinking suspended region */
4468                 mddev->pers->quiesce(mddev, 2);
4469         else {
4470                 /* Expanding suspended region - need to wait */
4471                 mddev->pers->quiesce(mddev, 1);
4472                 mddev->pers->quiesce(mddev, 0);
4473         }
4474         err = 0;
4475 unlock:
4476         mddev_unlock(mddev);
4477         return err ?: len;
4478 }
4479 static struct md_sysfs_entry md_suspend_lo =
4480 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4481
4482 static ssize_t
4483 suspend_hi_show(struct mddev *mddev, char *page)
4484 {
4485         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4486 }
4487
4488 static ssize_t
4489 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4490 {
4491         char *e;
4492         unsigned long long new = simple_strtoull(buf, &e, 10);
4493         unsigned long long old;
4494         int err;
4495
4496         if (buf == e || (*e && *e != '\n'))
4497                 return -EINVAL;
4498
4499         err = mddev_lock(mddev);
4500         if (err)
4501                 return err;
4502         err = -EINVAL;
4503         if (mddev->pers == NULL ||
4504             mddev->pers->quiesce == NULL)
4505                 goto unlock;
4506         old = mddev->suspend_hi;
4507         mddev->suspend_hi = new;
4508         if (new <= old)
4509                 /* Shrinking suspended region */
4510                 mddev->pers->quiesce(mddev, 2);
4511         else {
4512                 /* Expanding suspended region - need to wait */
4513                 mddev->pers->quiesce(mddev, 1);
4514                 mddev->pers->quiesce(mddev, 0);
4515         }
4516         err = 0;
4517 unlock:
4518         mddev_unlock(mddev);
4519         return err ?: len;
4520 }
4521 static struct md_sysfs_entry md_suspend_hi =
4522 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
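/*
 * suspend_lo/suspend_hi delimit a sector range of the array in which
 * I/O is temporarily blocked; the stores above use pers->quiesce() to
 * drain in-flight requests whenever the region grows.  Userspace
 * (e.g. mdadm) relies on this while relocating data during a reshape.
 */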
4523
4524 static ssize_t
4525 reshape_position_show(struct mddev *mddev, char *page)
4526 {
4527         if (mddev->reshape_position != MaxSector)
4528                 return sprintf(page, "%llu\n",
4529                                (unsigned long long)mddev->reshape_position);
4530         strcpy(page, "none\n");
4531         return 5; /* strlen("none\n") */
4532 }
4533
4534 static ssize_t
4535 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4536 {
4537         struct md_rdev *rdev;
4538         char *e;
4539         int err;
4540         unsigned long long new = simple_strtoull(buf, &e, 10);
4541
4542         if (buf == e || (*e && *e != '\n'))
4543                 return -EINVAL;
4544         err = mddev_lock(mddev);
4545         if (err)
4546                 return err;
4547         err = -EBUSY;
4548         if (mddev->pers)
4549                 goto unlock;
4550         mddev->reshape_position = new;
4551         mddev->delta_disks = 0;
4552         mddev->reshape_backwards = 0;
4553         mddev->new_level = mddev->level;
4554         mddev->new_layout = mddev->layout;
4555         mddev->new_chunk_sectors = mddev->chunk_sectors;
4556         rdev_for_each(rdev, mddev)
4557                 rdev->new_data_offset = rdev->data_offset;
4558         err = 0;
4559 unlock:
4560         mddev_unlock(mddev);
4561         return err ?: len;
4562 }
4563
4564 static struct md_sysfs_entry md_reshape_position =
4565 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4566        reshape_position_store);
4567
4568 static ssize_t
4569 reshape_direction_show(struct mddev *mddev, char *page)
4570 {
4571         return sprintf(page, "%s\n",
4572                        mddev->reshape_backwards ? "backwards" : "forwards");
4573 }
4574
4575 static ssize_t
4576 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4577 {
4578         int backwards = 0;
4579         int err;
4580
4581         if (cmd_match(buf, "forwards"))
4582                 backwards = 0;
4583         else if (cmd_match(buf, "backwards"))
4584                 backwards = 1;
4585         else
4586                 return -EINVAL;
4587         if (mddev->reshape_backwards == backwards)
4588                 return len;
4589
4590         err = mddev_lock(mddev);
4591         if (err)
4592                 return err;
4593         /* check if we are allowed to change */
4594         if (mddev->delta_disks)
4595                 err = -EBUSY;
4596         else if (mddev->persistent &&
4597             mddev->major_version == 0)
4598                 err =  -EINVAL;
4599         else
4600                 mddev->reshape_backwards = backwards;
4601         mddev_unlock(mddev);
4602         return err ?: len;
4603 }
4604
4605 static struct md_sysfs_entry md_reshape_direction =
4606 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4607        reshape_direction_store);
4608
4609 static ssize_t
4610 array_size_show(struct mddev *mddev, char *page)
4611 {
4612         if (mddev->external_size)
4613                 return sprintf(page, "%llu\n",
4614                                (unsigned long long)mddev->array_sectors/2);
4615         else
4616                 return sprintf(page, "default\n");
4617 }
4618
4619 static ssize_t
4620 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4621 {
4622         sector_t sectors;
4623         int err;
4624
4625         err = mddev_lock(mddev);
4626         if (err)
4627                 return err;
4628
4629         if (strncmp(buf, "default", 7) == 0) {
4630                 if (mddev->pers)
4631                         sectors = mddev->pers->size(mddev, 0, 0);
4632                 else
4633                         sectors = mddev->array_sectors;
4634
4635                 mddev->external_size = 0;
4636         } else {
4637                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4638                         err = -EINVAL;
4639                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4640                         err = -E2BIG;
4641                 else
4642                         mddev->external_size = 1;
4643         }
4644
4645         if (!err) {
4646                 mddev->array_sectors = sectors;
4647                 if (mddev->pers) {
4648                         set_capacity(mddev->gendisk, mddev->array_sectors);
4649                         revalidate_disk(mddev->gendisk);
4650                 }
4651         }
4652         mddev_unlock(mddev);
4653         return err ?: len;
4654 }
4655
4656 static struct md_sysfs_entry md_array_size =
4657 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4658        array_size_store);
4659
4660 static struct attribute *md_default_attrs[] = {
4661         &md_level.attr,
4662         &md_layout.attr,
4663         &md_raid_disks.attr,
4664         &md_chunk_size.attr,
4665         &md_size.attr,
4666         &md_resync_start.attr,
4667         &md_metadata.attr,
4668         &md_new_device.attr,
4669         &md_safe_delay.attr,
4670         &md_array_state.attr,
4671         &md_reshape_position.attr,
4672         &md_reshape_direction.attr,
4673         &md_array_size.attr,
4674         &max_corr_read_errors.attr,
4675         NULL,
4676 };
4677
4678 static struct attribute *md_redundancy_attrs[] = {
4679         &md_scan_mode.attr,
4680         &md_last_scan_mode.attr,
4681         &md_mismatches.attr,
4682         &md_sync_min.attr,
4683         &md_sync_max.attr,
4684         &md_sync_speed.attr,
4685         &md_sync_force_parallel.attr,
4686         &md_sync_completed.attr,
4687         &md_min_sync.attr,
4688         &md_max_sync.attr,
4689         &md_suspend_lo.attr,
4690         &md_suspend_hi.attr,
4691         &md_bitmap.attr,
4692         &md_degraded.attr,
4693         NULL,
4694 };
4695 static struct attribute_group md_redundancy_group = {
4696         .name = NULL,
4697         .attrs = md_redundancy_attrs,
4698 };
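/*
 * The redundancy group is registered only for personalities that
 * implement sync_request (see md_run() below), so arrays such as
 * raid0 or linear do not expose these attributes.
 */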
4699
4700 static ssize_t
4701 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4702 {
4703         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4704         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4705         ssize_t rv;
4706
4707         if (!entry->show)
4708                 return -EIO;
4709         spin_lock(&all_mddevs_lock);
4710         if (list_empty(&mddev->all_mddevs)) {
4711                 spin_unlock(&all_mddevs_lock);
4712                 return -EBUSY;
4713         }
4714         mddev_get(mddev);
4715         spin_unlock(&all_mddevs_lock);
4716
4717         rv = entry->show(mddev, page);
4718         mddev_put(mddev);
4719         return rv;
4720 }
4721
4722 static ssize_t
4723 md_attr_store(struct kobject *kobj, struct attribute *attr,
4724               const char *page, size_t length)
4725 {
4726         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4727         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4728         ssize_t rv;
4729
4730         if (!entry->store)
4731                 return -EIO;
4732         if (!capable(CAP_SYS_ADMIN))
4733                 return -EACCES;
4734         spin_lock(&all_mddevs_lock);
4735         if (list_empty(&mddev->all_mddevs)) {
4736                 spin_unlock(&all_mddevs_lock);
4737                 return -EBUSY;
4738         }
4739         mddev_get(mddev);
4740         spin_unlock(&all_mddevs_lock);
4741         rv = entry->store(mddev, page, length);
4742         mddev_put(mddev);
4743         return rv;
4744 }
4745
4746 static void md_free(struct kobject *ko)
4747 {
4748         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4749
4750         if (mddev->sysfs_state)
4751                 sysfs_put(mddev->sysfs_state);
4752
4753         if (mddev->gendisk) {
4754                 del_gendisk(mddev->gendisk);
4755                 put_disk(mddev->gendisk);
4756         }
4757         if (mddev->queue)
4758                 blk_cleanup_queue(mddev->queue);
4759
4760         kfree(mddev);
4761 }
4762
4763 static const struct sysfs_ops md_sysfs_ops = {
4764         .show   = md_attr_show,
4765         .store  = md_attr_store,
4766 };
4767 static struct kobj_type md_ktype = {
4768         .release        = md_free,
4769         .sysfs_ops      = &md_sysfs_ops,
4770         .default_attrs  = md_default_attrs,
4771 };
4772
4773 int mdp_major = 0;
4774
4775 static void mddev_delayed_delete(struct work_struct *ws)
4776 {
4777         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4778
4779         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4780         kobject_del(&mddev->kobj);
4781         kobject_put(&mddev->kobj);
4782 }
4783
4784 static int md_alloc(dev_t dev, char *name)
4785 {
4786         static DEFINE_MUTEX(disks_mutex);
4787         struct mddev *mddev = mddev_find(dev);
4788         struct gendisk *disk;
4789         int partitioned;
4790         int shift;
4791         int unit;
4792         int error;
4793
4794         if (!mddev)
4795                 return -ENODEV;
4796
4797         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4798         shift = partitioned ? MdpMinorShift : 0;
4799         unit = MINOR(mddev->unit) >> shift;
4800
4801         /* wait for any previous instance of this device to be
4802          * completely removed (mddev_delayed_delete).
4803          */
4804         flush_workqueue(md_misc_wq);
4805
4806         mutex_lock(&disks_mutex);
4807         error = -EEXIST;
4808         if (mddev->gendisk)
4809                 goto abort;
4810
4811         if (name) {
4812                 /* Need to ensure that 'name' is not a duplicate.
4813                  */
4814                 struct mddev *mddev2;
4815                 spin_lock(&all_mddevs_lock);
4816
4817                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4818                         if (mddev2->gendisk &&
4819                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
4820                                 spin_unlock(&all_mddevs_lock);
4821                                 goto abort;
4822                         }
4823                 spin_unlock(&all_mddevs_lock);
4824         }
4825
4826         error = -ENOMEM;
4827         mddev->queue = blk_alloc_queue(GFP_KERNEL);
4828         if (!mddev->queue)
4829                 goto abort;
4830         mddev->queue->queuedata = mddev;
4831
4832         blk_queue_make_request(mddev->queue, md_make_request);
4833         blk_set_stacking_limits(&mddev->queue->limits);
4834
4835         disk = alloc_disk(1 << shift);
4836         if (!disk) {
4837                 blk_cleanup_queue(mddev->queue);
4838                 mddev->queue = NULL;
4839                 goto abort;
4840         }
4841         disk->major = MAJOR(mddev->unit);
4842         disk->first_minor = unit << shift;
4843         if (name)
4844                 strcpy(disk->disk_name, name);
4845         else if (partitioned)
4846                 sprintf(disk->disk_name, "md_d%d", unit);
4847         else
4848                 sprintf(disk->disk_name, "md%d", unit);
4849         disk->fops = &md_fops;
4850         disk->private_data = mddev;
4851         disk->queue = mddev->queue;
4852         blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
4853         /* Allow extended partitions.  This makes the
4854          * 'mdp' device redundant, but we can't really
4855          * remove it now.
4856          */
4857         disk->flags |= GENHD_FL_EXT_DEVT;
4858         mddev->gendisk = disk;
4859         /* As soon as we call add_disk(), another thread could get
4860          * through to md_open, so make sure it doesn't get too far
4861          */
4862         mutex_lock(&mddev->open_mutex);
4863         add_disk(disk);
4864
4865         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4866                                      &disk_to_dev(disk)->kobj, "%s", "md");
4867         if (error) {
4868                 /* This isn't possible, but as kobject_init_and_add is marked
4869                  * __must_check, we must do something with the result
4870                  */
4871                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4872                        disk->disk_name);
4873                 error = 0;
4874         }
4875         if (mddev->kobj.sd &&
4876             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4877                 printk(KERN_DEBUG "md: cannot register bitmap group for %s\n", disk->disk_name);
4878         mutex_unlock(&mddev->open_mutex);
4879  abort:
4880         mutex_unlock(&disks_mutex);
4881         if (!error && mddev->kobj.sd) {
4882                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4883                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
4884         }
4885         mddev_put(mddev);
4886         return error;
4887 }
4888
4889 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4890 {
4891         md_alloc(dev, NULL);
4892         return NULL;
4893 }
4894
4895 static int add_named_array(const char *val, struct kernel_param *kp)
4896 {
4897         /* val must be "md_*" where * is not all digits.
4898          * We allocate an array with a large free minor number, and
4899          * set the name to val.  val must not already be an active name.
4900          */
4901         int len = strlen(val);
4902         char buf[DISK_NAME_LEN];
4903
4904         while (len && val[len-1] == '\n')
4905                 len--;
4906         if (len >= DISK_NAME_LEN)
4907                 return -E2BIG;
4908         strlcpy(buf, val, len+1);
4909         if (strncmp(buf, "md_", 3) != 0)
4910                 return -EINVAL;
4911         return md_alloc(0, buf);
4912 }
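/*
 * Illustrative: add_named_array is wired up as the "new_array" module
 * parameter, so an array with a stable name can be created from
 * userspace (assuming md is built as md_mod):
 *
 *     echo md_home > /sys/module/md_mod/parameters/new_array
 */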
4913
4914 static void md_safemode_timeout(unsigned long data)
4915 {
4916         struct mddev *mddev = (struct mddev *) data;
4917
4918         if (!atomic_read(&mddev->writes_pending)) {
4919                 mddev->safemode = 1;
4920                 if (mddev->external)
4921                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4922         }
4923         md_wakeup_thread(mddev->thread);
4924 }
4925
4926 static int start_dirty_degraded;
4927
4928 int md_run(struct mddev *mddev)
4929 {
4930         int err;
4931         struct md_rdev *rdev;
4932         struct md_personality *pers;
4933
4934         if (list_empty(&mddev->disks))
4935                 /* cannot run an array with no devices */
4936                 return -EINVAL;
4937
4938         if (mddev->pers)
4939                 return -EBUSY;
4940         /* Cannot run until previous stop completes properly */
4941         if (mddev->sysfs_active)
4942                 return -EBUSY;
4943
4944         /*
4945          * Analyze all RAID superblock(s)
4946          */
4947         if (!mddev->raid_disks) {
4948                 if (!mddev->persistent)
4949                         return -EINVAL;
4950                 analyze_sbs(mddev);
4951         }
4952
4953         if (mddev->level != LEVEL_NONE)
4954                 request_module("md-level-%d", mddev->level);
4955         else if (mddev->clevel[0])
4956                 request_module("md-%s", mddev->clevel);
4957
4958         /*
4959          * Drop all container device buffers, from now on
4960          * the only valid external interface is through the md
4961          * device.
4962          */
4963         rdev_for_each(rdev, mddev) {
4964                 if (test_bit(Faulty, &rdev->flags))
4965                         continue;
4966                 sync_blockdev(rdev->bdev);
4967                 invalidate_bdev(rdev->bdev);
4968
4969                 /* Perform some consistency tests on the device.
4970                  * We don't want the data to overlap the metadata.
4971                  * Internal bitmap issues have been handled elsewhere.
4972                  */
4973                 if (rdev->meta_bdev) {
4974                         /* Nothing to check */;
4975                 } else if (rdev->data_offset < rdev->sb_start) {
4976                         if (mddev->dev_sectors &&
4977                             rdev->data_offset + mddev->dev_sectors
4978                             > rdev->sb_start) {
4979                                 printk("md: %s: data overlaps metadata\n",
4980                                        mdname(mddev));
4981                                 return -EINVAL;
4982                         }
4983                 } else {
4984                         if (rdev->sb_start + rdev->sb_size/512
4985                             > rdev->data_offset) {
4986                                 printk("md: %s: metadata overlaps data\n",
4987                                        mdname(mddev));
4988                                 return -EINVAL;
4989                         }
4990                 }
4991                 sysfs_notify_dirent_safe(rdev->sysfs_state);
4992         }
4993
4994         if (mddev->bio_set == NULL)
4995                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
4996
4997         spin_lock(&pers_lock);
4998         pers = find_pers(mddev->level, mddev->clevel);
4999         if (!pers || !try_module_get(pers->owner)) {
5000                 spin_unlock(&pers_lock);
5001                 if (mddev->level != LEVEL_NONE)
5002                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
5003                                mddev->level);
5004                 else
5005                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
5006                                mddev->clevel);
5007                 return -EINVAL;
5008         }
5009         spin_unlock(&pers_lock);
5010         if (mddev->level != pers->level) {
5011                 mddev->level = pers->level;
5012                 mddev->new_level = pers->level;
5013         }
5014         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5015
5016         if (mddev->reshape_position != MaxSector &&
5017             pers->start_reshape == NULL) {
5018                 /* This personality cannot handle reshaping... */
5019                 module_put(pers->owner);
5020                 return -EINVAL;
5021         }
5022
5023         if (pers->sync_request) {
5024                 /* Warn if this is a potentially silly
5025                  * configuration.
5026                  */
5027                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5028                 struct md_rdev *rdev2;
5029                 int warned = 0;
5030
5031                 rdev_for_each(rdev, mddev)
5032                         rdev_for_each(rdev2, mddev) {
5033                                 if (rdev < rdev2 &&
5034                                     rdev->bdev->bd_contains ==
5035                                     rdev2->bdev->bd_contains) {
5036                                         printk(KERN_WARNING
5037                                                "%s: WARNING: %s appears to be"
5038                                                " on the same physical disk as"
5039                                                " %s.\n",
5040                                                mdname(mddev),
5041                                                bdevname(rdev->bdev,b),
5042                                                bdevname(rdev2->bdev,b2));
5043                                         warned = 1;
5044                                 }
5045                         }
5046
5047                 if (warned)
5048                         printk(KERN_WARNING
5049                                "True protection against single-disk"
5050                                " failure might be compromised.\n");
5051         }
5052
5053         mddev->recovery = 0;
5054         /* may be overridden by the personality */
5055         mddev->resync_max_sectors = mddev->dev_sectors;
5056
5057         mddev->ok_start_degraded = start_dirty_degraded;
5058
5059         if (start_readonly && mddev->ro == 0)
5060                 mddev->ro = 2; /* read-only, but switch on first write */
5061
5062         err = pers->run(mddev);
5063         if (err)
5064                 printk(KERN_ERR "md: pers->run() failed ...\n");
5065         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5066                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
5067                           " but 'external_size' not in effect?\n", __func__);
5068                 printk(KERN_ERR
5069                        "md: invalid array_size %llu > default size %llu\n",
5070                        (unsigned long long)mddev->array_sectors / 2,
5071                        (unsigned long long)pers->size(mddev, 0, 0) / 2);
5072                 err = -EINVAL;
5073         }
5074         if (err == 0 && pers->sync_request &&
5075             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5076                 err = bitmap_create(mddev);
5077                 if (err)
5078                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
5079                                mdname(mddev), err);
5080         }
5081         if (err) {
5082                 mddev_detach(mddev);
5083                 if (mddev->private)
5084                         pers->free(mddev, mddev->private);
5085                 module_put(pers->owner);
5086                 bitmap_destroy(mddev);
5087                 return err;
5088         }
5089         if (mddev->queue) {
5090                 mddev->queue->backing_dev_info.congested_data = mddev;
5091                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5092                 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
5093         }
5094         if (pers->sync_request) {
5095                 if (mddev->kobj.sd &&
5096                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5097                         printk(KERN_WARNING
5098                                "md: cannot register extra attributes for %s\n",
5099                                mdname(mddev));
5100                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5101         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5102                 mddev->ro = 0;
5103
5104         atomic_set(&mddev->writes_pending, 0);
5105         atomic_set(&mddev->max_corr_read_errors,
5106                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5107         mddev->safemode = 0;
5108         mddev->safemode_timer.function = md_safemode_timeout;
5109         mddev->safemode_timer.data = (unsigned long) mddev;
5110         mddev->safemode_delay = (200 * HZ) / 1000 + 1; /* 200 msec delay */
5111         mddev->in_sync = 1;
5112         smp_wmb();
5113         spin_lock(&mddev->lock);
5114         mddev->pers = pers;
5115         mddev->ready = 1;
5116         spin_unlock(&mddev->lock);
5117         rdev_for_each(rdev, mddev)
5118                 if (rdev->raid_disk >= 0)
5119                         if (sysfs_link_rdev(mddev, rdev))
5120                                 /* failure here is OK */;
5121
5122         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5123
5124         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5125                 md_update_sb(mddev, 0);
5126
5127         md_new_event(mddev);
5128         sysfs_notify_dirent_safe(mddev->sysfs_state);
5129         sysfs_notify_dirent_safe(mddev->sysfs_action);
5130         sysfs_notify(&mddev->kobj, NULL, "degraded");
5131         return 0;
5132 }
5133 EXPORT_SYMBOL_GPL(md_run);
5134
5135 static int do_md_run(struct mddev *mddev)
5136 {
5137         int err;
5138
5139         err = md_run(mddev);
5140         if (err)
5141                 goto out;
5142         err = bitmap_load(mddev);
5143         if (err) {
5144                 bitmap_destroy(mddev);
5145                 goto out;
5146         }
5147
5148         md_wakeup_thread(mddev->thread);
5149         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5150
5151         set_capacity(mddev->gendisk, mddev->array_sectors);
5152         revalidate_disk(mddev->gendisk);
5153         mddev->changed = 1;
5154         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5155 out:
5156         return err;
5157 }
5158
5159 static int restart_array(struct mddev *mddev)
5160 {
5161         struct gendisk *disk = mddev->gendisk;
5162
5163         /* Complain if it has no devices */
5164         if (list_empty(&mddev->disks))
5165                 return -ENXIO;
5166         if (!mddev->pers)
5167                 return -EINVAL;
5168         if (!mddev->ro)
5169                 return -EBUSY;
5170         mddev->safemode = 0;
5171         mddev->ro = 0;
5172         set_disk_ro(disk, 0);
5173         printk(KERN_INFO "md: %s switched to read-write mode.\n",
5174                 mdname(mddev));
5175         /* Kick recovery or resync if necessary */
5176         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5177         md_wakeup_thread(mddev->thread);
5178         md_wakeup_thread(mddev->sync_thread);
5179         sysfs_notify_dirent_safe(mddev->sysfs_state);
5180         return 0;
5181 }
5182
5183 static void md_clean(struct mddev *mddev)
5184 {
5185         mddev->array_sectors = 0;
5186         mddev->external_size = 0;
5187         mddev->dev_sectors = 0;
5188         mddev->raid_disks = 0;
5189         mddev->recovery_cp = 0;
5190         mddev->resync_min = 0;
5191         mddev->resync_max = MaxSector;
5192         mddev->reshape_position = MaxSector;
5193         mddev->external = 0;
5194         mddev->persistent = 0;
5195         mddev->level = LEVEL_NONE;
5196         mddev->clevel[0] = 0;
5197         mddev->flags = 0;
5198         mddev->ro = 0;
5199         mddev->metadata_type[0] = 0;
5200         mddev->chunk_sectors = 0;
5201         mddev->ctime = mddev->utime = 0;
5202         mddev->layout = 0;
5203         mddev->max_disks = 0;
5204         mddev->events = 0;
5205         mddev->can_decrease_events = 0;
5206         mddev->delta_disks = 0;
5207         mddev->reshape_backwards = 0;
5208         mddev->new_level = LEVEL_NONE;
5209         mddev->new_layout = 0;
5210         mddev->new_chunk_sectors = 0;
5211         mddev->curr_resync = 0;
5212         atomic64_set(&mddev->resync_mismatches, 0);
5213         mddev->suspend_lo = mddev->suspend_hi = 0;
5214         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5215         mddev->recovery = 0;
5216         mddev->in_sync = 0;
5217         mddev->changed = 0;
5218         mddev->degraded = 0;
5219         mddev->safemode = 0;
5220         mddev->merge_check_needed = 0;
5221         mddev->bitmap_info.offset = 0;
5222         mddev->bitmap_info.default_offset = 0;
5223         mddev->bitmap_info.default_space = 0;
5224         mddev->bitmap_info.chunksize = 0;
5225         mddev->bitmap_info.daemon_sleep = 0;
5226         mddev->bitmap_info.max_write_behind = 0;
5227 }
5228
5229 static void __md_stop_writes(struct mddev *mddev)
5230 {
5231         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5232         flush_workqueue(md_misc_wq);
5233         if (mddev->sync_thread) {
5234                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5235                 md_reap_sync_thread(mddev);
5236         }
5237
5238         del_timer_sync(&mddev->safemode_timer);
5239
5240         bitmap_flush(mddev);
5241         md_super_wait(mddev);
5242
5243         if (mddev->ro == 0 &&
5244             (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5245                 /* mark array as shutdown cleanly */
5246                 mddev->in_sync = 1;
5247                 md_update_sb(mddev, 1);
5248         }
5249 }
5250
5251 void md_stop_writes(struct mddev *mddev)
5252 {
5253         mddev_lock_nointr(mddev);
5254         __md_stop_writes(mddev);
5255         mddev_unlock(mddev);
5256 }
5257 EXPORT_SYMBOL_GPL(md_stop_writes);
5258
5259 static void mddev_detach(struct mddev *mddev)
5260 {
5261         struct bitmap *bitmap = mddev->bitmap;
5262         /* wait for behind writes to complete */
5263         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5264                 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
5265                        mdname(mddev));
5266                 /* need to kick something here to make sure I/O goes? */
5267                 wait_event(bitmap->behind_wait,
5268                            atomic_read(&bitmap->behind_writes) == 0);
5269         }
5270         if (mddev->pers && mddev->pers->quiesce) {
5271                 mddev->pers->quiesce(mddev, 1);
5272                 mddev->pers->quiesce(mddev, 0);
5273         }
5274         md_unregister_thread(&mddev->thread);
5275         if (mddev->queue)
5276                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5277 }
5278
5279 static void __md_stop(struct mddev *mddev)
5280 {
5281         struct md_personality *pers = mddev->pers;
5282         mddev_detach(mddev);
5283         spin_lock(&mddev->lock);
5284         mddev->ready = 0;
5285         mddev->pers = NULL;
5286         spin_unlock(&mddev->lock);
5287         pers->free(mddev, mddev->private);
5288         if (pers->sync_request && mddev->to_remove == NULL)
5289                 mddev->to_remove = &md_redundancy_group;
5290         module_put(pers->owner);
5291         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5292 }
5293
5294 void md_stop(struct mddev *mddev)
5295 {
5296         /* Stop the array and free any attached data structures.
5297          * This is called from dm-raid.
5298          */
5299         __md_stop(mddev);
5300         bitmap_destroy(mddev);
5301         if (mddev->bio_set)
5302                 bioset_free(mddev->bio_set);
5303 }
5304
5305 EXPORT_SYMBOL_GPL(md_stop);
5306
5307 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5308 {
5309         int err = 0;
5310         int did_freeze = 0;
5311
5312         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5313                 did_freeze = 1;
5314                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5315                 md_wakeup_thread(mddev->thread);
5316         }
5317         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5318                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5319         if (mddev->sync_thread)
5320                 /* Thread might be blocked waiting for metadata update
5321                  * which will now never happen */
5322                 wake_up_process(mddev->sync_thread->tsk);
5323
5324         mddev_unlock(mddev);
5325         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5326                                           &mddev->recovery));
5327         mddev_lock_nointr(mddev);
5328
5329         mutex_lock(&mddev->open_mutex);
5330         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5331             mddev->sync_thread ||
5332             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5333             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5334                 printk("md: %s still in use.\n", mdname(mddev));
5335                 if (did_freeze) {
5336                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5337                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5338                         md_wakeup_thread(mddev->thread);
5339                 }
5340                 err = -EBUSY;
5341                 goto out;
5342         }
5343         if (mddev->pers) {
5344                 __md_stop_writes(mddev);
5345
5346                 err  = -ENXIO;
5347                 if (mddev->ro == 1)
5348                         goto out;
5349                 mddev->ro = 1;
5350                 set_disk_ro(mddev->gendisk, 1);
5351                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5352                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5353                 md_wakeup_thread(mddev->thread);
5354                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5355                 err = 0;
5356         }
5357 out:
5358         mutex_unlock(&mddev->open_mutex);
5359         return err;
5360 }
5361
5362 /* mode:
5363  *   0 - completely stop and disassemble array
5364  *   2 - stop but do not disassemble array
5365  */
5366 static int do_md_stop(struct mddev *mddev, int mode,
5367                       struct block_device *bdev)
5368 {
5369         struct gendisk *disk = mddev->gendisk;
5370         struct md_rdev *rdev;
5371         int did_freeze = 0;
5372
5373         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5374                 did_freeze = 1;
5375                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5376                 md_wakeup_thread(mddev->thread);
5377         }
5378         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5379                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5380         if (mddev->sync_thread)
5381                 /* Thread might be blocked waiting for metadata update
5382                  * which will now never happen */
5383                 wake_up_process(mddev->sync_thread->tsk);
5384
5385         mddev_unlock(mddev);
5386         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5387                                  !test_bit(MD_RECOVERY_RUNNING,
5388                                            &mddev->recovery)));
5389         mddev_lock_nointr(mddev);
5390
5391         mutex_lock(&mddev->open_mutex);
5392         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5393             mddev->sysfs_active ||
5394             mddev->sync_thread ||
5395             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5396             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5397                 printk("md: %s still in use.\n", mdname(mddev));
5398                 mutex_unlock(&mddev->open_mutex);
5399                 if (did_freeze) {
5400                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5401                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5402                         md_wakeup_thread(mddev->thread);
5403                 }
5404                 return -EBUSY;
5405         }
5406         if (mddev->pers) {
5407                 if (mddev->ro)
5408                         set_disk_ro(disk, 0);
5409
5410                 __md_stop_writes(mddev);
5411                 __md_stop(mddev);
5412                 mddev->queue->merge_bvec_fn = NULL;
5413                 mddev->queue->backing_dev_info.congested_fn = NULL;
5414
5415                 /* tell userspace to handle 'inactive' */
5416                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5417
5418                 rdev_for_each(rdev, mddev)
5419                         if (rdev->raid_disk >= 0)
5420                                 sysfs_unlink_rdev(mddev, rdev);
5421
5422                 set_capacity(disk, 0);
5423                 mutex_unlock(&mddev->open_mutex);
5424                 mddev->changed = 1;
5425                 revalidate_disk(disk);
5426
5427                 if (mddev->ro)
5428                         mddev->ro = 0;
5429         } else
5430                 mutex_unlock(&mddev->open_mutex);
5431         /*
5432          * Free resources if final stop
5433          */
5434         if (mode == 0) {
5435                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5436
5437                 bitmap_destroy(mddev);
5438                 if (mddev->bitmap_info.file) {
5439                         struct file *f = mddev->bitmap_info.file;
5440                         spin_lock(&mddev->lock);
5441                         mddev->bitmap_info.file = NULL;
5442                         spin_unlock(&mddev->lock);
5443                         fput(f);
5444                 }
5445                 mddev->bitmap_info.offset = 0;
5446
5447                 export_array(mddev);
5448
5449                 md_clean(mddev);
5450                 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5451                 if (mddev->hold_active == UNTIL_STOP)
5452                         mddev->hold_active = 0;
5453         }
5454         blk_integrity_unregister(disk);
5455         md_new_event(mddev);
5456         sysfs_notify_dirent_safe(mddev->sysfs_state);
5457         return 0;
5458 }
5459
5460 #ifndef MODULE
5461 static void autorun_array(struct mddev *mddev)
5462 {
5463         struct md_rdev *rdev;
5464         int err;
5465
5466         if (list_empty(&mddev->disks))
5467                 return;
5468
5469         printk(KERN_INFO "md: running: ");
5470
5471         rdev_for_each(rdev, mddev) {
5472                 char b[BDEVNAME_SIZE];
5473                 printk("<%s>", bdevname(rdev->bdev,b));
5474         }
5475         printk("\n");
5476
5477         err = do_md_run(mddev);
5478         if (err) {
5479                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5480                 do_md_stop(mddev, 0, NULL);
5481         }
5482 }
5483
5484 /*
5485  * Let's try to run arrays based on all disks that have arrived
5486  * so far (those are in pending_raid_disks).
5487  *
5488  * The method: pick the first pending disk, collect all disks with
5489  * the same UUID, remove them from the pending list and put them on
5490  * the 'same_array' list.  Then order this list by superblock update
5491  * time (freshest first), kick out 'old' disks and compare
5492  * superblocks.  If everything is fine, run the array.
5493  *
5494  * If "unit" is allocated, bump its reference count.
5495  */
5496 static void autorun_devices(int part)
5497 {
5498         struct md_rdev *rdev0, *rdev, *tmp;
5499         struct mddev *mddev;
5500         char b[BDEVNAME_SIZE];
5501
5502         printk(KERN_INFO "md: autorun ...\n");
5503         while (!list_empty(&pending_raid_disks)) {
5504                 int unit;
5505                 dev_t dev;
5506                 LIST_HEAD(candidates);
5507                 rdev0 = list_entry(pending_raid_disks.next,
5508                                          struct md_rdev, same_set);
5509
5510                 printk(KERN_INFO "md: considering %s ...\n",
5511                         bdevname(rdev0->bdev,b));
5513                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5514                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5515                                 printk(KERN_INFO "md:  adding %s ...\n",
5516                                         bdevname(rdev->bdev,b));
5517                                 list_move(&rdev->same_set, &candidates);
5518                         }
5519                 /*
5520                  * now we have a set of devices, with all of them having
5521                  * mostly sane superblocks. It's time to allocate the
5522                  * mddev.
5523                  */
5524                 if (part) {
5525                         dev = MKDEV(mdp_major,
5526                                     rdev0->preferred_minor << MdpMinorShift);
5527                         unit = MINOR(dev) >> MdpMinorShift;
5528                 } else {
5529                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5530                         unit = MINOR(dev);
5531                 }
5532                 if (rdev0->preferred_minor != unit) {
5533                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5534                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5535                         break;
5536                 }
5537
5538                 md_probe(dev, NULL, NULL);
5539                 mddev = mddev_find(dev);
5540                 if (!mddev || !mddev->gendisk) {
5541                         if (mddev)
5542                                 mddev_put(mddev);
5543                         printk(KERN_ERR
5544                                 "md: cannot allocate memory for md drive.\n");
5545                         break;
5546                 }
5547                 if (mddev_lock(mddev))
5548                         printk(KERN_WARNING "md: %s locked, cannot run\n",
5549                                mdname(mddev));
5550                 else if (mddev->raid_disks || mddev->major_version
5551                          || !list_empty(&mddev->disks)) {
5552                         printk(KERN_WARNING
5553                                 "md: %s already running, cannot run %s\n",
5554                                 mdname(mddev), bdevname(rdev0->bdev,b));
5555                         mddev_unlock(mddev);
5556                 } else {
5557                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
5558                         mddev->persistent = 1;
5559                         rdev_for_each_list(rdev, tmp, &candidates) {
5560                                 list_del_init(&rdev->same_set);
5561                                 if (bind_rdev_to_array(rdev, mddev))
5562                                         export_rdev(rdev);
5563                         }
5564                         autorun_array(mddev);
5565                         mddev_unlock(mddev);
5566                 }
5567                 /* on success, candidates will be empty, on error
5568                  * it won't...
5569                  */
5570                 rdev_for_each_list(rdev, tmp, &candidates) {
5571                         list_del_init(&rdev->same_set);
5572                         export_rdev(rdev);
5573                 }
5574                 mddev_put(mddev);
5575         }
5576         printk(KERN_INFO "md: ... autorun DONE.\n");
5577 }
5578 #endif /* !MODULE */
5579
5580 static int get_version(void __user *arg)
5581 {
5582         mdu_version_t ver;
5583
5584         ver.major = MD_MAJOR_VERSION;
5585         ver.minor = MD_MINOR_VERSION;
5586         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5587
5588         if (copy_to_user(arg, &ver, sizeof(ver)))
5589                 return -EFAULT;
5590
5591         return 0;
5592 }
5593
5594 static int get_array_info(struct mddev *mddev, void __user *arg)
5595 {
5596         mdu_array_info_t info;
5597         int nr, working, insync, failed, spare;
5598         struct md_rdev *rdev;
5599
5600         nr = working = insync = failed = spare = 0;
5601         rcu_read_lock();
5602         rdev_for_each_rcu(rdev, mddev) {
5603                 nr++;
5604                 if (test_bit(Faulty, &rdev->flags))
5605                         failed++;
5606                 else {
5607                         working++;
5608                         if (test_bit(In_sync, &rdev->flags))
5609                                 insync++;
5610                         else
5611                                 spare++;
5612                 }
5613         }
5614         rcu_read_unlock();
5615
5616         info.major_version = mddev->major_version;
5617         info.minor_version = mddev->minor_version;
5618         info.patch_version = MD_PATCHLEVEL_VERSION;
5619         info.ctime         = mddev->ctime;
5620         info.level         = mddev->level;
5621         info.size          = mddev->dev_sectors / 2;
5622         if (info.size != mddev->dev_sectors / 2) /* overflow */
5623                 info.size = -1;
5624         info.nr_disks      = nr;
5625         info.raid_disks    = mddev->raid_disks;
5626         info.md_minor      = mddev->md_minor;
5627         info.not_persistent = !mddev->persistent;
5628
5629         info.utime         = mddev->utime;
5630         info.state         = 0;
5631         if (mddev->in_sync)
5632                 info.state = (1<<MD_SB_CLEAN);
5633         if (mddev->bitmap && mddev->bitmap_info.offset)
5634                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5635         info.active_disks  = insync;
5636         info.working_disks = working;
5637         info.failed_disks  = failed;
5638         info.spare_disks   = spare;
5639
5640         info.layout        = mddev->layout;
5641         info.chunk_size    = mddev->chunk_sectors << 9;
5642
5643         if (copy_to_user(arg, &info, sizeof(info)))
5644                 return -EFAULT;
5645
5646         return 0;
5647 }
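/*
 * Illustrative userspace counterpart (error handling omitted;
 * GET_ARRAY_INFO and mdu_array_info_t come from <linux/raid/md_u.h>):
 *
 *     mdu_array_info_t info;
 *     int fd = open("/dev/md0", O_RDONLY);
 *     if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *             printf("raid%d, %d disks\n", info.level, info.raid_disks);
 */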
5648
5649 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5650 {
5651         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5652         char *ptr;
5653         int err;
5654
5655         file = kmalloc(sizeof(*file), GFP_NOIO);
5656         if (!file)
5657                 return -ENOMEM;
5658
5659         err = 0;
5660         spin_lock(&mddev->lock);
5661         /* bitmap disabled, zero the first byte and copy out */
5662         if (!mddev->bitmap_info.file)
5663                 file->pathname[0] = '\0';
5664         else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
5665                                file->pathname, sizeof(file->pathname))),
5666                  IS_ERR(ptr))
5667                 err = PTR_ERR(ptr);
5668         else
5669                 memmove(file->pathname, ptr,
5670                         sizeof(file->pathname)-(ptr-file->pathname));
5671         spin_unlock(&mddev->lock);
5672
5673         if (err == 0 &&
5674             copy_to_user(arg, file, sizeof(*file)))
5675                 err = -EFAULT;
5676
5677         kfree(file);
5678         return err;
5679 }
5680
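     /*
      * GET_DISK_INFO ioctl: look up the device numbered info.number
      * under rcu and report its dev_t, role and state bits; a slot with
      * no device is reported as MD_DISK_REMOVED with major/minor 0.
      */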
5681 static int get_disk_info(struct mddev *mddev, void __user * arg)
5682 {
5683         mdu_disk_info_t info;
5684         struct md_rdev *rdev;
5685
5686         if (copy_from_user(&info, arg, sizeof(info)))
5687                 return -EFAULT;
5688
5689         rcu_read_lock();
5690         rdev = find_rdev_nr_rcu(mddev, info.number);
5691         if (rdev) {
5692                 info.major = MAJOR(rdev->bdev->bd_dev);
5693                 info.minor = MINOR(rdev->bdev->bd_dev);
5694                 info.raid_disk = rdev->raid_disk;
5695                 info.state = 0;
5696                 if (test_bit(Faulty, &rdev->flags))
5697                         info.state |= (1<<MD_DISK_FAULTY);
5698                 else if (test_bit(In_sync, &rdev->flags)) {
5699                         info.state |= (1<<MD_DISK_ACTIVE);
5700                         info.state |= (1<<MD_DISK_SYNC);
5701                 }
5702                 if (test_bit(WriteMostly, &rdev->flags))
5703                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5704         } else {
5705                 info.major = info.minor = 0;
5706                 info.raid_disk = -1;
5707                 info.state = (1<<MD_DISK_REMOVED);
5708         }
5709         rcu_read_unlock();
5710
5711         if (copy_to_user(arg, &info, sizeof(info)))
5712                 return -EFAULT;
5713
5714         return 0;
5715 }
5716
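     /*
      * ADD_NEW_DISK falls into three cases:
      *  - array not yet configured: import the device, check that its
      *    superblock UUID matches any devices already collected, bind it;
      *  - array running (mddev->pers): hot-add a spare, which must carry
      *    a valid superblock if the array is persistent;
      *  - otherwise only version-0.90 arrays may have disks added before
      *    the array is started.
      */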
5717 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5718 {
5719         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5720         struct md_rdev *rdev;
5721         dev_t dev = MKDEV(info->major,info->minor);
5722
5723         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5724                 return -EOVERFLOW;
5725
5726         if (!mddev->raid_disks) {
5727                 int err;
5728                 /* expecting a device which has a superblock */
5729                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5730                 if (IS_ERR(rdev)) {
5731                         printk(KERN_WARNING
5732                                 "md: md_import_device returned %ld\n",
5733                                 PTR_ERR(rdev));
5734                         return PTR_ERR(rdev);
5735                 }
5736                 if (!list_empty(&mddev->disks)) {
5737                         struct md_rdev *rdev0
5738                                 = list_entry(mddev->disks.next,
5739                                              struct md_rdev, same_set);
5740                         err = super_types[mddev->major_version]
5741                                 .load_super(rdev, rdev0, mddev->minor_version);
5742                         if (err < 0) {
5743                                 printk(KERN_WARNING
5744                                         "md: %s has different UUID to %s\n",
5745                                         bdevname(rdev->bdev,b),
5746                                         bdevname(rdev0->bdev,b2));
5747                                 export_rdev(rdev);
5748                                 return -EINVAL;
5749                         }
5750                 }
5751                 err = bind_rdev_to_array(rdev, mddev);
5752                 if (err)
5753                         export_rdev(rdev);
5754                 return err;
5755         }
5756
5757         /*
5758          * add_new_disk can be used once the array is assembled
5759          * to add "hot spares".  They must already have a superblock
5760          * written
5761          */
5762         if (mddev->pers) {
5763                 int err;
5764                 if (!mddev->pers->hot_add_disk) {
5765                         printk(KERN_WARNING
5766                                 "%s: personality does not support diskops!\n",
5767                                mdname(mddev));
5768                         return -EINVAL;
5769                 }
5770                 if (mddev->persistent)
5771                         rdev = md_import_device(dev, mddev->major_version,
5772                                                 mddev->minor_version);
5773                 else
5774                         rdev = md_import_device(dev, -1, -1);
5775                 if (IS_ERR(rdev)) {
5776                         printk(KERN_WARNING
5777                                 "md: md_import_device returned %ld\n",
5778                                 PTR_ERR(rdev));
5779                         return PTR_ERR(rdev);
5780                 }
5781                 /* set saved_raid_disk if appropriate */
5782                 if (!mddev->persistent) {
5783                         if (info->state & (1<<MD_DISK_SYNC)  &&
5784                             info->raid_disk < mddev->raid_disks) {
5785                                 rdev->raid_disk = info->raid_disk;
5786                                 set_bit(In_sync, &rdev->flags);
5787                                 clear_bit(Bitmap_sync, &rdev->flags);
5788                         } else
5789                                 rdev->raid_disk = -1;
5790                         rdev->saved_raid_disk = rdev->raid_disk;
5791                 } else
5792                         super_types[mddev->major_version].
5793                                 validate_super(mddev, rdev);
5794                 if ((info->state & (1<<MD_DISK_SYNC)) &&
5795                      rdev->raid_disk != info->raid_disk) {
5796                 /* This was a hot-add request, but the event counts
5797                  * don't match, so reject it.
5798                  */
5799                         export_rdev(rdev);
5800                         return -EINVAL;
5801                 }
5802
5803                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5804                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5805                         set_bit(WriteMostly, &rdev->flags);
5806                 else
5807                         clear_bit(WriteMostly, &rdev->flags);
5808
5809                 rdev->raid_disk = -1;
5810                 err = bind_rdev_to_array(rdev, mddev);
5811                 if (!err && !mddev->pers->hot_remove_disk) {
5812                         /* If there is hot_add_disk but no hot_remove_disk
5813                          * then added disks are for geometry changes,
5814                          * and should be added immediately.
5815                          */
5816                         super_types[mddev->major_version].
5817                                 validate_super(mddev, rdev);
5818                         err = mddev->pers->hot_add_disk(mddev, rdev);
5819                         if (err)
5820                                 unbind_rdev_from_array(rdev);
5821                 }
5822                 if (err)
5823                         export_rdev(rdev);
5824                 else
5825                         sysfs_notify_dirent_safe(rdev->sysfs_state);
5826
5827                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5828                 if (mddev->degraded)
5829                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5830                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5831                 if (!err)
5832                         md_new_event(mddev);
5833                 md_wakeup_thread(mddev->thread);
5834                 return err;
5835         }
5836
5837         /* otherwise, add_new_disk is only allowed
5838          * for major_version==0 superblocks
5839          */
5840         if (mddev->major_version != 0) {
5841                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5842                        mdname(mddev));
5843                 return -EINVAL;
5844         }
5845
5846         if (!(info->state & (1<<MD_DISK_FAULTY))) {
5847                 int err;
5848                 rdev = md_import_device(dev, -1, 0);
5849                 if (IS_ERR(rdev)) {
5850                         printk(KERN_WARNING
5851                                 "md: error, md_import_device() returned %ld\n",
5852                                 PTR_ERR(rdev));
5853                         return PTR_ERR(rdev);
5854                 }
5855                 rdev->desc_nr = info->number;
5856                 if (info->raid_disk < mddev->raid_disks)
5857                         rdev->raid_disk = info->raid_disk;
5858                 else
5859                         rdev->raid_disk = -1;
5860
5861                 if (rdev->raid_disk < mddev->raid_disks)
5862                         if (info->state & (1<<MD_DISK_SYNC))
5863                                 set_bit(In_sync, &rdev->flags);
5864
5865                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5866                         set_bit(WriteMostly, &rdev->flags);
5867
5868                 if (!mddev->persistent) {
5869                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
5870                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5871                 } else
5872                         rdev->sb_start = calc_dev_sboffset(rdev);
5873                 rdev->sectors = rdev->sb_start;
5874
5875                 err = bind_rdev_to_array(rdev, mddev);
5876                 if (err) {
5877                         export_rdev(rdev);
5878                         return err;
5879                 }
5880         }
5881
5882         return 0;
5883 }
5884
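     /*
      * HOT_REMOVE_DISK: let remove_and_add_spares() try to detach the
      * device from its slot first; if it is still an active member
      * afterwards (raid_disk >= 0), refuse the removal with -EBUSY.
      */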
5885 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
5886 {
5887         char b[BDEVNAME_SIZE];
5888         struct md_rdev *rdev;
5889
5890         rdev = find_rdev(mddev, dev);
5891         if (!rdev)
5892                 return -ENXIO;
5893
5894         clear_bit(Blocked, &rdev->flags);
5895         remove_and_add_spares(mddev, rdev);
5896
5897         if (rdev->raid_disk >= 0)
5898                 goto busy;
5899
5900         kick_rdev_from_array(rdev);
5901         md_update_sb(mddev, 1);
5902         md_new_event(mddev);
5903
5904         return 0;
5905 busy:
5906         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5907                 bdevname(rdev->bdev,b), mdname(mddev));
5908         return -EBUSY;
5909 }
5910
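     /*
      * HOT_ADD_DISK: only supported on running, version-0.90 arrays.
      * The imported device is bound as a spare (raid_disk = -1) and the
      * recovery thread is kicked so the spare can be pulled into the
      * array immediately if it is needed.
      */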
5911 static int hot_add_disk(struct mddev *mddev, dev_t dev)
5912 {
5913         char b[BDEVNAME_SIZE];
5914         int err;
5915         struct md_rdev *rdev;
5916
5917         if (!mddev->pers)
5918                 return -ENODEV;
5919
5920         if (mddev->major_version != 0) {
5921                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5922                         " version-0 superblocks.\n",
5923                         mdname(mddev));
5924                 return -EINVAL;
5925         }
5926         if (!mddev->pers->hot_add_disk) {
5927                 printk(KERN_WARNING
5928                         "%s: personality does not support diskops!\n",
5929                         mdname(mddev));
5930                 return -EINVAL;
5931         }
5932
5933         rdev = md_import_device(dev, -1, 0);
5934         if (IS_ERR(rdev)) {
5935                 printk(KERN_WARNING
5936                         "md: error, md_import_device() returned %ld\n",
5937                         PTR_ERR(rdev));
5938                 return -EINVAL;
5939         }
5940
5941         if (mddev->persistent)
5942                 rdev->sb_start = calc_dev_sboffset(rdev);
5943         else
5944                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5945
5946         rdev->sectors = rdev->sb_start;
5947
5948         if (test_bit(Faulty, &rdev->flags)) {
5949                 printk(KERN_WARNING
5950                         "md: cannot hot-add faulty %s disk to %s!\n",
5951                         bdevname(rdev->bdev,b), mdname(mddev));
5952                 err = -EINVAL;
5953                 goto abort_export;
5954         }
5955         clear_bit(In_sync, &rdev->flags);
5956         rdev->desc_nr = -1;
5957         rdev->saved_raid_disk = -1;
5958         err = bind_rdev_to_array(rdev, mddev);
5959         if (err)
5960                 goto abort_export;
5961
5962         /*
5963          * The rest should better be atomic, we can have disk failures
5964          * noticed in interrupt contexts ...
5965          */
5966
5967         rdev->raid_disk = -1;
5968
5969         md_update_sb(mddev, 1);
5970
5971         /*
5972          * Kick recovery, maybe this spare has to be added to the
5973          * array immediately.
5974          */
5975         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5976         md_wakeup_thread(mddev->thread);
5977         md_new_event(mddev);
5978         return 0;
5979
5980 abort_export:
5981         export_rdev(rdev);
5982         return err;
5983 }
5984
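     /*
      * SET_BITMAP_FILE: fd >= 0 attaches a file-backed write-intent
      * bitmap (the file must be regular, opened for write and not
      * otherwise in use); fd < 0 detaches and destroys the current one.
      * The array is quiesced around bitmap_create()/bitmap_load() so no
      * writes race with bitmap setup.
      */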
5985 static int set_bitmap_file(struct mddev *mddev, int fd)
5986 {
5987         int err = 0;
5988
5989         if (mddev->pers) {
5990                 if (!mddev->pers->quiesce || !mddev->thread)
5991                         return -EBUSY;
5992                 if (mddev->recovery || mddev->sync_thread)
5993                         return -EBUSY;
5994                 /* we should be able to change the bitmap.. */
5995         }
5996
5997         if (fd >= 0) {
5998                 struct inode *inode;
5999                 struct file *f;
6000
6001                 if (mddev->bitmap || mddev->bitmap_info.file)
6002                         return -EEXIST; /* cannot add when bitmap is present */
6003                 f = fget(fd);
6004
6005                 if (f == NULL) {
6006                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
6007                                mdname(mddev));
6008                         return -EBADF;
6009                 }
6010
6011                 inode = f->f_mapping->host;
6012                 if (!S_ISREG(inode->i_mode)) {
6013                         printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
6014                                mdname(mddev));
6015                         err = -EBADF;
6016                 } else if (!(f->f_mode & FMODE_WRITE)) {
6017                         printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
6018                                mdname(mddev));
6019                         err = -EBADF;
6020                 } else if (atomic_read(&inode->i_writecount) != 1) {
6021                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6022                                mdname(mddev));
6023                         err = -EBUSY;
6024                 }
6025                 if (err) {
6026                         fput(f);
6027                         return err;
6028                 }
6029                 mddev->bitmap_info.file = f;
6030                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6031         } else if (mddev->bitmap == NULL)
6032                 return -ENOENT; /* cannot remove what isn't there */
6033         err = 0;
6034         if (mddev->pers) {
6035                 mddev->pers->quiesce(mddev, 1);
6036                 if (fd >= 0) {
6037                         err = bitmap_create(mddev);
6038                         if (!err)
6039                                 err = bitmap_load(mddev);
6040                 }
6041                 if (fd < 0 || err) {
6042                         bitmap_destroy(mddev);
6043                         fd = -1; /* make sure to put the file */
6044                 }
6045                 mddev->pers->quiesce(mddev, 0);
6046         }
6047         if (fd < 0) {
6048                 struct file *f = mddev->bitmap_info.file;
6049                 if (f) {
6050                         spin_lock(&mddev->lock);
6051                         mddev->bitmap_info.file = NULL;
6052                         spin_unlock(&mddev->lock);
6053                         fput(f);
6054                 }
6055         }
6056
6057         return err;
6058 }
6059
6060 /*
6061  * set_array_info is used in two different ways.
6062  * The original usage is when creating a new array.
6063  * In this usage, raid_disks is > 0 and, together with
6064  *  level, size, not_persistent, layout and chunksize, it determines
6065  *  the shape of the array.
6066  *  This will always create an array with a type-0.90.0 superblock.
6067  * The newer usage is when assembling an array.
6068  *  In this case raid_disks will be 0, and the major_version field is
6069  *  used to determine which style of superblocks is to be found on the devices.
6070  *  The minor and patch _version numbers are also kept in case the
6071  *  superblock handler wishes to interpret them.
6072  */
6073 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6074 {
6075
6076         if (info->raid_disks == 0) {
6077                 /* just setting version number for superblock loading */
6078                 if (info->major_version < 0 ||
6079                     info->major_version >= ARRAY_SIZE(super_types) ||
6080                     super_types[info->major_version].name == NULL) {
6081                         /* maybe try to auto-load a module? */
6082                         printk(KERN_INFO
6083                                 "md: superblock version %d not known\n",
6084                                 info->major_version);
6085                         return -EINVAL;
6086                 }
6087                 mddev->major_version = info->major_version;
6088                 mddev->minor_version = info->minor_version;
6089                 mddev->patch_version = info->patch_version;
6090                 mddev->persistent = !info->not_persistent;
6091                 /* ensure mddev_put doesn't delete this now that there
6092                  * is some minimal configuration.
6093                  */
6094                 mddev->ctime         = get_seconds();
6095                 return 0;
6096         }
6097         mddev->major_version = MD_MAJOR_VERSION;
6098         mddev->minor_version = MD_MINOR_VERSION;
6099         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6100         mddev->ctime         = get_seconds();
6101
6102         mddev->level         = info->level;
6103         mddev->clevel[0]     = 0;
6104         mddev->dev_sectors   = 2 * (sector_t)info->size;
6105         mddev->raid_disks    = info->raid_disks;
6106         /* don't set md_minor, it is determined by which /dev/md* was
6107          * opened
6108          */
6109         if (info->state & (1<<MD_SB_CLEAN))
6110                 mddev->recovery_cp = MaxSector;
6111         else
6112                 mddev->recovery_cp = 0;
6113         mddev->persistent    = ! info->not_persistent;
6114         mddev->external      = 0;
6115
6116         mddev->layout        = info->layout;
6117         mddev->chunk_sectors = info->chunk_size >> 9;
6118
6119         mddev->max_disks     = MD_SB_DISKS;
6120
6121         if (mddev->persistent)
6122                 mddev->flags         = 0;
6123         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6124
6125         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6126         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6127         mddev->bitmap_info.offset = 0;
6128
6129         mddev->reshape_position = MaxSector;
6130
6131         /*
6132          * Generate a 128 bit UUID
6133          */
6134         get_random_bytes(mddev->uuid, 16);
6135
6136         mddev->new_level = mddev->level;
6137         mddev->new_chunk_sectors = mddev->chunk_sectors;
6138         mddev->new_layout = mddev->layout;
6139         mddev->delta_disks = 0;
6140         mddev->reshape_backwards = 0;
6141
6142         return 0;
6143 }
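     /*
      * Illustrative only -- how an assembly tool might use the second
      * form (raid_disks == 0) before adding devices (names and values
      * are hypothetical):
      *
      *	mdu_array_info_t info = { .major_version = 1, .minor_version = 2 };
      *
      *	ioctl(md_fd, SET_ARRAY_INFO, &info);	// announce sb version
      *	ioctl(md_fd, ADD_NEW_DISK, &disk_info);	// then each device
      *	ioctl(md_fd, RUN_ARRAY, NULL);		// finally start it
      */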
6144
6145 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6146 {
6147         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6148
6149         if (mddev->external_size)
6150                 return;
6151
6152         mddev->array_sectors = array_sectors;
6153 }
6154 EXPORT_SYMBOL(md_set_array_sectors);
6155
6156 static int update_size(struct mddev *mddev, sector_t num_sectors)
6157 {
6158         struct md_rdev *rdev;
6159         int rv;
6160         int fit = (num_sectors == 0);
6161
6162         if (mddev->pers->resize == NULL)
6163                 return -EINVAL;
6164         /* The "num_sectors" is the number of sectors of each device that
6165          * is used.  This can only make sense for arrays with redundancy.
6166          * linear and raid0 always use whatever space is available. We can only
6167          * consider changing this number if no resync or reconstruction is
6168          * happening, and if the new size is acceptable. It must fit before the
6169          * sb_start or, if that is <data_offset, it must fit before the size
6170          * of each device.  If num_sectors is zero, we find the largest size
6171          * that fits.
6172          */
6173         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6174             mddev->sync_thread)
6175                 return -EBUSY;
6176         if (mddev->ro)
6177                 return -EROFS;
6178
6179         rdev_for_each(rdev, mddev) {
6180                 sector_t avail = rdev->sectors;
6181
6182                 if (fit && (num_sectors == 0 || num_sectors > avail))
6183                         num_sectors = avail;
6184                 if (avail < num_sectors)
6185                         return -ENOSPC;
6186         }
6187         rv = mddev->pers->resize(mddev, num_sectors);
6188         if (!rv)
6189                 revalidate_disk(mddev->gendisk);
6190         return rv;
6191 }
6192
6193 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6194 {
6195         int rv;
6196         struct md_rdev *rdev;
6197         /* change the number of raid disks */
6198         if (mddev->pers->check_reshape == NULL)
6199                 return -EINVAL;
6200         if (mddev->ro)
6201                 return -EROFS;
6202         if (raid_disks <= 0 ||
6203             (mddev->max_disks && raid_disks >= mddev->max_disks))
6204                 return -EINVAL;
6205         if (mddev->sync_thread ||
6206             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6207             mddev->reshape_position != MaxSector)
6208                 return -EBUSY;
6209
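             /* When adding disks the new data offset may not be deeper into
              * the device than the old one, and when removing disks it may
              * not be shallower; otherwise the reshape would not have room
              * to copy without overwriting live data.
              */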
6210         rdev_for_each(rdev, mddev) {
6211                 if (mddev->raid_disks < raid_disks &&
6212                     rdev->data_offset < rdev->new_data_offset)
6213                         return -EINVAL;
6214                 if (mddev->raid_disks > raid_disks &&
6215                     rdev->data_offset > rdev->new_data_offset)
6216                         return -EINVAL;
6217         }
6218
6219         mddev->delta_disks = raid_disks - mddev->raid_disks;
6220         if (mddev->delta_disks < 0)
6221                 mddev->reshape_backwards = 1;
6222         else if (mddev->delta_disks > 0)
6223                 mddev->reshape_backwards = 0;
6224
6225         rv = mddev->pers->check_reshape(mddev);
6226         if (rv < 0) {
6227                 mddev->delta_disks = 0;
6228                 mddev->reshape_backwards = 0;
6229         }
6230         return rv;
6231 }
6232
6233 /*
6234  * update_array_info is used to change the configuration of an
6235  * on-line array.
6236  * The version, ctime, level, size, raid_disks, not_persistent, layout
6237  * and chunk_size fields in the info are checked against the array.
6238  * Any differences that cannot be handled will cause an error.
6239  * Normally, only one change can be managed at a time.
6240  */
6241 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6242 {
6243         int rv = 0;
6244         int cnt = 0;
6245         int state = 0;
6246
6247         /* calculate expected state, ignoring low bits */
6248         if (mddev->bitmap && mddev->bitmap_info.offset)
6249                 state |= (1 << MD_SB_BITMAP_PRESENT);
6250
6251         if (mddev->major_version != info->major_version ||
6252             mddev->minor_version != info->minor_version ||
6253 /*          mddev->patch_version != info->patch_version || */
6254             mddev->ctime         != info->ctime         ||
6255             mddev->level         != info->level         ||
6256 /*          mddev->layout        != info->layout        || */
6257             !mddev->persistent   != info->not_persistent||
6258             mddev->chunk_sectors != info->chunk_size >> 9 ||
6259             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6260             ((state^info->state) & 0xfffffe00)
6261                 )
6262                 return -EINVAL;
6263         /* Check there is only one change */
6264         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6265                 cnt++;
6266         if (mddev->raid_disks != info->raid_disks)
6267                 cnt++;
6268         if (mddev->layout != info->layout)
6269                 cnt++;
6270         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6271                 cnt++;
6272         if (cnt == 0)
6273                 return 0;
6274         if (cnt > 1)
6275                 return -EINVAL;
6276
6277         if (mddev->layout != info->layout) {
6278                 /* Change layout
6279                  * we don't need to do anything at the md level, the
6280                  * personality will take care of it all.
6281                  */
6282                 if (mddev->pers->check_reshape == NULL)
6283                         return -EINVAL;
6284                 else {
6285                         mddev->new_layout = info->layout;
6286                         rv = mddev->pers->check_reshape(mddev);
6287                         if (rv)
6288                                 mddev->new_layout = mddev->layout;
6289                         return rv;
6290                 }
6291         }
6292         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6293                 rv = update_size(mddev, (sector_t)info->size * 2);
6294
6295         if (mddev->raid_disks    != info->raid_disks)
6296                 rv = update_raid_disks(mddev, info->raid_disks);
6297
6298         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6299                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
6300                         return -EINVAL;
6301                 if (mddev->recovery || mddev->sync_thread)
6302                         return -EBUSY;
6303                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6304                         /* add the bitmap */
6305                         if (mddev->bitmap)
6306                                 return -EEXIST;
6307                         if (mddev->bitmap_info.default_offset == 0)
6308                                 return -EINVAL;
6309                         mddev->bitmap_info.offset =
6310                                 mddev->bitmap_info.default_offset;
6311                         mddev->bitmap_info.space =
6312                                 mddev->bitmap_info.default_space;
6313                         mddev->pers->quiesce(mddev, 1);
6314                         rv = bitmap_create(mddev);
6315                         if (!rv)
6316                                 rv = bitmap_load(mddev);
6317                         if (rv)
6318                                 bitmap_destroy(mddev);
6319                         mddev->pers->quiesce(mddev, 0);
6320                 } else {
6321                         /* remove the bitmap */
6322                         if (!mddev->bitmap)
6323                                 return -ENOENT;
6324                         if (mddev->bitmap->storage.file)
6325                                 return -EINVAL;
6326                         mddev->pers->quiesce(mddev, 1);
6327                         bitmap_destroy(mddev);
6328                         mddev->pers->quiesce(mddev, 0);
6329                         mddev->bitmap_info.offset = 0;
6330                 }
6331         }
6332         md_update_sb(mddev, 1);
6333         return rv;
6334 }
6335
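     /*
      * SET_DISK_FAULTY: simulate a failure of the given device via
      * md_error().  If the personality declined to fail it (e.g. it was
      * the last working device), Faulty stays clear and we return -EBUSY.
      */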
6336 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6337 {
6338         struct md_rdev *rdev;
6339         int err = 0;
6340
6341         if (mddev->pers == NULL)
6342                 return -ENODEV;
6343
6344         rcu_read_lock();
6345         rdev = find_rdev_rcu(mddev, dev);
6346         if (!rdev)
6347                 err =  -ENODEV;
6348         else {
6349                 md_error(mddev, rdev);
6350                 if (!test_bit(Faulty, &rdev->flags))
6351                         err = -EBUSY;
6352         }
6353         rcu_read_unlock();
6354         return err;
6355 }
6356
6357 /*
6358  * We have a problem here: there is no easy way to give a CHS
6359  * virtual geometry. We currently pretend to have 2 heads and
6360  * 4 sectors (hence cylinders = array_sectors / 8, with a BIG
6361  * number of cylinders...). This drives dosfs just mad... ;-)
6362  */
6363 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6364 {
6365         struct mddev *mddev = bdev->bd_disk->private_data;
6366
6367         geo->heads = 2;
6368         geo->sectors = 4;
6369         geo->cylinders = mddev->array_sectors / 8;
6370         return 0;
6371 }
6372
6373 static inline bool md_ioctl_valid(unsigned int cmd)
6374 {
6375         switch (cmd) {
6376         case ADD_NEW_DISK:
6377         case BLKROSET:
6378         case GET_ARRAY_INFO:
6379         case GET_BITMAP_FILE:
6380         case GET_DISK_INFO:
6381         case HOT_ADD_DISK:
6382         case HOT_REMOVE_DISK:
6383         case RAID_AUTORUN:
6384         case RAID_VERSION:
6385         case RESTART_ARRAY_RW:
6386         case RUN_ARRAY:
6387         case SET_ARRAY_INFO:
6388         case SET_BITMAP_FILE:
6389         case SET_DISK_FAULTY:
6390         case STOP_ARRAY:
6391         case STOP_ARRAY_RO:
6392                 return true;
6393         default:
6394                 return false;
6395         }
6396 }
6397
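     /*
      * Main ioctl dispatcher for an md device.  The flow is: validate
      * the command, require CAP_SYS_ADMIN for anything that modifies
      * state, handle commands that need no array or no locking, and only
      * then take mddev_lock() for the ones that reconfigure the array.
      */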
6398 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6399                         unsigned int cmd, unsigned long arg)
6400 {
6401         int err = 0;
6402         void __user *argp = (void __user *)arg;
6403         struct mddev *mddev = NULL;
6404         int ro;
6405
6406         if (!md_ioctl_valid(cmd))
6407                 return -ENOTTY;
6408
6409         switch (cmd) {
6410         case RAID_VERSION:
6411         case GET_ARRAY_INFO:
6412         case GET_DISK_INFO:
6413                 break;
6414         default:
6415                 if (!capable(CAP_SYS_ADMIN))
6416                         return -EACCES;
6417         }
6418
6419         /*
6420          * Commands dealing with the RAID driver but not any
6421          * particular array:
6422          */
6423         switch (cmd) {
6424         case RAID_VERSION:
6425                 err = get_version(argp);
6426                 goto out;
6427
6428 #ifndef MODULE
6429         case RAID_AUTORUN:
6430                 err = 0;
6431                 autostart_arrays(arg);
6432                 goto out;
6433 #endif
6434         default:;
6435         }
6436
6437         /*
6438          * Commands creating/starting a new array:
6439          */
6440
6441         mddev = bdev->bd_disk->private_data;
6442
6443         if (!mddev) {
6444                 BUG();
6445                 goto out;
6446         }
6447
6448         /* Some actions do not require the mutex */
6449         switch (cmd) {
6450         case GET_ARRAY_INFO:
6451                 if (!mddev->raid_disks && !mddev->external)
6452                         err = -ENODEV;
6453                 else
6454                         err = get_array_info(mddev, argp);
6455                 goto out;
6456
6457         case GET_DISK_INFO:
6458                 if (!mddev->raid_disks && !mddev->external)
6459                         err = -ENODEV;
6460                 else
6461                         err = get_disk_info(mddev, argp);
6462                 goto out;
6463
6464         case SET_DISK_FAULTY:
6465                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6466                 goto out;
6467
6468         case GET_BITMAP_FILE:
6469                 err = get_bitmap_file(mddev, argp);
6470                 goto out;
6471
6472         }
6473
6474         if (cmd == ADD_NEW_DISK)
6475                 /* need to ensure md_delayed_delete() has completed */
6476                 flush_workqueue(md_misc_wq);
6477
6478         if (cmd == HOT_REMOVE_DISK)
6479                 /* need to ensure recovery thread has run */
6480                 wait_event_interruptible_timeout(mddev->sb_wait,
6481                                                  !test_bit(MD_RECOVERY_NEEDED,
6482                                                            &mddev->recovery),
6483                                                  msecs_to_jiffies(5000));
6484         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6485                 /* Need to flush page cache, and ensure no-one else opens
6486                  * and writes
6487                  */
6488                 mutex_lock(&mddev->open_mutex);
6489                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6490                         mutex_unlock(&mddev->open_mutex);
6491                         err = -EBUSY;
6492                         goto out;
6493                 }
6494                 set_bit(MD_STILL_CLOSED, &mddev->flags);
6495                 mutex_unlock(&mddev->open_mutex);
6496                 sync_blockdev(bdev);
6497         }
6498         err = mddev_lock(mddev);
6499         if (err) {
6500                 printk(KERN_INFO
6501                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
6502                         err, cmd);
6503                 goto out;
6504         }
6505
6506         if (cmd == SET_ARRAY_INFO) {
6507                 mdu_array_info_t info;
6508                 if (!arg)
6509                         memset(&info, 0, sizeof(info));
6510                 else if (copy_from_user(&info, argp, sizeof(info))) {
6511                         err = -EFAULT;
6512                         goto unlock;
6513                 }
6514                 if (mddev->pers) {
6515                         err = update_array_info(mddev, &info);
6516                         if (err) {
6517                                 printk(KERN_WARNING "md: couldn't update"
6518                                        " array info. %d\n", err);
6519                                 goto unlock;
6520                         }
6521                         goto unlock;
6522                 }
6523                 if (!list_empty(&mddev->disks)) {
6524                         printk(KERN_WARNING
6525                                "md: array %s already has disks!\n",
6526                                mdname(mddev));
6527                         err = -EBUSY;
6528                         goto unlock;
6529                 }
6530                 if (mddev->raid_disks) {
6531                         printk(KERN_WARNING
6532                                "md: array %s already initialised!\n",
6533                                mdname(mddev));
6534                         err = -EBUSY;
6535                         goto unlock;
6536                 }
6537                 err = set_array_info(mddev, &info);
6538                 if (err) {
6539                         printk(KERN_WARNING "md: couldn't set"
6540                                " array info. %d\n", err);
6541                         goto unlock;
6542                 }
6543                 goto unlock;
6544         }
6545
6546         /*
6547          * Commands querying/configuring an existing array:
6548          */
6549         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6550          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6551         if ((!mddev->raid_disks && !mddev->external)
6552             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6553             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6554             && cmd != GET_BITMAP_FILE) {
6555                 err = -ENODEV;
6556                 goto unlock;
6557         }
6558
6559         /*
6560          * Commands even a read-only array can execute:
6561          */
6562         switch (cmd) {
6563         case RESTART_ARRAY_RW:
6564                 err = restart_array(mddev);
6565                 goto unlock;
6566
6567         case STOP_ARRAY:
6568                 err = do_md_stop(mddev, 0, bdev);
6569                 goto unlock;
6570
6571         case STOP_ARRAY_RO:
6572                 err = md_set_readonly(mddev, bdev);
6573                 goto unlock;
6574
6575         case HOT_REMOVE_DISK:
6576                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6577                 goto unlock;
6578
6579         case ADD_NEW_DISK:
6580                 /* We can support ADD_NEW_DISK on read-only arrays
6581                  * only if we are re-adding a preexisting device.
6582                  * So require mddev->pers and MD_DISK_SYNC.
6583                  */
6584                 if (mddev->pers) {
6585                         mdu_disk_info_t info;
6586                         if (copy_from_user(&info, argp, sizeof(info)))
6587                                 err = -EFAULT;
6588                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6589                                 /* Need to clear read-only for this */
6590                                 break;
6591                         else
6592                                 err = add_new_disk(mddev, &info);
6593                         goto unlock;
6594                 }
6595                 break;
6596
6597         case BLKROSET:
6598                 if (get_user(ro, (int __user *)(arg))) {
6599                         err = -EFAULT;
6600                         goto unlock;
6601                 }
6602                 err = -EINVAL;
6603
6604                 /* if the bdev is going readonly the value of mddev->ro
6605                  * does not matter, no writes are coming
6606                  */
6607                 if (ro)
6608                         goto unlock;
6609
6610                 /* are we already prepared for writes? */
6611                 if (mddev->ro != 1)
6612                         goto unlock;
6613
6614                 /* transitioning to read-auto (ro == 2) need only happen
6615                  * for arrays that call md_write_start
6616                  */
6617                 if (mddev->pers) {
6618                         err = restart_array(mddev);
6619                         if (err == 0) {
6620                                 mddev->ro = 2;
6621                                 set_disk_ro(mddev->gendisk, 0);
6622                         }
6623                 }
6624                 goto unlock;
6625         }
6626
6627         /*
6628          * The remaining ioctls are changing the state of the
6629          * superblock, so we do not allow them on read-only arrays.
6630          */
6631         if (mddev->ro && mddev->pers) {
6632                 if (mddev->ro == 2) {
6633                         mddev->ro = 0;
6634                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6635                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6636                         /* mddev_unlock will wake thread */
6637                         /* If a device failed while we were read-only, we
6638                          * need to make sure the metadata is updated now.
6639                          */
6640                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6641                                 mddev_unlock(mddev);
6642                                 wait_event(mddev->sb_wait,
6643                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6644                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6645                                 mddev_lock_nointr(mddev);
6646                         }
6647                 } else {
6648                         err = -EROFS;
6649                         goto unlock;
6650                 }
6651         }
6652
6653         switch (cmd) {
6654         case ADD_NEW_DISK:
6655         {
6656                 mdu_disk_info_t info;
6657                 if (copy_from_user(&info, argp, sizeof(info)))
6658                         err = -EFAULT;
6659                 else
6660                         err = add_new_disk(mddev, &info);
6661                 goto unlock;
6662         }
6663
6664         case HOT_ADD_DISK:
6665                 err = hot_add_disk(mddev, new_decode_dev(arg));
6666                 goto unlock;
6667
6668         case RUN_ARRAY:
6669                 err = do_md_run(mddev);
6670                 goto unlock;
6671
6672         case SET_BITMAP_FILE:
6673                 err = set_bitmap_file(mddev, (int)arg);
6674                 goto unlock;
6675
6676         default:
6677                 err = -EINVAL;
6678                 goto unlock;
6679         }
6680
6681 unlock:
6682         if (mddev->hold_active == UNTIL_IOCTL &&
6683             err != -EINVAL)
6684                 mddev->hold_active = 0;
6685         mddev_unlock(mddev);
6686 out:
6687         return err;
6688 }
6689 #ifdef CONFIG_COMPAT
6690 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
6691                     unsigned int cmd, unsigned long arg)
6692 {
6693         switch (cmd) {
6694         case HOT_REMOVE_DISK:
6695         case HOT_ADD_DISK:
6696         case SET_DISK_FAULTY:
6697         case SET_BITMAP_FILE:
6698                 /* These take an integer arg, do not convert */
6699                 break;
6700         default:
6701                 arg = (unsigned long)compat_ptr(arg);
6702                 break;
6703         }
6704
6705         return md_ioctl(bdev, mode, cmd, arg);
6706 }
6707 #endif /* CONFIG_COMPAT */
6708
6709 static int md_open(struct block_device *bdev, fmode_t mode)
6710 {
6711         /*
6712          * Succeed if we can lock the mddev, which confirms that
6713          * it isn't being stopped right now.
6714          */
6715         struct mddev *mddev = mddev_find(bdev->bd_dev);
6716         int err;
6717
6718         if (!mddev)
6719                 return -ENODEV;
6720
6721         if (mddev->gendisk != bdev->bd_disk) {
6722                 /* we are racing with mddev_put which is discarding this
6723                  * bd_disk.
6724                  */
6725                 mddev_put(mddev);
6726                 /* Wait until bdev->bd_disk is definitely gone */
6727                 flush_workqueue(md_misc_wq);
6728                 /* Then retry the open from the top */
6729                 return -ERESTARTSYS;
6730         }
6731         BUG_ON(mddev != bdev->bd_disk->private_data);
6732
6733         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6734                 goto out;
6735
6736         err = 0;
6737         atomic_inc(&mddev->openers);
6738         clear_bit(MD_STILL_CLOSED, &mddev->flags);
6739         mutex_unlock(&mddev->open_mutex);
6740
6741         check_disk_change(bdev);
6742  out:
6743         return err;
6744 }
6745
6746 static void md_release(struct gendisk *disk, fmode_t mode)
6747 {
6748         struct mddev *mddev = disk->private_data;
6749
6750         BUG_ON(!mddev);
6751         atomic_dec(&mddev->openers);
6752         mddev_put(mddev);
6753 }
6754
6755 static int md_media_changed(struct gendisk *disk)
6756 {
6757         struct mddev *mddev = disk->private_data;
6758
6759         return mddev->changed;
6760 }
6761
6762 static int md_revalidate(struct gendisk *disk)
6763 {
6764         struct mddev *mddev = disk->private_data;
6765
6766         mddev->changed = 0;
6767         return 0;
6768 }
6769 static const struct block_device_operations md_fops =
6770 {
6771         .owner          = THIS_MODULE,
6772         .open           = md_open,
6773         .release        = md_release,
6774         .ioctl          = md_ioctl,
6775 #ifdef CONFIG_COMPAT
6776         .compat_ioctl   = md_compat_ioctl,
6777 #endif
6778         .getgeo         = md_getgeo,
6779         .media_changed  = md_media_changed,
6780         .revalidate_disk= md_revalidate,
6781 };
6782
6783 static int md_thread(void *arg)
6784 {
6785         struct md_thread *thread = arg;
6786
6787         /*
6788          * md_thread is a 'system-thread'; its priority should be very
6789          * high. We avoid resource deadlocks individually in each
6790          * raid personality. (RAID5 does preallocation) We also use RR and
6791          * the very same RT priority as kswapd, thus we will never get
6792          * into a priority inversion deadlock.
6793          *
6794          * we definitely have to have equal or higher priority than
6795          * bdflush, otherwise bdflush will deadlock if there are too
6796          * many dirty RAID5 blocks.
6797          */
6798
6799         allow_signal(SIGKILL);
6800         while (!kthread_should_stop()) {
6801
6802                 /* We need to wait INTERRUPTIBLE so that
6803                  * we don't add to the load-average.
6804                  * That means we need to be sure no signals are
6805                  * pending
6806                  */
6807                 if (signal_pending(current))
6808                         flush_signals(current);
6809
6810                 wait_event_interruptible_timeout
6811                         (thread->wqueue,
6812                          test_bit(THREAD_WAKEUP, &thread->flags)
6813                          || kthread_should_stop(),
6814                          thread->timeout);
6815
6816                 clear_bit(THREAD_WAKEUP, &thread->flags);
6817                 if (!kthread_should_stop())
6818                         thread->run(thread);
6819         }
6820
6821         return 0;
6822 }
6823
6824 void md_wakeup_thread(struct md_thread *thread)
6825 {
6826         if (thread) {
6827                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
6828                 set_bit(THREAD_WAKEUP, &thread->flags);
6829                 wake_up(&thread->wqueue);
6830         }
6831 }
6832 EXPORT_SYMBOL(md_wakeup_thread);
6833
6834 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
6835                 struct mddev *mddev, const char *name)
6836 {
6837         struct md_thread *thread;
6838
6839         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
6840         if (!thread)
6841                 return NULL;
6842
6843         init_waitqueue_head(&thread->wqueue);
6844
6845         thread->run = run;
6846         thread->mddev = mddev;
6847         thread->timeout = MAX_SCHEDULE_TIMEOUT;
6848         thread->tsk = kthread_run(md_thread, thread,
6849                                   "%s_%s",
6850                                   mdname(thread->mddev),
6851                                   name);
6852         if (IS_ERR(thread->tsk)) {
6853                 kfree(thread);
6854                 return NULL;
6855         }
6856         return thread;
6857 }
6858 EXPORT_SYMBOL(md_register_thread);
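     /*
      * For illustration: a personality registers its event loop roughly
      * like this (raid1d being that personality's service routine):
      *
      *	mddev->thread = md_register_thread(raid1d, mddev, "raid1");
      *	if (!mddev->thread)
      *		goto abort;
      */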
6859
6860 void md_unregister_thread(struct md_thread **threadp)
6861 {
6862         struct md_thread *thread = *threadp;
6863         if (!thread)
6864                 return;
6865         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6866         /* Locking ensures that mddev_unlock does not wake_up a
6867          * non-existent thread
6868          */
6869         spin_lock(&pers_lock);
6870         *threadp = NULL;
6871         spin_unlock(&pers_lock);
6872
6873         kthread_stop(thread->tsk);
6874         kfree(thread);
6875 }
6876 EXPORT_SYMBOL(md_unregister_thread);
6877
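     /*
      * md_error() is the common entry point for reporting a device
      * failure: it hands the rdev to the personality's error_handler,
      * then interrupts any resync and schedules the recovery thread.
      * It is safe in interrupt context, hence md_new_event_inintr().
      */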
6878 void md_error(struct mddev *mddev, struct md_rdev *rdev)
6879 {
6880         if (!rdev || test_bit(Faulty, &rdev->flags))
6881                 return;
6882
6883         if (!mddev->pers || !mddev->pers->error_handler)
6884                 return;
6885         mddev->pers->error_handler(mddev,rdev);
6886         if (mddev->degraded)
6887                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6888         sysfs_notify_dirent_safe(rdev->sysfs_state);
6889         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6890         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6891         md_wakeup_thread(mddev->thread);
6892         if (mddev->event_work.func)
6893                 queue_work(md_misc_wq, &mddev->event_work);
6894         md_new_event_inintr(mddev);
6895 }
6896 EXPORT_SYMBOL(md_error);
6897
6898 /* seq_file implementation for /proc/mdstat */
6899
6900 static void status_unused(struct seq_file *seq)
6901 {
6902         int i = 0;
6903         struct md_rdev *rdev;
6904
6905         seq_printf(seq, "unused devices: ");
6906
6907         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6908                 char b[BDEVNAME_SIZE];
6909                 i++;
6910                 seq_printf(seq, "%s ",
6911                               bdevname(rdev->bdev,b));
6912         }
6913         if (!i)
6914                 seq_printf(seq, "<none>");
6915
6916         seq_printf(seq, "\n");
6917 }
6918
6919 static void status_resync(struct seq_file *seq, struct mddev *mddev)
6920 {
6921         sector_t max_sectors, resync, res;
6922         unsigned long dt, db;
6923         sector_t rt;
6924         int scale;
6925         unsigned int per_milli;
6926
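             /* curr_resync values <= 3 are 'not really started yet' markers
              * rather than sector counts (cf. md_seq_show), so treat them
              * as no progress.
              */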
6927         if (mddev->curr_resync <= 3)
6928                 resync = 0;
6929         else
6930                 resync = mddev->curr_resync
6931                         - atomic_read(&mddev->recovery_active);
6932
6933         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
6934             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6935                 max_sectors = mddev->resync_max_sectors;
6936         else
6937                 max_sectors = mddev->dev_sectors;
6938
6939         WARN_ON(max_sectors == 0);
6940         /* Pick 'scale' such that (resync>>scale)*1000 will fit
6941          * in a sector_t, and (max_sectors>>scale) will fit in a
6942          * u32, as those are the requirements for sector_div.
6943          * Thus 'scale' must be at least 10
6944          */
6945         scale = 10;
6946         if (sizeof(sector_t) > sizeof(unsigned long)) {
6947                 while ( max_sectors/2 > (1ULL<<(scale+32)))
6948                         scale++;
6949         }
6950         res = (resync>>scale)*1000;
6951         sector_div(res, (u32)((max_sectors>>scale)+1));
6952
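             /* 'res' is now the completed fraction in parts per thousand;
              * render it below as a 20-character bar ('=' per 5%) and a
              * percentage with one decimal place.
              */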
6953         per_milli = res;
6954         {
6955                 int i, x = per_milli/50, y = 20-x;
6956                 seq_printf(seq, "[");
6957                 for (i = 0; i < x; i++)
6958                         seq_printf(seq, "=");
6959                 seq_printf(seq, ">");
6960                 for (i = 0; i < y; i++)
6961                         seq_printf(seq, ".");
6962                 seq_printf(seq, "] ");
6963         }
6964         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6965                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6966                     "reshape" :
6967                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6968                      "check" :
6969                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6970                       "resync" : "recovery"))),
6971                    per_milli/10, per_milli % 10,
6972                    (unsigned long long) resync/2,
6973                    (unsigned long long) max_sectors/2);
6974
6975         /*
6976          * dt: time from mark until now
6977          * db: blocks written from mark until now
6978          * rt: remaining time
6979          *
6980          * rt is a sector_t, so could be 32bit or 64bit.
6981          * So we divide before multiply in case it is 32bit and close
6982          * to the limit.
6983          * We scale the divisor (db) by 32 to avoid losing precision
6984          * near the end of resync when the number of remaining sectors
6985          * is close to 'db'.
6986          * We then divide rt by 32 after multiplying by db to compensate.
6987          * The '+1' avoids division by zero if db is very small.
6988          */
6989         dt = ((jiffies - mddev->resync_mark) / HZ);
6990         if (!dt) dt++;
6991         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
6992                 - mddev->resync_mark_cnt;
6993
6994         rt = max_sectors - resync;    /* number of remaining sectors */
6995         sector_div(rt, db/32+1);
6996         rt *= dt;
6997         rt >>= 5;
6998
6999         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7000                    ((unsigned long)rt % 60)/6);
7001
7002         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7003 }
7004
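     /*
      * The mdstat seq_file iterator yields two magic cookies as well as
      * real mddevs: (void *)1 for the header ("Personalities : ...")
      * line and (void *)2 for the trailing "unused devices:" line;
      * md_seq_show() keys off these values.
      */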
7005 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7006 {
7007         struct list_head *tmp;
7008         loff_t l = *pos;
7009         struct mddev *mddev;
7010
7011         if (l >= 0x10000)
7012                 return NULL;
7013         if (!l--)
7014                 /* header */
7015                 return (void*)1;
7016
7017         spin_lock(&all_mddevs_lock);
7018         list_for_each(tmp,&all_mddevs)
7019                 if (!l--) {
7020                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7021                         mddev_get(mddev);
7022                         spin_unlock(&all_mddevs_lock);
7023                         return mddev;
7024                 }
7025         spin_unlock(&all_mddevs_lock);
7026         if (!l--)
7027                 return (void*)2;/* tail */
7028         return NULL;
7029 }
7030
7031 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7032 {
7033         struct list_head *tmp;
7034         struct mddev *next_mddev, *mddev = v;
7035
7036         ++*pos;
7037         if (v == (void*)2)
7038                 return NULL;
7039
7040         spin_lock(&all_mddevs_lock);
7041         if (v == (void*)1)
7042                 tmp = all_mddevs.next;
7043         else
7044                 tmp = mddev->all_mddevs.next;
7045         if (tmp != &all_mddevs)
7046                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7047         else {
7048                 next_mddev = (void*)2;
7049                 *pos = 0x10000;
7050         }
7051         spin_unlock(&all_mddevs_lock);
7052
7053         if (v != (void*)1)
7054                 mddev_put(mddev);
7055         return next_mddev;
7056
7057 }
7058
7059 static void md_seq_stop(struct seq_file *seq, void *v)
7060 {
7061         struct mddev *mddev = v;
7062
7063         if (mddev && v != (void*)1 && v != (void*)2)
7064                 mddev_put(mddev);
7065 }
7066
7067 static int md_seq_show(struct seq_file *seq, void *v)
7068 {
7069         struct mddev *mddev = v;
7070         sector_t sectors;
7071         struct md_rdev *rdev;
7072
7073         if (v == (void*)1) {
7074                 struct md_personality *pers;
7075                 seq_printf(seq, "Personalities : ");
7076                 spin_lock(&pers_lock);
7077                 list_for_each_entry(pers, &pers_list, list)
7078                         seq_printf(seq, "[%s] ", pers->name);
7079
7080                 spin_unlock(&pers_lock);
7081                 seq_printf(seq, "\n");
7082                 seq->poll_event = atomic_read(&md_event_count);
7083                 return 0;
7084         }
7085         if (v == (void*)2) {
7086                 status_unused(seq);
7087                 return 0;
7088         }
7089
7090         spin_lock(&mddev->lock);
7091         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7092                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7093                                                 mddev->pers ? "" : "in");
7094                 if (mddev->pers) {
7095                         if (mddev->ro==1)
7096                                 seq_printf(seq, " (read-only)");
7097                         if (mddev->ro==2)
7098                                 seq_printf(seq, " (auto-read-only)");
7099                         seq_printf(seq, " %s", mddev->pers->name);
7100                 }
7101
7102                 sectors = 0;
7103                 rcu_read_lock();
7104                 rdev_for_each_rcu(rdev, mddev) {
7105                         char b[BDEVNAME_SIZE];
7106                         seq_printf(seq, " %s[%d]",
7107                                 bdevname(rdev->bdev, b), rdev->desc_nr);
7108                         if (test_bit(WriteMostly, &rdev->flags))
7109                                 seq_printf(seq, "(W)");
7110                         if (test_bit(Faulty, &rdev->flags)) {
7111                                 seq_printf(seq, "(F)");
7112                                 continue;
7113                         }
7114                         if (rdev->raid_disk < 0)
7115                                 seq_printf(seq, "(S)"); /* spare */
7116                         if (test_bit(Replacement, &rdev->flags))
7117                                 seq_printf(seq, "(R)");
7118                         sectors += rdev->sectors;
7119                 }
7120                 rcu_read_unlock();
7121
7122                 if (!list_empty(&mddev->disks)) {
7123                         if (mddev->pers)
7124                                 seq_printf(seq, "\n      %llu blocks",
7125                                            (unsigned long long)
7126                                            mddev->array_sectors / 2);
7127                         else
7128                                 seq_printf(seq, "\n      %llu blocks",
7129                                            (unsigned long long)sectors / 2);
7130                 }
7131                 if (mddev->persistent) {
7132                         if (mddev->major_version != 0 ||
7133                             mddev->minor_version != 90) {
7134                                 seq_printf(seq," super %d.%d",
7135                                            mddev->major_version,
7136                                            mddev->minor_version);
7137                         }
7138                 } else if (mddev->external)
7139                         seq_printf(seq, " super external:%s",
7140                                    mddev->metadata_type);
7141                 else
7142                         seq_printf(seq, " super non-persistent");
7143
7144                 if (mddev->pers) {
7145                         mddev->pers->status(seq, mddev);
7146                         seq_printf(seq, "\n      ");
7147                         if (mddev->pers->sync_request) {
7148                                 if (mddev->curr_resync > 2) {
7149                                         status_resync(seq, mddev);
7150                                         seq_printf(seq, "\n      ");
7151                                 } else if (mddev->curr_resync >= 1)
7152                                         seq_printf(seq, "\tresync=DELAYED\n      ");
7153                                 else if (mddev->recovery_cp < MaxSector)
7154                                         seq_printf(seq, "\tresync=PENDING\n      ");
7155                         }
7156                 } else
7157                         seq_printf(seq, "\n       ");
7158
7159                 bitmap_status(seq, mddev->bitmap);
7160
7161                 seq_printf(seq, "\n");
7162         }
7163         spin_unlock(&mddev->lock);
7164
7165         return 0;
7166 }
7167
7168 static const struct seq_operations md_seq_ops = {
7169         .start  = md_seq_start,
7170         .next   = md_seq_next,
7171         .stop   = md_seq_stop,
7172         .show   = md_seq_show,
7173 };
7174
7175 static int md_seq_open(struct inode *inode, struct file *file)
7176 {
7177         struct seq_file *seq;
7178         int error;
7179
7180         error = seq_open(file, &md_seq_ops);
7181         if (error)
7182                 return error;
7183
7184         seq = file->private_data;
7185         seq->poll_event = atomic_read(&md_event_count);
7186         return error;
7187 }
7188
7189 static int md_unloading;
7190 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7191 {
7192         struct seq_file *seq = filp->private_data;
7193         int mask;
7194
7195         if (md_unloading)
7196                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7197         poll_wait(filp, &md_event_waiters, wait);
7198
7199         /* always allow read */
7200         mask = POLLIN | POLLRDNORM;
7201
7202         if (seq->poll_event != atomic_read(&md_event_count))
7203                 mask |= POLLERR | POLLPRI;
7204         return mask;
7205 }
7206
7207 static const struct file_operations md_seq_fops = {
7208         .owner          = THIS_MODULE,
7209         .open           = md_seq_open,
7210         .read           = seq_read,
7211         .llseek         = seq_lseek,
7212         .release        = seq_release_private,
7213         .poll           = mdstat_poll,
7214 };
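
/*
 * Hedged userspace sketch (not driver code): the poll support above
 * lets a monitor sleep until md_event_count changes instead of
 * re-reading /proc/mdstat in a loop.  Events are signalled as
 * POLLPRI|POLLERR; re-reading the file rearms the poll.
 */
#if 0   /* example only */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct pollfd pfd = { .events = POLLPRI };
        ssize_t n;

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        if (pfd.fd < 0)
                return 1;

        for (;;) {
                n = pread(pfd.fd, buf, sizeof(buf) - 1, 0);
                if (n < 0)
                        break;
                buf[n] = '\0';
                fputs(buf, stdout);             /* current array status */
                if (poll(&pfd, 1, -1) < 0)      /* wait for next event */
                        break;
        }
        close(pfd.fd);
        return 0;
}
#endif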
7215
7216 int register_md_personality(struct md_personality *p)
7217 {
7218         printk(KERN_INFO "md: %s personality registered for level %d\n",
7219                                                 p->name, p->level);
7220         spin_lock(&pers_lock);
7221         list_add_tail(&p->list, &pers_list);
7222         spin_unlock(&pers_lock);
7223         return 0;
7224 }
7225 EXPORT_SYMBOL(register_md_personality);
7226
7227 int unregister_md_personality(struct md_personality *p)
7228 {
7229         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
7230         spin_lock(&pers_lock);
7231         list_del_init(&p->list);
7232         spin_unlock(&pers_lock);
7233         return 0;
7234 }
7235 EXPORT_SYMBOL(unregister_md_personality);
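
/*
 * Illustrative sketch, not part of md.c: a personality module hooks
 * itself up with the register/unregister pair above from its module
 * init/exit routines.  'example_personality' and its level are
 * hypothetical; a real personality (e.g. raid1.c) also fills in
 * make_request(), run() and the other methods.
 */
#if 0   /* example only */
static struct md_personality example_personality = {
        .name   = "example",
        .level  = -1000,        /* any unused level number */
        .owner  = THIS_MODULE,
        /* .make_request, .run, ... */
};

static int __init example_init(void)
{
        return register_md_personality(&example_personality);
}

static void __exit example_exit(void)
{
        unregister_md_personality(&example_personality);
}

module_init(example_init);
module_exit(example_exit);
#endif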
7236
7237 static int is_mddev_idle(struct mddev *mddev, int init)
7238 {
7239         struct md_rdev *rdev;
7240         int idle;
7241         int curr_events;
7242
7243         idle = 1;
7244         rcu_read_lock();
7245         rdev_for_each_rcu(rdev, mddev) {
7246                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7247                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7248                               (int)part_stat_read(&disk->part0, sectors[1]) -
7249                               atomic_read(&disk->sync_io);
7250                 /* sync IO will cause sync_io to increase before the disk_stats
7251                  * as sync_io is counted when a request starts, and
7252                  * disk_stats is counted when it completes.
7253                  * So resync activity will cause curr_events to be smaller than
7254                  * when there was no such activity.
7255                  * non-sync IO will cause disk_stats to increase without
7256                  * increasing sync_io so curr_events will (eventually)
7257                  * be larger than it was before.  Once it becomes
7258                  * substantially larger, the test below will cause
7259                  * the array to appear non-idle, and resync will slow
7260                  * down.
7261                  * If there is a lot of outstanding resync activity when
7262                  * we set last_events to curr_events, then all that activity
7263                  * completing might cause the array to appear non-idle
7264                  * and resync will be slowed down even though there might
7265                  * not have been non-resync activity.  This will only
7266                  * happen once though.  'last_events' will soon reflect
7267                  * the state where there is little or no outstanding
7268                  * resync requests, and further resync activity will
7269                  * always make curr_events less than last_events.
7270                  *
7271                  */
7272                 if (init || curr_events - rdev->last_events > 64) {
7273                         rdev->last_events = curr_events;
7274                         idle = 0;
7275                 }
7276         }
7277         rcu_read_unlock();
7278         return idle;
7279 }
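
/*
 * Sketch (not driver code) of the heuristic above: the per-device
 * event count is total sectors transferred minus sectors issued for
 * resync, compared against the last snapshot with a 64-sector fuzz
 * for in-flight resync IO.  A hypothetical standalone form:
 */
#if 0   /* example only */
static int disk_seems_idle(int curr_events, int *last_events, int init)
{
        if (init || curr_events - *last_events > 64) {
                *last_events = curr_events;
                return 0;       /* non-resync IO seen: not idle */
        }
        return 1;
}
#endif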
7280
7281 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7282 {
7283         /* another "blocks" (512-byte) blocks have been synced */
7284         atomic_sub(blocks, &mddev->recovery_active);
7285         wake_up(&mddev->recovery_wait);
7286         if (!ok) {
7287                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7288                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7289                 md_wakeup_thread(mddev->thread);
7290                 /* stop recovery, signal do_sync ... */
7291         }
7292 }
7293 EXPORT_SYMBOL(md_done_sync);
7294
7295 /* md_write_start(mddev, bi)
7296  * If we need to update some array metadata (e.g. 'active' flag
7297  * in superblock) before writing, schedule a superblock update
7298  * and wait for it to complete.
7299  */
7300 void md_write_start(struct mddev *mddev, struct bio *bi)
7301 {
7302         int did_change = 0;
7303         if (bio_data_dir(bi) != WRITE)
7304                 return;
7305
7306         BUG_ON(mddev->ro == 1);
7307         if (mddev->ro == 2) {
7308                 /* need to switch to read/write */
7309                 mddev->ro = 0;
7310                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7311                 md_wakeup_thread(mddev->thread);
7312                 md_wakeup_thread(mddev->sync_thread);
7313                 did_change = 1;
7314         }
7315         atomic_inc(&mddev->writes_pending);
7316         if (mddev->safemode == 1)
7317                 mddev->safemode = 0;
7318         if (mddev->in_sync) {
7319                 spin_lock(&mddev->lock);
7320                 if (mddev->in_sync) {
7321                         mddev->in_sync = 0;
7322                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7323                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7324                         md_wakeup_thread(mddev->thread);
7325                         did_change = 1;
7326                 }
7327                 spin_unlock(&mddev->lock);
7328         }
7329         if (did_change)
7330                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7331         wait_event(mddev->sb_wait,
7332                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7333 }
7334 EXPORT_SYMBOL(md_write_start);
7335
7336 void md_write_end(struct mddev *mddev)
7337 {
7338         if (atomic_dec_and_test(&mddev->writes_pending)) {
7339                 if (mddev->safemode == 2)
7340                         md_wakeup_thread(mddev->thread);
7341                 else if (mddev->safemode_delay)
7342                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7343         }
7344 }
7345 EXPORT_SYMBOL(md_write_end);
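
/*
 * Hedged usage sketch: md_write_start()/md_write_end() bracket every
 * write a personality handles, keeping writes_pending accurate so
 * safemode can mark the array clean once writes drain.  The inline
 * outline below is hypothetical; real callers end the write from the
 * bio completion path.
 */
#if 0   /* example only */
static void example_make_request(struct mddev *mddev, struct bio *bio)
{
        md_write_start(mddev, bio);     /* may block on a sb update */
        /* ... issue the write to the member devices ... */
        md_write_end(mddev);            /* when the write completes */
}
#endif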
7346
7347 /* md_allow_write(mddev)
7348  * Calling this ensures that the array is marked 'active' so that writes
7349  * may proceed without blocking.  It is important to call this before
7350  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7351  * Must be called with mddev_lock held.
7352  *
7353  * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
7354  * is dropped, so return -EAGAIN after notifying userspace.
7355  */
7356 int md_allow_write(struct mddev *mddev)
7357 {
7358         if (!mddev->pers)
7359                 return 0;
7360         if (mddev->ro)
7361                 return 0;
7362         if (!mddev->pers->sync_request)
7363                 return 0;
7364
7365         spin_lock(&mddev->lock);
7366         if (mddev->in_sync) {
7367                 mddev->in_sync = 0;
7368                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7369                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7370                 if (mddev->safemode_delay &&
7371                     mddev->safemode == 0)
7372                         mddev->safemode = 1;
7373                 spin_unlock(&mddev->lock);
7374                 md_update_sb(mddev, 0);
7375                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7376         } else
7377                 spin_unlock(&mddev->lock);
7378
7379         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7380                 return -EAGAIN;
7381         else
7382                 return 0;
7383 }
7384 EXPORT_SYMBOL_GPL(md_allow_write);
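
/*
 * Hedged usage sketch: a personality about to do a GFP_KERNEL
 * allocation under the mddev lock calls md_allow_write() first, so
 * the allocator cannot block behind a superblock write the array is
 * not yet allowed to issue.  'example_grow()' is hypothetical.
 */
#if 0   /* example only */
static int example_grow(struct mddev *mddev)
{
        void *buf;
        int err = md_allow_write(mddev);

        if (err)        /* -EAGAIN: userspace must write the metadata */
                return err;
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        /* ... swap in the new buffer ... */
        kfree(buf);
        return 0;
}
#endif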
7385
7386 #define SYNC_MARKS      10
7387 #define SYNC_MARK_STEP  (3*HZ)
7388 #define UPDATE_FREQUENCY (5*60*HZ)
7389 void md_do_sync(struct md_thread *thread)
7390 {
7391         struct mddev *mddev = thread->mddev;
7392         struct mddev *mddev2;
7393         unsigned int currspeed = 0,
7394                  window;
7395         sector_t max_sectors, j, io_sectors, recovery_done;
7396         unsigned long mark[SYNC_MARKS];
7397         unsigned long update_time;
7398         sector_t mark_cnt[SYNC_MARKS];
7399         int last_mark, m;
7400         struct list_head *tmp;
7401         sector_t last_check;
7402         int skipped = 0;
7403         struct md_rdev *rdev;
7404         char *desc, *action = NULL;
7405         struct blk_plug plug;
7406
7407         /* just in case the thread restarts... */
7408         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7409                 return;
7410         if (mddev->ro) { /* never try to sync a read-only array */
7411                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7412                 return;
7413         }
7414
7415         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7416                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7417                         desc = "data-check";
7418                         action = "check";
7419                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7420                         desc = "requested-resync";
7421                         action = "repair";
7422                 } else
7423                         desc = "resync";
7424         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7425                 desc = "reshape";
7426         else
7427                 desc = "recovery";
7428
7429         mddev->last_sync_action = action ?: desc;
7430
7431         /* we overload curr_resync somewhat here.
7432          * 0 == not engaged in resync at all
7433          * 2 == checking that there is no conflict with another sync
7434          * 1 == like 2, but have yielded to allow conflicting resync to
7435          *              commence
7436          * other == active in resync - this many blocks
7437          *
7438          * Before starting a resync we must have set curr_resync to
7439          * 2, and then checked that every "conflicting" array has curr_resync
7440          * less than ours.  When we find one that is the same or higher
7441          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7442          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
7443          * This will mean we have to start checking from the beginning again.
7444          *
7445          */
7446
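        /*
         * Sketch of the encoding just described (illustrative names
         * only; the driver works with the raw numbers):
         */
#if 0   /* example only */
        enum {
                RESYNC_NONE     = 0,    /* not engaged in resync */
                RESYNC_YIELDED  = 1,    /* checking, yielded to a conflict */
                RESYNC_CHECKING = 2,    /* checking for conflicting arrays */
                RESYNC_ACTIVE   = 3,    /* >= 3: current resync position */
        };
#endif
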
7447         do {
7448                 mddev->curr_resync = 2;
7449
7450         try_again:
7451                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7452                         goto skip;
7453                 for_each_mddev(mddev2, tmp) {
7454                         if (mddev2 == mddev)
7455                                 continue;
7456                         if (!mddev->parallel_resync
7457                         &&  mddev2->curr_resync
7458                         &&  match_mddev_units(mddev, mddev2)) {
7459                                 DEFINE_WAIT(wq);
7460                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7461                                         /* arbitrarily yield */
7462                                         mddev->curr_resync = 1;
7463                                         wake_up(&resync_wait);
7464                                 }
7465                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7466                                         /* no need to wait here, we can wait the next
7467                                          * time 'round when curr_resync == 2
7468                                          */
7469                                         continue;
7470                                 /* We need to wait 'interruptible' so as not to
7471                                  * contribute to the load average, and not to
7472                                  * be caught by 'softlockup'
7473                                  */
7474                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7475                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7476                                     mddev2->curr_resync >= mddev->curr_resync) {
7477                                         printk(KERN_INFO "md: delaying %s of %s"
7478                                                " until %s has finished (they"
7479                                                " share one or more physical units)\n",
7480                                                desc, mdname(mddev), mdname(mddev2));
7481                                         mddev_put(mddev2);
7482                                         if (signal_pending(current))
7483                                                 flush_signals(current);
7484                                         schedule();
7485                                         finish_wait(&resync_wait, &wq);
7486                                         goto try_again;
7487                                 }
7488                                 finish_wait(&resync_wait, &wq);
7489                         }
7490                 }
7491         } while (mddev->curr_resync < 2);
7492
7493         j = 0;
7494         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7495                 /* resync follows the size requested by the personality,
7496                  * which defaults to physical size, but can be virtual size
7497                  */
7498                 max_sectors = mddev->resync_max_sectors;
7499                 atomic64_set(&mddev->resync_mismatches, 0);
7500                 /* we don't use the checkpoint if there's a bitmap */
7501                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7502                         j = mddev->resync_min;
7503                 else if (!mddev->bitmap)
7504                         j = mddev->recovery_cp;
7505
7506         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7507                 max_sectors = mddev->resync_max_sectors;
7508         else {
7509                 /* recovery follows the physical size of devices */
7510                 max_sectors = mddev->dev_sectors;
7511                 j = MaxSector;
7512                 rcu_read_lock();
7513                 rdev_for_each_rcu(rdev, mddev)
7514                         if (rdev->raid_disk >= 0 &&
7515                             !test_bit(Faulty, &rdev->flags) &&
7516                             !test_bit(In_sync, &rdev->flags) &&
7517                             rdev->recovery_offset < j)
7518                                 j = rdev->recovery_offset;
7519                 rcu_read_unlock();
7520
7521                 /* If there is a bitmap, we need to make sure all
7522                  * writes that started before we added a spare
7523                  * complete before we start doing a recovery.
7524                  * Otherwise the write might complete and (via
7525                  * bitmap_endwrite) set a bit in the bitmap after the
7526                  * recovery has checked that bit and skipped that
7527                  * region.
7528                  */
7529                 if (mddev->bitmap) {
7530                         mddev->pers->quiesce(mddev, 1);
7531                         mddev->pers->quiesce(mddev, 0);
7532                 }
7533         }
7534
7535         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
7536         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
7537                 " %d KB/sec/disk.\n", speed_min(mddev));
7538         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
7539                "(but not more than %d KB/sec) for %s.\n",
7540                speed_max(mddev), desc);
7541
7542         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7543
7544         io_sectors = 0;
7545         for (m = 0; m < SYNC_MARKS; m++) {
7546                 mark[m] = jiffies;
7547                 mark_cnt[m] = io_sectors;
7548         }
7549         last_mark = 0;
7550         mddev->resync_mark = mark[last_mark];
7551         mddev->resync_mark_cnt = mark_cnt[last_mark];
7552
7553         /*
7554          * Tune reconstruction:
7555          */
7556         window = 32*(PAGE_SIZE/512);
7557         printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
7558                 window/2, (unsigned long long)max_sectors/2);
7559
7560         atomic_set(&mddev->recovery_active, 0);
7561         last_check = 0;
7562
7563         if (j > 2) {
7564                 printk(KERN_INFO
7565                        "md: resuming %s of %s from checkpoint.\n",
7566                        desc, mdname(mddev));
7567                 mddev->curr_resync = j;
7568         } else
7569                 mddev->curr_resync = 3; /* no longer delayed */
7570         mddev->curr_resync_completed = j;
7571         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7572         md_new_event(mddev);
7573         update_time = jiffies;
7574
7575         blk_start_plug(&plug);
7576         while (j < max_sectors) {
7577                 sector_t sectors;
7578
7579                 skipped = 0;
7580
7581                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7582                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7583                       (mddev->curr_resync - mddev->curr_resync_completed)
7584                       > (max_sectors >> 4)) ||
7585                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7586                      (j - mddev->curr_resync_completed)*2
7587                      >= mddev->resync_max - mddev->curr_resync_completed
7588                             )) {
7589                         /* time to update curr_resync_completed */
7590                         wait_event(mddev->recovery_wait,
7591                                    atomic_read(&mddev->recovery_active) == 0);
7592                         mddev->curr_resync_completed = j;
7593                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7594                             j > mddev->recovery_cp)
7595                                 mddev->recovery_cp = j;
7596                         update_time = jiffies;
7597                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7598                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7599                 }
7600
7601                 while (j >= mddev->resync_max &&
7602                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7603                         /* As this condition is controlled by user-space,
7604                          * we can block indefinitely, so use '_interruptible'
7605                          * to avoid triggering warnings.
7606                          */
7607                         flush_signals(current); /* just in case */
7608                         wait_event_interruptible(mddev->recovery_wait,
7609                                                  mddev->resync_max > j
7610                                                  || test_bit(MD_RECOVERY_INTR,
7611                                                              &mddev->recovery));
7612                 }
7613
7614                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7615                         break;
7616
7617                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
7618                                                   currspeed < speed_min(mddev));
7619                 if (sectors == 0) {
7620                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7621                         break;
7622                 }
7623
7624                 if (!skipped) { /* actual IO requested */
7625                         io_sectors += sectors;
7626                         atomic_add(sectors, &mddev->recovery_active);
7627                 }
7628
7629                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7630                         break;
7631
7632                 j += sectors;
7633                 if (j > 2)
7634                         mddev->curr_resync = j;
7635                 mddev->curr_mark_cnt = io_sectors;
7636                 if (last_check == 0)
7637                         /* this is the earliest that rebuild will be
7638                          * visible in /proc/mdstat
7639                          */
7640                         md_new_event(mddev);
7641
7642                 if (last_check + window > io_sectors || j == max_sectors)
7643                         continue;
7644
7645                 last_check = io_sectors;
7646         repeat:
7647                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
7648                         /* step marks */
7649                         int next = (last_mark+1) % SYNC_MARKS;
7650
7651                         mddev->resync_mark = mark[next];
7652                         mddev->resync_mark_cnt = mark_cnt[next];
7653                         mark[next] = jiffies;
7654                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
7655                         last_mark = next;
7656                 }
7657
7658                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7659                         break;
7660
7661                 /*
7662                  * This loop exits only when we are slower than
7663                  * the 'hard' speed limit, or the system was IO-idle for
7664                  * a jiffy.
7665                  * The system might be non-idle CPU-wise, but we only care
7666                  * about not overloading the IO subsystem. (things like an
7667                  * e2fsck being done on the RAID array should execute fast)
7668                  */
7669                 cond_resched();
7670
7671                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
7672                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
7673                         / ((jiffies - mddev->resync_mark)/HZ + 1) + 1;
7674
7675                 if (currspeed > speed_min(mddev)) {
7676                         if ((currspeed > speed_max(mddev)) ||
7677                                         !is_mddev_idle(mddev, 0)) {
7678                                 msleep(500);
7679                                 goto repeat;
7680                         }
7681                 }
7682         }
7683         printk(KERN_INFO "md: %s: %s %s.\n", mdname(mddev), desc,
7684                test_bit(MD_RECOVERY_INTR, &mddev->recovery)
7685                ? "interrupted" : "done");
7686         /*
7687          * this also signals 'finished resyncing' to md_stop
7688          */
7689         blk_finish_plug(&plug);
7690         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
7691
7692         /* tell personality that we are finished */
7693         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
7694
7695         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
7696             mddev->curr_resync > 2) {
7697                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7698                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7699                                 if (mddev->curr_resync >= mddev->recovery_cp) {
7700                                         printk(KERN_INFO
7701                                                "md: checkpointing %s of %s.\n",
7702                                                desc, mdname(mddev));
7703                                         if (test_bit(MD_RECOVERY_ERROR,
7704                                                 &mddev->recovery))
7705                                                 mddev->recovery_cp =
7706                                                         mddev->curr_resync_completed;
7707                                         else
7708                                                 mddev->recovery_cp =
7709                                                         mddev->curr_resync;
7710                                 }
7711                         } else
7712                                 mddev->recovery_cp = MaxSector;
7713                 } else {
7714                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7715                                 mddev->curr_resync = MaxSector;
7716                         rcu_read_lock();
7717                         rdev_for_each_rcu(rdev, mddev)
7718                                 if (rdev->raid_disk >= 0 &&
7719                                     mddev->delta_disks >= 0 &&
7720                                     !test_bit(Faulty, &rdev->flags) &&
7721                                     !test_bit(In_sync, &rdev->flags) &&
7722                                     rdev->recovery_offset < mddev->curr_resync)
7723                                         rdev->recovery_offset = mddev->curr_resync;
7724                         rcu_read_unlock();
7725                 }
7726         }
7727  skip:
7728         set_bit(MD_CHANGE_DEVS, &mddev->flags);
7729
7730         spin_lock(&mddev->lock);
7731         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7732                 /* We completed, so any min/max settings can be forgotten. */
7733                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7734                         mddev->resync_min = 0;
7735                 mddev->resync_max = MaxSector;
7736         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7737                 mddev->resync_min = mddev->curr_resync_completed;
7738         mddev->curr_resync = 0;
7739         spin_unlock(&mddev->lock);
7740
7741         wake_up(&resync_wait);
7742         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
7743         md_wakeup_thread(mddev->thread);
7744         return;
7745 }
7746 EXPORT_SYMBOL_GPL(md_do_sync);
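
/*
 * Sketch (not driver code) of the throttle arithmetic in the loop
 * above: sectors completed since the last mark, halved to KiB,
 * divided by the elapsed whole seconds (the +1 terms guard against
 * division by zero and a zero rate on the first pass).
 */
#if 0   /* example only */
static unsigned int resync_speed_k(unsigned long recovery_done,
                                   unsigned long mark_cnt,
                                   unsigned long elapsed_seconds)
{
        return (recovery_done - mark_cnt) / 2 / (elapsed_seconds + 1) + 1;
}
#endif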
7747
7748 static int remove_and_add_spares(struct mddev *mddev,
7749                                  struct md_rdev *this)
7750 {
7751         struct md_rdev *rdev;
7752         int spares = 0;
7753         int removed = 0;
7754
7755         rdev_for_each(rdev, mddev)
7756                 if ((this == NULL || rdev == this) &&
7757                     rdev->raid_disk >= 0 &&
7758                     !test_bit(Blocked, &rdev->flags) &&
7759                     (test_bit(Faulty, &rdev->flags) ||
7760                      !test_bit(In_sync, &rdev->flags)) &&
7761                     atomic_read(&rdev->nr_pending)==0) {
7762                         if (mddev->pers->hot_remove_disk(
7763                                     mddev, rdev) == 0) {
7764                                 sysfs_unlink_rdev(mddev, rdev);
7765                                 rdev->raid_disk = -1;
7766                                 removed++;
7767                         }
7768                 }
7769         if (removed && mddev->kobj.sd)
7770                 sysfs_notify(&mddev->kobj, NULL, "degraded");
7771
7772         if (this)
7773                 goto no_add;
7774
7775         rdev_for_each(rdev, mddev) {
7776                 if (rdev->raid_disk >= 0 &&
7777                     !test_bit(In_sync, &rdev->flags) &&
7778                     !test_bit(Faulty, &rdev->flags))
7779                         spares++;
7780                 if (rdev->raid_disk >= 0)
7781                         continue;
7782                 if (test_bit(Faulty, &rdev->flags))
7783                         continue;
7784                 if (mddev->ro &&
7785                     !(rdev->saved_raid_disk >= 0 &&
7786                        !test_bit(Bitmap_sync, &rdev->flags)))
7787                         continue;
7788
7789                 if (rdev->saved_raid_disk < 0)
7790                         rdev->recovery_offset = 0;
7791                 if (mddev->pers->
7792                     hot_add_disk(mddev, rdev) == 0) {
7793                         if (sysfs_link_rdev(mddev, rdev))
7794                                 /* failure here is OK */;
7795                         spares++;
7796                         md_new_event(mddev);
7797                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
7798                 }
7799         }
7800 no_add:
7801         if (removed)
7802                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7803         return spares;
7804 }
7805
7806 static void md_start_sync(struct work_struct *ws)
7807 {
7808         struct mddev *mddev = container_of(ws, struct mddev, del_work);
7809
7810         mddev->sync_thread = md_register_thread(md_do_sync,
7811                                                 mddev,
7812                                                 "resync");
7813         if (!mddev->sync_thread) {
7814                 printk(KERN_ERR "%s: could not start resync"
7815                        " thread...\n",
7816                        mdname(mddev));
7817                 /* leave the spares where they are, it shouldn't hurt */
7818                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7819                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7820                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7821                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7822                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7823                 wake_up(&resync_wait);
7824                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7825                                        &mddev->recovery))
7826                         if (mddev->sysfs_action)
7827                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
7828         } else
7829                 md_wakeup_thread(mddev->sync_thread);
7830         sysfs_notify_dirent_safe(mddev->sysfs_action);
7831         md_new_event(mddev);
7832 }
7833
7834 /*
7835  * This routine is regularly called by all per-raid-array threads to
7836  * deal with generic issues like resync and super-block update.
7837  * Raid personalities that don't have a thread (linear/raid0) do not
7838  * need this as they never do any recovery or update the superblock.
7839  *
7840  * It does not do any resync itself, but rather "forks" off other threads
7841  * to do that as needed.
7842  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
7843  * "->recovery" and create a thread at ->sync_thread.
7844  * When the thread finishes it sets MD_RECOVERY_DONE
7845  * and wakes up this thread, which will reap it and finish up.
7846  * This thread also removes any faulty devices (with nr_pending == 0).
7847  *
7848  * The overall approach is:
7849  *  1/ if the superblock needs updating, update it.
7850  *  2/ If a recovery thread is running, don't do anything else.
7851  *  3/ If recovery has finished, clean up, possibly marking spares active.
7852  *  4/ If there are any faulty devices, remove them.
7853  *  5/ If array is degraded, try to add spare devices
7854  *  6/ If array has spares or is not in-sync, start a resync thread.
7855  */
7856 void md_check_recovery(struct mddev *mddev)
7857 {
7858         if (mddev->suspended)
7859                 return;
7860
7861         if (mddev->bitmap)
7862                 bitmap_daemon_work(mddev);
7863
7864         if (signal_pending(current)) {
7865                 if (mddev->pers->sync_request && !mddev->external) {
7866                         printk(KERN_INFO "md: %s in immediate safe mode\n",
7867                                mdname(mddev));
7868                         mddev->safemode = 2;
7869                 }
7870                 flush_signals(current);
7871         }
7872
7873         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7874                 return;
7875         if (!(
7876                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
7877                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7878                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7879                 (mddev->external == 0 && mddev->safemode == 1) ||
7880                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
7881                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
7882                 ))
7883                 return;
7884
7885         if (mddev_trylock(mddev)) {
7886                 int spares = 0;
7887
7888                 if (mddev->ro) {
7889                         /* On a read-only array we can:
7890                          * - remove failed devices
7891                          * - add already-in_sync devices if the array itself
7892                          *   is in-sync.
7893                          * As we only add devices that are already in-sync,
7894                          * we can activate the spares immediately.
7895                          */
7896                         remove_and_add_spares(mddev, NULL);
7897                         /* There is no thread, but we need to call
7898                          * ->spare_active and clear saved_raid_disk
7899                          */
7900                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7901                         md_reap_sync_thread(mddev);
7902                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7903                         goto unlock;
7904                 }
7905
7906                 if (!mddev->external) {
7907                         int did_change = 0;
7908                         spin_lock(&mddev->lock);
7909                         if (mddev->safemode &&
7910                             !atomic_read(&mddev->writes_pending) &&
7911                             !mddev->in_sync &&
7912                             mddev->recovery_cp == MaxSector) {
7913                                 mddev->in_sync = 1;
7914                                 did_change = 1;
7915                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7916                         }
7917                         if (mddev->safemode == 1)
7918                                 mddev->safemode = 0;
7919                         spin_unlock(&mddev->lock);
7920                         if (did_change)
7921                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7922                 }
7923
7924                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
7925                         md_update_sb(mddev, 0);
7926
7927                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7928                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7929                         /* resync/recovery still happening */
7930                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7931                         goto unlock;
7932                 }
7933                 if (mddev->sync_thread) {
7934                         md_reap_sync_thread(mddev);
7935                         goto unlock;
7936                 }
7937                 /* Set RUNNING before clearing NEEDED to avoid
7938                  * any transients in the value of "sync_action".
7939                  */
7940                 mddev->curr_resync_completed = 0;
7941                 spin_lock(&mddev->lock);
7942                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7943                 spin_unlock(&mddev->lock);
7944                 /* Clear some bits that don't mean anything, but
7945                  * might be left set
7946                  */
7947                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7948                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7949
7950                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7951                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7952                         goto not_running;
7953                 /* no recovery is running.
7954                  * remove any failed drives, then
7955                  * add spares if possible.
7956                  * Spares are also removed and re-added, to allow
7957                  * the personality to fail the re-add.
7958                  */
7959
7960                 if (mddev->reshape_position != MaxSector) {
7961                         if (mddev->pers->check_reshape == NULL ||
7962                             mddev->pers->check_reshape(mddev) != 0)
7963                                 /* Cannot proceed */
7964                                 goto not_running;
7965                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7966                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7967                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
7968                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7969                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7970                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7971                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7972                 } else if (mddev->recovery_cp < MaxSector) {
7973                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7974                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7975                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
7976                         /* nothing to be done ... */
7977                         goto not_running;
7978
7979                 if (mddev->pers->sync_request) {
7980                         if (spares) {
7981                                 /* We are adding a device or devices to an array
7982                                  * which has the bitmap stored on all devices.
7983                                  * So make sure all bitmap pages get written
7984                                  */
7985                                 bitmap_write_all(mddev->bitmap);
7986                         }
7987                         INIT_WORK(&mddev->del_work, md_start_sync);
7988                         queue_work(md_misc_wq, &mddev->del_work);
7989                         goto unlock;
7990                 }
7991         not_running:
7992                 if (!mddev->sync_thread) {
7993                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7994                         wake_up(&resync_wait);
7995                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7996                                                &mddev->recovery))
7997                                 if (mddev->sysfs_action)
7998                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
7999                 }
8000         unlock:
8001                 wake_up(&mddev->sb_wait);
8002                 mddev_unlock(mddev);
8003         }
8004 }
8005 EXPORT_SYMBOL(md_check_recovery);
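
/*
 * Hedged sketch: every personality's daemon thread funnels through
 * md_check_recovery().  The outline below is hypothetical, modelled
 * on the raid1/raid5 daemon loops.
 */
#if 0   /* example only */
static void exampled(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;

        md_check_recovery(mddev);       /* superblocks, spares, sync */
        /* ... personality-specific retry/flush work ... */
}
#endif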
8006
8007 void md_reap_sync_thread(struct mddev *mddev)
8008 {
8009         struct md_rdev *rdev;
8010
8011         /* resync has finished, collect result */
8012         md_unregister_thread(&mddev->sync_thread);
8013         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8014             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8015                 /* success...*/
8016                 /* activate any spares */
8017                 if (mddev->pers->spare_active(mddev)) {
8018                         sysfs_notify(&mddev->kobj, NULL,
8019                                      "degraded");
8020                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8021                 }
8022         }
8023         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8024             mddev->pers->finish_reshape)
8025                 mddev->pers->finish_reshape(mddev);
8026
8027         /* If array is no longer degraded, then any saved_raid_disk
8028          * information must be scrapped.
8029          */
8030         if (!mddev->degraded)
8031                 rdev_for_each(rdev, mddev)
8032                         rdev->saved_raid_disk = -1;
8033
8034         md_update_sb(mddev, 1);
8035         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8036         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8037         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8038         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8039         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8040         wake_up(&resync_wait);
8041         /* flag recovery needed just to double check */
8042         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8043         sysfs_notify_dirent_safe(mddev->sysfs_action);
8044         md_new_event(mddev);
8045         if (mddev->event_work.func)
8046                 queue_work(md_misc_wq, &mddev->event_work);
8047 }
8048 EXPORT_SYMBOL(md_reap_sync_thread);
8049
8050 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8051 {
8052         sysfs_notify_dirent_safe(rdev->sysfs_state);
8053         wait_event_timeout(rdev->blocked_wait,
8054                            !test_bit(Blocked, &rdev->flags) &&
8055                            !test_bit(BlockedBadBlocks, &rdev->flags),
8056                            msecs_to_jiffies(5000));
8057         rdev_dec_pending(rdev, mddev);
8058 }
8059 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8060
8061 void md_finish_reshape(struct mddev *mddev)
8062 {
8063         /* called by the personality module when a reshape completes. */
8064         struct md_rdev *rdev;
8065
8066         rdev_for_each(rdev, mddev) {
8067                 if (rdev->data_offset > rdev->new_data_offset)
8068                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8069                 else
8070                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8071                 rdev->data_offset = rdev->new_data_offset;
8072         }
8073 }
8074 EXPORT_SYMBOL(md_finish_reshape);
8075
8076 /* Bad block management.
8077  * We can record which blocks on each device are 'bad' and so just
8078  * fail those blocks, or that stripe, rather than the whole device.
8079  * Entries in the bad-block table are 64bits wide.  This comprises:
8080  * Length of bad-range, in sectors: 0-511 for lengths 1-512
8081  * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
8082  *  A 'shift' can be set so that larger blocks are tracked and
8083  *  consequently larger devices can be covered.
8084  * 'Acknowledged' flag - 1 bit - the most significant bit.
8085  *
8086  * Locking of the bad-block table uses a seqlock so md_is_badblock
8087  * might need to retry if it is very unlucky.
8088  * We will sometimes want to check for bad blocks in a bi_end_io function,
8089  * so we use the write_seqlock_irq variant.
8090  *
8091  * When looking for a bad block we specify a range and want to
8092  * know if any block in the range is bad.  So we binary-search
8093  * to the last range that starts at-or-before the given endpoint,
8094  * (or "before the sector after the target range")
8095  * then see if it ends after the given start.
8096  * We return
8097  *  0 if there are no known bad blocks in the range
8098  *  1 if there are known bad blocks which are all acknowledged
8099  * -1 if there are bad blocks which have not yet been acknowledged in metadata.
8100  * plus the start/length of the first bad section we overlap.
8101  */
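/*
 * Illustrative sketch of the entry layout described above, using the
 * BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK helpers (bit 63 = acknowledged,
 * bits 62..9 = start sector, bits 8..0 = length - 1):
 */
#if 0   /* example only */
{
        u64 entry = BB_MAKE(123456, 8, 1); /* 8 bad sectors at 123456, acked */

        BUG_ON(BB_OFFSET(entry) != 123456);
        BUG_ON(BB_LEN(entry) != 8);
        BUG_ON(!BB_ACK(entry));
}
#endif
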
8102 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
8103                    sector_t *first_bad, int *bad_sectors)
8104 {
8105         int hi;
8106         int lo;
8107         u64 *p = bb->page;
8108         int rv;
8109         sector_t target = s + sectors;
8110         unsigned seq;
8111
8112         if (bb->shift > 0) {
8113                 /* round the start down, and the end up */
8114                 s >>= bb->shift;
8115                 target += (1<<bb->shift) - 1;
8116                 target >>= bb->shift;
8117                 sectors = target - s;
8118         }
8119         /* 'target' is now the first block after the bad range */
8120
8121 retry:
8122         seq = read_seqbegin(&bb->lock);
8123         lo = 0;
8124         rv = 0;
8125         hi = bb->count;
8126
8127         /* Binary search between lo and hi for 'target'
8128          * i.e. for the last range that starts before 'target'
8129          */
8130         /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
8131          * are known not to be the last range before target.
8132          * VARIANT: hi-lo is the number of possible
8133          * ranges, and decreases until it reaches 1
8134          */
8135         while (hi - lo > 1) {
8136                 int mid = (lo + hi) / 2;
8137                 sector_t a = BB_OFFSET(p[mid]);
8138                 if (a < target)
8139                         /* This could still be the one, earlier ranges
8140                          * could not. */
8141                         lo = mid;
8142                 else
8143                         /* This and later ranges are definitely out. */
8144                         hi = mid;
8145         }
8146         /* 'lo' might be the last that started before target, but 'hi' isn't */
8147         if (hi > lo) {
8148                 /* need to check all ranges that end after 's' to see if
8149                  * any are unacknowledged.
8150                  */
8151                 while (lo >= 0 &&
8152                        BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8153                         if (BB_OFFSET(p[lo]) < target) {
8154                                 /* starts before the end, and finishes after
8155                                  * the start, so they must overlap
8156                                  */
8157                                 if (rv != -1 && BB_ACK(p[lo]))
8158                                         rv = 1;
8159                                 else
8160                                         rv = -1;
8161                                 *first_bad = BB_OFFSET(p[lo]);
8162                                 *bad_sectors = BB_LEN(p[lo]);
8163                         }
8164                         lo--;
8165                 }
8166         }
8167
8168         if (read_seqretry(&bb->lock, seq))
8169                 goto retry;
8170
8171         return rv;
8172 }
8173 EXPORT_SYMBOL_GPL(md_is_badblock);
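
/*
 * Hedged usage sketch: a read or write path checks a candidate IO
 * range against the table before issuing it.  Variable names here
 * are illustrative.
 */
#if 0   /* example only */
{
        sector_t first_bad;
        int bad_sectors;

        switch (md_is_badblock(&rdev->badblocks, sector, nr_sectors,
                               &first_bad, &bad_sectors)) {
        case 0:         /* range is clean, the IO can go ahead */
                break;
        case 1:         /* acknowledged bad blocks: fail or redirect
                         * just the part starting at first_bad */
                break;
        case -1:        /* unacknowledged: the metadata write is still
                         * pending, the caller may need to wait */
                break;
        }
}
#endif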
8174
8175 /*
8176  * Add a range of bad blocks to the table.
8177  * This might extend the table, or might contract it
8178  * if two adjacent ranges can be merged.
8179  * We binary-search to find the 'insertion' point, then
8180  * decide how best to handle it.
8181  */
8182 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8183                             int acknowledged)
8184 {
8185         u64 *p;
8186         int lo, hi;
8187         int rv = 1;
8188         unsigned long flags;
8189
8190         if (bb->shift < 0)
8191                 /* badblocks are disabled */
8192                 return 0;
8193
8194         if (bb->shift) {
8195                 /* round the start down, and the end up */
8196                 sector_t next = s + sectors;
8197                 s >>= bb->shift;
8198                 next += (1<<bb->shift) - 1;
8199                 next >>= bb->shift;
8200                 sectors = next - s;
8201         }
8202
8203         write_seqlock_irqsave(&bb->lock, flags);
8204
8205         p = bb->page;
8206         lo = 0;
8207         hi = bb->count;
8208         /* Find the last range that starts at-or-before 's' */
8209         while (hi - lo > 1) {
8210                 int mid = (lo + hi) / 2;
8211                 sector_t a = BB_OFFSET(p[mid]);
8212                 if (a <= s)
8213                         lo = mid;
8214                 else
8215                         hi = mid;
8216         }
8217         if (hi > lo && BB_OFFSET(p[lo]) > s)
8218                 hi = lo;
8219
8220         if (hi > lo) {
8221                 /* we found a range that might merge with the start
8222                  * of our new range
8223                  */
8224                 sector_t a = BB_OFFSET(p[lo]);
8225                 sector_t e = a + BB_LEN(p[lo]);
8226                 int ack = BB_ACK(p[lo]);
8227                 if (e >= s) {
8228                         /* Yes, we can merge with a previous range */
8229                         if (s == a && s + sectors >= e)
8230                                 /* new range covers old */
8231                                 ack = acknowledged;
8232                         else
8233                                 ack = ack && acknowledged;
8234
8235                         if (e < s + sectors)
8236                                 e = s + sectors;
8237                         if (e - a <= BB_MAX_LEN) {
8238                                 p[lo] = BB_MAKE(a, e-a, ack);
8239                                 s = e;
8240                         } else {
8241                                 /* does not all fit in one range,
8242                                  * make p[lo] maximal
8243                                  */
8244                                 if (BB_LEN(p[lo]) != BB_MAX_LEN)
8245                                         p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
8246                                 s = a + BB_MAX_LEN;
8247                         }
8248                         sectors = e - s;
8249                 }
8250         }
8251         if (sectors && hi < bb->count) {
8252                 /* 'hi' points to the first range that starts after 's'.
8253                  * Maybe we can merge with the start of that range */
8254                 sector_t a = BB_OFFSET(p[hi]);
8255                 sector_t e = a + BB_LEN(p[hi]);
8256                 int ack = BB_ACK(p[hi]);
8257                 if (a <= s + sectors) {
8258                         /* merging is possible */
8259                         if (e <= s + sectors) {
8260                                 /* full overlap */
8261                                 e = s + sectors;
8262                                 ack = acknowledged;
8263                         } else
8264                                 ack = ack && acknowledged;
8265
8266                         a = s;
8267                         if (e - a <= BB_MAX_LEN) {
8268                                 p[hi] = BB_MAKE(a, e-a, ack);
8269                                 s = e;
8270                         } else {
8271                                 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
8272                                 s = a + BB_MAX_LEN;
8273                         }
8274                         sectors = e - s;
8275                         lo = hi;
8276                         hi++;
8277                 }
8278         }
8279         if (sectors == 0 && hi < bb->count) {
8280                 /* we might be able to combine lo and hi */
8281                 /* Note: 's' is at the end of 'lo' */
8282                 sector_t a = BB_OFFSET(p[hi]);
8283                 int lolen = BB_LEN(p[lo]);
8284                 int hilen = BB_LEN(p[hi]);
8285                 int newlen = lolen + hilen - (s - a);
8286                 if (s >= a && newlen < BB_MAX_LEN) {
8287                         /* yes, we can combine them */
8288                         int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
8289                         p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
8290                         memmove(p + hi, p + hi + 1,
8291                                 (bb->count - hi - 1) * 8);
8292                         bb->count--;
8293                 }
8294         }
8295         while (sectors) {
8296                 /* didn't merge all of it.
8297                  * Need to add a range just before 'hi' */
8298                 if (bb->count >= MD_MAX_BADBLOCKS) {
8299                         /* No room for more */
8300                         rv = 0;
8301                         break;
8302                 } else {
8303                         int this_sectors = sectors;
8304                         memmove(p + hi + 1, p + hi,
8305                                 (bb->count - hi) * 8);
8306                         bb->count++;
8307
8308                         if (this_sectors > BB_MAX_LEN)
8309                                 this_sectors = BB_MAX_LEN;
8310                         p[hi] = BB_MAKE(s, this_sectors, acknowledged);
8311                         sectors -= this_sectors;
8312                         s += this_sectors;
8313                 }
8314         }
8315
8316         bb->changed = 1;
8317         if (!acknowledged)
8318                 bb->unacked_exist = 1;
8319         write_sequnlock_irqrestore(&bb->lock, flags);
8320
8321         return rv;
8322 }
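
/*
 * Note on the table encoding (a sketch, per the BB_* macros in md.h):
 * each entry packs one range into a single u64 -- the start sector in
 * bits 9..62 (54 bits), the length minus one in the low 9 bits (so one
 * entry covers at most BB_MAX_LEN == 512 sectors), and the
 * 'acknowledged' flag in the top bit.  For example, BB_MAKE(1000, 8, 1)
 * records an acknowledged 8-sector range starting at sector 1000, and
 * BB_OFFSET()/BB_LEN()/BB_ACK() recover the three fields.
 */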

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                       int is_new)
{
        int rv;
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        rv = md_set_badblocks(&rdev->badblocks,
                              s, sectors, 0);
        if (rv) {
                /* Make sure they get written out promptly */
                sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
        return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
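
/*
 * Typical use (illustrative, mirroring the raid1/raid5 callers): when a
 * write to one member fails, the personality tries to record the region
 * as bad rather than failing the whole device, e.g.:
 *
 *	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 *		md_error(mddev, rdev);
 *
 * i.e. only if the table is full (rdev_set_badblocks() returns 0) is
 * the device kicked from the array.
 */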

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
        u64 *p;
        int lo, hi;
        sector_t target = s + sectors;
        int rv = 0;

        if (bb->shift > 0) {
                /* When clearing we round the start up and the end down.
                 * This should not matter as the shift should align with
                 * the block size and no rounding should ever be needed.
                 * However it is better to think a block is bad when it
                 * isn't than to think a block is not bad when it is.
                 */
                s += (1<<bb->shift) - 1;
                s >>= bb->shift;
                target >>= bb->shift;
                sectors = target - s;
        }

        write_seqlock_irq(&bb->lock);

        p = bb->page;
        lo = 0;
        hi = bb->count;
        /* Find the last range that starts before 'target' */
        while (hi - lo > 1) {
                int mid = (lo + hi) / 2;
                sector_t a = BB_OFFSET(p[mid]);
                if (a < target)
                        lo = mid;
                else
                        hi = mid;
        }
        if (hi > lo) {
                /* p[lo] is the last range that could overlap the
                 * current range.  Earlier ranges could also overlap,
                 * but only this one can overlap the end of the range.
                 */
                if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
                        /* Partial overlap, leave the tail of this range */
                        int ack = BB_ACK(p[lo]);
                        sector_t a = BB_OFFSET(p[lo]);
                        sector_t end = a + BB_LEN(p[lo]);

                        if (a < s) {
                                /* we need to split this range */
                                if (bb->count >= MD_MAX_BADBLOCKS) {
                                        rv = -ENOSPC;
                                        goto out;
                                }
                                memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
                                bb->count++;
                                p[lo] = BB_MAKE(a, s-a, ack);
                                lo++;
                        }
                        p[lo] = BB_MAKE(target, end - target, ack);
                        /* there is no longer an overlap */
                        hi = lo;
                        lo--;
                }
                while (lo >= 0 &&
                       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
                        /* This range does overlap */
                        if (BB_OFFSET(p[lo]) < s) {
                                /* Keep the early parts of this range. */
                                int ack = BB_ACK(p[lo]);
                                sector_t start = BB_OFFSET(p[lo]);
                                p[lo] = BB_MAKE(start, s - start, ack);
                                /* now 'lo' doesn't overlap, so we are done */
                                break;
                        }
                        lo--;
                }
                /* 'lo' is strictly before, 'hi' is strictly after,
                 * anything between needs to be discarded
                 */
                if (hi - lo > 1) {
                        memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
                        bb->count -= (hi - lo - 1);
                }
        }

        bb->changed = 1;
out:
        write_sequnlock_irq(&bb->lock);
        return rv;
}
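
/*
 * Worked example (hypothetical numbers): with a single entry covering
 * sectors 90..129, clearing sectors 100..119 splits it into two
 * entries, 90..99 and 120..129, consuming one extra slot -- which is
 * why a full table makes md_clear_badblocks() return -ENOSPC and the
 * remove request is simply dropped.
 */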

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                         int is_new)
{
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        return md_clear_badblocks(&rdev->badblocks,
                                  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
        if (bb->page == NULL || bb->changed)
                /* no point even trying */
                return;
        write_seqlock_irq(&bb->lock);

        if (bb->changed == 0 && bb->unacked_exist) {
                u64 *p = bb->page;
                int i;
                for (i = 0; i < bb->count; i++) {
                        if (!BB_ACK(p[i])) {
                                sector_t start = BB_OFFSET(p[i]);
                                int len = BB_LEN(p[i]);
                                p[i] = BB_MAKE(start, len, 1);
                        }
                }
                bb->unacked_exist = 0;
        }
        write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
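
/*
 * (md_update_sb() is the main caller: once the bad-block list has been
 * written out with the superblocks it acknowledges the entries and
 * wakes anyone waiting on the affected rdevs.)
 */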

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
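
/*
 * Example (paths illustrative; the per-rdev sysfs attributes are
 * created elsewhere in this file): marking an acknowledged 8-sector
 * bad range starting at sector 1024 on a member device, then listing:
 *
 *	echo "1024 8" > /sys/block/md0/md/dev-sdb/bad_blocks
 *	cat /sys/block/md0/md/dev-sdb/bad_blocks
 *	1024 8
 */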

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
        size_t len;
        int i;
        u64 *p = bb->page;
        unsigned seq;

        if (bb->shift < 0)
                return 0;

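        /* Snapshot the list under the seqlock; if read_seqretry() below
         * reports that a writer raced with us, start over.
         */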
retry:
        seq = read_seqbegin(&bb->lock);

        len = 0;
        i = 0;

        while (len < PAGE_SIZE && i < bb->count) {
                sector_t s = BB_OFFSET(p[i]);
                unsigned int length = BB_LEN(p[i]);
                int ack = BB_ACK(p[i]);
                i++;

                if (unack && ack)
                        continue;

                len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
                                (unsigned long long)s << bb->shift,
                                length << bb->shift);
        }
        if (unack && len == 0)
                bb->unacked_exist = 0;

        if (read_seqretry(&bb->lock, seq))
                goto retry;

        return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
        unsigned long long sector;
        int length;
        char newline;
#ifdef DO_DEBUG
        /* Allow clearing via sysfs *only* for testing/debugging.
         * Normally only a successful write may clear a badblock
         */
        int clear = 0;
        if (page[0] == '-') {
                clear = 1;
                page++;
        }
#endif /* DO_DEBUG */

        switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
        case 3:
                if (newline != '\n')
                        return -EINVAL;
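                /* fall through - a trailing newline is optional */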
        case 2:
                if (length <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

#ifdef DO_DEBUG
        if (clear) {
                md_clear_badblocks(bb, sector, length);
                return len;
        }
#endif /* DO_DEBUG */
        if (md_set_badblocks(bb, sector, length, !unack))
                return len;
        else
                return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
{
        struct list_head *tmp;
        struct mddev *mddev;
        int need_delay = 0;

        for_each_mddev(mddev, tmp) {
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
                        if (mddev->persistent)
                                mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
        }
        /*
         * Certain more exotic SCSI devices are known to lose data if
         * the system reboots too soon after the final writes.  While
         * the right place to handle this issue is the individual
         * driver, we do want to have a safe RAID driver ...
         */
        if (need_delay)
                mdelay(1000);

        return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
        .notifier_call  = md_notify_reboot,
        .next           = NULL,
        .priority       = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
        pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

        proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
        int ret = -ENOMEM;

        md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
        if (!md_wq)
                goto err_wq;

        md_misc_wq = alloc_workqueue("md_misc", 0, 0);
        if (!md_misc_wq)
                goto err_misc_wq;

        if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
                goto err_md;

        if ((ret = register_blkdev(0, "mdp")) < 0)
                goto err_mdp;
        mdp_major = ret;

        blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
                            md_probe, NULL, NULL);
        blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                            md_probe, NULL, NULL);

        register_reboot_notifier(&md_notifier);
        raid_table_header = register_sysctl_table(raid_root_table);

        md_geninit();
        return 0;

err_mdp:
        unregister_blkdev(MD_MAJOR, "md");
err_md:
        destroy_workqueue(md_misc_wq);
err_misc_wq:
        destroy_workqueue(md_wq);
err_wq:
        return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
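/*
 * (Partition parsers feed these in: e.g. the MSDOS parser passes any
 * partition of type 0xfd, "Linux raid autodetect", to
 * md_autodetect_dev() while scanning disks at boot.)
 */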

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
        struct list_head list;
        dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
        struct detected_devices_node *node_detected_dev;

        node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
        if (node_detected_dev) {
                node_detected_dev->dev = dev;
                list_add_tail(&node_detected_dev->list, &all_detected_devices);
        } else {
                printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
                        ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
        }
}

static void autostart_arrays(int part)
{
        struct md_rdev *rdev;
        struct detected_devices_node *node_detected_dev;
        dev_t dev;
        int i_scanned, i_passed;

        i_scanned = 0;
        i_passed = 0;

        printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

        while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
                i_scanned++;
                node_detected_dev = list_entry(all_detected_devices.next,
                                        struct detected_devices_node, list);
                list_del(&node_detected_dev->list);
                dev = node_detected_dev->dev;
                kfree(node_detected_dev);
                rdev = md_import_device(dev, 0, 90);
                if (IS_ERR(rdev))
                        continue;

                if (test_bit(Faulty, &rdev->flags))
                        continue;

                set_bit(AutoDetected, &rdev->flags);
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }

        printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
                                                i_scanned, i_passed);

        autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
        struct mddev *mddev;
        struct list_head *tmp;
        int delay = 1;

        blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
        blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

        unregister_blkdev(MD_MAJOR, "md");
        unregister_blkdev(mdp_major, "mdp");
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);

        /* We cannot unload the modules while some process is
         * waiting for us in select() or poll() - wake them up
         */
        md_unloading = 1;
        while (waitqueue_active(&md_event_waiters)) {
                /* not safe to leave yet */
                wake_up(&md_event_waiters);
                msleep(delay);
                delay += delay;
        }
        remove_proc_entry("mdstat", NULL);

        for_each_mddev(mddev, tmp) {
                export_array(mddev);
                mddev->hold_active = 0;
        }
        destroy_workqueue(md_misc_wq);
        destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
        return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
        char *e;
        int num = simple_strtoul(val, &e, 10);
        if (*val && (*e == '\0' || *e == '\n')) {
                start_readonly = num;
                return 0;
        }
        return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
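
/*
 * These parameters can be set at module load, e.g.
 * "modprobe md_mod start_ro=1", or on the kernel command line as
 * "md_mod.start_ro=1" when md is built in (values shown are examples).
 */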

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);