/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */
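/*
 * Example (userspace, a sketch only -- md0 is a hypothetical array):
 * cap resync at 50 MB/sec system-wide, or for one array:
 *
 *   echo 50000 > /proc/sys/dev/raid/speed_limit_max
 *   echo 50000 > /sys/block/md0/md/sync_speed_max
 *
 * Values are in KB/sec, matching sysctl_speed_limit_{min,max} below.
 */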

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
        {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_table raid_dir_table[] = {
        {
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { }
};

static struct ctl_table raid_root_table[] = {
        {
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        {  }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
{
        struct bio *b;

        if (!mddev || !mddev->bio_set)
                return bio_alloc(gfp_mask, nr_iovecs);

        b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
        if (!b)
                return NULL;
        return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                            struct mddev *mddev)
{
        if (!mddev || !mddev->bio_set)
                return bio_clone(bio, gfp_mask);

        return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
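/*
 * Userspace sketch (not part of this driver): waiting for an md event
 * by polling /proc/mdstat.  The proc handler reports an exceptional
 * condition when md_event_count moves, so POLLPRI is the bit to watch;
 * re-reading the file re-arms the poll.  Error handling omitted.
 *
 *   int fd = open("/proc/mdstat", O_RDONLY);
 *   struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *   while (poll(&pfd, 1, -1) > 0) {
 *           char buf[4096];
 *           lseek(fd, 0, SEEK_SET);
 *           read(fd, buf, sizeof(buf));
 *           // parse buf and react to the new event
 *   }
 */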
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while holding
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)                                     \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                _tmp = all_mddevs.next;                                 \
                _mddev = NULL;});                                       \
             ({ if (_tmp != &all_mddevs)                                \
                        mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (_mddev) mddev_put(_mddev);                          \
                _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
                _tmp != &all_mddevs;});                                 \
             ({ spin_lock(&all_mddevs_lock);                            \
                _tmp = _tmp->next;})                                    \
                )
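/*
 * Usage sketch: the second argument is a struct list_head * cursor
 * that the macro advances under all_mddevs_lock, e.g.
 *
 *   struct mddev *mddev;
 *   struct list_head *tmp;
 *
 *   for_each_mddev(mddev, tmp) {
 *           // mddev is pinned by a reference here; the macro
 *           // drops it on the next iteration
 *   }
 *
 * As noted above, code that breaks out early still owns the
 * reference and must call mddev_put() itself.
 */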

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = q->queuedata;
        unsigned int sectors;
        int cpu;

        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
                return;
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
                return;
        }
        smp_rmb(); /* Ensure implications of 'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!mddev->suspended)
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        /*
         * save the sectors now since our bio can
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
        mddev->pers->make_request(mddev, bio);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
        part_stat_unlock();

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
        BUG_ON(mddev->suspended);
        mddev->suspended = 1;
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);

        del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
        mddev->suspended = 0;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);

        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
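/*
 * Typical pattern (a sketch): bracket a reconfiguration that must not
 * race with I/O between the two calls:
 *
 *   mddev_suspend(mddev);   // drain active_io, quiesce the personality
 *   // ... swap data structures, change geometry, etc ...
 *   mddev_resume(mddev);    // unquiesce and kick the threads
 *
 * mddev_suspend() sleeps until active_io reaches zero, so it must not
 * be called from the personality's own make_request path -- that
 * request is itself counted in active_io and would deadlock.
 */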

int mddev_congested(struct mddev *mddev, int bits)
{
        struct md_personality *pers = mddev->pers;
        int ret = 0;

        rcu_read_lock();
        if (mddev->suspended)
                ret = 1;
        else if (pers && pers->congested)
                ret = pers->congested(mddev, bits);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        return mddev_congested(mddev, bits);
}

static int md_mergeable_bvec(struct request_queue *q,
                             struct bvec_merge_data *bvm,
                             struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        int ret;
        rcu_read_lock();
        if (mddev->suspended) {
                /* Must always allow one vec */
                if (bvm->bi_size == 0)
                        ret = biovec->bv_len;
                else
                        ret = 0;
        } else {
                struct md_personality *pers = mddev->pers;
                if (pers && pers->mergeable_bvec)
                        ret = pers->mergeable_bvec(mddev, bvm, biovec);
                else
                        ret = biovec->bv_len;
        }
        rcu_read_unlock();
        return ret;
}
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                /* The pre-request flush has finished */
                queue_work(md_wq, &mddev->flush_work);
        }
        bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct md_rdev *rdev;

        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(WRITE_FLUSH, bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;

        if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_FLUSH;
                mddev->pers->make_request(mddev, bio);
        }

        mddev->flush_bio = NULL;
        wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
        spin_lock_irq(&mddev->lock);
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio,
                            mddev->lock);
        mddev->flush_bio = bio;
        spin_unlock_irq(&mddev->lock);

        INIT_WORK(&mddev->flush_work, submit_flushes);
        queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
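/*
 * Flush flow, in brief: md_flush_request() parks the bio in
 * mddev->flush_bio and queues submit_flushes(), which sends an empty
 * WRITE_FLUSH bio to every in-sync rdev.  When the last md_end_flush()
 * drops flush_pending to zero, md_submit_flush_data() runs: an empty
 * barrier bio is completed directly, otherwise REQ_FLUSH is stripped
 * and the payload is passed on to ->make_request().
 */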

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct mddev *mddev = cb->data;
        md_wakeup_thread(mddev->thread);
        kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
        struct bio_set *bs = NULL;

        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
                        /* We did a probe so need to clean up.  Call
                         * queue_work inside the spinlock so that
                         * flush_workqueue() after mddev_find will
                         * succeed in waiting for the work to be done.
                         */
                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                        queue_work(md_misc_wq, &mddev->del_work);
                } else
                        kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
        if (bs)
                bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        init_timer(&mddev->safemode_timer);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->reshape_backwards = 0;
        mddev->last_sync_action = "none";
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
        struct mddev *mddev, *new = NULL;

        if (unit && MAJOR(unit) != MD_MAJOR)
                unit &= ~((1<<MdpMinorShift)-1);

 retry:
        spin_lock(&all_mddevs_lock);

        if (unit) {
                list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                        if (mddev->unit == unit) {
                                mddev_get(mddev);
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return mddev;
                        }

                if (new) {
                        list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
                        new->hold_active = UNTIL_IOCTL;
                        return new;
                }
        } else if (new) {
                /* find an unused unit number */
                static int next_minor = 512;
                int start = next_minor;
                int is_free = 0;
                int dev = 0;
                while (!is_free) {
                        dev = MKDEV(MD_MAJOR, next_minor);
                        next_minor++;
                        if (next_minor > MINORMASK)
                                next_minor = 0;
                        if (next_minor == start) {
                                /* Oh dear, all in use. */
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return NULL;
                        }

                        is_free = 1;
                        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                                if (mddev->unit == dev) {
                                        is_free = 0;
                                        break;
                                }
                }
                new->unit = dev;
                new->md_minor = MINOR(dev);
                new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mddev_init(new);

        goto retry;
}
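/*
 * Note the allocate-outside-the-lock idiom above: mddev_find() drops
 * all_mddevs_lock, kzalloc()s a candidate mddev, then jumps back to
 * retry.  If another caller raced in and registered the same unit
 * first, the candidate is simply kfree()d.  This keeps a GFP_KERNEL
 * allocation out of the spinlock's critical section.
 */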

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So we set sysfs_active while the removal is happening,
                 * and anything else which might set ->to_remove or may
                 * otherwise change the sysfs namespace will fail with
                 * -EBUSY if sysfs_active is still set.
                 * We set sysfs_active under reconfig_mutex and elsewhere
                 * test it under the same mutex to ensure its correct value
                 * is seen.
                 */
                struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mddev->sysfs_active = 1;
                mutex_unlock(&mddev->reconfig_mutex);

                if (mddev->kobj.sd) {
                        if (to_remove != &md_redundancy_group)
                                sysfs_remove_group(&mddev->kobj, to_remove);
                        if (mddev->pers == NULL ||
                            mddev->pers->sync_request == NULL) {
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
                                mddev->sysfs_action = NULL;
                        }
                }
                mddev->sysfs_active = 0;
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        /* As we've dropped the mutex we need a spinlock to
         * make sure the thread doesn't disappear
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
        spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
        struct md_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
        sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
        return MD_NEW_SIZE_SECTORS(num_sectors);
}
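/*
 * Worked example (assuming MD_RESERVED_SECTORS is 128, i.e. a 64K
 * reservation): for a 10000-sector device,
 * MD_NEW_SIZE_SECTORS(10000) = (10000 & ~127) - 128 = 9984 - 128 = 9856,
 * so the 0.90 superblock sits at sector 9856, the last 64K-aligned
 * reservation before the end of the device.
 */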

static int alloc_disk_sb(struct md_rdev *rdev)
{
        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -ENOMEM;
        }

        return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
        if (rdev->bb_page) {
                put_page(rdev->bb_page);
                rdev->bb_page = NULL;
        }
        kfree(rdev->badblocks.page);
        rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
                       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
                WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
        }

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;

        atomic_inc(&mddev->pending_writes);
        submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
        /* wait for all superblock writes that were scheduled to complete */
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                 struct page *page, int rw, bool metadata_op)
{
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
        int ret;

        bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
                rdev->meta_bdev : rdev->bdev;
        if (metadata_op)
                bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
                bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
                bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
        submit_bio_wait(rw, bio);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
        char b[BDEVNAME_SIZE];

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
                sb1->set_uuid2 == sb2->set_uuid2 &&
                sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}

static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
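/*
 * Worked example for md_csum_fold() above:
 * md_csum_fold(0x12345678): the first pass gives 0x5678 + 0x1234 =
 * 0x68ac; the second pass only mops up a possible carry out of bit 16,
 * so the result always fits in 16 bits.
 */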

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
        u64 newcsum = 0;
        u32 *sb32 = (u32*)sb;
        int i;
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;

        for (i = 0; i < MD_SB_BYTES/4 ; i++)
                newcsum += sb32[i];
        csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
        /* This used to use csum_partial, which was wrong for several
         * reasons including that different results are returned on
         * different architectures.  It isn't critical that we get exactly
         * the same return value as before (we always csum_fold before
         * testing, and that removes any differences).  However as we
         * know that csum_partial always returned a 16bit value on
         * alphas, do a fold to maximise conformity to previous behaviour.
         */
        sb->sb_csum = md_csum_fold(disk_csum);
#else
        sb->sb_csum = disk_csum;
#endif
        return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char                *name;
        struct module       *owner;
        int                 (*load_super)(struct md_rdev *rdev,
                                          struct md_rdev *refdev,
                                          int minor_version);
        int                 (*validate_super)(struct mddev *mddev,
                                              struct md_rdev *rdev);
        void                (*sync_super)(struct mddev *mddev,
                                          struct md_rdev *rdev);
        unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
                                                sector_t num_sectors);
        int                 (*allow_new_offset)(struct md_rdev *rdev,
                                                unsigned long long new_offset);
};
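/*
 * The handlers are gathered into a table indexed by
 * mddev->major_version; conceptually (a sketch -- see the super_types
 * array later in this file):
 *
 *   static struct super_type super_types[] = {
 *           [0] = { .name = "0.90.0", .owner = THIS_MODULE,
 *                   .load_super = super_90_load,
 *                   .validate_super = super_90_validate, ... },
 *           [1] = { .name = "md-1", .load_super = super_1_load, ... },
 *   };
 */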

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
        if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
                return 0;
        printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
                mdname(mddev), mddev->pers->name);
        return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;

        /*
         * Calculate the position of the superblock (512-byte sectors);
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        rdev->sb_start = calc_dev_sboffset(rdev);

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->new_data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;
        rdev->badblocks.shift = -1;

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (!refdev) {
                ret = 1;
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
        /* Limit to 4TB as metadata cannot record more than that.
         * (not needed for Linear and RAID0 as metadata doesn't
         * record this size)
         */
        if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
                rdev->sectors = (2ULL << 32) - 2;

        if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = page_address(rdev->sb_page);
        __u64 ev1 = md_event(sb);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(Bitmap_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->external = 0;
                mddev->chunk_sectors = sb->chunk_size >> 9;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.space = 0;
                /* bitmap can use 60 K after the 4K superblocks */
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
                mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
                mddev->reshape_backwards = 0;

                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
                        mddev->delta_disks = sb->delta_disks;
                        mddev->new_level = sb->new_level;
                        mddev->new_layout = sb->new_layout;
                        mddev->new_chunk_sectors = sb->new_chunk >> 9;
                        if (mddev->delta_disks < 0)
                                mddev->reshape_backwards = 1;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_info.file == NULL) {
                        mddev->bitmap_info.offset =
                                mddev->bitmap_info.default_offset;
                        mddev->bitmap_info.space =
                                mddev->bitmap_info.default_space;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling, except
                 * for spares (which don't need an event count) */
                ++ev1;
                if (sb->disks[rdev->desc_nr].state & (
                            (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
                        if (ev1 < mddev->events)
                                return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
                if (ev1 < mddev->events)
                        set_bit(Bitmap_sync, &rdev->flags);
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) /* &&
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                        rdev->saved_raid_disk = desc->raid_disk;
                } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
                        /* active but not in sync implies recovery up to
                         * reshape position.  We don't know exactly where
                         * that is, so set to zero for now */
                        if (mddev->minor_version >= 91) {
                                rdev->recovery_offset = 0;
                                rdev->raid_disk = desc->raid_disk;
                        }
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        mdp_super_t *sb;
        struct md_rdev *rdev2;
        int next_spare = mddev->raid_disks;

        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
1199         int i;
1200         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1201
1202         rdev->sb_size = MD_SB_BYTES;
1203
1204         sb = page_address(rdev->sb_page);
1205
1206         memset(sb, 0, sizeof(*sb));
1207
1208         sb->md_magic = MD_SB_MAGIC;
1209         sb->major_version = mddev->major_version;
1210         sb->patch_version = mddev->patch_version;
1211         sb->gvalid_words  = 0; /* ignored */
1212         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1213         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1214         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1215         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1216
1217         sb->ctime = mddev->ctime;
1218         sb->level = mddev->level;
1219         sb->size = mddev->dev_sectors / 2;
1220         sb->raid_disks = mddev->raid_disks;
1221         sb->md_minor = mddev->md_minor;
1222         sb->not_persistent = 0;
1223         sb->utime = mddev->utime;
1224         sb->state = 0;
1225         sb->events_hi = (mddev->events>>32);
1226         sb->events_lo = (u32)mddev->events;
1227
1228         if (mddev->reshape_position == MaxSector)
1229                 sb->minor_version = 90;
1230         else {
1231                 sb->minor_version = 91;
1232                 sb->reshape_position = mddev->reshape_position;
1233                 sb->new_level = mddev->new_level;
1234                 sb->delta_disks = mddev->delta_disks;
1235                 sb->new_layout = mddev->new_layout;
1236                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1237         }
1238         mddev->minor_version = sb->minor_version;
1239         if (mddev->in_sync)
1240         {
1241                 sb->recovery_cp = mddev->recovery_cp;
1242                 sb->cp_events_hi = (mddev->events>>32);
1243                 sb->cp_events_lo = (u32)mddev->events;
1244                 if (mddev->recovery_cp == MaxSector)
1245                         sb->state = (1<< MD_SB_CLEAN);
1246         } else
1247                 sb->recovery_cp = 0;
1248
1249         sb->layout = mddev->layout;
1250         sb->chunk_size = mddev->chunk_sectors << 9;
1251
1252         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1253                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1254
1255         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1256         rdev_for_each(rdev2, mddev) {
1257                 mdp_disk_t *d;
1258                 int desc_nr;
1259                 int is_active = test_bit(In_sync, &rdev2->flags);
1260
1261                 if (rdev2->raid_disk >= 0 &&
1262                     sb->minor_version >= 91)
1263                         /* we have nowhere to store the recovery_offset,
1264                          * but if it is not below the reshape_position,
1265                          * we can piggy-back on that.
1266                          */
1267                         is_active = 1;
1268                 if (rdev2->raid_disk < 0 ||
1269                     test_bit(Faulty, &rdev2->flags))
1270                         is_active = 0;
1271                 if (is_active)
1272                         desc_nr = rdev2->raid_disk;
1273                 else
1274                         desc_nr = next_spare++;
1275                 rdev2->desc_nr = desc_nr;
1276                 d = &sb->disks[rdev2->desc_nr];
1277                 nr_disks++;
1278                 d->number = rdev2->desc_nr;
1279                 d->major = MAJOR(rdev2->bdev->bd_dev);
1280                 d->minor = MINOR(rdev2->bdev->bd_dev);
1281                 if (is_active)
1282                         d->raid_disk = rdev2->raid_disk;
1283                 else
1284                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1285                 if (test_bit(Faulty, &rdev2->flags))
1286                         d->state = (1<<MD_DISK_FAULTY);
1287                 else if (is_active) {
1288                         d->state = (1<<MD_DISK_ACTIVE);
1289                         if (test_bit(In_sync, &rdev2->flags))
1290                                 d->state |= (1<<MD_DISK_SYNC);
1291                         active++;
1292                         working++;
1293                 } else {
1294                         d->state = 0;
1295                         spare++;
1296                         working++;
1297                 }
1298                 if (test_bit(WriteMostly, &rdev2->flags))
1299                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1300         }
1301         /* now set the "removed" and "faulty" bits on any missing devices */
1302         for (i=0 ; i < mddev->raid_disks ; i++) {
1303                 mdp_disk_t *d = &sb->disks[i];
1304                 if (d->state == 0 && d->number == 0) {
1305                         d->number = i;
1306                         d->raid_disk = i;
1307                         d->state = (1<<MD_DISK_REMOVED);
1308                         d->state |= (1<<MD_DISK_FAULTY);
1309                         failed++;
1310                 }
1311         }
1312         sb->nr_disks = nr_disks;
1313         sb->active_disks = active;
1314         sb->working_disks = working;
1315         sb->failed_disks = failed;
1316         sb->spare_disks = spare;
1317
1318         sb->this_disk = sb->disks[rdev->desc_nr];
1319         sb->sb_csum = calc_sb_csum(sb);
1320 }
1321
1322 /*
1323  * rdev_size_change for 0.90.0
1324  */
1325 static unsigned long long
1326 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1327 {
1328         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1329                 return 0; /* component must fit device */
1330         if (rdev->mddev->bitmap_info.offset)
1331                 return 0; /* can't move bitmap */
1332         rdev->sb_start = calc_dev_sboffset(rdev);
1333         if (!num_sectors || num_sectors > rdev->sb_start)
1334                 num_sectors = rdev->sb_start;
1335         /* Limit to 4TB as metadata cannot record more than that.
1336          * 4TB == 2^32 KB, or 2*2^32 sectors.
1337          */
1338         if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1339                 num_sectors = (2ULL << 32) - 2;
1340         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1341                        rdev->sb_page);
1342         md_super_wait(rdev->mddev);
1343         return num_sectors;
1344 }
1345
1346 static int
1347 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1348 {
1349         /* non-zero offset changes not possible with v0.90 */
1350         return new_offset == 0;
1351 }
1352
1353 /*
1354  * version 1 superblock
1355  */
1356
1357 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1358 {
1359         __le32 disk_csum;
1360         u32 csum;
1361         unsigned long long newcsum;
1362         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1363         __le32 *isuper = (__le32*)sb;
1364
1365         disk_csum = sb->sb_csum;
1366         sb->sb_csum = 0;
1367         newcsum = 0;
1368         for (; size >= 4; size -= 4)
1369                 newcsum += le32_to_cpu(*isuper++);
1370
1371         if (size == 2)
1372                 newcsum += le16_to_cpu(*(__le16*) isuper);
1373
1374         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1375         sb->sb_csum = disk_csum;
1376         return cpu_to_le32(csum);
1377 }
1378
1379 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1380                             int acknowledged);
1381 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1382 {
1383         struct mdp_superblock_1 *sb;
1384         int ret;
1385         sector_t sb_start;
1386         sector_t sectors;
1387         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1388         int bmask;
1389
1390         /*
1391          * Calculate the position of the superblock in 512byte sectors.
1392          * It is always aligned to a 4K boundary and
1393          * depeding on minor_version, it can be:
1394          * 0: At least 8K, but less than 12K, from end of device
1395          * 1: At start of device
1396          * 2: 4K from start of device.
1397          */
1398         switch(minor_version) {
1399         case 0:
1400                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1401                 sb_start -= 8*2;
1402                 sb_start &= ~(sector_t)(4*2-1);
1403                 break;
1404         case 1:
1405                 sb_start = 0;
1406                 break;
1407         case 2:
1408                 sb_start = 8;
1409                 break;
1410         default:
1411                 return -EINVAL;
1412         }
1413         rdev->sb_start = sb_start;
1414
1415         /* superblock is rarely larger than 1K, but it can be larger,
1416          * and it is safe to read 4k, so we do that
1417          */
1418         ret = read_disk_sb(rdev, 4096);
1419         if (ret) return ret;
1420
1421         sb = page_address(rdev->sb_page);
1422
1423         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1424             sb->major_version != cpu_to_le32(1) ||
1425             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1426             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1427             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1428                 return -EINVAL;
1429
1430         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1431                 printk("md: invalid superblock checksum on %s\n",
1432                         bdevname(rdev->bdev,b));
1433                 return -EINVAL;
1434         }
1435         if (le64_to_cpu(sb->data_size) < 10) {
1436                 printk("md: data_size too small on %s\n",
1437                        bdevname(rdev->bdev,b));
1438                 return -EINVAL;
1439         }
1440         if (sb->pad0 ||
1441             sb->pad3[0] ||
1442             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1443                 /* Some padding is non-zero, might be a new feature */
1444                 return -EINVAL;
1445
1446         rdev->preferred_minor = 0xffff;
1447         rdev->data_offset = le64_to_cpu(sb->data_offset);
1448         rdev->new_data_offset = rdev->data_offset;
1449         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1450             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1451                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1452         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1453
1454         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1455         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1456         if (rdev->sb_size & bmask)
1457                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
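        /* e.g. max_dev == 384 gives sb_size = 384*2 + 256 = 1024 bytes; on a
         * disk with 4096-byte logical blocks (bmask == 4095) the rounding
         * above pads that out to a full 4096-byte write */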
1458
1459         if (minor_version
1460             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1461                 return -EINVAL;
1462         if (minor_version
1463             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1464                 return -EINVAL;
1465
1466         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1467                 rdev->desc_nr = -1;
1468         else
1469                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1470
1471         if (!rdev->bb_page) {
1472                 rdev->bb_page = alloc_page(GFP_KERNEL);
1473                 if (!rdev->bb_page)
1474                         return -ENOMEM;
1475         }
1476         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1477             rdev->badblocks.count == 0) {
1478                 /* need to load the bad block list.
1479                  * Currently we limit it to one page.
1480                  */
1481                 s32 offset;
1482                 sector_t bb_sector;
1483                 u64 *bbp;
1484                 int i;
1485                 int sectors = le16_to_cpu(sb->bblog_size);
1486                 if (sectors > (PAGE_SIZE / 512))
1487                         return -EINVAL;
1488                 offset = le32_to_cpu(sb->bblog_offset);
1489                 if (offset == 0)
1490                         return -EINVAL;
1491                 bb_sector = (long long)offset;
1492                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1493                                   rdev->bb_page, READ, true))
1494                         return -EIO;
1495                 bbp = (u64 *)page_address(rdev->bb_page);
1496                 rdev->badblocks.shift = sb->bblog_shift;
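                /* Each on-disk entry packs a start sector in the top 54 bits
                 * and a length in the low 10 bits.  E.g. (hypothetical entry,
                 * bblog_shift == 0) bb == 0x12C05 decodes to count == 5 bad
                 * sectors starting at sector 0x12C05 >> 10 == 75. */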
1497                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1498                         u64 bb = le64_to_cpu(*bbp);
1499                         int count = bb & (0x3ff);
1500                         u64 sector = bb >> 10;
1501                         sector <<= sb->bblog_shift;
1502                         count <<= sb->bblog_shift;
1503                         if (bb + 1 == 0)
1504                                 break;
1505                         if (md_set_badblocks(&rdev->badblocks,
1506                                              sector, count, 1) == 0)
1507                                 return -EINVAL;
1508                 }
1509         } else if (sb->bblog_offset != 0)
1510                 rdev->badblocks.shift = 0;
1511
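        /* Return convention: 1 means this superblock is the freshest seen
         * so far and should become the reference, 0 means refdev is at
         * least as new, and negative values are errors. */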
1512         if (!refdev) {
1513                 ret = 1;
1514         } else {
1515                 __u64 ev1, ev2;
1516                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1517
1518                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1519                     sb->level != refsb->level ||
1520                     sb->layout != refsb->layout ||
1521                     sb->chunksize != refsb->chunksize) {
1522                         printk(KERN_WARNING "md: %s has a strangely different"
1523                                 " superblock from %s\n",
1524                                 bdevname(rdev->bdev,b),
1525                                 bdevname(refdev->bdev,b2));
1526                         return -EINVAL;
1527                 }
1528                 ev1 = le64_to_cpu(sb->events);
1529                 ev2 = le64_to_cpu(refsb->events);
1530
1531                 if (ev1 > ev2)
1532                         ret = 1;
1533                 else
1534                         ret = 0;
1535         }
1536         if (minor_version) {
1537                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1538                 sectors -= rdev->data_offset;
1539         } else
1540                 sectors = rdev->sb_start;
1541         if (sectors < le64_to_cpu(sb->data_size))
1542                 return -EINVAL;
1543         rdev->sectors = le64_to_cpu(sb->data_size);
1544         return ret;
1545 }
1546
1547 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1548 {
1549         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1550         __u64 ev1 = le64_to_cpu(sb->events);
1551
1552         rdev->raid_disk = -1;
1553         clear_bit(Faulty, &rdev->flags);
1554         clear_bit(In_sync, &rdev->flags);
1555         clear_bit(Bitmap_sync, &rdev->flags);
1556         clear_bit(WriteMostly, &rdev->flags);
1557
1558         if (mddev->raid_disks == 0) {
1559                 mddev->major_version = 1;
1560                 mddev->patch_version = 0;
1561                 mddev->external = 0;
1562                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1563                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1564                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1565                 mddev->level = le32_to_cpu(sb->level);
1566                 mddev->clevel[0] = 0;
1567                 mddev->layout = le32_to_cpu(sb->layout);
1568                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1569                 mddev->dev_sectors = le64_to_cpu(sb->size);
1570                 mddev->events = ev1;
1571                 mddev->bitmap_info.offset = 0;
1572                 mddev->bitmap_info.space = 0;
1573                 /* Default location for the bitmap is 1K after the
1574                  * superblock, using 3K, for a total of 4K
1575                  */
1576                 mddev->bitmap_info.default_offset = 1024 >> 9;
1577                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1578                 mddev->reshape_backwards = 0;
1579
1580                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1581                 memcpy(mddev->uuid, sb->set_uuid, 16);
1582
1583                 mddev->max_disks =  (4096-256)/2;
1584
1585                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1586                     mddev->bitmap_info.file == NULL) {
1587                         mddev->bitmap_info.offset =
1588                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1589                         /* Metadata doesn't record how much space is available.
1590                          * For 1.0, we assume the bitmap may use the space up
1591                          * to the superblock if it lies before it, else up to
1592                          * 4K beyond the superblock.  Other versions allow no change.
1593                          */
1594                         if (mddev->minor_version > 0)
1595                                 mddev->bitmap_info.space = 0;
1596                         else if (mddev->bitmap_info.offset > 0)
1597                                 mddev->bitmap_info.space =
1598                                         8 - mddev->bitmap_info.offset;
1599                         else
1600                                 mddev->bitmap_info.space =
1601                                         -mddev->bitmap_info.offset;
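                        /* e.g. (hypothetical layout) 1.0 metadata with
                         * bitmap_offset == -16 leaves room for 16 sectors
                         * (8K) of bitmap before the superblock */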
1602                 }
1603
1604                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1605                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1606                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1607                         mddev->new_level = le32_to_cpu(sb->new_level);
1608                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1609                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1610                         if (mddev->delta_disks < 0 ||
1611                             (mddev->delta_disks == 0 &&
1612                              (le32_to_cpu(sb->feature_map)
1613                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1614                                 mddev->reshape_backwards = 1;
1615                 } else {
1616                         mddev->reshape_position = MaxSector;
1617                         mddev->delta_disks = 0;
1618                         mddev->new_level = mddev->level;
1619                         mddev->new_layout = mddev->layout;
1620                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1621                 }
1622
1623         } else if (mddev->pers == NULL) {
1624                 /* Insist on a good event counter while assembling, except
1625                  * for spares (which don't need an event count) */
1626                 ++ev1;
1627                 if (rdev->desc_nr >= 0 &&
1628                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1629                     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1630                         if (ev1 < mddev->events)
1631                                 return -EINVAL;
1632         } else if (mddev->bitmap) {
1633                 /* If adding to array with a bitmap, then we can accept an
1634                  * older device, but not too old.
1635                  */
1636                 if (ev1 < mddev->bitmap->events_cleared)
1637                         return 0;
1638                 if (ev1 < mddev->events)
1639                         set_bit(Bitmap_sync, &rdev->flags);
1640         } else {
1641                 if (ev1 < mddev->events)
1642                         /* just a hot-add of a new device, leave raid_disk at -1 */
1643                         return 0;
1644         }
1645         if (mddev->level != LEVEL_MULTIPATH) {
1646                 int role;
1647                 if (rdev->desc_nr < 0 ||
1648                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1649                         role = 0xffff;
1650                         rdev->desc_nr = -1;
1651                 } else
1652                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1653                 switch(role) {
1654                 case 0xffff: /* spare */
1655                         break;
1656                 case 0xfffe: /* faulty */
1657                         set_bit(Faulty, &rdev->flags);
1658                         break;
1659                 default:
1660                         rdev->saved_raid_disk = role;
1661                         if ((le32_to_cpu(sb->feature_map) &
1662                              MD_FEATURE_RECOVERY_OFFSET)) {
1663                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1664                                 if (!(le32_to_cpu(sb->feature_map) &
1665                                       MD_FEATURE_RECOVERY_BITMAP))
1666                                         rdev->saved_raid_disk = -1;
1667                         } else
1668                                 set_bit(In_sync, &rdev->flags);
1669                         rdev->raid_disk = role;
1670                         break;
1671                 }
1672                 if (sb->devflags & WriteMostly1)
1673                         set_bit(WriteMostly, &rdev->flags);
1674                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1675                         set_bit(Replacement, &rdev->flags);
1676         } else /* MULTIPATH devices are always in_sync */
1677                 set_bit(In_sync, &rdev->flags);
1678
1679         return 0;
1680 }
1681
1682 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1683 {
1684         struct mdp_superblock_1 *sb;
1685         struct md_rdev *rdev2;
1686         int max_dev, i;
1687         /* make rdev->sb match mddev and rdev data. */
1688
1689         sb = page_address(rdev->sb_page);
1690
1691         sb->feature_map = 0;
1692         sb->pad0 = 0;
1693         sb->recovery_offset = cpu_to_le64(0);
1694         memset(sb->pad3, 0, sizeof(sb->pad3));
1695
1696         sb->utime = cpu_to_le64((__u64)mddev->utime);
1697         sb->events = cpu_to_le64(mddev->events);
1698         if (mddev->in_sync)
1699                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1700         else
1701                 sb->resync_offset = cpu_to_le64(0);
1702
1703         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1704
1705         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1706         sb->size = cpu_to_le64(mddev->dev_sectors);
1707         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1708         sb->level = cpu_to_le32(mddev->level);
1709         sb->layout = cpu_to_le32(mddev->layout);
1710
1711         if (test_bit(WriteMostly, &rdev->flags))
1712                 sb->devflags |= WriteMostly1;
1713         else
1714                 sb->devflags &= ~WriteMostly1;
1715         sb->data_offset = cpu_to_le64(rdev->data_offset);
1716         sb->data_size = cpu_to_le64(rdev->sectors);
1717
1718         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1719                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1720                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1721         }
1722
1723         if (rdev->raid_disk >= 0 &&
1724             !test_bit(In_sync, &rdev->flags)) {
1725                 sb->feature_map |=
1726                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1727                 sb->recovery_offset =
1728                         cpu_to_le64(rdev->recovery_offset);
1729                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1730                         sb->feature_map |=
1731                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1732         }
1733         if (test_bit(Replacement, &rdev->flags))
1734                 sb->feature_map |=
1735                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1736
1737         if (mddev->reshape_position != MaxSector) {
1738                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1739                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1740                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1741                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1742                 sb->new_level = cpu_to_le32(mddev->new_level);
1743                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1744                 if (mddev->delta_disks == 0 &&
1745                     mddev->reshape_backwards)
1746                         sb->feature_map
1747                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1748                 if (rdev->new_data_offset != rdev->data_offset) {
1749                         sb->feature_map
1750                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1751                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1752                                                              - rdev->data_offset));
1753                 }
1754         }
1755
1756         if (rdev->badblocks.count == 0)
1757                 /* Nothing to do for bad blocks */ ;
1758         else if (sb->bblog_offset == 0)
1759                 /* Cannot record bad blocks on this device */
1760                 md_error(mddev, rdev);
1761         else {
1762                 struct badblocks *bb = &rdev->badblocks;
1763                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1764                 u64 *p = bb->page;
1765                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1766                 if (bb->changed) {
1767                         unsigned seq;
1768
1769 retry:
1770                         seq = read_seqbegin(&bb->lock);
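                        /* lockless snapshot: if the list changes while we
                         * copy it out, read_seqretry() below notices and
                         * the copy is restarted from scratch */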
1771
1772                         memset(bbp, 0xff, PAGE_SIZE);
1773
1774                         for (i = 0 ; i < bb->count ; i++) {
1775                                 u64 internal_bb = p[i];
1776                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1777                                                 | BB_LEN(internal_bb));
1778                                 bbp[i] = cpu_to_le64(store_bb);
1779                         }
1780                         bb->changed = 0;
1781                         if (read_seqretry(&bb->lock, seq))
1782                                 goto retry;
1783
1784                         bb->sector = (rdev->sb_start +
1785                                       (int)le32_to_cpu(sb->bblog_offset));
1786                         bb->size = le16_to_cpu(sb->bblog_size);
1787                 }
1788         }
1789
1790         max_dev = 0;
1791         rdev_for_each(rdev2, mddev)
1792                 if (rdev2->desc_nr+1 > max_dev)
1793                         max_dev = rdev2->desc_nr+1;
1794
1795         if (max_dev > le32_to_cpu(sb->max_dev)) {
1796                 int bmask;
1797                 sb->max_dev = cpu_to_le32(max_dev);
1798                 rdev->sb_size = max_dev * 2 + 256;
1799                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1800                 if (rdev->sb_size & bmask)
1801                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1802         } else
1803                 max_dev = le32_to_cpu(sb->max_dev);
1804
1805         for (i = 0; i < max_dev; i++)
1806                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1807
1808         rdev_for_each(rdev2, mddev) {
1809                 i = rdev2->desc_nr;
1810                 if (test_bit(Faulty, &rdev2->flags))
1811                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1812                 else if (test_bit(In_sync, &rdev2->flags))
1813                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1814                 else if (rdev2->raid_disk >= 0)
1815                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1816                 else
1817                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1818         }
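        /* e.g. (hypothetical 3-disk array with one spare, disk 2 failed)
         * the role table might now read { 0, 1, 0xfffe, 0xffff } */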
1819
1820         sb->sb_csum = calc_sb_1_csum(sb);
1821 }
1822
1823 static unsigned long long
1824 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1825 {
1826         struct mdp_superblock_1 *sb;
1827         sector_t max_sectors;
1828         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1829                 return 0; /* component must fit device */
1830         if (rdev->data_offset != rdev->new_data_offset)
1831                 return 0; /* too confusing */
1832         if (rdev->sb_start < rdev->data_offset) {
1833                 /* minor versions 1 and 2; superblock before data */
1834                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1835                 max_sectors -= rdev->data_offset;
1836                 if (!num_sectors || num_sectors > max_sectors)
1837                         num_sectors = max_sectors;
1838         } else if (rdev->mddev->bitmap_info.offset) {
1839                 /* minor version 0 with bitmap we can't move */
1840                 return 0;
1841         } else {
1842                 /* minor version 0; superblock after data */
1843                 sector_t sb_start;
1844                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1845                 sb_start &= ~(sector_t)(4*2 - 1);
1846                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1847                 if (!num_sectors || num_sectors > max_sectors)
1848                         num_sectors = max_sectors;
1849                 rdev->sb_start = sb_start;
1850         }
1851         sb = page_address(rdev->sb_page);
1852         sb->data_size = cpu_to_le64(num_sectors);
1853         sb->super_offset = cpu_to_le64(rdev->sb_start);
1854         sb->sb_csum = calc_sb_1_csum(sb);
1855         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1856                        rdev->sb_page);
1857         md_super_wait(rdev->mddev);
1858         return num_sectors;
1859
1860 }
1861
1862 static int
1863 super_1_allow_new_offset(struct md_rdev *rdev,
1864                          unsigned long long new_offset)
1865 {
1866         /* All necessary checks on new >= old have been done */
1867         struct bitmap *bitmap;
1868         if (new_offset >= rdev->data_offset)
1869                 return 1;
1870
1871         /* with 1.0 metadata, there is no metadata to tread on
1872          * so we can always move back */
1873         if (rdev->mddev->minor_version == 0)
1874                 return 1;
1875
1876         /* otherwise we must be sure not to step on
1877          * any metadata, so stay:
1878          * 36K beyond start of superblock
1879          * beyond end of badblocks
1880          * beyond write-intent bitmap
1881          */
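        /* e.g. with 1.2 metadata sb_start == 8, so new_offset must be at
         * least 8 + (32+4)*2 == 80 sectors, i.e. 40K from start of device */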
1882         if (rdev->sb_start + (32+4)*2 > new_offset)
1883                 return 0;
1884         bitmap = rdev->mddev->bitmap;
1885         if (bitmap && !rdev->mddev->bitmap_info.file &&
1886             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1887             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1888                 return 0;
1889         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1890                 return 0;
1891
1892         return 1;
1893 }
1894
1895 static struct super_type super_types[] = {
1896         [0] = {
1897                 .name   = "0.90.0",
1898                 .owner  = THIS_MODULE,
1899                 .load_super         = super_90_load,
1900                 .validate_super     = super_90_validate,
1901                 .sync_super         = super_90_sync,
1902                 .rdev_size_change   = super_90_rdev_size_change,
1903                 .allow_new_offset   = super_90_allow_new_offset,
1904         },
1905         [1] = {
1906                 .name   = "md-1",
1907                 .owner  = THIS_MODULE,
1908                 .load_super         = super_1_load,
1909                 .validate_super     = super_1_validate,
1910                 .sync_super         = super_1_sync,
1911                 .rdev_size_change   = super_1_rdev_size_change,
1912                 .allow_new_offset   = super_1_allow_new_offset,
1913         },
1914 };
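/*
 * A rough usage sketch (not a real call site from this file): assembling a
 * v1.2 member device dispatches through this table as
 *
 *      err = super_types[1].load_super(rdev, refdev, 2);
 *
 * and sync_super() below performs the same dispatch for ->sync_super.
 */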
1915
1916 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1917 {
1918         if (mddev->sync_super) {
1919                 mddev->sync_super(mddev, rdev);
1920                 return;
1921         }
1922
1923         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1924
1925         super_types[mddev->major_version].sync_super(mddev, rdev);
1926 }
1927
1928 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1929 {
1930         struct md_rdev *rdev, *rdev2;
1931
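        /* bd_contains points at the whole disk, so two rdevs that are
         * different partitions of the same physical disk compare equal */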
1932         rcu_read_lock();
1933         rdev_for_each_rcu(rdev, mddev1)
1934                 rdev_for_each_rcu(rdev2, mddev2)
1935                         if (rdev->bdev->bd_contains ==
1936                             rdev2->bdev->bd_contains) {
1937                                 rcu_read_unlock();
1938                                 return 1;
1939                         }
1940         rcu_read_unlock();
1941         return 0;
1942 }
1943
1944 static LIST_HEAD(pending_raid_disks);
1945
1946 /*
1947  * Try to register data integrity profile for an mddev
1948  *
1949  * This is called when an array is started and after a disk has been kicked
1950  * from the array. It only succeeds if all working and active component devices
1951  * are integrity capable with matching profiles.
1952  */
1953 int md_integrity_register(struct mddev *mddev)
1954 {
1955         struct md_rdev *rdev, *reference = NULL;
1956
1957         if (list_empty(&mddev->disks))
1958                 return 0; /* nothing to do */
1959         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1960                 return 0; /* shouldn't register, or already is */
1961         rdev_for_each(rdev, mddev) {
1962                 /* skip spares and non-functional disks */
1963                 if (test_bit(Faulty, &rdev->flags))
1964                         continue;
1965                 if (rdev->raid_disk < 0)
1966                         continue;
1967                 if (!reference) {
1968                         /* Use the first rdev as the reference */
1969                         reference = rdev;
1970                         continue;
1971                 }
1972                 /* does this rdev's profile match the reference profile? */
1973                 if (blk_integrity_compare(reference->bdev->bd_disk,
1974                                 rdev->bdev->bd_disk) < 0)
1975                         return -EINVAL;
1976         }
1977         if (!reference || !bdev_get_integrity(reference->bdev))
1978                 return 0;
1979         /*
1980          * All component devices are integrity capable and have matching
1981          * profiles, register the common profile for the md device.
1982          */
1983         if (blk_integrity_register(mddev->gendisk,
1984                         bdev_get_integrity(reference->bdev)) != 0) {
1985                 printk(KERN_ERR "md: failed to register integrity for %s\n",
1986                         mdname(mddev));
1987                 return -EINVAL;
1988         }
1989         printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1990         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1991                 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
1992                        mdname(mddev));
1993                 return -EINVAL;
1994         }
1995         return 0;
1996 }
1997 EXPORT_SYMBOL(md_integrity_register);
1998
1999 /* Disable data integrity if non-capable/non-matching disk is being added */
2000 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2001 {
2002         struct blk_integrity *bi_rdev;
2003         struct blk_integrity *bi_mddev;
2004
2005         if (!mddev->gendisk)
2006                 return;
2007
2008         bi_rdev = bdev_get_integrity(rdev->bdev);
2009         bi_mddev = blk_get_integrity(mddev->gendisk);
2010
2011         if (!bi_mddev) /* nothing to do */
2012                 return;
2013         if (rdev->raid_disk < 0) /* skip spares */
2014                 return;
2015         if (bi_rdev && blk_integrity_compare(mddev->gendisk,
2016                                              rdev->bdev->bd_disk) >= 0)
2017                 return;
2018         printk(KERN_NOTICE "md: disabling data integrity on %s\n", mdname(mddev));
2019         blk_integrity_unregister(mddev->gendisk);
2020 }
2021 EXPORT_SYMBOL(md_integrity_add_rdev);
2022
2023 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2024 {
2025         char b[BDEVNAME_SIZE];
2026         struct kobject *ko;
2027         char *s;
2028         int err;
2029
2030         /* prevent duplicates */
2031         if (find_rdev(mddev, rdev->bdev->bd_dev))
2032                 return -EEXIST;
2033
2034         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2035         if (rdev->sectors && (mddev->dev_sectors == 0 ||
2036                         rdev->sectors < mddev->dev_sectors)) {
2037                 if (mddev->pers) {
2038                         /* Cannot change size, so fail
2039                          * If mddev->level <= 0, then we don't care
2040                          * about aligning sizes (e.g. linear)
2041                          */
2042                         if (mddev->level > 0)
2043                                 return -ENOSPC;
2044                 } else
2045                         mddev->dev_sectors = rdev->sectors;
2046         }
2047
2048         /* Verify rdev->desc_nr is unique.
2049          * If it is -1, assign a free number, else
2050          * check number is not in use
2051          */
2052         rcu_read_lock();
2053         if (rdev->desc_nr < 0) {
2054                 int choice = 0;
2055                 if (mddev->pers)
2056                         choice = mddev->raid_disks;
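                /* e.g. on a running 4-disk array the search for a free
                 * number for a new (spare) device starts at 4, so the
                 * active slots 0-3 are never reused */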
2057                 while (md_find_rdev_nr_rcu(mddev, choice))
2058                         choice++;
2059                 rdev->desc_nr = choice;
2060         } else {
2061                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2062                         rcu_read_unlock();
2063                         return -EBUSY;
2064                 }
2065         }
2066         rcu_read_unlock();
2067         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2068                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2069                        mdname(mddev), mddev->max_disks);
2070                 return -EBUSY;
2071         }
2072         bdevname(rdev->bdev,b);
2073         while ((s = strchr(b, '/')) != NULL)
2074                 *s = '!';
2075
2076         rdev->mddev = mddev;
2077         printk(KERN_INFO "md: bind<%s>\n", b);
2078
2079         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2080                 goto fail;
2081
2082         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2083         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2084                 /* failure here is OK */;
2085         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2086
2087         list_add_rcu(&rdev->same_set, &mddev->disks);
2088         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2089
2090         /* May as well allow recovery to be retried once */
2091         mddev->recovery_disabled++;
2092
2093         return 0;
2094
2095  fail:
2096         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2097                b, mdname(mddev));
2098         return err;
2099 }
2100
2101 static void md_delayed_delete(struct work_struct *ws)
2102 {
2103         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2104         kobject_del(&rdev->kobj);
2105         kobject_put(&rdev->kobj);
2106 }
2107
2108 static void unbind_rdev_from_array(struct md_rdev *rdev)
2109 {
2110         char b[BDEVNAME_SIZE];
2111
2112         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2113         list_del_rcu(&rdev->same_set);
2114         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2115         rdev->mddev = NULL;
2116         sysfs_remove_link(&rdev->kobj, "block");
2117         sysfs_put(rdev->sysfs_state);
2118         rdev->sysfs_state = NULL;
2119         rdev->badblocks.count = 0;
2120         /* We need to delay this, otherwise we can deadlock when
2121          * writing 'remove' to "dev/state".  We also need
2122          * to delay it due to rcu usage.
2123          */
2124         synchronize_rcu();
2125         INIT_WORK(&rdev->del_work, md_delayed_delete);
2126         kobject_get(&rdev->kobj);
2127         queue_work(md_misc_wq, &rdev->del_work);
2128 }
2129
2130 /*
2131  * prevent the device from being mounted, repartitioned or
2132  * otherwise reused by a RAID array (or any other kernel
2133  * subsystem), by bd_claiming the device.
2134  */
2135 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2136 {
2137         int err = 0;
2138         struct block_device *bdev;
2139         char b[BDEVNAME_SIZE];
2140
2141         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2142                                  shared ? (struct md_rdev *)lock_rdev : rdev);
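        /* the lock_rdev function address is only a holder token: every
         * shared claim passes the same token so they can coexist, while
         * an exclusive claim passes the unique rdev pointer */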
2143         if (IS_ERR(bdev)) {
2144                 printk(KERN_ERR "md: could not open %s.\n",
2145                         __bdevname(dev, b));
2146                 return PTR_ERR(bdev);
2147         }
2148         rdev->bdev = bdev;
2149         return err;
2150 }
2151
2152 static void unlock_rdev(struct md_rdev *rdev)
2153 {
2154         struct block_device *bdev = rdev->bdev;
2155         rdev->bdev = NULL;
2156         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2157 }
2158
2159 void md_autodetect_dev(dev_t dev);
2160
2161 static void export_rdev(struct md_rdev *rdev)
2162 {
2163         char b[BDEVNAME_SIZE];
2164
2165         printk(KERN_INFO "md: export_rdev(%s)\n",
2166                 bdevname(rdev->bdev,b));
2167         md_rdev_clear(rdev);
2168 #ifndef MODULE
2169         if (test_bit(AutoDetected, &rdev->flags))
2170                 md_autodetect_dev(rdev->bdev->bd_dev);
2171 #endif
2172         unlock_rdev(rdev);
2173         kobject_put(&rdev->kobj);
2174 }
2175
2176 void md_kick_rdev_from_array(struct md_rdev *rdev)
2177 {
2178         unbind_rdev_from_array(rdev);
2179         export_rdev(rdev);
2180 }
2181 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2182
2183 static void export_array(struct mddev *mddev)
2184 {
2185         struct md_rdev *rdev;
2186
2187         while (!list_empty(&mddev->disks)) {
2188                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2189                                         same_set);
2190                 md_kick_rdev_from_array(rdev);
2191         }
2192         mddev->raid_disks = 0;
2193         mddev->major_version = 0;
2194 }
2195
2196 static void sync_sbs(struct mddev *mddev, int nospares)
2197 {
2198         /* Update each superblock (in-memory image), but
2199          * if we are allowed to, skip spares which already
2200          * have the right event counter, or have one earlier
2201          * (which would mean they aren't being marked as dirty
2202          * with the rest of the array)
2203          */
2204         struct md_rdev *rdev;
2205         rdev_for_each(rdev, mddev) {
2206                 if (rdev->sb_events == mddev->events ||
2207                     (nospares &&
2208                      rdev->raid_disk < 0 &&
2209                      rdev->sb_events+1 == mddev->events)) {
2210                         /* Don't update this superblock */
2211                         rdev->sb_loaded = 2;
2212                 } else {
2213                         sync_super(mddev, rdev);
2214                         rdev->sb_loaded = 1;
2215                 }
2216         }
2217 }
2218
2219 void md_update_sb(struct mddev *mddev, int force_change)
2220 {
2221         struct md_rdev *rdev;
2222         int sync_req;
2223         int nospares = 0;
2224         int any_badblocks_changed = 0;
2225
2226         if (mddev->ro) {
2227                 if (force_change)
2228                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2229                 return;
2230         }
2231 repeat:
2232         /* First make sure individual recovery_offsets are correct */
2233         rdev_for_each(rdev, mddev) {
2234                 if (rdev->raid_disk >= 0 &&
2235                     mddev->delta_disks >= 0 &&
2236                     !test_bit(In_sync, &rdev->flags) &&
2237                     mddev->curr_resync_completed > rdev->recovery_offset)
2238                         rdev->recovery_offset = mddev->curr_resync_completed;
2239
2240         }
2241         if (!mddev->persistent) {
2242                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2243                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2244                 if (!mddev->external) {
2245                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2246                         rdev_for_each(rdev, mddev) {
2247                                 if (rdev->badblocks.changed) {
2248                                         rdev->badblocks.changed = 0;
2249                                         md_ack_all_badblocks(&rdev->badblocks);
2250                                         md_error(mddev, rdev);
2251                                 }
2252                                 clear_bit(Blocked, &rdev->flags);
2253                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2254                                 wake_up(&rdev->blocked_wait);
2255                         }
2256                 }
2257                 wake_up(&mddev->sb_wait);
2258                 return;
2259         }
2260
2261         spin_lock(&mddev->lock);
2262
2263         mddev->utime = get_seconds();
2264
2265         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2266                 force_change = 1;
2267         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2268                 /* just a clean <-> dirty transition; possibly leave spares
2269                  * alone, though if the event count doesn't have the right
2270                  * even/odd parity, we will have to update the spares after all
2271                  */
2272                 nospares = 1;
2273         if (force_change)
2274                 nospares = 0;
2275         if (mddev->degraded)
2276                 /* If the array is degraded, then skipping spares is both
2277                  * dangerous and fairly pointless.
2278                  * Dangerous because a device that was removed from the array
2279                  * might have an event count that still looks up-to-date,
2280                  * so it can be re-added without a resync.
2281                  * Pointless because if there are any spares to skip,
2282                  * then a recovery will happen, the array will soon no longer
2283                  * be degraded, and the spare can go back to sleep.
2284                  */
2285                 nospares = 0;
2286
2287         sync_req = mddev->in_sync;
2288
2289         /* If this is just a dirty<->clean transition, and the array is clean
2290          * and 'events' is odd, we can roll back to the previous clean state */
2291         if (nospares
2292             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2293             && mddev->can_decrease_events
2294             && mddev->events != 1) {
2295                 mddev->events--;
2296                 mddev->can_decrease_events = 0;
2297         } else {
2298                 /* otherwise we have to go forward and ... */
2299                 mddev->events++;
2300                 mddev->can_decrease_events = nospares;
2301         }
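        /* e.g. the array goes idle at events == 4, a write bumps it to 5
         * (dirty), and the next clean transition may roll back to 4: the
         * spares' superblocks still say 4, so they need no rewrite */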
2302
2303         /*
2304          * This 64-bit counter should never wrap.
2305          * Either we are in the year ~1 trillion A.D., assuming
2306          * 1 reboot per second, or we have a bug...
2307          */
2308         WARN_ON(mddev->events == 0);
2309
2310         rdev_for_each(rdev, mddev) {
2311                 if (rdev->badblocks.changed)
2312                         any_badblocks_changed++;
2313                 if (test_bit(Faulty, &rdev->flags))
2314                         set_bit(FaultRecorded, &rdev->flags);
2315         }
2316
2317         sync_sbs(mddev, nospares);
2318         spin_unlock(&mddev->lock);
2319
2320         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2321                  mdname(mddev), mddev->in_sync);
2322
2323         bitmap_update_sb(mddev->bitmap);
2324         rdev_for_each(rdev, mddev) {
2325                 char b[BDEVNAME_SIZE];
2326
2327                 if (rdev->sb_loaded != 1)
2328                         continue; /* no noise on spare devices */
2329
2330                 if (!test_bit(Faulty, &rdev->flags)) {
2331                         md_super_write(mddev, rdev,
2332                                        rdev->sb_start, rdev->sb_size,
2333                                        rdev->sb_page);
2334                         pr_debug("md: (write) %s's sb offset: %llu\n",
2335                                  bdevname(rdev->bdev, b),
2336                                  (unsigned long long)rdev->sb_start);
2337                         rdev->sb_events = mddev->events;
2338                         if (rdev->badblocks.size) {
2339                                 md_super_write(mddev, rdev,
2340                                                rdev->badblocks.sector,
2341                                                rdev->badblocks.size << 9,
2342                                                rdev->bb_page);
2343                                 rdev->badblocks.size = 0;
2344                         }
2345
2346                 } else
2347                         pr_debug("md: %s (skipping faulty)\n",
2348                                  bdevname(rdev->bdev, b));
2349
2350                 if (mddev->level == LEVEL_MULTIPATH)
2351                         /* only need to write one superblock... */
2352                         break;
2353         }
2354         md_super_wait(mddev);
2355         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2356
2357         spin_lock(&mddev->lock);
2358         if (mddev->in_sync != sync_req ||
2359             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2360                 /* have to write it out again */
2361                 spin_unlock(&mddev->lock);
2362                 goto repeat;
2363         }
2364         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2365         spin_unlock(&mddev->lock);
2366         wake_up(&mddev->sb_wait);
2367         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2368                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2369
2370         rdev_for_each(rdev, mddev) {
2371                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2372                         clear_bit(Blocked, &rdev->flags);
2373
2374                 if (any_badblocks_changed)
2375                         md_ack_all_badblocks(&rdev->badblocks);
2376                 clear_bit(BlockedBadBlocks, &rdev->flags);
2377                 wake_up(&rdev->blocked_wait);
2378         }
2379 }
2380 EXPORT_SYMBOL(md_update_sb);
2381
2382 static int add_bound_rdev(struct md_rdev *rdev)
2383 {
2384         struct mddev *mddev = rdev->mddev;
2385         int err = 0;
2386
2387         if (!mddev->pers->hot_remove_disk) {
2388                 /* If there is hot_add_disk but no hot_remove_disk
2389                  * then added disks for geometry changes,
2390                  * and should be added immediately.
2391                  */
2392                 super_types[mddev->major_version].
2393                         validate_super(mddev, rdev);
2394                 err = mddev->pers->hot_add_disk(mddev, rdev);
2395                 if (err) {
2396                         unbind_rdev_from_array(rdev);
2397                         export_rdev(rdev);
2398                         return err;
2399                 }
2400         }
2401         sysfs_notify_dirent_safe(rdev->sysfs_state);
2402
2403         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2404         if (mddev->degraded)
2405                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2406         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2407         md_new_event(mddev);
2408         md_wakeup_thread(mddev->thread);
2409         return 0;
2410 }
2411
2412 /* words written to sysfs files may, or may not, be \n terminated.
2413  * We want to accept either case. For this we use cmd_match.
2414  */
2415 static int cmd_match(const char *cmd, const char *str)
2416 {
2417         /* See if cmd, written into a sysfs file, matches
2418          * str.  They must either be the same, or cmd can
2419          * have a trailing newline
2420          */
2421         while (*cmd && *str && *cmd == *str) {
2422                 cmd++;
2423                 str++;
2424         }
2425         if (*cmd == '\n')
2426                 cmd++;
2427         if (*str || *cmd)
2428                 return 0;
2429         return 1;
2430 }
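/* e.g. cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both return 1, while a mere prefix such as cmd_match("fault", "faulty")
 * returns 0 */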
2431
2432 struct rdev_sysfs_entry {
2433         struct attribute attr;
2434         ssize_t (*show)(struct md_rdev *, char *);
2435         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2436 };
2437
2438 static ssize_t
2439 state_show(struct md_rdev *rdev, char *page)
2440 {
2441         char *sep = "";
2442         size_t len = 0;
2443         unsigned long flags = ACCESS_ONCE(rdev->flags);
2444
2445         if (test_bit(Faulty, &flags) ||
2446             rdev->badblocks.unacked_exist) {
2447                 len += sprintf(page+len, "%sfaulty", sep);
2448                 sep = ",";
2449         }
2450         if (test_bit(In_sync, &flags)) {
2451                 len += sprintf(page+len, "%sin_sync", sep);
2452                 sep = ",";
2453         }
2454         if (test_bit(WriteMostly, &flags)) {
2455                 len += sprintf(page+len, "%swrite_mostly", sep);
2456                 sep = ",";
2457         }
2458         if (test_bit(Blocked, &flags) ||
2459             (rdev->badblocks.unacked_exist
2460              && !test_bit(Faulty, &flags))) {
2461                 len += sprintf(page+len, "%sblocked", sep);
2462                 sep = ",";
2463         }
2464         if (!test_bit(Faulty, &flags) &&
2465             !test_bit(In_sync, &flags)) {
2466                 len += sprintf(page+len, "%sspare", sep);
2467                 sep = ",";
2468         }
2469         if (test_bit(WriteErrorSeen, &flags)) {
2470                 len += sprintf(page+len, "%swrite_error", sep);
2471                 sep = ",";
2472         }
2473         if (test_bit(WantReplacement, &flags)) {
2474                 len += sprintf(page+len, "%swant_replacement", sep);
2475                 sep = ",";
2476         }
2477         if (test_bit(Replacement, &flags)) {
2478                 len += sprintf(page+len, "%sreplacement", sep);
2479                 sep = ",";
2480         }
2481
2482         return len+sprintf(page+len, "\n");
2483 }
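/* e.g. a healthy write-mostly member reads back as "in_sync,write_mostly\n",
 * while a device with no flags set reads as "spare\n" */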
2484
2485 static ssize_t
2486 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2487 {
2488         /* can write
2489          *  faulty  - simulates an error
2490          *  remove  - disconnects the device
2491          *  writemostly - sets write_mostly
2492          *  -writemostly - clears write_mostly
2493          *  blocked - sets the Blocked flags
2494          *  -blocked - clears the Blocked and possibly simulates an error
2495          *  insync - sets In_sync provided the device isn't active
2496          *  -insync - clears In_sync for a device with a slot assigned,
2497          *            so that it gets rebuilt based on bitmap
2498          *  write_error - sets WriteErrorSeen
2499          *  -write_error - clears WriteErrorSeen
2500          */
2501         int err = -EINVAL;
2502         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2503                 md_error(rdev->mddev, rdev);
2504                 if (test_bit(Faulty, &rdev->flags))
2505                         err = 0;
2506                 else
2507                         err = -EBUSY;
2508         } else if (cmd_match(buf, "remove")) {
2509                 if (rdev->raid_disk >= 0)
2510                         err = -EBUSY;
2511                 else {
2512                         struct mddev *mddev = rdev->mddev;
2513                         if (mddev_is_clustered(mddev))
2514                                 md_cluster_ops->remove_disk(mddev, rdev);
2515                         md_kick_rdev_from_array(rdev);
2516                         if (mddev_is_clustered(mddev))
2517                                 md_cluster_ops->metadata_update_start(mddev);
2518                         if (mddev->pers)
2519                                 md_update_sb(mddev, 1);
2520                         md_new_event(mddev);
2521                         if (mddev_is_clustered(mddev))
2522                                 md_cluster_ops->metadata_update_finish(mddev);
2523                         err = 0;
2524                 }
2525         } else if (cmd_match(buf, "writemostly")) {
2526                 set_bit(WriteMostly, &rdev->flags);
2527                 err = 0;
2528         } else if (cmd_match(buf, "-writemostly")) {
2529                 clear_bit(WriteMostly, &rdev->flags);
2530                 err = 0;
2531         } else if (cmd_match(buf, "blocked")) {
2532                 set_bit(Blocked, &rdev->flags);
2533                 err = 0;
2534         } else if (cmd_match(buf, "-blocked")) {
2535                 if (!test_bit(Faulty, &rdev->flags) &&
2536                     rdev->badblocks.unacked_exist) {
2537                         /* metadata handler doesn't understand badblocks,
2538                          * so we need to fail the device
2539                          */
2540                         md_error(rdev->mddev, rdev);
2541                 }
2542                 clear_bit(Blocked, &rdev->flags);
2543                 clear_bit(BlockedBadBlocks, &rdev->flags);
2544                 wake_up(&rdev->blocked_wait);
2545                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2546                 md_wakeup_thread(rdev->mddev->thread);
2547
2548                 err = 0;
2549         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2550                 set_bit(In_sync, &rdev->flags);
2551                 err = 0;
2552         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
2553                 if (rdev->mddev->pers == NULL) {
2554                         clear_bit(In_sync, &rdev->flags);
2555                         rdev->saved_raid_disk = rdev->raid_disk;
2556                         rdev->raid_disk = -1;
2557                         err = 0;
2558                 }
2559         } else if (cmd_match(buf, "write_error")) {
2560                 set_bit(WriteErrorSeen, &rdev->flags);
2561                 err = 0;
2562         } else if (cmd_match(buf, "-write_error")) {
2563                 clear_bit(WriteErrorSeen, &rdev->flags);
2564                 err = 0;
2565         } else if (cmd_match(buf, "want_replacement")) {
2566                 /* Any non-spare device that is not a replacement can
2567                  * become want_replacement at any time, but we then need to
2568                  * check if recovery is needed.
2569                  */
2570                 if (rdev->raid_disk >= 0 &&
2571                     !test_bit(Replacement, &rdev->flags))
2572                         set_bit(WantReplacement, &rdev->flags);
2573                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2574                 md_wakeup_thread(rdev->mddev->thread);
2575                 err = 0;
2576         } else if (cmd_match(buf, "-want_replacement")) {
2577                 /* Clearing 'want_replacement' is always allowed.
2578                  * Once replacement starts it is too late though.
2579                  */
2580                 err = 0;
2581                 clear_bit(WantReplacement, &rdev->flags);
2582         } else if (cmd_match(buf, "replacement")) {
2583                 /* Can only set a device as a replacement when array has not
2584                  * yet been started.  Once running, replacement is automatic
2585                  * from spares, or by assigning 'slot'.
2586                  */
2587                 if (rdev->mddev->pers)
2588                         err = -EBUSY;
2589                 else {
2590                         set_bit(Replacement, &rdev->flags);
2591                         err = 0;
2592                 }
2593         } else if (cmd_match(buf, "-replacement")) {
2594                 /* Similarly, can only clear Replacement before start */
2595                 if (rdev->mddev->pers)
2596                         err = -EBUSY;
2597                 else {
2598                         clear_bit(Replacement, &rdev->flags);
2599                         err = 0;
2600                 }
2601         } else if (cmd_match(buf, "re-add")) {
2602                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2603                         /* clear_bit is performed _after_ all the devices
2604                          * have their local Faulty bit cleared. If any writes
2605                          * happen in the meantime in the local node, they
2606                          * will land in the local bitmap, which will be synced
2607                          * by this node eventually
2608                          */
2609                         if (!mddev_is_clustered(rdev->mddev) ||
2610                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2611                                 clear_bit(Faulty, &rdev->flags);
2612                                 err = add_bound_rdev(rdev);
2613                         }
2614                 } else
2615                         err = -EBUSY;
2616         }
2617         if (!err)
2618                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2619         return err ? err : len;
2620 }
2621 static struct rdev_sysfs_entry rdev_state =
2622 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2623
2624 static ssize_t
2625 errors_show(struct md_rdev *rdev, char *page)
2626 {
2627         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2628 }
2629
2630 static ssize_t
2631 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2632 {
2633         char *e;
2634         unsigned long n = simple_strtoul(buf, &e, 10);
2635         if (*buf && (*e == 0 || *e == '\n')) {
2636                 atomic_set(&rdev->corrected_errors, n);
2637                 return len;
2638         }
2639         return -EINVAL;
2640 }
2641 static struct rdev_sysfs_entry rdev_errors =
2642 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2643
2644 static ssize_t
2645 slot_show(struct md_rdev *rdev, char *page)
2646 {
2647         if (rdev->raid_disk < 0)
2648                 return sprintf(page, "none\n");
2649         else
2650                 return sprintf(page, "%d\n", rdev->raid_disk);
2651 }
2652
2653 static ssize_t
2654 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2655 {
2656         char *e;
2657         int err;
2658         int slot = simple_strtoul(buf, &e, 10);
2659         if (strncmp(buf, "none", 4) == 0)
2660                 slot = -1;
2661         else if (e == buf || (*e && *e != '\n'))
2662                 return -EINVAL;
2663         if (rdev->mddev->pers && slot == -1) {
2664                 /* Setting 'slot' on an active array requires also
2665                  * updating the 'rd%d' link, and communicating
2666                  * with the personality with ->hot_*_disk.
2667                  * For now we only support removing
2668                  * failed/spare devices.  This normally happens automatically,
2669                  * but not when the metadata is externally managed.
2670                  */
2671                 if (rdev->raid_disk == -1)
2672                         return -EEXIST;
2673                 /* personality does all needed checks */
2674                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2675                         return -EINVAL;
2676                 clear_bit(Blocked, &rdev->flags);
2677                 remove_and_add_spares(rdev->mddev, rdev);
2678                 if (rdev->raid_disk >= 0)
2679                         return -EBUSY;
2680                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2681                 md_wakeup_thread(rdev->mddev->thread);
2682         } else if (rdev->mddev->pers) {
2683                 /* Activating a spare .. or possibly reactivating
2684                  * if we ever get bitmaps working here.
2685                  */
2686
2687                 if (rdev->raid_disk != -1)
2688                         return -EBUSY;
2689
2690                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2691                         return -EBUSY;
2692
2693                 if (rdev->mddev->pers->hot_add_disk == NULL)
2694                         return -EINVAL;
2695
2696                 if (slot >= rdev->mddev->raid_disks &&
2697                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2698                         return -ENOSPC;
2699
2700                 rdev->raid_disk = slot;
2701                 if (test_bit(In_sync, &rdev->flags))
2702                         rdev->saved_raid_disk = slot;
2703                 else
2704                         rdev->saved_raid_disk = -1;
2705                 clear_bit(In_sync, &rdev->flags);
2706                 clear_bit(Bitmap_sync, &rdev->flags);
2707                 err = rdev->mddev->pers->
2708                         hot_add_disk(rdev->mddev, rdev);
2709                 if (err) {
2710                         rdev->raid_disk = -1;
2711                         return err;
2712                 } else
2713                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2714                 if (sysfs_link_rdev(rdev->mddev, rdev))
2715                         /* failure here is OK */;
2716                 /* don't wakeup anyone, leave that to userspace. */
2717         } else {
2718                 if (slot >= rdev->mddev->raid_disks &&
2719                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2720                         return -ENOSPC;
2721                 rdev->raid_disk = slot;
2722                 /* assume it is working */
2723                 clear_bit(Faulty, &rdev->flags);
2724                 clear_bit(WriteMostly, &rdev->flags);
2725                 set_bit(In_sync, &rdev->flags);
2726                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2727         }
2728         return len;
2729 }
2730
2731 static struct rdev_sysfs_entry rdev_slot =
2732 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
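     /*
      * Usage sketch (paths as described in Documentation/md.txt; the
      * device names are illustrative only): on an active array,
      *
      *      echo none > /sys/block/md0/md/dev-sda1/slot
      *
      * hot-removes a failed or spare member, while
      *
      *      echo 2 > /sys/block/md0/md/dev-sda1/slot
      *
      * activates the device in raid slot 2.  On an inactive array the
      * write simply records the slot for later assembly.
      */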
2733
2734 static ssize_t
2735 offset_show(struct md_rdev *rdev, char *page)
2736 {
2737         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2738 }
2739
2740 static ssize_t
2741 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2742 {
2743         unsigned long long offset;
2744         if (kstrtoull(buf, 10, &offset) < 0)
2745                 return -EINVAL;
2746         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2747                 return -EBUSY;
2748         if (rdev->sectors && rdev->mddev->external)
2749                 /* Must set offset before size, so overlap checks
2750                  * can be sane */
2751                 return -EBUSY;
2752         rdev->data_offset = offset;
2753         rdev->new_data_offset = offset;
2754         return len;
2755 }
2756
2757 static struct rdev_sysfs_entry rdev_offset =
2758 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
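     /* 'offset' is reported and accepted in 512-byte sectors.  As the
      * -EBUSY checks above enforce, it can only be changed while the
      * device is not active in a started array (and, for external
      * metadata, before a size has been set).
      */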
2759
2760 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2761 {
2762         return sprintf(page, "%llu\n",
2763                        (unsigned long long)rdev->new_data_offset);
2764 }
2765
2766 static ssize_t new_offset_store(struct md_rdev *rdev,
2767                                 const char *buf, size_t len)
2768 {
2769         unsigned long long new_offset;
2770         struct mddev *mddev = rdev->mddev;
2771
2772         if (kstrtoull(buf, 10, &new_offset) < 0)
2773                 return -EINVAL;
2774
2775         if (mddev->sync_thread ||
2776             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2777                 return -EBUSY;
2778         if (new_offset == rdev->data_offset)
2779                 /* reset is always permitted */
2780                 ;
2781         else if (new_offset > rdev->data_offset) {
2782                 /* must not push array size beyond rdev_sectors */
2783                 if (new_offset - rdev->data_offset
2784                     + mddev->dev_sectors > rdev->sectors)
2785                         return -E2BIG;
2786         }
2787         /* Metadata worries about other space details. */
2788
2789         /* decreasing the offset is inconsistent with a backwards
2790          * reshape.
2791          */
2792         if (new_offset < rdev->data_offset &&
2793             mddev->reshape_backwards)
2794                 return -EINVAL;
2795         /* Increasing offset is inconsistent with forwards
2796          * reshape.  reshape_direction should be set to
2797          * 'backwards' first.
2798          */
2799         if (new_offset > rdev->data_offset &&
2800             !mddev->reshape_backwards)
2801                 return -EINVAL;
2802
2803         if (mddev->pers && mddev->persistent &&
2804             !super_types[mddev->major_version]
2805             .allow_new_offset(rdev, new_offset))
2806                 return -E2BIG;
2807         rdev->new_data_offset = new_offset;
2808         if (new_offset > rdev->data_offset)
2809                 mddev->reshape_backwards = 1;
2810         else if (new_offset < rdev->data_offset)
2811                 mddev->reshape_backwards = 0;
2812
2813         return len;
2814 }
2815 static struct rdev_sysfs_entry rdev_new_offset =
2816 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
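     /* Writing 'new_offset' implicitly chooses the reshape direction:
      * raising the offset requires the reshape to run backwards (from
      * the end of the devices towards the start), lowering it requires
      * a forwards reshape, and the store above updates
      * mddev->reshape_backwards to match.
      */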
2817
2818 static ssize_t
2819 rdev_size_show(struct md_rdev *rdev, char *page)
2820 {
2821         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2822 }
2823
2824 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2825 {
2826         /* check if two start/length pairs overlap */
2827         if (s1+l1 <= s2)
2828                 return 0;
2829         if (s2+l2 <= s1)
2830                 return 0;
2831         return 1;
2832 }
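     /* Ranges that merely touch do not overlap: overlaps(0, 100, 100, 50)
      * is 0, while overlaps(0, 100, 99, 10) is 1 because sector 99 lies
      * in both ranges.
      */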
2833
2834 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2835 {
2836         unsigned long long blocks;
2837         sector_t new;
2838
2839         if (kstrtoull(buf, 10, &blocks) < 0)
2840                 return -EINVAL;
2841
2842         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2843                 return -EINVAL; /* sector conversion overflow */
2844
2845         new = blocks * 2;
2846         if (new != blocks * 2)
2847                 return -EINVAL; /* unsigned long long to sector_t overflow */
2848
2849         *sectors = new;
2850         return 0;
2851 }
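     /* 'blocks' is in 1K units while sectors are 512 bytes, hence the
      * doubling: strict_blocks_to_sectors("4", &s) stores s == 8.  The
      * two checks above reject values whose doubling would overflow
      * unsigned long long or be truncated by a 32-bit sector_t.
      */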
2852
2853 static ssize_t
2854 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2855 {
2856         struct mddev *my_mddev = rdev->mddev;
2857         sector_t oldsectors = rdev->sectors;
2858         sector_t sectors;
2859
2860         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2861                 return -EINVAL;
2862         if (rdev->data_offset != rdev->new_data_offset)
2863                 return -EINVAL; /* too confusing */
2864         if (my_mddev->pers && rdev->raid_disk >= 0) {
2865                 if (my_mddev->persistent) {
2866                         sectors = super_types[my_mddev->major_version].
2867                                 rdev_size_change(rdev, sectors);
2868                         if (!sectors)
2869                                 return -EBUSY;
2870                 } else if (!sectors)
2871                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2872                                 rdev->data_offset;
2873                 if (!my_mddev->pers->resize)
2874                         /* Cannot change size for RAID0 or Linear etc */
2875                         return -EINVAL;
2876         }
2877         if (sectors < my_mddev->dev_sectors)
2878                 return -EINVAL; /* component must fit device */
2879
2880         rdev->sectors = sectors;
2881         if (sectors > oldsectors && my_mddev->external) {
2882                 /* Need to check that all other rdevs with the same
2883                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2884                  * the rdev lists safely.
2885                  * This check does not provide a hard guarantee, it
2886                  * just helps avoid dangerous mistakes.
2887                  */
2888                 struct mddev *mddev;
2889                 int overlap = 0;
2890                 struct list_head *tmp;
2891
2892                 rcu_read_lock();
2893                 for_each_mddev(mddev, tmp) {
2894                         struct md_rdev *rdev2;
2895
2896                         rdev_for_each(rdev2, mddev)
2897                                 if (rdev->bdev == rdev2->bdev &&
2898                                     rdev != rdev2 &&
2899                                     overlaps(rdev->data_offset, rdev->sectors,
2900                                              rdev2->data_offset,
2901                                              rdev2->sectors)) {
2902                                         overlap = 1;
2903                                         break;
2904                                 }
2905                         if (overlap) {
2906                                 mddev_put(mddev);
2907                                 break;
2908                         }
2909                 }
2910                 rcu_read_unlock();
2911                 if (overlap) {
2912                         /* Someone else could have slipped in a size
2913                          * change here, but doing so is just silly.
2914                          * We put oldsectors back because we *know* it is
2915                          * safe, and trust userspace not to race with
2916                          * itself
2917                          */
2918                         rdev->sectors = oldsectors;
2919                         return -EBUSY;
2920                 }
2921         }
2922         return len;
2923 }
2924
2925 static struct rdev_sysfs_entry rdev_size =
2926 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
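     /* 'size' is presented in kibibytes, i.e. sectors / 2.  Growing an
      * rdev on an array with externally-managed metadata triggers the
      * overlap scan in rdev_size_store() before the new size is kept.
      */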
2927
2928 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
2929 {
2930         unsigned long long recovery_start = rdev->recovery_offset;
2931
2932         if (test_bit(In_sync, &rdev->flags) ||
2933             recovery_start == MaxSector)
2934                 return sprintf(page, "none\n");
2935
2936         return sprintf(page, "%llu\n", recovery_start);
2937 }
2938
2939 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
2940 {
2941         unsigned long long recovery_start;
2942
2943         if (cmd_match(buf, "none"))
2944                 recovery_start = MaxSector;
2945         else if (kstrtoull(buf, 10, &recovery_start))
2946                 return -EINVAL;
2947
2948         if (rdev->mddev->pers &&
2949             rdev->raid_disk >= 0)
2950                 return -EBUSY;
2951
2952         rdev->recovery_offset = recovery_start;
2953         if (recovery_start == MaxSector)
2954                 set_bit(In_sync, &rdev->flags);
2955         else
2956                 clear_bit(In_sync, &rdev->flags);
2957         return len;
2958 }
2959
2960 static struct rdev_sysfs_entry rdev_recovery_start =
2961 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
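     /* Writing "none" marks the device as fully recovered (In_sync); a
      * numeric value records, in sectors, how far recovery has
      * progressed and clears In_sync.  Either form is only accepted
      * while the device is not active in a started array.
      */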
2962
2963 static ssize_t
2964 badblocks_show(struct badblocks *bb, char *page, int unack);
2965 static ssize_t
2966 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
2967
2968 static ssize_t bb_show(struct md_rdev *rdev, char *page)
2969 {
2970         return badblocks_show(&rdev->badblocks, page, 0);
2971 }
2972 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
2973 {
2974         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
2975         /* Maybe that ack was all we needed */
2976         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
2977                 wake_up(&rdev->blocked_wait);
2978         return rv;
2979 }
2980 static struct rdev_sysfs_entry rdev_bad_blocks =
2981 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
2982
2983 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
2984 {
2985         return badblocks_show(&rdev->badblocks, page, 1);
2986 }
2987 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
2988 {
2989         return badblocks_store(&rdev->badblocks, page, len, 1);
2990 }
2991 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
2992 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
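     /* Both attributes list one "sector length" pair per line.  Writing
      * such a pair records a bad range; a write via 'bad_blocks' also
      * acknowledges it, which is why bb_store() may need to wake anyone
      * waiting on BlockedBadBlocks.
      */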
2993
2994 static struct attribute *rdev_default_attrs[] = {
2995         &rdev_state.attr,
2996         &rdev_errors.attr,
2997         &rdev_slot.attr,
2998         &rdev_offset.attr,
2999         &rdev_new_offset.attr,
3000         &rdev_size.attr,
3001         &rdev_recovery_start.attr,
3002         &rdev_bad_blocks.attr,
3003         &rdev_unack_bad_blocks.attr,
3004         NULL,
3005 };
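     /* Each of these files appears per member device as
      * /sys/block/mdX/md/dev-<bdev>/<attribute>; see Documentation/md.txt.
      */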
3006 static ssize_t
3007 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3008 {
3009         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3010         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3011
3012         if (!entry->show)
3013                 return -EIO;
3014         if (!rdev->mddev)
3015                 return -EBUSY;
3016         return entry->show(rdev, page);
3017 }
3018
3019 static ssize_t
3020 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3021               const char *page, size_t length)
3022 {
3023         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3024         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3025         ssize_t rv;
3026         struct mddev *mddev = rdev->mddev;
3027
3028         if (!entry->store)
3029                 return -EIO;
3030         if (!capable(CAP_SYS_ADMIN))
3031                 return -EACCES;
3032         rv = mddev ? mddev_lock(mddev) : -EBUSY;
3033         if (!rv) {
3034                 if (rdev->mddev == NULL)
3035                         rv = -EBUSY;
3036                 else
3037                         rv = entry->store(rdev, page, length);
3038                 mddev_unlock(mddev);
3039         }
3040         return rv;
3041 }
3042
3043 static void rdev_free(struct kobject *ko)
3044 {
3045         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3046         kfree(rdev);
3047 }
3048 static const struct sysfs_ops rdev_sysfs_ops = {
3049         .show           = rdev_attr_show,
3050         .store          = rdev_attr_store,
3051 };
3052 static struct kobj_type rdev_ktype = {
3053         .release        = rdev_free,
3054         .sysfs_ops      = &rdev_sysfs_ops,
3055         .default_attrs  = rdev_default_attrs,
3056 };
3057
3058 int md_rdev_init(struct md_rdev *rdev)
3059 {
3060         rdev->desc_nr = -1;
3061         rdev->saved_raid_disk = -1;
3062         rdev->raid_disk = -1;
3063         rdev->flags = 0;
3064         rdev->data_offset = 0;
3065         rdev->new_data_offset = 0;
3066         rdev->sb_events = 0;
3067         rdev->last_read_error.tv_sec  = 0;
3068         rdev->last_read_error.tv_nsec = 0;
3069         rdev->sb_loaded = 0;
3070         rdev->bb_page = NULL;
3071         atomic_set(&rdev->nr_pending, 0);
3072         atomic_set(&rdev->read_errors, 0);
3073         atomic_set(&rdev->corrected_errors, 0);
3074
3075         INIT_LIST_HEAD(&rdev->same_set);
3076         init_waitqueue_head(&rdev->blocked_wait);
3077
3078         /* Add space to store bad block list.
3079          * This reserves the space even on arrays where it cannot
3080          * be used - I wonder if that matters
3081          */
3082         rdev->badblocks.count = 0;
3083         rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
3084         rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
3085         seqlock_init(&rdev->badblocks.lock);
3086         if (rdev->badblocks.page == NULL)
3087                 return -ENOMEM;
3088
3089         return 0;
3090 }
3091 EXPORT_SYMBOL_GPL(md_rdev_init);
3092 /*
3093  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3094  *
3095  * mark the device faulty if:
3096  *
3097  *   - the device is nonexistent (zero size)
3098  *   - the device has no valid superblock
3099  *
3100  * a faulty rdev _never_ has rdev->sb set.
3101  */
3102 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3103 {
3104         char b[BDEVNAME_SIZE];
3105         int err;
3106         struct md_rdev *rdev;
3107         sector_t size;
3108
3109         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3110         if (!rdev) {
3111                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3112                 return ERR_PTR(-ENOMEM);
3113         }
3114
3115         err = md_rdev_init(rdev);
3116         if (err)
3117                 goto abort_free;
3118         err = alloc_disk_sb(rdev);
3119         if (err)
3120                 goto abort_free;
3121
3122         err = lock_rdev(rdev, newdev, super_format == -2);
3123         if (err)
3124                 goto abort_free;
3125
3126         kobject_init(&rdev->kobj, &rdev_ktype);
3127
3128         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3129         if (!size) {
3130                 printk(KERN_WARNING
3131                         "md: %s has zero or unknown size, marking faulty!\n",
3132                         bdevname(rdev->bdev,b));
3133                 err = -EINVAL;
3134                 goto abort_free;
3135         }
3136
3137         if (super_format >= 0) {
3138                 err = super_types[super_format].
3139                         load_super(rdev, NULL, super_minor);
3140                 if (err == -EINVAL) {
3141                         printk(KERN_WARNING
3142                                 "md: %s does not have a valid v%d.%d "
3143                                "superblock, not importing!\n",
3144                                 bdevname(rdev->bdev,b),
3145                                super_format, super_minor);
3146                         goto abort_free;
3147                 }
3148                 if (err < 0) {
3149                         printk(KERN_WARNING
3150                                 "md: could not read %s's sb, not importing!\n",
3151                                 bdevname(rdev->bdev,b));
3152                         goto abort_free;
3153                 }
3154         }
3155
3156         return rdev;
3157
3158 abort_free:
3159         if (rdev->bdev)
3160                 unlock_rdev(rdev);
3161         md_rdev_clear(rdev);
3162         kfree(rdev);
3163         return ERR_PTR(err);
3164 }
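     /* md_import_device() never returns NULL; callers must test the
      * result with IS_ERR().  On success the new rdev keeps the block
      * device claimed (via lock_rdev()) until unlock_rdev() releases it.
      */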
3165
3166 /*
3167  * Check a full RAID array for plausibility
3168  */
3169
3170 static void analyze_sbs(struct mddev *mddev)
3171 {
3172         int i;
3173         struct md_rdev *rdev, *freshest, *tmp;
3174         char b[BDEVNAME_SIZE];
3175
3176         freshest = NULL;
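             /* load_super() returns 1 if the superblock is newer than
              * 'freshest', 0 if it is acceptable but not newer, and an
              * error for anything unusable.
              */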
3177         rdev_for_each_safe(rdev, tmp, mddev)
3178                 switch (super_types[mddev->major_version].
3179                         load_super(rdev, freshest, mddev->minor_version)) {
3180                 case 1:
3181                         freshest = rdev;
3182                         break;
3183                 case 0:
3184                         break;
3185                 default:
3186                         printk(KERN_ERR
3187                                 "md: fatal superblock inconsistency in %s"
3188                                 " -- removing from array\n",
3189                                 bdevname(rdev->bdev,b));
3190                         md_kick_rdev_from_array(rdev);
3191                 }
3192
3193         super_types[mddev->major_version].
3194                 validate_super(mddev, freshest);
3195
3196         i = 0;
3197         rdev_for_each_safe(rdev, tmp, mddev) {
3198                 if (mddev->max_disks &&
3199                     (rdev->desc_nr >= mddev->max_disks ||
3200                      i > mddev->max_disks)) {
3201                         printk(KERN_WARNING
3202                                "md: %s: %s: only %d devices permitted\n",
3203                                mdname(mddev), bdevname(rdev->bdev, b),
3204                                mddev->max_disks);
3205                         md_kick_rdev_from_array(rdev);
3206                         continue;
3207                 }
3208                 if (rdev != freshest) {
3209                         if (super_types[mddev->major_version].
3210                             validate_super(mddev, rdev)) {
3211                                 printk(KERN_WARNING "md: kicking non-fresh %s"
3212                                         " from array!\n",
3213                                         bdevname(rdev->bdev,b));
3214                                 md_kick_rdev_from_array(rdev);
3215                                 continue;
3216                         }
3217                         /* No device should have a Candidate flag
3218                          * when reading devices
3219                          */
3220                         if (test_bit(Candidate, &rdev->flags)) {
3221                                 pr_info("md: kicking Cluster Candidate %s from array!\n",
3222                                         bdevname(rdev->bdev, b));
3223                                 md_kick_rdev_from_array(rdev);
3224                         }
3225                 }
3226                 if (mddev->level == LEVEL_MULTIPATH) {
3227                         rdev->desc_nr = i++;
3228                         rdev->raid_disk = rdev->desc_nr;
3229                         set_bit(In_sync, &rdev->flags);
3230                 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
3231                         rdev->raid_disk = -1;
3232                         clear_bit(In_sync, &rdev->flags);
3233                 }
3234         }
3235 }
3236
3237 /* Read a fixed-point number.
3238  * Numbers in sysfs attributes should be in "standard" units where
3239  * possible, so time should be in seconds.
3240  * However, internally we use a much smaller unit such as
3241  * milliseconds or jiffies.
3242  * This function takes a decimal number with a possible fractional
3243  * component, and produces an integer which is the result of
3244  * multiplying that number by 10^'scale',
3245  * all without any floating-point arithmetic.
3246  */
3247 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3248 {
3249         unsigned long result = 0;
3250         long decimals = -1;
3251         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3252                 if (*cp == '.')
3253                         decimals = 0;
3254                 else if (decimals < scale) {
3255                         unsigned int value;
3256                         value = *cp - '0';
3257                         result = result * 10 + value;
3258                         if (decimals >= 0)
3259                                 decimals++;
3260                 }
3261                 cp++;
3262         }
3263         if (*cp == '\n')
3264                 cp++;
3265         if (*cp)
3266                 return -EINVAL;
3267         if (decimals < 0)
3268                 decimals = 0;
3269         while (decimals < scale) {
3270                 result *= 10;
3271                 decimals++;
3272         }
3273         *res = result;
3274         return 0;
3275 }
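     /* For example, strict_strtoul_scaled("1.25", &res, 3) stores 1250
      * (1.25 seconds expressed as milliseconds).  Fractional digits
      * beyond 'scale' are truncated, not rounded.
      */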
3276
3277 static void md_safemode_timeout(unsigned long data);
3278
3279 static ssize_t
3280 safe_delay_show(struct mddev *mddev, char *page)
3281 {
3282         int msec = (mddev->safemode_delay*1000)/HZ;
3283         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3284 }
3285 static ssize_t
3286 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3287 {
3288         unsigned long msec;
3289
3290         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3291                 return -EINVAL;
3292         if (msec == 0)
3293                 mddev->safemode_delay = 0;
3294         else {
3295                 unsigned long old_delay = mddev->safemode_delay;
3296                 unsigned long new_delay = (msec*HZ)/1000;
3297
3298                 if (new_delay == 0)
3299                         new_delay = 1;
3300                 mddev->safemode_delay = new_delay;
3301                 if (new_delay < old_delay || old_delay == 0)
3302                         mod_timer(&mddev->safemode_timer, jiffies+1);
3303         }
3304         return len;
3305 }
3306 static struct md_sysfs_entry md_safe_delay =
3307 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
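     /* safe_mode_delay is exposed in seconds with millisecond resolution
      * and converted to jiffies internally, so e.g. writing "0.200" asks
      * for the array to be marked clean 200ms after the last write
      * completes.
      */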
3308
3309 static ssize_t
3310 level_show(struct mddev *mddev, char *page)
3311 {
3312         struct md_personality *p;
3313         int ret;
3314         spin_lock(&mddev->lock);
3315         p = mddev->pers;
3316         if (p)
3317                 ret = sprintf(page, "%s\n", p->name);
3318         else if (mddev->clevel[0])
3319                 ret = sprintf(page, "%s\n", mddev->clevel);
3320         else if (mddev->level != LEVEL_NONE)
3321                 ret = sprintf(page, "%d\n", mddev->level);
3322         else
3323                 ret = 0;
3324         spin_unlock(&mddev->lock);
3325         return ret;
3326 }
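     /* level_show() reports whichever is known: the active personality
      * name (e.g. "raid5"), the level requested for a not-yet-started
      * array, or nothing if no level has been configured at all.
      */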
3327
3328 static ssize_t
3329 level_store(struct mddev *mddev, const char *buf, size_t len)
3330 {
3331         char clevel[16];
3332         ssize_t rv;
3333         size_t slen = len;
3334         struct md_personality *pers, *oldpers;
3335         long level;
3336         void *priv, *oldpriv;
3337         struct md_rdev *rdev;
3338
3339         if (slen == 0 || slen >= sizeof(clevel))
3340                 return -EINVAL;
3341
3342         rv = mddev_lock(mddev);
3343         if (rv)
3344                 return rv;
3345
3346         if (mddev->pers == NULL) {
3347                 strncpy(mddev->clevel, buf, slen);
3348                 if (mddev->clevel[slen-1] == '\n')
3349                         slen--;
3350                 mddev->clevel[slen] = 0;
3351                 mddev->level = LEVEL_NONE;
3352                 rv = len;
3353                 goto out_unlock;
3354         }
3355         rv = -EROFS;
3356         if (mddev->ro)
3357                 goto out_unlock;
3358
3359         /* request to change the personality.  Need to ensure:
3360          *  - array is not engaged in resync/recovery/reshape
3361          *  - old personality can be suspended
3362          *  - new personality can take over the array.
3363          */
3364
3365         rv = -EBUSY;
3366         if (mddev->sync_thread ||
3367             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3368             mddev->reshape_position != MaxSector ||
3369             mddev->sysfs_active)
3370                 goto out_unlock;
3371
3372         rv = -EINVAL;
3373         if (!mddev->pers->quiesce) {
3374                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3375                        mdname(mddev), mddev->pers->name);
3376                 goto out_unlock;
3377         }
3378
3379         /* Now find the new personality */
3380         strncpy(clevel, buf, slen);
3381         if (clevel[slen-1] == '\n')
3382                 slen--;
3383         clevel[slen] = 0;
3384         if (kstrtol(clevel, 10, &level))
3385                 level = LEVEL_NONE;
3386
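             /* Personality modules carry both "md-<name>" and
              * "md-level-<number>" aliases (e.g. raid456 answers to both
              * md-raid5 and md-level-5), so try each spelling in turn.
              */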
3387         if (request_module("md-%s", clevel) != 0)
3388                 request_module("md-level-%s", clevel);
3389         spin_lock(&pers_lock);
3390         pers = find_pers(level, clevel);
3391         if (!pers || !try_module_get(pers->owner)) {
3392                 spin_unlock(&pers_lock);
3393                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3394                 rv = -EINVAL;
3395                 goto out_unlock;
3396         }
3397         spin_unlock(&pers_lock);
3398
3399         if (pers == mddev->pers) {
3400                 /* Nothing to do! */
3401                 module_put(pers->owner);
3402                 rv = len;
3403                 goto out_unlock;
3404         }
3405         if (!pers->takeover) {
3406                 module_put(pers->owner);
3407                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3408                        mdname(mddev), clevel);
3409                 rv = -EINVAL;
3410                 goto out_unlock;
3411         }
3412
3413         rdev_for_each(rdev, mddev)
3414                 rdev->new_raid_disk = rdev->raid_disk;
3415
3416         /* ->takeover must set new_* and/or delta_disks
3417          * if it succeeds, and may set them when it fails.
3418          */
3419         priv = pers->takeover(mddev);
3420         if (IS_ERR(priv)) {
3421                 mddev->new_level = mddev->level;
3422                 mddev->new_layout = mddev->layout;
3423                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3424                 mddev->raid_disks -= mddev->delta_disks;
3425                 mddev->delta_disks = 0;
3426                 mddev->reshape_backwards = 0;
3427                 module_put(pers->owner);
3428                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3429                        mdname(mddev), clevel);
3430                 rv = PTR_ERR(priv);
3431                 goto out_unlock;
3432         }
3433
3434         /* Looks like we have a winner */
3435         mddev_suspend(mddev);
3436         mddev_detach(mddev);
3437
3438         spin_lock(&mddev->lock);
3439         oldpers = mddev->pers;
3440         oldpriv = mddev->private;
3441         mddev->pers = pers;
3442         mddev->private = priv;
3443         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3444         mddev->level = mddev->new_level;
3445         mddev->layout = mddev->new_layout;
3446         mddev->chunk_sectors = mddev->new_chunk_sectors;
3447         mddev->delta_disks = 0;
3448         mddev->reshape_backwards = 0;
3449         mddev->degraded = 0;
3450         spin_unlock(&mddev->lock);
3451
3452         if (oldpers->sync_request == NULL &&
3453             mddev->external) {
3454                 /* We are converting from a no-redundancy array
3455                  * to a redundancy array and metadata is managed
3456                  * externally so we need to be sure that writes
3457                  * won't block due to a need to transition
3458                  *      clean->dirty
3459                  * until external management is started.
3460                  */
3461                 mddev->in_sync = 0;
3462                 mddev->safemode_delay = 0;
3463                 mddev->safemode = 0;
3464         }
3465
3466         oldpers->free(mddev, oldpriv);
3467
3468         if (oldpers->sync_request == NULL &&
3469             pers->sync_request != NULL) {
3470                 /* need to add the md_redundancy_group */
3471                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3472                         printk(KERN_WARNING
3473                                "md: cannot register extra attributes for %s\n",
3474                                mdname(mddev));
3475                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3476         }
3477         if (oldpers->sync_request != NULL &&
3478             pers->sync_request == NULL) {
3479                 /* need to remove the md_redundancy_group */
3480                 if (mddev->to_remove == NULL)
3481                         mddev->to_remove = &md_redundancy_group;
3482         }
3483
3484         rdev_for_each(rdev, mddev) {
3485                 if (rdev->raid_disk < 0)
3486                         continue;
3487                 if (rdev->new_raid_disk >= mddev->raid_disks)
3488                         rdev->new_raid_disk = -1;
3489                 if (rdev->new_raid_disk == rdev->raid_disk)
3490                         continue;
3491                 sysfs_unlink_rdev(mddev, rdev);
3492         }
3493         rdev_for_each(rdev, mddev) {
3494                 if (rdev->raid_disk < 0)
3495                         continue;
3496                 if (rdev->new_raid_disk == rdev->raid_disk)
3497                         continue;
3498                 rdev->raid_disk = rdev->new_raid_disk;
3499                 if (rdev->raid_disk < 0)
3500                         clear_bit(In_sync, &rdev->flags);
3501                 else {
3502                         if (sysfs_link_rdev(mddev, rdev))
3503                                 printk(KERN_WARNING "md: cannot register rd%d"
3504                                        " for %s after level change\n",
3505                                        rdev->raid_disk, mdname(mddev));
3506                 }
3507