/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/
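
/*
 * Illustrative sketch of the convention above (not part of the driver;
 * the surrounding condition and the "metadata_incompatible" flag are
 * made up, while pr_err()/pr_warn() and mdname() are real md helpers):
 *
 *	if (err == -EIO)
 *		pr_err("md: %s: superblock write failed\n", mdname(mddev));
 *	else if (metadata_incompatible)
 *		pr_warn("md: %s: device has incompatible metadata\n",
 *			mdname(mddev));
 */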

#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
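
/*
 * Worked example (not in the code): with the default limit of 20, a
 * device that has accumulated 16 corrected read errors and then stays
 * error-free for three hours is treated as having 16 >> 3 = 2, so it
 * is again well clear of the ejection threshold.
 */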
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
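
/*
 * Example (sketch): raising the guaranteed resync speed to 50 MB/sec
 * from userspace, via either interface named above (values in KB/sec;
 * md0 stands in for whichever array is resyncing):
 *
 *	# echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *	# echo 50000 > /sys/block/md0/md/sync_speed_min
 */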

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

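/*
 * Example (sketch): creating an array through the race-free interface
 * mentioned above. The exact name is up to the administrator; "md_test"
 * here is made up:
 *
 *	# echo md_test > /sys/module/md_mod/parameters/new_array
 */
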
/* bio_alloc_mddev
 * like bio_alloc, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

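/*
 * Userspace sketch (assumes the usual mdstat semantics: the event is
 * reported as POLLPRI, and the file must be read to re-arm it):
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));	// consume current state, arm event
 *	poll(&pfd, 1, -1);		// returns when md_new_event() fires
 *	lseek(fd, 0, SEEK_SET);		// seek back and re-read to see why
 */
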
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns a reference
 * to the current mddev and must mddev_put() it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
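
/*
 * Usage sketch: counting arrays with the iterator above. The body runs
 * without all_mddevs_lock held but always with a reference on @mddev;
 * breaking out early leaves that reference for the caller to drop.
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *	int count = 0;
 *
 *	for_each_mddev(mddev, tmp)
 *		count++;
 */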

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;
	int cpu;

	blk_queue_split(q, &bio, q->bio_split);

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_error = -EROFS;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

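/*
 * Usage sketch: the pair above brackets a reconfiguration so no I/O is
 * in flight while the change is made. Calls nest (note the ++/--):
 *
 *	mddev_suspend(mddev);
 *	// ... quiesced: swap layout, personality data, etc. ...
 *	mddev_resume(mddev);
 */
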
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we re-take rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio);
	else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
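
/*
 * Usage sketch: a personality's make_request method typically hands
 * flush bios straight to md_flush_request() before anything else,
 * mirroring how the RAID personalities use it:
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 *		md_flush_request(mddev, bio);
 *		return;
 *	}
 */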

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

static void md_safemode_timeout(unsigned long data);

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
		    (unsigned long) mddev);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

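/*
 * Worked example (sketch): MD_NEW_SIZE_SECTORS() rounds the device size
 * down to a 64KiB (128-sector) boundary and then reserves the last
 * 64KiB for the v0.90 superblock, so a 1000005-sector device gives
 * 1000005 & ~127 = 999936, minus 128 -> sb_start = 999808.
 */
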
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_error) {
		pr_err("md: super_written gets error=%d\n", bio->bi_error);
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	atomic_inc(&rdev->nr_pending);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

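/*
 * Usage sketch: superblock updates are submitted per device and then
 * waited on collectively, retrying while a failfast write needs to be
 * redone (super_90_rdev_size_change() below follows this pattern):
 *
 *	do {
 *		md_super_write(mddev, rdev, rdev->sb_start,
 *			       rdev->sb_size, rdev->sb_page);
 *	} while (md_super_wait(mddev) < 0);
 */
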
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_error;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

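/*
 * Worked example: md_csum_fold(0x12345678)
 *	first fold:  0x5678 + 0x1234 = 0x68ac
 *	second fold: 0x68ac + 0x0000 = 0x68ac
 * The second pass only matters when the first addition carries past
 * 16 bits, e.g. 0xffff0001 -> 0x10000 -> 0x0001.
 */
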
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

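/*
 * Usage sketch: callers dispatch through a table of these handlers
 * indexed by the metadata major version (md.c defines a super_types[]
 * array of them further down; this snippet is illustrative only):
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *	if (err >= 0)
 *		err = super_types[mddev->major_version].
 *			validate_super(mddev, rdev);
 */
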
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	pr_warn("%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

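/*
 * Usage sketch: a personality without bitmap support rejects such an
 * array early in its run method ("example_run" is a made-up name;
 * raid0 follows this pattern):
 *
 *	static int example_run(struct mddev *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		...
 *	}
 */
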
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		pr_warn("md: invalid raid superblock magic on %s\n", b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		pr_warn("Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version, b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		pr_warn("md: invalid superblock checksum on %s\n", b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			pr_warn("md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			pr_warn("md: %s has same UUID but different superblock to %s\n",
				b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
	    sb->level >= 1)
		rdev->sectors = (sector_t)(2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		if (desc->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
		if (test_bit(FailFast, &rdev2->flags))
			d->state |= (1<<MD_DISK_FAILFAST);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
	    rdev->mddev->level >= 1)
		num_sectors = (sector_t)(2ULL << 32) - 2;
	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
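	/*
	 * Worked example (sketch) for minor_version 0: a device of
	 * 1000005 sectors gives sb_start = (1000005 - 16) & ~7 = 999984,
	 * i.e. between 8K and 12K from the end of the device, rounded
	 * down to a 4K (8-sector) boundary - matching the switch below.
	 */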
1409         switch(minor_version) {
1410         case 0:
1411                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1412                 sb_start -= 8*2;
1413                 sb_start &= ~(sector_t)(4*2-1);
1414                 break;
1415         case 1:
1416                 sb_start = 0;
1417                 break;
1418         case 2:
1419                 sb_start = 8;
1420                 break;
1421         default:
1422                 return -EINVAL;
1423         }
1424         rdev->sb_start = sb_start;
1425
1426         /* superblock is rarely larger than 1K, but it can be larger,
1427          * and it is safe to read 4k, so we do that
1428          */
1429         ret = read_disk_sb(rdev, 4096);
1430         if (ret) return ret;
1431
1432         sb = page_address(rdev->sb_page);
1433
1434         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1435             sb->major_version != cpu_to_le32(1) ||
1436             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1437             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1438             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1439                 return -EINVAL;
1440
1441         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1442                 pr_warn("md: invalid superblock checksum on %s\n",
1443                         bdevname(rdev->bdev,b));
1444                 return -EINVAL;
1445         }
1446         if (le64_to_cpu(sb->data_size) < 10) {
1447                 pr_warn("md: data_size too small on %s\n",
1448                         bdevname(rdev->bdev,b));
1449                 return -EINVAL;
1450         }
1451         if (sb->pad0 ||
1452             sb->pad3[0] ||
1453             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1454                 /* Some padding is non-zero, might be a new feature */
1455                 return -EINVAL;
1456
1457         rdev->preferred_minor = 0xffff;
1458         rdev->data_offset = le64_to_cpu(sb->data_offset);
1459         rdev->new_data_offset = rdev->data_offset;
1460         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1461             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1462                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1463         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1464
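             /* sb_size is the 256-byte header plus 2 bytes per possible
              * device, rounded up to the bdev's logical block size:
              * (x | bmask) + 1 is x rounded up to the next multiple of
              * bmask + 1.
              */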
1465         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1466         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1467         if (rdev->sb_size & bmask)
1468                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1469
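             /* With the superblock at the front (minor versions 1 and 2),
              * the data area must start beyond the superblock itself.
              */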
1470         if (minor_version
1471             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1472                 return -EINVAL;
1473         if (minor_version
1474             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1475                 return -EINVAL;
1476
1477         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1478                 rdev->desc_nr = -1;
1479         else
1480                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1481
1482         if (!rdev->bb_page) {
1483                 rdev->bb_page = alloc_page(GFP_KERNEL);
1484                 if (!rdev->bb_page)
1485                         return -ENOMEM;
1486         }
1487         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1488             rdev->badblocks.count == 0) {
1489                 /* need to load the bad block list.
1490                  * Currently we limit it to one page.
1491                  */
1492                 s32 offset;
1493                 sector_t bb_sector;
1494                 u64 *bbp;
1495                 int i;
1496                 int sectors = le16_to_cpu(sb->bblog_size);
1497                 if (sectors > (PAGE_SIZE / 512))
1498                         return -EINVAL;
1499                 offset = le32_to_cpu(sb->bblog_offset);
1500                 if (offset == 0)
1501                         return -EINVAL;
1502                 bb_sector = (long long)offset;
1503                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1504                                   rdev->bb_page, REQ_OP_READ, 0, true))
1505                         return -EIO;
1506                 bbp = (u64 *)page_address(rdev->bb_page);
1507                 rdev->badblocks.shift = sb->bblog_shift;
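             /* Each on-disk entry is a __le64 packing the first bad
              * sector in the upper 54 bits and the length in the low
              * 10 bits, both in units of (1 << bblog_shift) sectors;
              * an all-ones entry terminates the list.
              */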
1508                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1509                         u64 bb = le64_to_cpu(*bbp);
1510                         int count = bb & (0x3ff);
1511                         u64 sector = bb >> 10;
1512                         sector <<= sb->bblog_shift;
1513                         count <<= sb->bblog_shift;
1514                         if (bb + 1 == 0)
1515                                 break;
1516                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1517                                 return -EINVAL;
1518                 }
1519         } else if (sb->bblog_offset != 0)
1520                 rdev->badblocks.shift = 0;
1521
1522         if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) {
1523                 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1524                 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1525                 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1526         }
1527
1528         if (!refdev) {
1529                 ret = 1;
1530         } else {
1531                 __u64 ev1, ev2;
1532                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1533
1534                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1535                     sb->level != refsb->level ||
1536                     sb->layout != refsb->layout ||
1537                     sb->chunksize != refsb->chunksize) {
1538                         pr_warn("md: %s has strangely different superblock to %s\n",
1539                                 bdevname(rdev->bdev,b),
1540                                 bdevname(refdev->bdev,b2));
1541                         return -EINVAL;
1542                 }
1543                 ev1 = le64_to_cpu(sb->events);
1544                 ev2 = le64_to_cpu(refsb->events);
1545
1546                 if (ev1 > ev2)
1547                         ret = 1;
1548                 else
1549                         ret = 0;
1550         }
1551         if (minor_version) {
1552                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1553                 sectors -= rdev->data_offset;
1554         } else
1555                 sectors = rdev->sb_start;
1556         if (sectors < le64_to_cpu(sb->data_size))
1557                 return -EINVAL;
1558         rdev->sectors = le64_to_cpu(sb->data_size);
1559         return ret;
1560 }
1561
1562 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1563 {
1564         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1565         __u64 ev1 = le64_to_cpu(sb->events);
1566
1567         rdev->raid_disk = -1;
1568         clear_bit(Faulty, &rdev->flags);
1569         clear_bit(In_sync, &rdev->flags);
1570         clear_bit(Bitmap_sync, &rdev->flags);
1571         clear_bit(WriteMostly, &rdev->flags);
1572
1573         if (mddev->raid_disks == 0) {
1574                 mddev->major_version = 1;
1575                 mddev->patch_version = 0;
1576                 mddev->external = 0;
1577                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1578                 mddev->ctime = le64_to_cpu(sb->ctime);
1579                 mddev->utime = le64_to_cpu(sb->utime);
1580                 mddev->level = le32_to_cpu(sb->level);
1581                 mddev->clevel[0] = 0;
1582                 mddev->layout = le32_to_cpu(sb->layout);
1583                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1584                 mddev->dev_sectors = le64_to_cpu(sb->size);
1585                 mddev->events = ev1;
1586                 mddev->bitmap_info.offset = 0;
1587                 mddev->bitmap_info.space = 0;
1588                 /* Default location for the bitmap is 1K after the
1589                  * superblock, using up to 3K of space (4K total).
1590                  */
1591                 mddev->bitmap_info.default_offset = 1024 >> 9;
1592                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1593                 mddev->reshape_backwards = 0;
1594
1595                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1596                 memcpy(mddev->uuid, sb->set_uuid, 16);
1597
1598                 mddev->max_disks =  (4096-256)/2;
1599
1600                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1601                     mddev->bitmap_info.file == NULL) {
1602                         mddev->bitmap_info.offset =
1603                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1604                         /* Metadata doesn't record how much space is available.
1605                          * For 1.0, assume the bitmap can extend up to the
1606                          * superblock if placed before it, else 4K beyond it.
1607                          * For other minor versions, assume no change is possible.
1608                          */
1609                         if (mddev->minor_version > 0)
1610                                 mddev->bitmap_info.space = 0;
1611                         else if (mddev->bitmap_info.offset > 0)
1612                                 mddev->bitmap_info.space =
1613                                         8 - mddev->bitmap_info.offset;
1614                         else
1615                                 mddev->bitmap_info.space =
1616                                         -mddev->bitmap_info.offset;
1617                 }
1618
1619                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1620                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1621                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1622                         mddev->new_level = le32_to_cpu(sb->new_level);
1623                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1624                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1625                         if (mddev->delta_disks < 0 ||
1626                             (mddev->delta_disks == 0 &&
1627                              (le32_to_cpu(sb->feature_map)
1628                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1629                                 mddev->reshape_backwards = 1;
1630                 } else {
1631                         mddev->reshape_position = MaxSector;
1632                         mddev->delta_disks = 0;
1633                         mddev->new_level = mddev->level;
1634                         mddev->new_layout = mddev->layout;
1635                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1636                 }
1637
1638                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1639                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1640
1641                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) {
1642                         if (le32_to_cpu(sb->feature_map) &
1643                             (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1644                                 return -EINVAL;
1645                         set_bit(MD_HAS_PPL, &mddev->flags);
1646                 }
1647         } else if (mddev->pers == NULL) {
1648                 /* Insist on a good event counter while assembling, except for
1649                  * spares (which don't need an event count) */
1650                 ++ev1;
1651                 if (rdev->desc_nr >= 0 &&
1652                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1653                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1654                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1655                         if (ev1 < mddev->events)
1656                                 return -EINVAL;
1657         } else if (mddev->bitmap) {
1658                 /* If adding to array with a bitmap, then we can accept an
1659                  * older device, but not too old.
1660                  */
1661                 if (ev1 < mddev->bitmap->events_cleared)
1662                         return 0;
1663                 if (ev1 < mddev->events)
1664                         set_bit(Bitmap_sync, &rdev->flags);
1665         } else {
1666                 if (ev1 < mddev->events)
1667                         /* just a hot-add of a new device, leave raid_disk at -1 */
1668                         return 0;
1669         }
1670         if (mddev->level != LEVEL_MULTIPATH) {
1671                 int role;
1672                 if (rdev->desc_nr < 0 ||
1673                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1674                         role = MD_DISK_ROLE_SPARE;
1675                         rdev->desc_nr = -1;
1676                 } else
1677                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1678                 switch(role) {
1679                 case MD_DISK_ROLE_SPARE: /* spare */
1680                         break;
1681                 case MD_DISK_ROLE_FAULTY: /* faulty */
1682                         set_bit(Faulty, &rdev->flags);
1683                         break;
1684                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1685                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1686                                 /* journal device without journal feature */
1687                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1688                                 return -EINVAL;
1689                         }
1690                         set_bit(Journal, &rdev->flags);
1691                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1692                         rdev->raid_disk = 0;
1693                         break;
1694                 default:
1695                         rdev->saved_raid_disk = role;
1696                         if ((le32_to_cpu(sb->feature_map) &
1697                              MD_FEATURE_RECOVERY_OFFSET)) {
1698                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1699                                 if (!(le32_to_cpu(sb->feature_map) &
1700                                       MD_FEATURE_RECOVERY_BITMAP))
1701                                         rdev->saved_raid_disk = -1;
1702                         } else
1703                                 set_bit(In_sync, &rdev->flags);
1704                         rdev->raid_disk = role;
1705                         break;
1706                 }
1707                 if (sb->devflags & WriteMostly1)
1708                         set_bit(WriteMostly, &rdev->flags);
1709                 if (sb->devflags & FailFast1)
1710                         set_bit(FailFast, &rdev->flags);
1711                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1712                         set_bit(Replacement, &rdev->flags);
1713         } else /* MULTIPATH devices are always in sync */
1714                 set_bit(In_sync, &rdev->flags);
1715
1716         return 0;
1717 }
1718
1719 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1720 {
1721         struct mdp_superblock_1 *sb;
1722         struct md_rdev *rdev2;
1723         int max_dev, i;
1724         /* make rdev->sb match mddev and rdev data. */
1725
1726         sb = page_address(rdev->sb_page);
1727
1728         sb->feature_map = 0;
1729         sb->pad0 = 0;
1730         sb->recovery_offset = cpu_to_le64(0);
1731         memset(sb->pad3, 0, sizeof(sb->pad3));
1732
1733         sb->utime = cpu_to_le64((__u64)mddev->utime);
1734         sb->events = cpu_to_le64(mddev->events);
1735         if (mddev->in_sync)
1736                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1737         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1738                 sb->resync_offset = cpu_to_le64(MaxSector);
1739         else
1740                 sb->resync_offset = cpu_to_le64(0);
1741
1742         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1743
1744         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1745         sb->size = cpu_to_le64(mddev->dev_sectors);
1746         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1747         sb->level = cpu_to_le32(mddev->level);
1748         sb->layout = cpu_to_le32(mddev->layout);
1749         if (test_bit(FailFast, &rdev->flags))
1750                 sb->devflags |= FailFast1;
1751         else
1752                 sb->devflags &= ~FailFast1;
1753
1754         if (test_bit(WriteMostly, &rdev->flags))
1755                 sb->devflags |= WriteMostly1;
1756         else
1757                 sb->devflags &= ~WriteMostly1;
1758         sb->data_offset = cpu_to_le64(rdev->data_offset);
1759         sb->data_size = cpu_to_le64(rdev->sectors);
1760
1761         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1762                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1763                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1764         }
1765
1766         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1767             !test_bit(In_sync, &rdev->flags)) {
1768                 sb->feature_map |=
1769                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1770                 sb->recovery_offset =
1771                         cpu_to_le64(rdev->recovery_offset);
1772                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1773                         sb->feature_map |=
1774                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1775         }
1776         /* Note: recovery_offset and journal_tail share space  */
1777         if (test_bit(Journal, &rdev->flags))
1778                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1779         if (test_bit(Replacement, &rdev->flags))
1780                 sb->feature_map |=
1781                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1782
1783         if (mddev->reshape_position != MaxSector) {
1784                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1785                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1786                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1787                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1788                 sb->new_level = cpu_to_le32(mddev->new_level);
1789                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1790                 if (mddev->delta_disks == 0 &&
1791                     mddev->reshape_backwards)
1792                         sb->feature_map
1793                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1794                 if (rdev->new_data_offset != rdev->data_offset) {
1795                         sb->feature_map
1796                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1797                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1798                                                              - rdev->data_offset));
1799                 }
1800         }
1801
1802         if (mddev_is_clustered(mddev))
1803                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1804
1805         if (rdev->badblocks.count == 0)
1806                 /* Nothing to do for bad blocks */ ;
1807         else if (sb->bblog_offset == 0)
1808                 /* Cannot record bad blocks on this device */
1809                 md_error(mddev, rdev);
1810         else {
1811                 struct badblocks *bb = &rdev->badblocks;
1812                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1813                 u64 *p = bb->page;
1814                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
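                     /* Copy the in-memory table to the superblock page
                      * under a seqlock retry loop so a concurrent update
                      * restarts the copy.  Entries use the packed
                      * (sector << 10 | length) format that super_1_load
                      * decodes; the 0xff fill marks unused slots as
                      * end-of-list.
                      */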
1815                 if (bb->changed) {
1816                         unsigned seq;
1817
1818 retry:
1819                         seq = read_seqbegin(&bb->lock);
1820
1821                         memset(bbp, 0xff, PAGE_SIZE);
1822
1823                         for (i = 0 ; i < bb->count ; i++) {
1824                                 u64 internal_bb = p[i];
1825                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1826                                                 | BB_LEN(internal_bb));
1827                                 bbp[i] = cpu_to_le64(store_bb);
1828                         }
1829                         bb->changed = 0;
1830                         if (read_seqretry(&bb->lock, seq))
1831                                 goto retry;
1832
1833                         bb->sector = (rdev->sb_start +
1834                                       (int)le32_to_cpu(sb->bblog_offset));
1835                         bb->size = le16_to_cpu(sb->bblog_size);
1836                 }
1837         }
1838
1839         max_dev = 0;
1840         rdev_for_each(rdev2, mddev)
1841                 if (rdev2->desc_nr+1 > max_dev)
1842                         max_dev = rdev2->desc_nr+1;
1843
1844         if (max_dev > le32_to_cpu(sb->max_dev)) {
1845                 int bmask;
1846                 sb->max_dev = cpu_to_le32(max_dev);
1847                 rdev->sb_size = max_dev * 2 + 256;
1848                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1849                 if (rdev->sb_size & bmask)
1850                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1851         } else
1852                 max_dev = le32_to_cpu(sb->max_dev);
1853
1854         for (i=0; i<max_dev;i++)
1855                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1856
1857         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1858                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1859
1860         if (test_bit(MD_HAS_PPL, &mddev->flags)) {
1861                 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
1862                 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
1863                 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
1864         }
1865
1866         rdev_for_each(rdev2, mddev) {
1867                 i = rdev2->desc_nr;
1868                 if (test_bit(Faulty, &rdev2->flags))
1869                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1870                 else if (test_bit(In_sync, &rdev2->flags))
1871                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1872                 else if (test_bit(Journal, &rdev2->flags))
1873                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1874                 else if (rdev2->raid_disk >= 0)
1875                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1876                 else
1877                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1878         }
1879
1880         sb->sb_csum = calc_sb_1_csum(sb);
1881 }
1882
1883 static unsigned long long
1884 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1885 {
1886         struct mdp_superblock_1 *sb;
1887         sector_t max_sectors;
1888         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1889                 return 0; /* component must fit device */
1890         if (rdev->data_offset != rdev->new_data_offset)
1891                 return 0; /* too confusing */
1892         if (rdev->sb_start < rdev->data_offset) {
1893                 /* minor versions 1 and 2; superblock before data */
1894                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1895                 max_sectors -= rdev->data_offset;
1896                 if (!num_sectors || num_sectors > max_sectors)
1897                         num_sectors = max_sectors;
1898         } else if (rdev->mddev->bitmap_info.offset) {
1899                 /* minor version 0 with bitmap we can't move */
1900                 return 0;
1901         } else {
1902                 /* minor version 0; superblock after data */
1903                 sector_t sb_start;
1904                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1905                 sb_start &= ~(sector_t)(4*2 - 1);
1906                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1907                 if (!num_sectors || num_sectors > max_sectors)
1908                         num_sectors = max_sectors;
1909                 rdev->sb_start = sb_start;
1910         }
1911         sb = page_address(rdev->sb_page);
1912         sb->data_size = cpu_to_le64(num_sectors);
1913         sb->super_offset = cpu_to_le64(rdev->sb_start);
1914         sb->sb_csum = calc_sb_1_csum(sb);
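             /* Rewrite the superblock until md_super_wait() stops
              * reporting a failed write (it returns <0 when a write
              * failed and must be retried).
              */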
1915         do {
1916                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1917                                rdev->sb_page);
1918         } while (md_super_wait(rdev->mddev) < 0);
1919         return num_sectors;
1920
1921 }
1922
1923 static int
1924 super_1_allow_new_offset(struct md_rdev *rdev,
1925                          unsigned long long new_offset)
1926 {
1927         /* All necessary checks on new >= old have been done */
1928         struct bitmap *bitmap;
1929         if (new_offset >= rdev->data_offset)
1930                 return 1;
1931
1932         /* with 1.0 metadata, there is no metadata to tread on
1933          * so we can always move back */
1934         if (rdev->mddev->minor_version == 0)
1935                 return 1;
1936
1937         /* otherwise we must be sure not to step on
1938          * any metadata, so stay:
1939          * 36K beyond start of superblock
1940          * beyond end of badblocks
1941          * beyond write-intent bitmap
1942          */
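             /* (32+4)*2 sectors == 36K in 512-byte units */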
1943         if (rdev->sb_start + (32+4)*2 > new_offset)
1944                 return 0;
1945         bitmap = rdev->mddev->bitmap;
1946         if (bitmap && !rdev->mddev->bitmap_info.file &&
1947             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1948             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1949                 return 0;
1950         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1951                 return 0;
1952
1953         return 1;
1954 }
1955
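     /* Metadata handlers indexed by major version.  The v1 minor
      * versions (1.0, 1.1, 1.2) differ only in superblock placement
      * and share the "md-1" handler.
      */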
1956 static struct super_type super_types[] = {
1957         [0] = {
1958                 .name   = "0.90.0",
1959                 .owner  = THIS_MODULE,
1960                 .load_super         = super_90_load,
1961                 .validate_super     = super_90_validate,
1962                 .sync_super         = super_90_sync,
1963                 .rdev_size_change   = super_90_rdev_size_change,
1964                 .allow_new_offset   = super_90_allow_new_offset,
1965         },
1966         [1] = {
1967                 .name   = "md-1",
1968                 .owner  = THIS_MODULE,
1969                 .load_super         = super_1_load,
1970                 .validate_super     = super_1_validate,
1971                 .sync_super         = super_1_sync,
1972                 .rdev_size_change   = super_1_rdev_size_change,
1973                 .allow_new_offset   = super_1_allow_new_offset,
1974         },
1975 };
1976
1977 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1978 {
1979         if (mddev->sync_super) {
1980                 mddev->sync_super(mddev, rdev);
1981                 return;
1982         }
1983
1984         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1985
1986         super_types[mddev->major_version].sync_super(mddev, rdev);
1987 }
1988
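     /* Return 1 if any active device of mddev1 sits on the same
      * underlying whole disk (bd_contains) as an active device of
      * mddev2, else 0.
      */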
1989 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1990 {
1991         struct md_rdev *rdev, *rdev2;
1992
1993         rcu_read_lock();
1994         rdev_for_each_rcu(rdev, mddev1) {
1995                 if (test_bit(Faulty, &rdev->flags) ||
1996                     test_bit(Journal, &rdev->flags) ||
1997                     rdev->raid_disk == -1)
1998                         continue;
1999                 rdev_for_each_rcu(rdev2, mddev2) {
2000                         if (test_bit(Faulty, &rdev2->flags) ||
2001                             test_bit(Journal, &rdev2->flags) ||
2002                             rdev2->raid_disk == -1)
2003                                 continue;
2004                         if (rdev->bdev->bd_contains ==
2005                             rdev2->bdev->bd_contains) {
2006                                 rcu_read_unlock();
2007                                 return 1;
2008                         }
2009                 }
2010         }
2011         rcu_read_unlock();
2012         return 0;
2013 }
2014
2015 static LIST_HEAD(pending_raid_disks);
2016
2017 /*
2018  * Try to register data integrity profile for an mddev
2019  *
2020  * This is called when an array is started and after a disk has been kicked
2021  * from the array. It only succeeds if all working and active component devices
2022  * are integrity capable with matching profiles.
2023  */
2024 int md_integrity_register(struct mddev *mddev)
2025 {
2026         struct md_rdev *rdev, *reference = NULL;
2027
2028         if (list_empty(&mddev->disks))
2029                 return 0; /* nothing to do */
2030         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2031                 return 0; /* shouldn't register, or already is */
2032         rdev_for_each(rdev, mddev) {
2033                 /* skip spares and non-functional disks */
2034                 if (test_bit(Faulty, &rdev->flags))
2035                         continue;
2036                 if (rdev->raid_disk < 0)
2037                         continue;
2038                 if (!reference) {
2039                         /* Use the first rdev as the reference */
2040                         reference = rdev;
2041                         continue;
2042                 }
2043                 /* does this rdev's profile match the reference profile? */
2044                 if (blk_integrity_compare(reference->bdev->bd_disk,
2045                                 rdev->bdev->bd_disk) < 0)
2046                         return -EINVAL;
2047         }
2048         if (!reference || !bdev_get_integrity(reference->bdev))
2049                 return 0;
2050         /*
2051          * All component devices are integrity capable and have matching
2052          * profiles, register the common profile for the md device.
2053          */
2054         blk_integrity_register(mddev->gendisk,
2055                                bdev_get_integrity(reference->bdev));
2056
2057         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2058         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2059                 pr_err("md: failed to create integrity pool for %s\n",
2060                        mdname(mddev));
2061                 return -EINVAL;
2062         }
2063         return 0;
2064 }
2065 EXPORT_SYMBOL(md_integrity_register);
2066
2067 /*
2068  * Attempt to add an rdev, but only if it is consistent with the current
2069  * integrity profile
2070  */
2071 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2072 {
2073         struct blk_integrity *bi_rdev;
2074         struct blk_integrity *bi_mddev;
2075         char name[BDEVNAME_SIZE];
2076
2077         if (!mddev->gendisk)
2078                 return 0;
2079
2080         bi_rdev = bdev_get_integrity(rdev->bdev);
2081         bi_mddev = blk_get_integrity(mddev->gendisk);
2082
2083         if (!bi_mddev) /* nothing to do */
2084                 return 0;
2085
2086         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2087                 pr_err("%s: incompatible integrity profile for %s\n",
2088                        mdname(mddev), bdevname(rdev->bdev, name));
2089                 return -ENXIO;
2090         }
2091
2092         return 0;
2093 }
2094 EXPORT_SYMBOL(md_integrity_add_rdev);
2095
2096 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2097 {
2098         char b[BDEVNAME_SIZE];
2099         struct kobject *ko;
2100         int err;
2101
2102         /* prevent duplicates */
2103         if (find_rdev(mddev, rdev->bdev->bd_dev))
2104                 return -EEXIST;
2105
2106         if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2107             mddev->pers)
2108                 return -EROFS;
2109
2110         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2111         if (!test_bit(Journal, &rdev->flags) &&
2112             rdev->sectors &&
2113             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2114                 if (mddev->pers) {
2115                         /* Cannot change size, so fail.
2116                          * If mddev->level <= 0, then we don't care
2117                          * about aligning sizes (e.g. linear).
2118                          */
2119                         if (mddev->level > 0)
2120                                 return -ENOSPC;
2121                 } else
2122                         mddev->dev_sectors = rdev->sectors;
2123         }
2124
2125         /* Verify rdev->desc_nr is unique.
2126          * If it is -1, assign a free number, else
2127          * check that the number is not in use.
2128          */
2129         rcu_read_lock();
2130         if (rdev->desc_nr < 0) {
2131                 int choice = 0;
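                     /* on a running array, start the search above the
                      * active slots (raid_disks) so the new device gets
                      * a number of its own
                      */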
2132                 if (mddev->pers)
2133                         choice = mddev->raid_disks;
2134                 while (md_find_rdev_nr_rcu(mddev, choice))
2135                         choice++;
2136                 rdev->desc_nr = choice;
2137         } else {
2138                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2139                         rcu_read_unlock();
2140                         return -EBUSY;
2141                 }
2142         }
2143         rcu_read_unlock();
2144         if (!test_bit(Journal, &rdev->flags) &&
2145             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2146                 pr_warn("md: %s: array is limited to %d devices\n",
2147                         mdname(mddev), mddev->max_disks);
2148                 return -EBUSY;
2149         }
2150         bdevname(rdev->bdev,b);
2151         strreplace(b, '/', '!');
2152
2153         rdev->mddev = mddev;
2154         pr_debug("md: bind<%s>\n", b);
2155
2156         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2157                 goto fail;
2158
2159         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2160         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2161                 /* failure here is OK */;
2162         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2163
2164         list_add_rcu(&rdev->same_set, &mddev->disks);
2165         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2166
2167         /* May as well allow recovery to be retried once */
2168         mddev->recovery_disabled++;
2169
2170         return 0;
2171
2172  fail:
2173         pr_warn("md: failed to register dev-%s for %s\n",
2174                 b, mdname(mddev));
2175         return err;
2176 }
2177
2178 static void md_delayed_delete(struct work_struct *ws)
2179 {
2180         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2181         kobject_del(&rdev->kobj);
2182         kobject_put(&rdev->kobj);
2183 }
2184
2185 static void unbind_rdev_from_array(struct md_rdev *rdev)
2186 {
2187         char b[BDEVNAME_SIZE];
2188
2189         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2190         list_del_rcu(&rdev->same_set);
2191         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2192         rdev->mddev = NULL;
2193         sysfs_remove_link(&rdev->kobj, "block");
2194         sysfs_put(rdev->sysfs_state);
2195         rdev->sysfs_state = NULL;
2196         rdev->badblocks.count = 0;
2197         /* We need to delay this, otherwise we can deadlock when
2198          * writing 'remove' to "dev/state".  We also need
2199          * to delay it due to rcu usage.
2200          */
2201         synchronize_rcu();
2202         INIT_WORK(&rdev->del_work, md_delayed_delete);
2203         kobject_get(&rdev->kobj);
2204         queue_work(md_misc_wq, &rdev->del_work);
2205 }
2206
2207 /*
2208  * prevent the device from being mounted, repartitioned or
2209  * otherwise reused by a RAID array (or any other kernel
2210  * subsystem), by bd_claiming the device.
2211  */
2212 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2213 {
2214         int err = 0;
2215         struct block_device *bdev;
2216         char b[BDEVNAME_SIZE];
2217
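             /* Claim the device exclusively.  For a shared claim every
              * md rdev passes the same holder key (the address of
              * lock_rdev), so md devices may co-claim; otherwise the
              * rdev itself is the holder.
              */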
2218         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2219                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2220         if (IS_ERR(bdev)) {
2221                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2222                 return PTR_ERR(bdev);
2223         }
2224         rdev->bdev = bdev;
2225         return err;
2226 }
2227
2228 static void unlock_rdev(struct md_rdev *rdev)
2229 {
2230         struct block_device *bdev = rdev->bdev;
2231         rdev->bdev = NULL;
2232         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2233 }
2234
2235 void md_autodetect_dev(dev_t dev);
2236
2237 static void export_rdev(struct md_rdev *rdev)
2238 {
2239         char b[BDEVNAME_SIZE];
2240
2241         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2242         md_rdev_clear(rdev);
2243 #ifndef MODULE
2244         if (test_bit(AutoDetected, &rdev->flags))
2245                 md_autodetect_dev(rdev->bdev->bd_dev);
2246 #endif
2247         unlock_rdev(rdev);
2248         kobject_put(&rdev->kobj);
2249 }
2250
2251 void md_kick_rdev_from_array(struct md_rdev *rdev)
2252 {
2253         unbind_rdev_from_array(rdev);
2254         export_rdev(rdev);
2255 }
2256 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2257
2258 static void export_array(struct mddev *mddev)
2259 {
2260         struct md_rdev *rdev;
2261
2262         while (!list_empty(&mddev->disks)) {
2263                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2264                                         same_set);
2265                 md_kick_rdev_from_array(rdev);
2266         }
2267         mddev->raid_disks = 0;
2268         mddev->major_version = 0;
2269 }
2270
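     /* Decide whether the array can be marked 'in_sync'.
      * writes_pending is a percpu refcount, so it is briefly switched
      * to atomic mode for a reliable zero check; sync_checkers counts
      * concurrent callers so the counter is switched back to percpu
      * mode only when the last one finishes.
      */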
2271 static bool set_in_sync(struct mddev *mddev)
2272 {
2273         WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
2274         if (!mddev->in_sync) {
2275                 mddev->sync_checkers++;
2276                 spin_unlock(&mddev->lock);
2277                 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2278                 spin_lock(&mddev->lock);
2279                 if (!mddev->in_sync &&
2280                     percpu_ref_is_zero(&mddev->writes_pending)) {
2281                         mddev->in_sync = 1;
2282                         /*
2283                          * Ensure ->in_sync is visible before we clear
2284                          * ->sync_checkers.
2285                          */
2286                         smp_mb();
2287                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2288                         sysfs_notify_dirent_safe(mddev->sysfs_state);
2289                 }
2290                 if (--mddev->sync_checkers == 0)
2291                         percpu_ref_switch_to_percpu(&mddev->writes_pending);
2292         }
2293         if (mddev->safemode == 1)
2294                 mddev->safemode = 0;
2295         return mddev->in_sync;
2296 }
2297
2298 static void sync_sbs(struct mddev *mddev, int nospares)
2299 {
2300         /* Update each superblock (in-memory image), but
2301          * if we are allowed to, skip spares which already
2302          * have the right event counter, or have one earlier
2303          * (which would mean they aren't being marked as dirty
2304          * with the rest of the array)
2305          */
2306         struct md_rdev *rdev;
2307         rdev_for_each(rdev, mddev) {
2308                 if (rdev->sb_events == mddev->events ||
2309                     (nospares &&
2310                      rdev->raid_disk < 0 &&
2311                      rdev->sb_events+1 == mddev->events)) {
2312                         /* Don't update this superblock */
2313                         rdev->sb_loaded = 2;
2314                 } else {
2315                         sync_super(mddev, rdev);
2316                         rdev->sb_loaded = 1;
2317                 }
2318         }
2319 }
2320
2321 static bool does_sb_need_changing(struct mddev *mddev)
2322 {
2323         struct md_rdev *rdev = NULL, *iter;
2324         struct mdp_superblock_1 *sb;
2325         int role;
2326
2327         /* Find a good rdev; the list cursor is never NULL after the
2328          * loop, so any match must be recorded in a separate variable.
2329          */
2330         rdev_for_each(iter, mddev)
2331                 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2332                         rdev = iter;
2333                         break;
2334                 }
2335         /* No good device found. */
2336         if (!rdev)
2337                 return false;
2335
2336         sb = page_address(rdev->sb_page);
2337         /* Check if a device has become faulty or a spare become active */
2338         rdev_for_each(rdev, mddev) {
2339                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2340                 /* Device activated? */
2341                 if (role == 0xffff && rdev->raid_disk >= 0 &&
2342                     !test_bit(Faulty, &rdev->flags))
2343                         return true;
2344                 /* Device turned faulty? */
2345                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2346                         return true;
2347         }
2348
2349         /* Check if any mddev parameters have changed */
2350         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2351             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2352             (mddev->layout != le32_to_cpu(sb->layout)) ||
2353             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2354             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2355                 return true;
2356
2357         return false;
2358 }
2359
2360 void md_update_sb(struct mddev *mddev, int force_change)
2361 {
2362         struct md_rdev *rdev;
2363         int sync_req;
2364         int nospares = 0;
2365         int any_badblocks_changed = 0;
2366         int ret = -1;
2367
2368         if (mddev->ro) {
2369                 if (force_change)
2370                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2371                 return;
2372         }
2373
2374 repeat:
2375         if (mddev_is_clustered(mddev)) {
2376                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2377                         force_change = 1;
2378                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2379                         nospares = 1;
2380                 ret = md_cluster_ops->metadata_update_start(mddev);
2381                 /* Has someone else updated the sb? */
2382                 if (!does_sb_need_changing(mddev)) {
2383                         if (ret == 0)
2384                                 md_cluster_ops->metadata_update_cancel(mddev);
2385                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2386                                                          BIT(MD_SB_CHANGE_DEVS) |
2387                                                          BIT(MD_SB_CHANGE_CLEAN));
2388                         return;
2389                 }
2390         }
2391
2392         /* First make sure individual recovery_offsets are correct */
2393         rdev_for_each(rdev, mddev) {
2394                 if (rdev->raid_disk >= 0 &&
2395                     mddev->delta_disks >= 0 &&
2396                     !test_bit(Journal, &rdev->flags) &&
2397                     !test_bit(In_sync, &rdev->flags) &&
2398                     mddev->curr_resync_completed > rdev->recovery_offset)
2399                                 rdev->recovery_offset = mddev->curr_resync_completed;
2400
2401         }
2402         if (!mddev->persistent) {
2403                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2404                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2405                 if (!mddev->external) {
2406                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2407                         rdev_for_each(rdev, mddev) {
2408                                 if (rdev->badblocks.changed) {
2409                                         rdev->badblocks.changed = 0;
2410                                         ack_all_badblocks(&rdev->badblocks);
2411                                         md_error(mddev, rdev);
2412                                 }
2413                                 clear_bit(Blocked, &rdev->flags);
2414                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2415                                 wake_up(&rdev->blocked_wait);
2416                         }
2417                 }
2418                 wake_up(&mddev->sb_wait);
2419                 return;
2420         }
2421
2422         spin_lock(&mddev->lock);
2423
2424         mddev->utime = ktime_get_real_seconds();
2425
2426         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2427                 force_change = 1;
2428         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2429                 /* just a clean <-> dirty transition; possibly leave spares
2430                  * alone, though if 'events' isn't the right even/odd value,
2431                  * we will have to update the spares after all
2432                  */
2433                 nospares = 1;
2434         if (force_change)
2435                 nospares = 0;
2436         if (mddev->degraded)
2437                 /* If the array is degraded, then skipping spares is both
2438                  * dangerous and fairly pointless.
2439                  * Dangerous because a device that was removed from the array
2440                  * might have an event count that still looks up-to-date,
2441                  * so it can be re-added without a resync.
2442                  * Pointless because if there are any spares to skip,
2443                  * then a recovery will happen and soon that array won't
2444                  * be degraded any more and the spare can go back to sleep then.
2445                  */
2446                 nospares = 0;
2447
2448         sync_req = mddev->in_sync;
2449
2450         /* If this is just a dirty<->clean transition, and the array is clean
2451          * and 'events' is odd, we can roll back to the previous clean state */
2452         if (nospares
2453             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2454             && mddev->can_decrease_events
2455             && mddev->events != 1) {
2456                 mddev->events--;
2457                 mddev->can_decrease_events = 0;
2458         } else {
2459                 /* otherwise we have to go forward and ... */
2460                 mddev->events++;
2461                 mddev->can_decrease_events = nospares;
2462         }
2463
2464         /*
2465          * This 64-bit counter should never wrap.
2466          * Either we are in around ~1 trillion A.C., assuming
2467          * 1 reboot per second, or we have a bug...
2468          */
2469         WARN_ON(mddev->events == 0);
2470
2471         rdev_for_each(rdev, mddev) {
2472                 if (rdev->badblocks.changed)
2473                         any_badblocks_changed++;
2474                 if (test_bit(Faulty, &rdev->flags))
2475                         set_bit(FaultRecorded, &rdev->flags);
2476         }
2477
2478         sync_sbs(mddev, nospares);
2479         spin_unlock(&mddev->lock);
2480
2481         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2482                  mdname(mddev), mddev->in_sync);
2483
2484         if (mddev->queue)
2485                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2486 rewrite:
2487         bitmap_update_sb(mddev->bitmap);
2488         rdev_for_each(rdev, mddev) {
2489                 char b[BDEVNAME_SIZE];
2490
2491                 if (rdev->sb_loaded != 1)
2492                         continue; /* no noise on spare devices */
2493
2494                 if (!test_bit(Faulty, &rdev->flags)) {
2495                         md_super_write(mddev,rdev,
2496                                        rdev->sb_start, rdev->sb_size,
2497                                        rdev->sb_page);
2498                         pr_debug("md: (write) %s's sb offset: %llu\n",
2499                                  bdevname(rdev->bdev, b),
2500                                  (unsigned long long)rdev->sb_start);
2501                         rdev->sb_events = mddev->events;
2502                         if (rdev->badblocks.size) {
2503                                 md_super_write(mddev, rdev,
2504                                                rdev->badblocks.sector,
2505                                                rdev->badblocks.size << 9,
2506                                                rdev->bb_page);
2507                                 rdev->badblocks.size = 0;
2508                         }
2509
2510                 } else
2511                         pr_debug("md: %s (skipping faulty)\n",
2512                                  bdevname(rdev->bdev, b));
2513
2514                 if (mddev->level == LEVEL_MULTIPATH)
2515                         /* only need to write one superblock... */
2516                         break;
2517         }
2518         if (md_super_wait(mddev) < 0)
2519                 goto rewrite;
2520         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2521
2522         if (mddev_is_clustered(mddev) && ret == 0)
2523                 md_cluster_ops->metadata_update_finish(mddev);
2524
2525         if (mddev->in_sync != sync_req ||
2526             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2527                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2528                 /* have to write it out again */
2529                 goto repeat;
2530         wake_up(&mddev->sb_wait);
2531         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2532                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2533
2534         rdev_for_each(rdev, mddev) {
2535                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2536                         clear_bit(Blocked, &rdev->flags);
2537
2538                 if (any_badblocks_changed)
2539                         ack_all_badblocks(&rdev->badblocks);
2540                 clear_bit(BlockedBadBlocks, &rdev->flags);
2541                 wake_up(&rdev->blocked_wait);
2542         }
2543 }
2544 EXPORT_SYMBOL(md_update_sb);
2545
2546 static int add_bound_rdev(struct md_rdev *rdev)
2547 {
2548         struct mddev *mddev = rdev->mddev;
2549         int err = 0;
2550         bool add_journal = test_bit(Journal, &rdev->flags);
2551
2552         if (!mddev->pers->hot_remove_disk || add_journal) {
2553                 /* If there is hot_add_disk but no hot_remove_disk,
2554                  * then added disks are for geometry changes
2555                  * and should be added immediately.
2556                  */
2557                 super_types[mddev->major_version].
2558                         validate_super(mddev, rdev);
2559                 if (add_journal)
2560                         mddev_suspend(mddev);
2561                 err = mddev->pers->hot_add_disk(mddev, rdev);
2562                 if (add_journal)
2563                         mddev_resume(mddev);
2564                 if (err) {
2565                         md_kick_rdev_from_array(rdev);
2566                         return err;
2567                 }
2568         }
2569         sysfs_notify_dirent_safe(rdev->sysfs_state);
2570
2571         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2572         if (mddev->degraded)
2573                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2574         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2575         md_new_event(mddev);
2576         md_wakeup_thread(mddev->thread);
2577         return 0;
2578 }
2579
2580 /* words written to sysfs files may, or may not, be \n terminated.
2581  * We want to accept either case. For this we use cmd_match.
2582  */
2583 static int cmd_match(const char *cmd, const char *str)
2584 {
2585         /* See if cmd, written into a sysfs file, matches
2586          * str.  They must either be the same, or cmd can
2587          * have a trailing newline
2588          */
2589         while (*cmd && *str && *cmd == *str) {
2590                 cmd++;
2591                 str++;
2592         }
2593         if (*cmd == '\n')
2594                 cmd++;
2595         if (*str || *cmd)
2596                 return 0;
2597         return 1;
2598 }
2599
2600 struct rdev_sysfs_entry {
2601         struct attribute attr;
2602         ssize_t (*show)(struct md_rdev *, char *);
2603         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2604 };
2605
2606 static ssize_t
2607 state_show(struct md_rdev *rdev, char *page)
2608 {
2609         char *sep = ",";
2610         size_t len = 0;
2611         unsigned long flags = ACCESS_ONCE(rdev->flags);
2612
2613         if (test_bit(Faulty, &flags) ||
2614             (!test_bit(ExternalBbl, &flags) &&
2615             rdev->badblocks.unacked_exist))
2616                 len += sprintf(page+len, "faulty%s", sep);
2617         if (test_bit(In_sync, &flags))
2618                 len += sprintf(page+len, "in_sync%s", sep);
2619         if (test_bit(Journal, &flags))
2620                 len += sprintf(page+len, "journal%s", sep);
2621         if (test_bit(WriteMostly, &flags))
2622                 len += sprintf(page+len, "write_mostly%s", sep);
2623         if (test_bit(Blocked, &flags) ||
2624             (rdev->badblocks.unacked_exist
2625              && !test_bit(Faulty, &flags)))
2626                 len += sprintf(page+len, "blocked%s", sep);
2627         if (!test_bit(Faulty, &flags) &&
2628             !test_bit(Journal, &flags) &&
2629             !test_bit(In_sync, &flags))
2630                 len += sprintf(page+len, "spare%s", sep);
2631         if (test_bit(WriteErrorSeen, &flags))
2632                 len += sprintf(page+len, "write_error%s", sep);
2633         if (test_bit(WantReplacement, &flags))
2634                 len += sprintf(page+len, "want_replacement%s", sep);
2635         if (test_bit(Replacement, &flags))
2636                 len += sprintf(page+len, "replacement%s", sep);
2637         if (test_bit(ExternalBbl, &flags))
2638                 len += sprintf(page+len, "external_bbl%s", sep);
2639         if (test_bit(FailFast, &flags))
2640                 len += sprintf(page+len, "failfast%s", sep);
2641
2642         if (len)
2643                 len -= strlen(sep);
2644
2645         return len+sprintf(page+len, "\n");
2646 }
2647
2648 static ssize_t
2649 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2650 {
2651         /* can write
2652          *  faulty  - simulates an error
2653          *  remove  - disconnects the device
2654          *  writemostly - sets write_mostly
2655          *  -writemostly - clears write_mostly
2656          *  blocked - sets the Blocked flag
2657          *  -blocked - clears the Blocked flag and possibly simulates an error
2658          *  insync - sets In_sync provided the device isn't active
2659          *  -insync - clears In_sync for a device with a slot assigned,
2660          *            so that it gets rebuilt based on bitmap
2661          *  write_error - sets WriteErrorSeen
2662          *  -write_error - clears WriteErrorSeen
2663          *  {,-}failfast - set/clear FailFast
2664          */
2665         int err = -EINVAL;
2666         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2667                 md_error(rdev->mddev, rdev);
2668                 if (test_bit(Faulty, &rdev->flags))
2669                         err = 0;
2670                 else
2671                         err = -EBUSY;
2672         } else if (cmd_match(buf, "remove")) {
2673                 if (rdev->mddev->pers) {
2674                         clear_bit(Blocked, &rdev->flags);
2675                         remove_and_add_spares(rdev->mddev, rdev);
2676                 }
2677                 if (rdev->raid_disk >= 0)
2678                         err = -EBUSY;
2679                 else {
2680                         struct mddev *mddev = rdev->mddev;
2681                         err = 0;
2682                         if (mddev_is_clustered(mddev))
2683                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2684
2685                         if (err == 0) {
2686                                 md_kick_rdev_from_array(rdev);
2687                                 if (mddev->pers) {
2688                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2689                                         md_wakeup_thread(mddev->thread);
2690                                 }
2691                                 md_new_event(mddev);
2692                         }
2693                 }
2694         } else if (cmd_match(buf, "writemostly")) {
2695                 set_bit(WriteMostly, &rdev->flags);
2696                 err = 0;
2697         } else if (cmd_match(buf, "-writemostly")) {
2698                 clear_bit(WriteMostly, &rdev->flags);
2699                 err = 0;
2700         } else if (cmd_match(buf, "blocked")) {
2701                 set_bit(Blocked, &rdev->flags);
2702                 err = 0;
2703         } else if (cmd_match(buf, "-blocked")) {
2704                 if (!test_bit(Faulty, &rdev->flags) &&
2705                     !test_bit(ExternalBbl, &rdev->flags) &&
2706                     rdev->badblocks.unacked_exist) {
2707                         /* metadata handler doesn't understand badblocks,
2708                          * so we need to fail the device
2709                          */
2710                         md_error(rdev->mddev, rdev);
2711                 }
2712                 clear_bit(Blocked, &rdev->flags);
2713                 clear_bit(BlockedBadBlocks, &rdev->flags);
2714                 wake_up(&rdev->blocked_wait);
2715                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2716                 md_wakeup_thread(rdev->mddev->thread);
2717
2718                 err = 0;
2719         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2720                 set_bit(In_sync, &rdev->flags);
2721                 err = 0;
2722         } else if (cmd_match(buf, "failfast")) {
2723                 set_bit(FailFast, &rdev->flags);
2724                 err = 0;
2725         } else if (cmd_match(buf, "-failfast")) {
2726                 clear_bit(FailFast, &rdev->flags);
2727                 err = 0;
2728         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2729                    !test_bit(Journal, &rdev->flags)) {
2730                 if (rdev->mddev->pers == NULL) {
2731                         clear_bit(In_sync, &rdev->flags);
2732                         rdev->saved_raid_disk = rdev->raid_disk;
2733                         rdev->raid_disk = -1;
2734                         err = 0;
2735                 }
2736         } else if (cmd_match(buf, "write_error")) {
2737                 set_bit(WriteErrorSeen, &rdev->flags);
2738                 err = 0;
2739         } else if (cmd_match(buf, "-write_error")) {
2740                 clear_bit(WriteErrorSeen, &rdev->flags);
2741                 err = 0;
2742         } else if (cmd_match(buf, "want_replacement")) {
2743                 /* Any non-spare device that is not a replacement can
2744                  * become want_replacement at any time, but we then need to
2745                  * check if recovery is needed.
2746                  */
2747                 if (rdev->raid_disk >= 0 &&
2748                     !test_bit(Journal, &rdev->flags) &&
2749                     !test_bit(Replacement, &rdev->flags))
2750                         set_bit(WantReplacement, &rdev->flags);
2751                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2752                 md_wakeup_thread(rdev->mddev->thread);
2753                 err = 0;
2754         } else if (cmd_match(buf, "-want_replacement")) {
2755                 /* Clearing 'want_replacement' is always allowed.
2756          * Once replacement starts it is too late though.
2757                  */
2758                 err = 0;
2759                 clear_bit(WantReplacement, &rdev->flags);
2760         } else if (cmd_match(buf, "replacement")) {
2761                 /* Can only set a device as a replacement when array has not
2762                  * yet been started.  Once running, replacement is automatic
2763                  * from spares, or by assigning 'slot'.
2764                  */
2765                 if (rdev->mddev->pers)
2766                         err = -EBUSY;
2767                 else {
2768                         set_bit(Replacement, &rdev->flags);
2769                         err = 0;
2770                 }
2771         } else if (cmd_match(buf, "-replacement")) {
2772                 /* Similarly, can only clear Replacement before start */
2773                 if (rdev->mddev->pers)
2774                         err = -EBUSY;
2775                 else {
2776                         clear_bit(Replacement, &rdev->flags);
2777                         err = 0;
2778                 }
2779         } else if (cmd_match(buf, "re-add")) {
2780                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2781                         /* clear_bit is performed _after_ all the devices
2782                          * have their local Faulty bit cleared. If any writes
2783                          * happen in the meantime in the local node, they
2784                          * will land in the local bitmap, which will be synced
2785                          * by this node eventually
2786                          */
2787                         if (!mddev_is_clustered(rdev->mddev) ||
2788                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2789                                 clear_bit(Faulty, &rdev->flags);
2790                                 err = add_bound_rdev(rdev);
2791                         }
2792                 } else
2793                         err = -EBUSY;
2794         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2795                 set_bit(ExternalBbl, &rdev->flags);
2796                 rdev->badblocks.shift = 0;
2797                 err = 0;
2798         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2799                 clear_bit(ExternalBbl, &rdev->flags);
2800                 err = 0;
2801         }
2802         if (!err)
2803                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2804         return err ? err : len;
2805 }
2806 static struct rdev_sysfs_entry rdev_state =
2807 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
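/*
 * Illustrative use from userspace (the md device and member names
 * below are examples, not fixed values):
 *
 *   # simulate a failure on one member, then detach it
 *   echo faulty > /sys/block/md0/md/dev-sdb1/state
 *   echo remove > /sys/block/md0/md/dev-sdb1/state
 */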
2808
2809 static ssize_t
2810 errors_show(struct md_rdev *rdev, char *page)
2811 {
2812         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2813 }
2814
2815 static ssize_t
2816 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2817 {
2818         unsigned int n;
2819         int rv;
2820
2821         rv = kstrtouint(buf, 10, &n);
2822         if (rv < 0)
2823                 return rv;
2824         atomic_set(&rdev->corrected_errors, n);
2825         return len;
2826 }
2827 static struct rdev_sysfs_entry rdev_errors =
2828 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
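/*
 * 'errors' exposes the corrected-read-error counter for this member;
 * writing a number replaces the count, so e.g. "echo 0 > errors" can
 * be used to reset it after a repair pass.
 */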
2829
2830 static ssize_t
2831 slot_show(struct md_rdev *rdev, char *page)
2832 {
2833         if (test_bit(Journal, &rdev->flags))
2834                 return sprintf(page, "journal\n");
2835         else if (rdev->raid_disk < 0)
2836                 return sprintf(page, "none\n");
2837         else
2838                 return sprintf(page, "%d\n", rdev->raid_disk);
2839 }
2840
2841 static ssize_t
2842 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2843 {
2844         int slot;
2845         int err;
2846
2847         if (test_bit(Journal, &rdev->flags))
2848                 return -EBUSY;
2849         if (strncmp(buf, "none", 4)==0)
2850                 slot = -1;
2851         else {
2852                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2853                 if (err < 0)
2854                         return err;
2855         }
2856         if (rdev->mddev->pers && slot == -1) {
2857                 /* Setting 'slot' on an active array requires also
2858                  * updating the 'rd%d' link, and communicating
2859                  * with the personality with ->hot_*_disk.
2860                  * For now we only support removing
2861                  * failed/spare devices.  This normally happens automatically,
2862                  * but not when the metadata is externally managed.
2863                  */
2864                 if (rdev->raid_disk == -1)
2865                         return -EEXIST;
2866                 /* personality does all needed checks */
2867                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2868                         return -EINVAL;
2869                 clear_bit(Blocked, &rdev->flags);
2870                 remove_and_add_spares(rdev->mddev, rdev);
2871                 if (rdev->raid_disk >= 0)
2872                         return -EBUSY;
2873                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2874                 md_wakeup_thread(rdev->mddev->thread);
2875         } else if (rdev->mddev->pers) {
2876                 /* Activating a spare .. or possibly reactivating
2877                  * if we ever get bitmaps working here.
2878                  */
2879                 int err;
2880
2881                 if (rdev->raid_disk != -1)
2882                         return -EBUSY;
2883
2884                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2885                         return -EBUSY;
2886
2887                 if (rdev->mddev->pers->hot_add_disk == NULL)
2888                         return -EINVAL;
2889
2890                 if (slot >= rdev->mddev->raid_disks &&
2891                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2892                         return -ENOSPC;
2893
2894                 rdev->raid_disk = slot;
2895                 if (test_bit(In_sync, &rdev->flags))
2896                         rdev->saved_raid_disk = slot;
2897                 else
2898                         rdev->saved_raid_disk = -1;
2899                 clear_bit(In_sync, &rdev->flags);
2900                 clear_bit(Bitmap_sync, &rdev->flags);
2901                 err = rdev->mddev->pers->
2902                         hot_add_disk(rdev->mddev, rdev);
2903                 if (err) {
2904                         rdev->raid_disk = -1;
2905                         return err;
2906                 } else
2907                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2908                 if (sysfs_link_rdev(rdev->mddev, rdev))
2909                         /* failure here is OK */;
2910                 /* don't wakeup anyone, leave that to userspace. */
2911         } else {
2912                 if (slot >= rdev->mddev->raid_disks &&
2913                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2914                         return -ENOSPC;
2915                 rdev->raid_disk = slot;
2916                 /* assume it is working */
2917                 clear_bit(Faulty, &rdev->flags);
2918                 clear_bit(WriteMostly, &rdev->flags);
2919                 set_bit(In_sync, &rdev->flags);
2920                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2921         }
2922         return len;
2923 }
2924
2925 static struct rdev_sysfs_entry rdev_slot =
2926 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
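/*
 * Example (paths are illustrative): a spare can be activated into a
 * specific role on a running array, and "none" requests removal,
 * which only succeeds for failed/spare devices:
 *
 *   echo 2 > /sys/block/md0/md/dev-sdc1/slot
 *   echo none > /sys/block/md0/md/dev-sdc1/slot
 */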
2927
2928 static ssize_t
2929 offset_show(struct md_rdev *rdev, char *page)
2930 {
2931         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2932 }
2933
2934 static ssize_t
2935 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2936 {
2937         unsigned long long offset;
2938         if (kstrtoull(buf, 10, &offset) < 0)
2939                 return -EINVAL;
2940         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2941                 return -EBUSY;
2942         if (rdev->sectors && rdev->mddev->external)
2943                 /* Must set offset before size, so overlap checks
2944                  * can be sane */
2945                 return -EBUSY;
2946         rdev->data_offset = offset;
2947         rdev->new_data_offset = offset;
2948         return len;
2949 }
2950
2951 static struct rdev_sysfs_entry rdev_offset =
2952 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2953
2954 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2955 {
2956         return sprintf(page, "%llu\n",
2957                        (unsigned long long)rdev->new_data_offset);
2958 }
2959
2960 static ssize_t new_offset_store(struct md_rdev *rdev,
2961                                 const char *buf, size_t len)
2962 {
2963         unsigned long long new_offset;
2964         struct mddev *mddev = rdev->mddev;
2965
2966         if (kstrtoull(buf, 10, &new_offset) < 0)
2967                 return -EINVAL;
2968
2969         if (mddev->sync_thread ||
2970             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2971                 return -EBUSY;
2972         if (new_offset == rdev->data_offset)
2973                 /* reset is always permitted */
2974                 ;
2975         else if (new_offset > rdev->data_offset) {
2976                 /* must not push array size beyond rdev_sectors */
2977                 if (new_offset - rdev->data_offset
2978                     + mddev->dev_sectors > rdev->sectors)
2979                         return -E2BIG;
2980         }
2981         /* Metadata worries about other space details. */
2982
2983         /* decreasing the offset is inconsistent with a backwards
2984          * reshape.
2985          */
2986         if (new_offset < rdev->data_offset &&
2987             mddev->reshape_backwards)
2988                 return -EINVAL;
2989         /* Increasing offset is inconsistent with forwards
2990          * reshape.  reshape_direction should be set to
2991          * 'backwards' first.
2992          */
2993         if (new_offset > rdev->data_offset &&
2994             !mddev->reshape_backwards)
2995                 return -EINVAL;
2996
2997         if (mddev->pers && mddev->persistent &&
2998             !super_types[mddev->major_version]
2999             .allow_new_offset(rdev, new_offset))
3000                 return -E2BIG;
3001         rdev->new_data_offset = new_offset;
3002         if (new_offset > rdev->data_offset)
3003                 mddev->reshape_backwards = 1;
3004         else if (new_offset < rdev->data_offset)
3005                 mddev->reshape_backwards = 0;
3006
3007         return len;
3008 }
3009 static struct rdev_sysfs_entry rdev_new_offset =
3010 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3011
3012 static ssize_t
3013 rdev_size_show(struct md_rdev *rdev, char *page)
3014 {
3015         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3016 }
3017
3018 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3019 {
3020         /* check if two start/length pairs overlap */
3021         if (s1+l1 <= s2)
3022                 return 0;
3023         if (s2+l2 <= s1)
3024                 return 0;
3025         return 1;
3026 }
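/*
 * For example, overlaps(0, 100, 50, 100) is 1 (the ranges intersect),
 * while overlaps(0, 50, 50, 100) is 0 (the ranges merely touch).
 */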
3027
3028 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3029 {
3030         unsigned long long blocks;
3031         sector_t new;
3032
3033         if (kstrtoull(buf, 10, &blocks) < 0)
3034                 return -EINVAL;
3035
3036         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3037                 return -EINVAL; /* sector conversion overflow */
3038
3039         new = blocks * 2;
3040         if (new != blocks * 2)
3041                 return -EINVAL; /* unsigned long long to sector_t overflow */
3042
3043         *sectors = new;
3044         return 0;
3045 }
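/*
 * Example: "1024" (1KiB blocks) yields *sectors == 2048.  A value with
 * the top bit set is rejected up front because doubling it would
 * overflow, and the new != blocks * 2 check catches truncation when
 * sector_t is narrower than unsigned long long.
 */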
3046
3047 static ssize_t
3048 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3049 {
3050         struct mddev *my_mddev = rdev->mddev;
3051         sector_t oldsectors = rdev->sectors;
3052         sector_t sectors;
3053
3054         if (test_bit(Journal, &rdev->flags))
3055                 return -EBUSY;
3056         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3057                 return -EINVAL;
3058         if (rdev->data_offset != rdev->new_data_offset)
3059                 return -EINVAL; /* too confusing */
3060         if (my_mddev->pers && rdev->raid_disk >= 0) {
3061                 if (my_mddev->persistent) {
3062                         sectors = super_types[my_mddev->major_version].
3063                                 rdev_size_change(rdev, sectors);
3064                         if (!sectors)
3065                                 return -EBUSY;
3066                 } else if (!sectors)
3067                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3068                                 rdev->data_offset;
3069                 if (!my_mddev->pers->resize)
3070                         /* Cannot change size for RAID0 or Linear etc */
3071                         return -EINVAL;
3072         }
3073         if (sectors < my_mddev->dev_sectors)
3074                 return -EINVAL; /* component must fit device */
3075
3076         rdev->sectors = sectors;
3077         if (sectors > oldsectors && my_mddev->external) {
3078                 /* Need to check that all other rdevs with the same
3079                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3080                  * the rdev lists safely.
3081                  * This check does not provide a hard guarantee, it
3082                  * just helps avoid dangerous mistakes.
3083                  */
3084                 struct mddev *mddev;
3085                 int overlap = 0;
3086                 struct list_head *tmp;
3087
3088                 rcu_read_lock();
3089                 for_each_mddev(mddev, tmp) {
3090                         struct md_rdev *rdev2;
3091
3092                         rdev_for_each(rdev2, mddev)
3093                                 if (rdev->bdev == rdev2->bdev &&
3094                                     rdev != rdev2 &&
3095                                     overlaps(rdev->data_offset, rdev->sectors,
3096                                              rdev2->data_offset,
3097                                              rdev2->sectors)) {
3098                                         overlap = 1;
3099                                         break;
3100                                 }
3101                         if (overlap) {
3102                                 mddev_put(mddev);
3103                                 break;
3104                         }
3105                 }
3106                 rcu_read_unlock();
3107                 if (overlap) {
3108                         /* Someone else could have slipped in a size
3109                          * change here, but doing so is just silly.
3110                          * We put oldsectors back because we *know* it is
3111                          * safe, and trust userspace not to race with
3112                          * itself
3113                          */
3114                         rdev->sectors = oldsectors;
3115                         return -EBUSY;
3116                 }
3117         }
3118         return len;
3119 }
3120
3121 static struct rdev_sysfs_entry rdev_size =
3122 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
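/* Note: 'size' is shown and parsed in 1KiB blocks, i.e. sectors / 2. */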
3123
3124 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3125 {
3126         unsigned long long recovery_start = rdev->recovery_offset;
3127
3128         if (test_bit(In_sync, &rdev->flags) ||
3129             recovery_start == MaxSector)
3130                 return sprintf(page, "none\n");
3131
3132         return sprintf(page, "%llu\n", recovery_start);
3133 }
3134
3135 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3136 {
3137         unsigned long long recovery_start;
3138
3139         if (cmd_match(buf, "none"))
3140                 recovery_start = MaxSector;
3141         else if (kstrtoull(buf, 10, &recovery_start))
3142                 return -EINVAL;
3143
3144         if (rdev->mddev->pers &&
3145             rdev->raid_disk >= 0)
3146                 return -EBUSY;
3147
3148         rdev->recovery_offset = recovery_start;
3149         if (recovery_start == MaxSector)
3150                 set_bit(In_sync, &rdev->flags);
3151         else
3152                 clear_bit(In_sync, &rdev->flags);
3153         return len;
3154 }
3155
3156 static struct rdev_sysfs_entry rdev_recovery_start =
3157 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3158
3159 /* sysfs access to bad-blocks list.
3160  * We present two files.
3161  * 'bad_blocks' lists sector numbers and lengths of ranges that
3162  *    are recorded as bad.  The list is truncated to fit within
3163  *    the one-page limit of sysfs.
3164  *    Writing "sector length" to this file adds an acknowledged
3165  *    bad block to the list.
3166  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3167  *    been acknowledged.  Writing to this file adds bad blocks
3168  *    without acknowledging them.  This is largely for testing.
3169  */
3170 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3171 {
3172         return badblocks_show(&rdev->badblocks, page, 0);
3173 }
3174 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3175 {
3176         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3177         /* Maybe that ack was all we needed */
3178         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3179                 wake_up(&rdev->blocked_wait);
3180         return rv;
3181 }
3182 static struct rdev_sysfs_entry rdev_bad_blocks =
3183 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
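/*
 * Example (illustrative path): record an 8-sector bad range starting
 * at sector 12345 and acknowledge it in one step:
 *
 *   echo "12345 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 */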
3184
3185 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3186 {
3187         return badblocks_show(&rdev->badblocks, page, 1);
3188 }
3189 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3190 {
3191         return badblocks_store(&rdev->badblocks, page, len, 1);
3192 }
3193 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3194 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
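/*
 * The 'ppl_sector' and 'ppl_size' attributes below describe the
 * Partial Parity Log region that raid5 can use to close the write
 * hole; neither may change while the device is an active member of
 * a running array with PPL enabled.
 */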
3195
3196 static ssize_t
3197 ppl_sector_show(struct md_rdev *rdev, char *page)
3198 {
3199         return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3200 }
3201
3202 static ssize_t
3203 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3204 {
3205         unsigned long long sector;
3206
3207         if (kstrtoull(buf, 10, &sector) < 0)
3208                 return -EINVAL;
3209         if (sector != (sector_t)sector)
3210                 return -EINVAL;
3211
3212         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3213             rdev->raid_disk >= 0)
3214                 return -EBUSY;
3215
3216         if (rdev->mddev->persistent) {
3217                 if (rdev->mddev->major_version == 0)
3218                         return -EINVAL;
3219                 if ((sector > rdev->sb_start &&
3220                      sector - rdev->sb_start > S16_MAX) ||
3221                     (sector < rdev->sb_start &&
3222                      rdev->sb_start - sector > -S16_MIN))
3223                         return -EINVAL;
3224                 rdev->ppl.offset = sector - rdev->sb_start;
3225         } else if (!rdev->mddev->external) {
3226                 return -EBUSY;
3227         }
3228         rdev->ppl.sector = sector;
3229         return len;
3230 }
3231
3232 static struct rdev_sysfs_entry rdev_ppl_sector =
3233 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3234
3235 static ssize_t
3236 ppl_size_show(struct md_rdev *rdev, char *page)
3237 {
3238         return sprintf(page, "%u\n", rdev->ppl.size);
3239 }
3240
3241 static ssize_t
3242 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3243 {
3244         unsigned int size;
3245
3246         if (kstrtouint(buf, 10, &size) < 0)
3247                 return -EINVAL;
3248
3249         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3250             rdev->raid_disk >= 0)
3251                 return -EBUSY;
3252
3253         if (rdev->mddev->persistent) {
3254                 if (rdev->mddev->major_version == 0)
3255                         return -EINVAL;
3256                 if (size > U16_MAX)
3257                         return -EINVAL;
3258         } else if (!rdev->mddev->external) {
3259                 return -EBUSY;
3260         }
3261         rdev->ppl.size = size;
3262         return len;
3263 }
3264
3265 static struct rdev_sysfs_entry rdev_ppl_size =
3266 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3267
3268 static struct attribute *rdev_default_attrs[] = {
3269         &rdev_state.attr,
3270         &rdev_errors.attr,
3271         &rdev_slot.attr,
3272         &rdev_offset.attr,
3273         &rdev_new_offset.attr,
3274         &rdev_size.attr,
3275         &rdev_recovery_start.attr,
3276         &rdev_bad_blocks.attr,
3277         &rdev_unack_bad_blocks.attr,
3278         &rdev_ppl_sector.attr,
3279         &rdev_ppl_size.attr,
3280         NULL,
3281 };
3282 static ssize_t
3283 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3284 {
3285         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3286         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3287
3288         if (!entry->show)
3289                 return -EIO;
3290         if (!rdev->mddev)
3291                 return -EBUSY;
3292         return entry->show(rdev, page);
3293 }
3294
3295 static ssize_t
3296 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3297               const char *page, size_t length)
3298 {
3299         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3300         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3301         ssize_t rv;
3302         struct mddev *mddev = rdev->mddev;
3303
3304         if (!entry->store)
3305                 return -EIO;
3306         if (!capable(CAP_SYS_ADMIN))
3307                 return -EACCES;
3308         rv = mddev ? mddev_lock(mddev): -EBUSY;
3309         if (!rv) {
3310                 if (rdev->mddev == NULL)
3311                         rv = -EBUSY;
3312                 else
3313                         rv = entry->store(rdev, page, length);
3314                 mddev_unlock(mddev);
3315         }
3316         return rv;
3317 }
3318
3319 static void rdev_free(struct kobject *ko)
3320 {
3321         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3322         kfree(rdev);
3323 }
3324 static const struct sysfs_ops rdev_sysfs_ops = {
3325         .show           = rdev_attr_show,
3326         .store          = rdev_attr_store,
3327 };
3328 static struct kobj_type rdev_ktype = {
3329         .release        = rdev_free,
3330         .sysfs_ops      = &rdev_sysfs_ops,
3331         .default_attrs  = rdev_default_attrs,
3332 };
3333
3334 int md_rdev_init(struct md_rdev *rdev)
3335 {
3336         rdev->desc_nr = -1;
3337         rdev->saved_raid_disk = -1;
3338         rdev->raid_disk = -1;
3339         rdev->flags = 0;
3340         rdev->data_offset = 0;
3341         rdev->new_data_offset = 0;
3342         rdev->sb_events = 0;
3343         rdev->last_read_error = 0;
3344         rdev->sb_loaded = 0;
3345         rdev->bb_page = NULL;
3346         atomic_set(&rdev->nr_pending, 0);
3347         atomic_set(&rdev->read_errors, 0);
3348         atomic_set(&rdev->corrected_errors, 0);
3349
3350         INIT_LIST_HEAD(&rdev->same_set);
3351         init_waitqueue_head(&rdev->blocked_wait);
3352
3353         /* Add space to store bad block list.
3354          * This reserves the space even on arrays where it cannot
3355          * be used - I wonder if that matters
3356          */
3357         return badblocks_init(&rdev->badblocks, 0);
3358 }
3359 EXPORT_SYMBOL_GPL(md_rdev_init);
3360 /*
3361  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3362  *
3363  * mark the device faulty if:
3364  *
3365  *   - the device is nonexistent (zero size)
3366  *   - the device has no valid superblock
3367  *
3368  * a faulty rdev _never_ has rdev->sb set.
3369  */
3370 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3371 {
3372         char b[BDEVNAME_SIZE];
3373         int err;
3374         struct md_rdev *rdev;
3375         sector_t size;
3376
3377         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3378         if (!rdev)
3379                 return ERR_PTR(-ENOMEM);
3380
3381         err = md_rdev_init(rdev);
3382         if (err)
3383                 goto abort_free;
3384         err = alloc_disk_sb(rdev);
3385         if (err)
3386                 goto abort_free;
3387
3388         err = lock_rdev(rdev, newdev, super_format == -2);
3389         if (err)
3390                 goto abort_free;
3391
3392         kobject_init(&rdev->kobj, &rdev_ktype);
3393
3394         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3395         if (!size) {
3396                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3397                         bdevname(rdev->bdev,b));
3398                 err = -EINVAL;
3399                 goto abort_free;
3400         }
3401
3402         if (super_format >= 0) {
3403                 err = super_types[super_format].
3404                         load_super(rdev, NULL, super_minor);
3405                 if (err == -EINVAL) {
3406                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3407                                 bdevname(rdev->bdev,b),
3408                                 super_format, super_minor);
3409                         goto abort_free;
3410                 }
3411                 if (err < 0) {
3412                         pr_warn("md: could not read %s's sb, not importing!\n",
3413                                 bdevname(rdev->bdev,b));
3414                         goto abort_free;
3415                 }
3416         }
3417
3418         return rdev;
3419
3420 abort_free:
3421         if (rdev->bdev)
3422                 unlock_rdev(rdev);
3423         md_rdev_clear(rdev);
3424         kfree(rdev);
3425         return ERR_PTR(err);
3426 }
3427
3428 /*
3429  * Check a full RAID array for plausibility
3430  */
3431
3432 static void analyze_sbs(struct mddev *mddev)
3433 {
3434         int i;
3435         struct md_rdev *rdev, *freshest, *tmp;
3436         char b[BDEVNAME_SIZE];
3437
3438         freshest = NULL;
3439         rdev_for_each_safe(rdev, tmp, mddev)
3440                 switch (super_types[mddev->major_version].
3441                         load_super(rdev, freshest, mddev->minor_version)) {
3442                 case 1:
3443                         freshest = rdev;
3444                         break;
3445                 case 0:
3446                         break;
3447                 default:
3448                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3449                                 bdevname(rdev->bdev,b));
3450                         md_kick_rdev_from_array(rdev);
3451                 }
3452
3453         super_types[mddev->major_version].
3454                 validate_super(mddev, freshest);
3455
3456         i = 0;
3457         rdev_for_each_safe(rdev, tmp, mddev) {
3458                 if (mddev->max_disks &&
3459                     (rdev->desc_nr >= mddev->max_disks ||
3460                      i > mddev->max_disks)) {
3461                         pr_warn("md: %s: %s: only %d devices permitted\n",
3462                                 mdname(mddev), bdevname(rdev->bdev, b),
3463                                 mddev->max_disks);
3464                         md_kick_rdev_from_array(rdev);
3465                         continue;
3466                 }
3467                 if (rdev != freshest) {
3468                         if (super_types[mddev->major_version].
3469                             validate_super(mddev, rdev)) {
3470                                 pr_warn("md: kicking non-fresh %s from array!\n",
3471                                         bdevname(rdev->bdev,b));
3472                                 md_kick_rdev_from_array(rdev);
3473                                 continue;
3474                         }
3475                 }
3476                 if (mddev->level == LEVEL_MULTIPATH) {
3477                         rdev->desc_nr = i++;
3478                         rdev->raid_disk = rdev->desc_nr;
3479                         set_bit(In_sync, &rdev->flags);
3480                 } else if (rdev->raid_disk >=
3481                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3482                            !test_bit(Journal, &rdev->flags)) {
3483                         rdev->raid_disk = -1;
3484                         clear_bit(In_sync, &rdev->flags);
3485                 }
3486         }
3487 }
3488
3489 /* Read a fixed-point number.
3490  * Numbers in sysfs attributes should be in "standard" units where
3491  * possible, so time should be in seconds.
3492  * However we internally use a much smaller unit such as
3493  * milliseconds or jiffies.
3494  * This function takes a decimal number with a possible fractional
3495  * component, and produces an integer which is the result of
3496  * multiplying that number by 10^'scale',
3497  * all without any floating-point arithmetic.
3498  */
3499 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3500 {
3501         unsigned long result = 0;
3502         long decimals = -1;
3503         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3504                 if (*cp == '.')
3505                         decimals = 0;
3506                 else if (decimals < scale) {
3507                         unsigned int value;
3508                         value = *cp - '0';
3509                         result = result * 10 + value;
3510                         if (decimals >= 0)
3511                                 decimals++;
3512                 }
3513                 cp++;
3514         }
3515         if (*cp == '\n')
3516                 cp++;
3517         if (*cp)
3518                 return -EINVAL;
3519         if (decimals < 0)
3520                 decimals = 0;
3521         while (decimals < scale) {
3522                 result *= 10;
3523                 decimals++;
3524         }
3525         *res = result;
3526         return 0;
3527 }
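/*
 * Example: strict_strtoul_scaled("0.2", &res, 3) stores 200 in res,
 * and "1.372" with scale 3 yields 1372; fractional digits beyond
 * 'scale' are silently ignored.
 */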
3528
3529 static ssize_t
3530 safe_delay_show(struct mddev *mddev, char *page)
3531 {
3532         int msec = (mddev->safemode_delay*1000)/HZ;
3533         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3534 }
3535 static ssize_t
3536 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3537 {
3538         unsigned long msec;
3539
3540         if (mddev_is_clustered(mddev)) {
3541                 pr_warn("md: Safemode is disabled for clustered mode\n");
3542                 return -EINVAL;
3543         }
3544
3545         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3546                 return -EINVAL;
3547         if (msec == 0)
3548                 mddev->safemode_delay = 0;
3549         else {
3550                 unsigned long old_delay = mddev->safemode_delay;
3551                 unsigned long new_delay = (msec*HZ)/1000;
3552
3553                 if (new_delay == 0)
3554                         new_delay = 1;
3555                 mddev->safemode_delay = new_delay;
3556                 if (new_delay < old_delay || old_delay == 0)
3557                         mod_timer(&mddev->safemode_timer, jiffies+1);
3558         }
3559         return len;
3560 }
3561 static struct md_sysfs_entry md_safe_delay =
3562 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
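/*
 * Example (path is illustrative):
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * requests a 200msec safemode delay, while writing "0" disables
 * safemode entirely.
 */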
3563
3564 static ssize_t
3565 level_show(struct mddev *mddev, char *page)
3566 {
3567         struct md_personality *p;
3568         int ret;
3569         spin_lock(&mddev->lock);
3570         p = mddev->pers;
3571         if (p)
3572                 ret = sprintf(page, "%s\n", p->name);
3573         else if (mddev->clevel[0])
3574                 ret = sprintf(page, "%s\n", mddev->clevel);
3575         else if (mddev->level != LEVEL_NONE)
3576                 ret = sprintf(page, "%d\n", mddev->level);
3577         else
3578                 ret = 0;
3579         spin_unlock(&mddev->lock);
3580         return ret;
3581 }
3582
3583 static ssize_t
3584 level_store(struct mddev *mddev, const char *buf, size_t len)
3585 {
3586         char clevel[16];
3587         ssize_t rv;
3588         size_t slen = len;
3589         struct md_personality *pers, *oldpers;
3590         long level;
3591         void *priv, *oldpriv;
3592         struct md_rdev *rdev;
3593
3594         if (slen == 0 || slen >= sizeof(clevel))
3595                 return -EINVAL;
3596
3597         rv = mddev_lock(mddev);
3598         if (rv)
3599                 return rv;
3600
3601         if (mddev->pers == NULL) {
3602                 strncpy(mddev->clevel, buf, slen);
3603                 if (mddev->clevel[slen-1] == '\n')
3604                         slen--;
3605                 mddev->clevel[slen] = 0;
3606                 mddev->level = LEVEL_NONE;
3607                 rv = len;
3608                 goto out_unlock;
3609         }
3610         rv = -EROFS;
3611         if (mddev->ro)
3612                 goto out_unlock;
3613
3614         /* request to change the personality.  Need to ensure:
3615          *  - array is not engaged in resync/recovery/reshape
3616          *  - old personality can be suspended
3617          *  - new personality can take over the array.
3618          */
3619
3620         rv = -EBUSY;
3621         if (mddev->sync_thread ||
3622             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3623             mddev->reshape_position != MaxSector ||
3624             mddev->sysfs_active)
3625                 goto out_unlock;
3626
3627         rv = -EINVAL;
3628         if (!mddev->pers->quiesce) {
3629                 pr_warn("md: %s: %s does not support online personality change\n",
3630                         mdname(mddev), mddev->pers->name);
3631                 goto out_unlock;
3632         }
3633
3634         /* Now find the new personality */
3635         strncpy(clevel, buf, slen);
3636         if (clevel[slen-1] == '\n')
3637                 slen--;
3638         clevel[slen] = 0;
3639         if (kstrtol(clevel, 10, &level))
3640                 level = LEVEL_NONE;
3641
3642         if (request_module("md-%s", clevel) != 0)
3643                 request_module("md-level-%s", clevel);
3644         spin_lock(&pers_lock);
3645         pers = find_pers(level, clevel);
3646         if (!pers || !try_module_get(pers->owner)) {
3647                 spin_unlock(&pers_lock);
3648                 pr_warn("md: personality %s not loaded\n", clevel);
3649                 rv = -EINVAL;
3650                 goto out_unlock;
3651         }
3652         spin_unlock(&pers_lock);
3653
3654         if (pers == mddev->pers) {
3655                 /* Nothing to do! */
3656                 module_put(pers->owner);
3657                 rv = len;
3658                 goto out_unlock;
3659         }
3660         if (!pers->takeover) {
3661                 module_put(pers->owner);
3662                 pr_warn("md: %s: %s does not support personality takeover\n",
3663                         mdname(mddev), clevel);
3664                 rv = -EINVAL;
3665                 goto out_unlock;
3666         }
3667
3668         rdev_for_each(rdev, mddev)
3669                 rdev->new_raid_disk = rdev->raid_disk;
3670
3671         /* ->takeover must set new_* and/or delta_disks
3672          * if it succeeds, and may set them when it fails.
3673          */
3674         priv = pers->takeover(mddev);
3675         if (IS_ERR(priv)) {
3676                 mddev->new_level = mddev->level;
3677                 mddev->new_layout = mddev->layout;
3678                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3679                 mddev->raid_disks -= mddev->delta_disks;
3680                 mddev->delta_disks = 0;
3681                 mddev->reshape_backwards = 0;
3682                 module_put(pers->owner);
3683                 pr_warn("md: %s: %s would not accept array\n",
3684                         mdname(mddev), clevel);
3685                 rv = PTR_ERR(priv);
3686                 goto out_unlock;
3687         }
3688
3689         /* Looks like we have a winner */
3690         mddev_suspend(mddev);
3691         mddev_detach(mddev);
3692
3693         spin_lock(&mddev->lock);
3694         oldpers = mddev->pers;
3695         oldpriv = mddev->private;
3696         mddev->pers = pers;
3697         mddev->private = priv;
3698         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3699         mddev->level = mddev->new_level;
3700         mddev->layout = mddev->new_layout;
3701         mddev->chunk_sectors = mddev->new_chunk_sectors;
3702         mddev->delta_disks = 0;
3703         mddev->reshape_backwards = 0;
3704         mddev->degraded = 0;
3705         spin_unlock(&mddev->lock);
3706
3707         if (oldpers->sync_request == NULL &&
3708             mddev->external) {
3709                 /* We are converting from a no-redundancy array
3710                  * to a redundancy array and metadata is managed
3711                  * externally so we need to be sure that writes
3712                  * won't block due to a need to transition
3713                  *      clean->dirty
3714                  * until external management is started.
3715                  */
3716                 mddev->in_sync = 0;
3717                 mddev->safemode_delay = 0;
3718                 mddev->safemode = 0;
3719         }
3720
3721         oldpers->free(mddev, oldpriv);
3722
3723         if (oldpers->sync_request == NULL &&
3724             pers->sync_request != NULL) {
3725                 /* need to add the md_redundancy_group */
3726                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3727                         pr_warn("md: cannot register extra attributes for %s\n",
3728                                 mdname(mddev));
3729                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3730         }
3731         if (oldpers->sync_request != NULL &&
3732             pers->sync_request == NULL) {
3733                 /* need to remove the md_redundancy_group */
3734                 if (mddev->to_remove == NULL)
3735                         mddev->to_remove = &md_redundancy_group;
3736         }
3737
3738         module_put(oldpers->owner);
3739
3740         rdev_for_each(rdev, mddev) {
3741                 if (rdev->raid_disk < 0)
3742                         continue;
3743                 if (rdev->new_raid_disk >= mddev->raid_disks)
3744                         rdev->new_raid_disk = -1;
3745                 if (rdev->new_raid_disk == rdev->raid_disk)
3746                         continue;
3747                 sysfs_unlink_rdev(mddev, rdev);
3748         }
3749         rdev_for_each(rdev, mddev) {
3750                 if (rdev->raid_disk < 0)
3751                         continue;
3752                 if (rdev->new_raid_disk == rdev->raid_disk)
3753                         continue;
3754                 rdev->raid_disk = rdev->new_raid_disk;
3755                 if (rdev->raid_disk < 0)
3756                         clear_bit(In_sync, &rdev->flags);
3757                 else {
3758                         if (sysfs_link_rdev(mddev, rdev))
3759                                 pr_warn("md: cannot register rd%d for %s after level change\n",
3760                                         rdev->raid_disk, mdname(mddev));
3761                 }
3762         }
3763
3764         if (pers->sync_request == NULL) {
3765                 /* this is now an array without redundancy, so
3766                  * it must always be in_sync
3767                  */
3768                 mddev->in_sync = 1;
3769                 del_timer_sync(&mddev->safemode_timer);
3770         }
3771         blk_set_stacking_limits(&mddev->queue->limits);
3772         pers->run(mddev);
3773         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3774         mddev_resume(mddev);
3775         if (!mddev->thread)
3776                 md_update_sb(mddev, 1);
3777         sysfs_notify(&mddev->kobj, NULL, "level");
3778         md_new_event(mddev);
3779         rv = len;
3780 out_unlock:
3781         mddev_unlock(mddev);
3782         return rv;
3783 }
3784
3785 static struct md_sysfs_entry md_level =
3786 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
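/*
 * Example of an online level change (illustrative; only conversions
 * implemented by the target personality's ->takeover can succeed,
 * e.g. raid5 to raid6):
 *
 *   echo raid6 > /sys/block/md0/md/level
 */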
3787
3788 static ssize_t
3789 layout_show(struct mddev *mddev, char *page)
3790 {
3791         /* just a number, not meaningful for all levels */
3792         if (mddev->reshape_position != MaxSector &&
3793             mddev->layout != mddev->new_layout)
3794                 return sprintf(page, "%d (%d)\n",
3795                                mddev->new_layout, mddev->layout);
3796         return sprintf(page, "%d\n", mddev->layout);
3797 }
3798
3799 static ssize_t
3800 layout_store(struct mddev *mddev, const char *buf, size_t len)
3801 {
3802         unsigned int n;
3803         int err;
3804
3805         err = kstrtouint(buf, 10, &n);
3806         if (err < 0)
3807                 return err;
3808         err = mddev_lock(mddev);
3809         if (err)
3810                 return err;
3811
3812         if (mddev->pers) {
3813                 if (mddev->pers->check_reshape == NULL)
3814                         err = -EBUSY;
3815                 else if (mddev->ro)
3816                         err = -EROFS;
3817                 else {
3818                         mddev->new_layout = n;
3819                         err = mddev->pers->check_reshape(mddev);
3820                         if (err)
3821                                 mddev->new_layout = mddev->layout;
3822                 }
3823         } else {
3824                 mddev->new_layout = n;
3825                 if (mddev->reshape_position == MaxSector)
3826                         mddev->layout = n;
3827         }
3828         mddev_unlock(mddev);
3829         return err ?: len;
3830 }
3831 static struct md_sysfs_entry md_layout =
3832 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3833
3834 static ssize_t
3835 raid_disks_show(struct mddev *mddev, char *page)
3836 {
3837         if (mddev->raid_disks == 0)
3838                 return 0;
3839         if (mddev->reshape_position != MaxSector &&
3840             mddev->delta_disks != 0)
3841                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3842                                mddev->raid_disks - mddev->delta_disks);
3843         return sprintf(page, "%d\n", mddev->raid_disks);
3844 }
3845
3846 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3847
3848 static ssize_t
3849 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3850 {
3851         unsigned int n;
3852         int err;
3853
3854         err = kstrtouint(buf, 10, &n);
3855         if (err < 0)
3856                 return err;
3857
3858         err = mddev_lock(mddev);
3859         if (err)
3860                 return err;
3861         if (mddev->pers)
3862                 err = update_raid_disks(mddev, n);
3863         else if (mddev->reshape_position != MaxSector) {
3864                 struct md_rdev *rdev;
3865                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3866
3867                 err = -EINVAL;
3868                 rdev_for_each(rdev, mddev) {
3869                         if (olddisks < n &&
3870                             rdev->data_offset < rdev->new_data_offset)
3871                                 goto out_unlock;
3872                         if (olddisks > n &&
3873                             rdev->data_offset > rdev->new_data_offset)
3874                                 goto out_unlock;
3875                 }
3876                 err = 0;
3877                 mddev->delta_disks = n - olddisks;
3878                 mddev->raid_disks = n;
3879                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3880         } else
3881                 mddev->raid_disks = n;
3882 out_unlock:
3883         mddev_unlock(mddev);
3884         return err ? err : len;
3885 }
3886 static struct md_sysfs_entry md_raid_disks =
3887 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3888
3889 static ssize_t
3890 chunk_size_show(struct mddev *mddev, char *page)
3891 {
3892         if (mddev->reshape_position != MaxSector &&
3893             mddev->chunk_sectors != mddev->new_chunk_sectors)
3894                 return sprintf(page, "%d (%d)\n",
3895                                mddev->new_chunk_sectors << 9,
3896                                mddev->chunk_sectors << 9);
3897         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3898 }
3899
3900 static ssize_t
3901 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3902 {
3903         unsigned long n;
3904         int err;
3905
3906         err = kstrtoul(buf, 10, &n);
3907         if (err < 0)
3908                 return err;
3909
3910         err = mddev_lock(mddev);
3911         if (err)
3912                 return err;
3913         if (mddev->pers) {
3914                 if (mddev->pers->check_reshape == NULL)
3915                         err = -EBUSY;
3916                 else if (mddev->ro)
3917                         err = -EROFS;
3918                 else {
3919                         mddev->new_chunk_sectors = n >> 9;
3920                         err = mddev->pers->check_reshape(mddev);
3921                         if (err)
3922                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3923                 }
3924         } else {
3925                 mddev->new_chunk_sectors = n >> 9;
3926                 if (mddev->reshape_position == MaxSector)
3927                         mddev->chunk_sectors = n >> 9;
3928         }
3929         mddev_unlock(mddev);
3930         return err ?: len;
3931 }
3932 static struct md_sysfs_entry md_chunk_size =
3933 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
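/*
 * Note: 'chunk_size' is presented and parsed in bytes although it is
 * stored in 512-byte sectors, so e.g. "echo 524288 > chunk_size"
 * requests 512KiB chunks; on a running array the change is vetted by
 * ->check_reshape().
 */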
3934
3935 static ssize_t
3936 resync_start_show(struct mddev *mddev, char *page)
3937 {
3938         if (mddev->recovery_cp == MaxSector)
3939                 return sprintf(page, "none\n");
3940         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3941 }
3942
3943 static ssize_t
3944 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3945 {
3946         unsigned long long n;
3947         int err;
3948
3949         if (cmd_match(buf, "none"))
3950                 n = MaxSector;
3951         else {
3952                 err = kstrtoull(buf, 10, &n);
3953                 if (err < 0)
3954                         return err;
3955                 if (n != (sector_t)n)
3956                         return -EINVAL;
3957         }
3958
3959         err = mddev_lock(mddev);
3960         if (err)
3961                 return err;
3962         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3963                 err = -EBUSY;
3964
3965         if (!err) {
3966                 mddev->recovery_cp = n;
3967                 if (mddev->pers)
3968                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
3969         }
3970         mddev_unlock(mddev);
3971         return err ?: len;
3972 }
3973 static struct md_sysfs_entry md_resync_start =
3974 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3975                 resync_start_show, resync_start_store);
3976
3977 /*
3978  * The array state can be:
3979  *
3980  * clear
3981  *     No devices, no size, no level
3982  *     Equivalent to STOP_ARRAY ioctl
3983  * inactive
3984  *     May have some settings, but array is not active
3985  *        all IO results in error
3986  *     When written, doesn't tear down array, but just stops it
3987  * suspended (not supported yet)
3988  *     All IO requests will block. The array can be reconfigured.
3989  *     Writing this, if accepted, will block until array is quiescent
3990  * readonly
3991  *     no resync can happen.  no superblocks get written.
3992  *     write requests fail
3993  * read-auto
3994  *     like readonly, but behaves like 'clean' on a write request.
3995  *
3996  * clean - no pending writes, but otherwise active.
3997  *     When written to inactive array, starts without resync
3998  *     If a write request arrives then
3999  *       if metadata is known, mark 'dirty' and switch to 'active'.
4000  *       if not known, block and switch to write-pending
4001  *     If written to an active array that has pending writes, then fails.
4002  * active
4003  *     fully active: IO and resync can be happening.
4004  *     When written to inactive array, starts with resync
4005  *
4006  * write-pending
4007  *     clean, but writes are blocked waiting for 'active' to be written.
4008  *
4009  * active-idle
4010  *     like active, but no writes have been seen for safe_mode_delay (200msec by default).
4011  *
4012  */
4013 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4014                    write_pending, active_idle, bad_word};
4015 static char *array_states[] = {
4016         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4017         "write-pending", "active-idle", NULL };
4018
4019 static int match_word(const char *word, char **list)
4020 {
4021         int n;
4022         for (n=0; list[n]; n++)
4023                 if (cmd_match(word, list[n]))
4024                         break;
4025         return n;
4026 }
4027
4028 static ssize_t
4029 array_state_show(struct mddev *mddev, char *page)
4030 {
4031         enum array_state st = inactive;
4032
4033         if (mddev->pers)
4034                 switch(mddev->ro) {
4035                 case 1:
4036                         st = readonly;
4037                         break;
4038                 case 2:
4039                         st = read_auto;
4040                         break;
4041                 case 0:
4042                         spin_lock(&mddev->lock);
4043                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4044                                 st = write_pending;
4045                         else if (mddev->in_sync)
4046                                 st = clean;
4047                         else if (mddev->safemode)
4048                                 st = active_idle;
4049                         else
4050                                 st = active;
4051                         spin_unlock(&mddev->lock);
4052                 }
4053         else {
4054                 if (list_empty(&mddev->disks) &&
4055                     mddev->raid_disks == 0 &&
4056                     mddev->dev_sectors == 0)
4057                         st = clear;
4058                 else
4059                         st = inactive;
4060         }
4061         return sprintf(page, "%s\n", array_states[st]);
4062 }
4063
4064 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4065 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4066 static int do_md_run(struct mddev *mddev);
4067 static int restart_array(struct mddev *mddev);
4068
4069 static ssize_t
4070 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4071 {
4072         int err = 0;
4073         enum array_state st = match_word(buf, array_states);
4074
4075         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4076                 /* don't take reconfig_mutex when toggling between
4077                  * clean and active
4078                  */
4079                 spin_lock(&mddev->lock);
4080                 if (st == active) {
4081                         restart_array(mddev);
4082                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4083                         md_wakeup_thread(mddev->thread);
4084                         wake_up(&mddev->sb_wait);
4085                 } else /* st == clean */ {
4086                         restart_array(mddev);
4087                         if (!set_in_sync(mddev))
4088                                 err = -EBUSY;
4089                 }
4090                 if (!err)
4091                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4092                 spin_unlock(&mddev->lock);
4093                 return err ?: len;
4094         }
4095         err = mddev_lock(mddev);
4096         if (err)
4097                 return err;
4098         err = -EINVAL;
4099         switch(st) {
4100         case bad_word:
4101                 break;
4102         case clear:
4103                 /* stopping an active array */
4104                 err = do_md_stop(mddev, 0, NULL);
4105                 break;
4106         case inactive:
4107                 /* stopping an active array */
4108                 if (mddev->pers)
4109                         err = do_md_stop(mddev, 2, NULL);
4110                 else
4111                         err = 0; /* already inactive */
4112                 break;
4113         case suspended:
4114                 break; /* not supported yet */
4115         case readonly:
4116                 if (mddev->pers)
4117                         err = md_set_readonly(mddev, NULL);
4118                 else {
4119                         mddev->ro = 1;
4120                         set_disk_ro(mddev->gendisk, 1);
4121                         err = do_md_run(mddev);
4122                 }
4123                 break;
4124         case read_auto:
4125                 if (mddev->pers) {
4126                         if (mddev->ro == 0)
4127                                 err = md_set_readonly(mddev, NULL);
4128                         else if (mddev->ro == 1)
4129                                 err = restart_array(mddev);
4130                         if (err == 0) {
4131                                 mddev->ro = 2;
4132                                 set_disk_ro(mddev->gendisk, 0);
4133                         }
4134                 } else {
4135                         mddev->ro = 2;
4136                         err = do_md_run(mddev);
4137                 }
4138                 break;
4139         case clean:
4140                 if (mddev->pers) {
4141                         err = restart_array(mddev);
4142                         if (err)
4143                                 break;
4144                         spin_lock(&mddev->lock);
4145                         if (!set_in_sync(mddev))
4146                                 err = -EBUSY;
4147                         spin_unlock(&mddev->lock);
4148                 } else
4149                         err = -EINVAL;
4150                 break;
4151         case active:
4152                 if (mddev->pers) {
4153                         err = restart_array(mddev);
4154                         if (err)
4155                                 break;
4156                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4157                         wake_up(&mddev->sb_wait);
4158                         err = 0;
4159                 } else {
4160                         mddev->ro = 0;
4161                         set_disk_ro(mddev->gendisk, 0);
4162                         err = do_md_run(mddev);
4163                 }
4164                 break;
4165         case write_pending:
4166         case active_idle:
4167                 /* these cannot be set */
4168                 break;
4169         }
4170
4171         if (!err) {
4172                 if (mddev->hold_active == UNTIL_IOCTL)
4173                         mddev->hold_active = 0;
4174                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4175         }
4176         mddev_unlock(mddev);
4177         return err ?: len;
4178 }
4179 static struct md_sysfs_entry md_array_state =
4180 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
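
/*
 * Illustrative use from userspace (array name md0 assumed): array_state
 * is a sysfs file, so something like
 *
 *   cat /sys/block/md0/md/array_state
 *   echo clean > /sys/block/md0/md/array_state
 *
 * exercises array_state_show() and array_state_store() above.
 */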
4181
4182 static ssize_t
4183 max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
4184         return sprintf(page, "%d\n",
4185                        atomic_read(&mddev->max_corr_read_errors));
4186 }
4187
4188 static ssize_t
4189 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4190 {
4191         unsigned int n;
4192         int rv;
4193
4194         rv = kstrtouint(buf, 10, &n);
4195         if (rv < 0)
4196                 return rv;
4197         atomic_set(&mddev->max_corr_read_errors, n);
4198         return len;
4199 }
4200
4201 static struct md_sysfs_entry max_corr_read_errors =
4202 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4203         max_corrected_read_errors_store);
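
/*
 * Illustrative use (hypothetical array name): the value is a plain decimal
 * count of correctable read errors to tolerate, e.g.
 *
 *   echo 20 > /sys/block/md0/md/max_read_errors
 */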
4204
4205 static ssize_t
4206 null_show(struct mddev *mddev, char *page)
4207 {
4208         return -EINVAL;
4209 }
4210
4211 static ssize_t
4212 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4213 {
4214         /* buf must be %d:%d (major:minor), optionally followed by '\n' */
4215         /* The new device is added to the array.
4216          * If the array has a persistent superblock, we read the
4217          * superblock to initialise info and check validity.
4218          * Otherwise, the only checking done is that in bind_rdev_to_array,
4219          * which mainly checks size.
4220          */
4221         char *e;
4222         int major = simple_strtoul(buf, &e, 10);
4223         int minor;
4224         dev_t dev;
4225         struct md_rdev *rdev;
4226         int err;
4227
4228         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4229                 return -EINVAL;
4230         minor = simple_strtoul(e+1, &e, 10);
4231         if (*e && *e != '\n')
4232                 return -EINVAL;
4233         dev = MKDEV(major, minor);
4234         if (major != MAJOR(dev) ||
4235             minor != MINOR(dev))
4236                 return -EOVERFLOW;
4237
4238         flush_workqueue(md_misc_wq);
4239
4240         err = mddev_lock(mddev);
4241         if (err)
4242                 return err;
4243         if (mddev->persistent) {
4244                 rdev = md_import_device(dev, mddev->major_version,
4245                                         mddev->minor_version);
4246                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4247                         struct md_rdev *rdev0
4248                                 = list_entry(mddev->disks.next,
4249                                              struct md_rdev, same_set);
4250                         err = super_types[mddev->major_version]
4251                                 .load_super(rdev, rdev0, mddev->minor_version);
4252                         if (err < 0)
4253                                 goto out;
4254                 }
4255         } else if (mddev->external)
4256                 rdev = md_import_device(dev, -2, -1);
4257         else
4258                 rdev = md_import_device(dev, -1, -1);
4259
4260         if (IS_ERR(rdev)) {
4261                 mddev_unlock(mddev);
4262                 return PTR_ERR(rdev);
4263         }
4264         err = bind_rdev_to_array(rdev, mddev);
4265  out:
4266         if (err)
4267                 export_rdev(rdev);
4268         mddev_unlock(mddev);
4269         return err ? err : len;
4270 }
4271
4272 static struct md_sysfs_entry md_new_device =
4273 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
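
/*
 * Illustrative use: new_dev takes the "major:minor" numbers of an existing
 * block device, e.g. adding the device numbered 8:16 (assumed to exist):
 *
 *   echo 8:16 > /sys/block/md0/md/new_dev
 */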
4274
4275 static ssize_t
4276 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4277 {
4278         char *end;
4279         unsigned long chunk, end_chunk;
4280         int err;
4281
4282         err = mddev_lock(mddev);
4283         if (err)
4284                 return err;
4285         if (!mddev->bitmap)
4286                 goto out;
4287         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4288         while (*buf) {
4289                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4290                 if (buf == end) break;
4291                 if (*end == '-') { /* range */
4292                         buf = end + 1;
4293                         end_chunk = simple_strtoul(buf, &end, 0);
4294                         if (buf == end) break;
4295                 }
4296                 if (*end && !isspace(*end)) break;
4297                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4298                 buf = skip_spaces(end);
4299         }
4300         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4301 out:
4302         mddev_unlock(mddev);
4303         return len;
4304 }
4305
4306 static struct md_sysfs_entry md_bitmap =
4307 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
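
/*
 * Illustrative use: bitmap_set_bits takes chunk numbers and ranges, e.g.
 * marking chunks 0-100 and chunk 200 dirty on an array with an active
 * bitmap (array name assumed):
 *
 *   echo "0-100 200" > /sys/block/md0/md/bitmap_set_bits
 */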
4308
4309 static ssize_t
4310 size_show(struct mddev *mddev, char *page)
4311 {
4312         return sprintf(page, "%llu\n",
4313                 (unsigned long long)mddev->dev_sectors / 2);
4314 }
4315
4316 static int update_size(struct mddev *mddev, sector_t num_sectors);
4317
4318 static ssize_t
4319 size_store(struct mddev *mddev, const char *buf, size_t len)
4320 {
4321         /* If array is inactive, we can reduce the component size, but
4322          * not increase it (except from 0).
4323          * If array is active, we can try an on-line resize.
4324          */
4325         sector_t sectors;
4326         int err = strict_blocks_to_sectors(buf, &sectors);
4327
4328         if (err < 0)
4329                 return err;
4330         err = mddev_lock(mddev);
4331         if (err)
4332                 return err;
4333         if (mddev->pers) {
4334                 err = update_size(mddev, sectors);
4335                 if (err == 0)
4336                         md_update_sb(mddev, 1);
4337         } else {
4338                 if (mddev->dev_sectors == 0 ||
4339                     mddev->dev_sectors > sectors)
4340                         mddev->dev_sectors = sectors;
4341                 else
4342                         err = -ENOSPC;
4343         }
4344         mddev_unlock(mddev);
4345         return err ? err : len;
4346 }
4347
4348 static struct md_sysfs_entry md_size =
4349 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
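
/*
 * Note the "/ 2" in size_show(): dev_sectors counts 512-byte sectors, so
 * component_size is presented and parsed in KiB.  Illustratively,
 *
 *   echo 1048576 > /sys/block/md0/md/component_size
 *
 * requests one GiB per component device.
 */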
4350
4351 /* Metadata version.
4352  * This is one of
4353  *   'none' for arrays with no metadata (good luck...),
4354  *   'external' for arrays with externally managed metadata,
4355  * or N.M for internally known formats
4356  */
4357 static ssize_t
4358 metadata_show(struct mddev *mddev, char *page)
4359 {
4360         if (mddev->persistent)
4361                 return sprintf(page, "%d.%d\n",
4362                                mddev->major_version, mddev->minor_version);
4363         else if (mddev->external)
4364                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4365         else
4366                 return sprintf(page, "none\n");
4367 }
4368
4369 static ssize_t
4370 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4371 {
4372         int major, minor;
4373         char *e;
4374         int err;
4375         /* Changing the details of 'external' metadata is
4376          * always permitted.  Otherwise there must be
4377          * no devices attached to the array.
4378          */
4379
4380         err = mddev_lock(mddev);
4381         if (err)
4382                 return err;
4383         err = -EBUSY;
4384         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4385                 ;
4386         else if (!list_empty(&mddev->disks))
4387                 goto out_unlock;
4388
4389         err = 0;
4390         if (cmd_match(buf, "none")) {
4391                 mddev->persistent = 0;
4392                 mddev->external = 0;
4393                 mddev->major_version = 0;
4394                 mddev->minor_version = 90;
4395                 goto out_unlock;
4396         }
4397         if (strncmp(buf, "external:", 9) == 0) {
4398                 size_t namelen = len-9;
4399                 if (namelen >= sizeof(mddev->metadata_type))
4400                         namelen = sizeof(mddev->metadata_type)-1;
4401                 strncpy(mddev->metadata_type, buf+9, namelen);
4402                 mddev->metadata_type[namelen] = 0;
4403                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4404                         mddev->metadata_type[--namelen] = 0;
4405                 mddev->persistent = 0;
4406                 mddev->external = 1;
4407                 mddev->major_version = 0;
4408                 mddev->minor_version = 90;
4409                 goto out_unlock;
4410         }
4411         major = simple_strtoul(buf, &e, 10);
4412         err = -EINVAL;
4413         if (e == buf || *e != '.')
4414                 goto out_unlock;
4415         buf = e+1;
4416         minor = simple_strtoul(buf, &e, 10);
4417         if (e == buf || (*e && *e != '\n'))
4418                 goto out_unlock;
4419         err = -ENOENT;
4420         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4421                 goto out_unlock;
4422         mddev->major_version = major;
4423         mddev->minor_version = minor;
4424         mddev->persistent = 1;
4425         mddev->external = 0;
4426         err = 0;
4427 out_unlock:
4428         mddev_unlock(mddev);
4429         return err ?: len;
4430 }
4431
4432 static struct md_sysfs_entry md_metadata =
4433 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
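
/*
 * Illustrative values accepted by metadata_version (per the checks above,
 * the array must hold no devices unless it is already external); the
 * external name shown is just an example:
 *
 *   echo 1.2 > /sys/block/md0/md/metadata_version
 *   echo external:imsm > /sys/block/md0/md/metadata_version
 *   echo none > /sys/block/md0/md/metadata_version
 */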
4434
4435 static ssize_t
4436 action_show(struct mddev *mddev, char *page)
4437 {
4438         char *type = "idle";
4439         unsigned long recovery = mddev->recovery;
4440         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4441                 type = "frozen";
4442         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4443             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4444                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4445                         type = "reshape";
4446                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4447                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4448                                 type = "resync";
4449                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4450                                 type = "check";
4451                         else
4452                                 type = "repair";
4453                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4454                         type = "recover";
4455                 else if (mddev->reshape_position != MaxSector)
4456                         type = "reshape";
4457         }
4458         return sprintf(page, "%s\n", type);
4459 }
4460
4461 static ssize_t
4462 action_store(struct mddev *mddev, const char *page, size_t len)
4463 {
4464         if (!mddev->pers || !mddev->pers->sync_request)
4465                 return -EINVAL;
4466
4468         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4469                 if (cmd_match(page, "frozen"))
4470                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4471                 else
4472                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4473                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4474                     mddev_lock(mddev) == 0) {
4475                         flush_workqueue(md_misc_wq);
4476                         if (mddev->sync_thread) {
4477                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4478                                 md_reap_sync_thread(mddev);
4479                         }
4480                         mddev_unlock(mddev);
4481                 }
4482         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4483                 return -EBUSY;
4484         else if (cmd_match(page, "resync"))
4485                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4486         else if (cmd_match(page, "recover")) {
4487                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4488                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4489         } else if (cmd_match(page, "reshape")) {
4490                 int err;
4491                 if (mddev->pers->start_reshape == NULL)
4492                         return -EINVAL;
4493                 err = mddev_lock(mddev);
4494                 if (!err) {
4495                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4496                                 err = -EBUSY;
4497                         else {
4498                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4499                                 err = mddev->pers->start_reshape(mddev);
4500                         }
4501                         mddev_unlock(mddev);
4502                 }
4503                 if (err)
4504                         return err;
4505                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4506         } else {
4507                 if (cmd_match(page, "check"))
4508                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4509                 else if (!cmd_match(page, "repair"))
4510                         return -EINVAL;
4511                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4512                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4513                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4514         }
4515         if (mddev->ro == 2) {
4516                 /* A write to sync_action is enough to justify
4517                  * canceling read-auto mode
4518                  */
4519                 mddev->ro = 0;
4520                 md_wakeup_thread(mddev->sync_thread);
4521         }
4522         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4523         md_wakeup_thread(mddev->thread);
4524         sysfs_notify_dirent_safe(mddev->sysfs_action);
4525         return len;
4526 }
4527
4528 static struct md_sysfs_entry md_scan_mode =
4529 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
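
/*
 * Illustrative use: sync_action accepts the words matched above, e.g.
 * (array name assumed)
 *
 *   echo check > /sys/block/md0/md/sync_action    # read-only scrub
 *   echo repair > /sys/block/md0/md/sync_action   # scrub and rewrite
 *   echo idle > /sys/block/md0/md/sync_action     # interrupt a running sync
 */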
4530
4531 static ssize_t
4532 last_sync_action_show(struct mddev *mddev, char *page)
4533 {
4534         return sprintf(page, "%s\n", mddev->last_sync_action);
4535 }
4536
4537 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4538
4539 static ssize_t
4540 mismatch_cnt_show(struct mddev *mddev, char *page)
4541 {
4542         return sprintf(page, "%llu\n",
4543                        (unsigned long long)
4544                        atomic64_read(&mddev->resync_mismatches));
4545 }
4546
4547 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4548
4549 static ssize_t
4550 sync_min_show(struct mddev *mddev, char *page)
4551 {
4552         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4553                        mddev->sync_speed_min ? "local": "system");
4554 }
4555
4556 static ssize_t
4557 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4558 {
4559         unsigned int min;
4560         int rv;
4561
4562         if (strncmp(buf, "system", 6) == 0) {
4563                 min = 0;
4564         } else {
4565                 rv = kstrtouint(buf, 10, &min);
4566                 if (rv < 0)
4567                         return rv;
4568                 if (min == 0)
4569                         return -EINVAL;
4570         }
4571         mddev->sync_speed_min = min;
4572         return len;
4573 }
4574
4575 static struct md_sysfs_entry md_sync_min =
4576 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4577
4578 static ssize_t
4579 sync_max_show(struct mddev *mddev, char *page)
4580 {
4581         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4582                        mddev->sync_speed_max ? "local": "system");
4583 }
4584
4585 static ssize_t
4586 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4587 {
4588         unsigned int max;
4589         int rv;
4590
4591         if (strncmp(buf, "system", 6) == 0) {
4592                 max = 0;
4593         } else {
4594                 rv = kstrtouint(buf, 10, &max);
4595                 if (rv < 0)
4596                         return rv;
4597                 if (max == 0)
4598                         return -EINVAL;
4599         }
4600         mddev->sync_speed_max = max;
4601         return len;
4602 }
4603
4604 static struct md_sysfs_entry md_sync_max =
4605 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
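
/*
 * Illustrative use: the speed limits above are in KiB/sec, and writing
 * "system" reverts to the system-wide default, e.g.
 *
 *   echo 50000 > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_max
 */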
4606
4607 static ssize_t
4608 degraded_show(struct mddev *mddev, char *page)
4609 {
4610         return sprintf(page, "%d\n", mddev->degraded);
4611 }
4612 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4613
4614 static ssize_t
4615 sync_force_parallel_show(struct mddev *mddev, char *page)
4616 {
4617         return sprintf(page, "%d\n", mddev->parallel_resync);
4618 }
4619
4620 static ssize_t
4621 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4622 {
4623         long n;
4624
4625         if (kstrtol(buf, 10, &n))
4626                 return -EINVAL;
4627
4628         if (n != 0 && n != 1)
4629                 return -EINVAL;
4630
4631         mddev->parallel_resync = n;
4632
4633         if (mddev->sync_thread)
4634                 wake_up(&resync_wait);
4635
4636         return len;
4637 }
4638
4639 /* force parallel resync, even with shared block devices */
4640 static struct md_sysfs_entry md_sync_force_parallel =
4641 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4642        sync_force_parallel_show, sync_force_parallel_store);
4643
4644 static ssize_t
4645 sync_speed_show(struct mddev *mddev, char *page)
4646 {
4647         unsigned long resync, dt, db;
4648         if (mddev->curr_resync == 0)
4649                 return sprintf(page, "none\n");
4650         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4651         dt = (jiffies - mddev->resync_mark) / HZ;
4652         if (!dt) dt++;
4653         db = resync - mddev->resync_mark_cnt;
4654         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4655 }
4656
4657 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
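
/*
 * Worked example for sync_speed_show(): if 40000 sectors completed over a
 * 2 second mark window, the reported rate is 40000 / 2 / 2 = 10000, i.e.
 * 10000 KiB/sec (the trailing "/2" converts 512-byte sectors to KiB).
 */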
4658
4659 static ssize_t
4660 sync_completed_show(struct mddev *mddev, char *page)
4661 {
4662         unsigned long long max_sectors, resync;
4663
4664         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4665                 return sprintf(page, "none\n");
4666
4667         if (mddev->curr_resync == 1 ||
4668             mddev->curr_resync == 2)
4669                 return sprintf(page, "delayed\n");
4670
4671         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4672             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4673                 max_sectors = mddev->resync_max_sectors;
4674         else
4675                 max_sectors = mddev->dev_sectors;
4676
4677         resync = mddev->curr_resync_completed;
4678         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4679 }
4680
4681 static struct md_sysfs_entry md_sync_completed =
4682         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4683
4684 static ssize_t
4685 min_sync_show(struct mddev *mddev, char *page)
4686 {
4687         return sprintf(page, "%llu\n",
4688                        (unsigned long long)mddev->resync_min);
4689 }
4690 static ssize_t
4691 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4692 {
4693         unsigned long long min;
4694         int err;
4695
4696         if (kstrtoull(buf, 10, &min))
4697                 return -EINVAL;
4698
4699         spin_lock(&mddev->lock);
4700         err = -EINVAL;
4701         if (min > mddev->resync_max)
4702                 goto out_unlock;
4703
4704         err = -EBUSY;
4705         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4706                 goto out_unlock;
4707
4708         /* Round down to a multiple of 4K (8 sectors) for safety */
4709         mddev->resync_min = round_down(min, 8);
4710         err = 0;
4711
4712 out_unlock:
4713         spin_unlock(&mddev->lock);
4714         return err ?: len;
4715 }
4716
4717 static struct md_sysfs_entry md_min_sync =
4718 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4719
4720 static ssize_t
4721 max_sync_show(struct mddev *mddev, char *page)
4722 {
4723         if (mddev->resync_max == MaxSector)
4724                 return sprintf(page, "max\n");
4725         else
4726                 return sprintf(page, "%llu\n",
4727                                (unsigned long long)mddev->resync_max);
4728 }
4729 static ssize_t
4730 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4731 {
4732         int err;
4733         spin_lock(&mddev->lock);
4734         if (strncmp(buf, "max", 3) == 0)
4735                 mddev->resync_max = MaxSector;
4736         else {
4737                 unsigned long long max;
4738                 int chunk;
4739
4740                 err = -EINVAL;
4741                 if (kstrtoull(buf, 10, &max))
4742                         goto out_unlock;
4743                 if (max < mddev->resync_min)
4744                         goto out_unlock;
4745
4746                 err = -EBUSY;
4747                 if (max < mddev->resync_max &&
4748                     mddev->ro == 0 &&
4749                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4750                         goto out_unlock;
4751
4752                 /* Must be a multiple of chunk_size */
4753                 chunk = mddev->chunk_sectors;
4754                 if (chunk) {
4755                         sector_t temp = max;
4756
4757                         err = -EINVAL;
4758                         if (sector_div(temp, chunk))
4759                                 goto out_unlock;
4760                 }
4761                 mddev->resync_max = max;
4762         }
4763         wake_up(&mddev->recovery_wait);
4764         err = 0;
4765 out_unlock:
4766         spin_unlock(&mddev->lock);
4767         return err ?: len;
4768 }
4769
4770 static struct md_sysfs_entry md_max_sync =
4771 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
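
/*
 * Illustrative use: sync_min and sync_max bound the resync window in
 * sectors; sync_max also accepts the word "max", e.g.
 *
 *   echo 0 > /sys/block/md0/md/sync_min
 *   echo max > /sys/block/md0/md/sync_max
 */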
4772
4773 static ssize_t
4774 suspend_lo_show(struct mddev *mddev, char *page)
4775 {
4776         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4777 }
4778
4779 static ssize_t
4780 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4781 {
4782         unsigned long long old, new;
4783         int err;
4784
4785         err = kstrtoull(buf, 10, &new);
4786         if (err < 0)
4787                 return err;
4788         if (new != (sector_t)new)
4789                 return -EINVAL;
4790
4791         err = mddev_lock(mddev);
4792         if (err)
4793                 return err;
4794         err = -EINVAL;
4795         if (mddev->pers == NULL ||
4796             mddev->pers->quiesce == NULL)
4797                 goto unlock;
4798         old = mddev->suspend_lo;
4799         mddev->suspend_lo = new;
4800         if (new >= old)
4801                 /* Shrinking suspended region */
4802                 mddev->pers->quiesce(mddev, 2);
4803         else {
4804                 /* Expanding suspended region - need to wait */
4805                 mddev->pers->quiesce(mddev, 1);
4806                 mddev->pers->quiesce(mddev, 0);
4807         }
4808         err = 0;
4809 unlock:
4810         mddev_unlock(mddev);
4811         return err ?: len;
4812 }
4813 static struct md_sysfs_entry md_suspend_lo =
4814 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4815
4816 static ssize_t
4817 suspend_hi_show(struct mddev *mddev, char *page)
4818 {
4819         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4820 }
4821
4822 static ssize_t
4823 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4824 {
4825         unsigned long long old, new;
4826         int err;
4827
4828         err = kstrtoull(buf, 10, &new);
4829         if (err < 0)
4830                 return err;
4831         if (new != (sector_t)new)
4832                 return -EINVAL;
4833
4834         err = mddev_lock(mddev);
4835         if (err)
4836                 return err;
4837         err = -EINVAL;
4838         if (mddev->pers == NULL ||
4839             mddev->pers->quiesce == NULL)
4840                 goto unlock;
4841         old = mddev->suspend_hi;
4842         mddev->suspend_hi = new;
4843         if (new <= old)
4844                 /* Shrinking suspended region */
4845                 mddev->pers->quiesce(mddev, 2);
4846         else {
4847                 /* Expanding suspended region - need to wait */
4848                 mddev->pers->quiesce(mddev, 1);
4849                 mddev->pers->quiesce(mddev, 0);
4850         }
4851         err = 0;
4852 unlock:
4853         mddev_unlock(mddev);
4854         return err ?: len;
4855 }
4856 static struct md_sysfs_entry md_suspend_hi =
4857 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
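
/*
 * Illustrative use: suspend_lo and suspend_hi bracket a sector range in
 * which I/O is suspended (the personality must support quiesce), e.g.
 * suspending sectors 0-8191:
 *
 *   echo 8192 > /sys/block/md0/md/suspend_hi
 *   echo 0 > /sys/block/md0/md/suspend_lo
 */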
4858
4859 static ssize_t
4860 reshape_position_show(struct mddev *mddev, char *page)
4861 {
4862         if (mddev->reshape_position != MaxSector)
4863                 return sprintf(page, "%llu\n",
4864                                (unsigned long long)mddev->reshape_position);
4865         strcpy(page, "none\n");
4866         return 5;
4867 }
4868
4869 static ssize_t
4870 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4871 {
4872         struct md_rdev *rdev;
4873         unsigned long long new;
4874         int err;
4875
4876         err = kstrtoull(buf, 10, &new);
4877         if (err < 0)
4878                 return err;
4879         if (new != (sector_t)new)
4880                 return -EINVAL;
4881         err = mddev_lock(mddev);
4882         if (err)
4883                 return err;
4884         err = -EBUSY;
4885         if (mddev->pers)
4886                 goto unlock;
4887         mddev->reshape_position = new;
4888         mddev->delta_disks = 0;
4889         mddev->reshape_backwards = 0;
4890         mddev->new_level = mddev->level;
4891         mddev->new_layout = mddev->layout;
4892         mddev->new_chunk_sectors = mddev->chunk_sectors;
4893         rdev_for_each(rdev, mddev)
4894                 rdev->new_data_offset = rdev->data_offset;
4895         err = 0;
4896 unlock:
4897         mddev_unlock(mddev);
4898         return err ?: len;
4899 }
4900
4901 static struct md_sysfs_entry md_reshape_position =
4902 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4903        reshape_position_store);
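
/*
 * reshape_position can only be written while the array is inactive (note
 * the -EBUSY above when mddev->pers is set); a recovery step might re-seed
 * it with a checkpoint sector, e.g. (arbitrary value for illustration):
 *
 *   echo 12345678 > /sys/block/md0/md/reshape_position
 */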
4904
4905 static ssize_t
4906 reshape_direction_show(struct mddev *mddev, char *page)
4907 {
4908         return sprintf(page, "%s\n",
4909                        mddev->reshape_backwards ? "backwards" : "forwards");
4910 }
4911
4912 static ssize_t
4913 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4914 {
4915         int backwards = 0;
4916         int err;
4917
4918         if (cmd_match(buf, "forwards"))
4919                 backwards = 0;
4920         else if (cmd_match(buf, "backwards"))
4921                 backwards = 1;
4922         else
4923                 return -EINVAL;
4924         if (mddev->reshape_backwards == backwards)
4925                 return len;
4926
4927         err = mddev_lock(mddev);
4928         if (err)
4929                 return err;
4930         /* check if we are allowed to change */
4931         if (mddev->delta_disks)
4932                 err = -EBUSY;
4933         else if (mddev->persistent &&
4934             mddev->major_version == 0)
4935                 err = -EINVAL;
4936         else
4937                 mddev->reshape_backwards = backwards;
4938         mddev_unlock(mddev);
4939         return err ?: len;
4940 }
4941
4942 static struct md_sysfs_entry md_reshape_direction =
4943 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4944        reshape_direction_store);
4945
4946 static ssize_t
4947 array_size_show(struct mddev *mddev, char *page)
4948 {
4949         if (mddev->external_size)
4950                 return sprintf(page, "%llu\n",
4951                                (unsigned long long)mddev->array_sectors/2);
4952         else
4953                 return sprintf(page, "default\n");
4954 }
4955
4956 static ssize_t
4957 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4958 {
4959         sector_t sectors;
4960         int err;
4961
4962         err = mddev_lock(mddev);
4963         if (err)
4964                 return err;
4965
4966         /* Clustered raid doesn't support changing array_sectors */
4967         if (mddev_is_clustered(mddev)) {
4968                 mddev_unlock(mddev);
4969                 return -EINVAL;
4970         }
4971
4972         if (strncmp(buf, "default", 7) == 0) {
4973                 if (mddev->pers)
4974                         sectors = mddev->pers->size(mddev, 0, 0);
4975                 else
4976                         sectors = mddev->array_sectors;
4977
4978                 mddev->external_size = 0;
4979         } else {
4980                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4981                         err = -EINVAL;
4982                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4983                         err = -E2BIG;
4984                 else
4985                         mddev->external_size = 1;
4986         }
4987
4988         if (!err) {
4989                 mddev->array_sectors = sectors;
4990                 if (mddev->pers) {
4991                         set_capacity(mddev->gendisk, mddev->array_sectors);
4992                         revalidate_disk(mddev->gendisk);
4993                 }
4994         }
4995         mddev_unlock(mddev);
4996         return err ?: len;
4997 }
4998
4999 static struct md_sysfs_entry md_array_size =
5000 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5001        array_size_store);
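
/*
 * Illustrative use: array_size takes a size in KiB, or the word "default"
 * to return to the size computed by the personality:
 *
 *   echo default > /sys/block/md0/md/array_size
 */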
5002
5003 static ssize_t
5004 consistency_policy_show(struct mddev *mddev, char *page)
5005 {
5006         int ret;
5007
5008         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5009                 ret = sprintf(page, "journal\n");
5010         } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5011                 ret = sprintf(page, "ppl\n");
5012         } else if (mddev->bitmap) {
5013                 ret = sprintf(page, "bitmap\n");
5014         } else if (mddev->pers) {
5015                 if (mddev->pers->sync_request)
5016                         ret = sprintf(page, "resync\n");
5017                 else
5018                         ret = sprintf(page, "none\n");
5019         } else {
5020                 ret = sprintf(page, "unknown\n");
5021         }
5022
5023         return ret;
5024 }
5025
5026 static ssize_t
5027 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5028 {
5029         int err = 0;
5030
5031         if (mddev->pers) {
5032                 if (mddev->pers->change_consistency_policy)
5033                         err = mddev->pers->change_consistency_policy(mddev, buf);
5034                 else
5035                         err = -EBUSY;
5036         } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5037                 set_bit(MD_HAS_PPL, &mddev->flags);
5038         } else {
5039                 err = -EINVAL;
5040         }
5041
5042         return err ? err : len;
5043 }
5044
5045 static struct md_sysfs_entry md_consistency_policy =
5046 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5047        consistency_policy_store);
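
/*
 * Illustrative use: per the store handler above, consistency_policy can
 * switch an external array to PPL before it is activated, or ask a running
 * personality to change policy, e.g.
 *
 *   echo ppl > /sys/block/md0/md/consistency_policy
 */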
5048
5049 static struct attribute *md_default_attrs[] = {
5050         &md_level.attr,
5051         &md_layout.attr,
5052         &md_raid_disks.attr,
5053         &md_chunk_size.attr,
5054         &md_size.attr,
5055         &md_resync_start.attr,
5056         &md_metadata.attr,
5057         &md_new_device.attr,
5058         &md_safe_delay.attr,
5059         &md_array_state.attr,
5060         &md_reshape_position.attr,
5061         &md_reshape_direction.attr,
5062         &md_array_size.attr,
5063         &max_corr_read_errors.attr,
5064         &md_consistency_policy.attr,
5065         NULL,
5066 };
5067
5068 static struct attribute *md_redundancy_attrs[] = {
5069         &md_scan_mode.attr,
5070         &md_last_scan_mode.attr,
5071         &md_mismatches.attr,
5072         &md_sync_min.attr,
5073         &md_sync_max.attr,
5074         &md_sync_speed.attr,
5075         &md_sync_force_parallel.attr,
5076         &md_sync_completed.attr,
5077         &md_min_sync.attr,
5078         &md_max_sync.attr,
5079         &md_suspend_lo.attr,
5080         &md_suspend_hi.attr,
5081         &md_bitmap.attr,
5082         &md_degraded.attr,
5083         NULL,
5084 };
5085 static struct attribute_group md_redundancy_group = {
5086         .name = NULL,
5087         .attrs = md_redundancy_attrs,
5088 };
5089
5090 static ssize_t
5091 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5092 {
5093         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5094         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5095         ssize_t rv;
5096
5097         if (!entry->show)
5098                 return -EIO;
5099         spin_lock(&all_mddevs_lock);
5100         if (list_empty(&mddev->all_mddevs)) {
5101                 spin_unlock(&all_mddevs_lock);
5102                 return -EBUSY;
5103         }
5104         mddev_get(mddev);
5105         spin_unlock(&all_mddevs_lock);
5106
5107         rv = entry->show(mddev, page);
5108         mddev_put(mddev);
5109         return rv;
5110 }
5111
5112 static ssize_t
5113 md_attr_store(struct kobject *kobj, struct attribute *attr,
5114               const char *page, size_t length)
5115 {
5116         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5117         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5118         ssize_t rv;
5119
5120         if (!entry->store)
5121                 return -EIO;
5122         if (!capable(CAP_SYS_ADMIN))
5123                 return -EACCES;
5124         spin_lock(&all_mddevs_lock);
5125         if (list_empty(&mddev->all_mddevs)) {
5126                 spin_unlock(&all_mddevs_lock);
5127                 return -EBUSY;
5128         }
5129         mddev_get(mddev);
5130         spin_unlock(&all_mddevs_lock);
5131         rv = entry->store(mddev, page, length);
5132         mddev_put(mddev);
5133         return rv;
5134 }
5135
5136 static void md_free(struct kobject *ko)
5137 {
5138         struct mddev *mddev = container_of(ko, struct mddev, kobj);
5139
5140         if (mddev->sysfs_state)
5141                 sysfs_put(mddev->sysfs_state);
5142
5143         if (mddev->queue)
5144                 blk_cleanup_queue(mddev->queue);
5145         if (mddev->gendisk) {
5146                 del_gendisk(mddev->gendisk);
5147                 put_disk(mddev->gendisk);
5148         }
5149         percpu_ref_exit(&mddev->writes_pending);
5150
5151         kfree(mddev);
5152 }
5153
5154 static const struct sysfs_ops md_sysfs_ops = {
5155         .show   = md_attr_show,
5156         .store  = md_attr_store,
5157 };
5158 static struct kobj_type md_ktype = {
5159         .release        = md_free,
5160         .sysfs_ops      = &md_sysfs_ops,
5161         .default_attrs  = md_default_attrs,
5162 };
5163
5164 int mdp_major = 0;
5165
5166 static void mddev_delayed_delete(struct work_struct *ws)
5167 {
5168         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5169
5170         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5171         kobject_del(&mddev->kobj);
5172         kobject_put(&mddev->kobj);
5173 }
5174
5175 static void no_op(struct percpu_ref *r) {}
5176
5177 static int md_alloc(dev_t dev, char *name)
5178 {
5179         /*
5180          * If dev is zero, name is the name of a device to allocate with
5181          * an arbitrary minor number.  It will be "md_???"
5182          * If dev is non-zero it must be a device number with a MAJOR of
5183          * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5184          * the device is being created by opening a node in /dev.
5185          * If "name" is not NULL, the device is being created by
5186          * writing to /sys/module/md_mod/parameters/new_array.
5187          */
5188         static DEFINE_MUTEX(disks_mutex);
5189         struct mddev *mddev = mddev_find(dev);
5190         struct gendisk *disk;
5191         int partitioned;
5192         int shift;
5193         int unit;
5194         int error;
5195
5196         if (!mddev)
5197                 return -ENODEV;
5198
5199         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5200         shift = partitioned ? MdpMinorShift : 0;
5201         unit = MINOR(mddev->unit) >> shift;
5202
5203         /* wait for any previous instance of this device to be
5204          * completely removed (mddev_delayed_delete).
5205          */
5206         flush_workqueue(md_misc_wq);
5207
5208         mutex_lock(&disks_mutex);
5209         error = -EEXIST;
5210         if (mddev->gendisk)
5211                 goto abort;
5212
5213         if (name && !dev) {
5214                 /* Need to ensure that 'name' is not a duplicate.
5215                  */
5216                 struct mddev *mddev2;
5217                 spin_lock(&all_mddevs_lock);
5218
5219                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5220                         if (mddev2->gendisk &&
5221                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5222                                 spin_unlock(&all_mddevs_lock);
5223                                 goto abort;
5224                         }
5225                 spin_unlock(&all_mddevs_lock);
5226         }
5227         if (name && dev)
5228                 /*
5229                  * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5230                  */
5231                 mddev->hold_active = UNTIL_STOP;
5232
5233         error = -ENOMEM;
5234         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5235         if (!mddev->queue)
5236                 goto abort;
5237         mddev->queue->queuedata = mddev;
5238
5239         blk_queue_make_request(mddev->queue, md_make_request);
5240         blk_set_stacking_limits(&mddev->queue->limits);
5241
5242         if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
5243                 goto abort;
5244         /* We want to start with the refcount at zero */
5245         percpu_ref_put(&mddev->writes_pending);
5246         disk = alloc_disk(1 << shift);
5247         if (!disk) {
5248                 blk_cleanup_queue(mddev->queue);
5249                 mddev->queue = NULL;
5250                 goto abort;
5251         }
5252         disk->major = MAJOR(mddev->unit);
5253         disk->first_minor = unit << shift;
5254         if (name)
5255                 strcpy(disk->disk_name, name);
5256         else if (partitioned)
5257                 sprintf(disk->disk_name, "md_d%d", unit);
5258         else
5259                 sprintf(disk->disk_name, "md%d", unit);
5260         disk->fops = &md_fops;
5261         disk->private_data = mddev;
5262         disk->queue = mddev->queue;
5263         blk_queue_write_cache(mddev->queue, true, true);
5264         /* Allow extended partitions.  This makes the
5265          * 'mdp' device redundant, but we can't really
5266          * remove it now.
5267          */
5268         disk->flags |= GENHD_FL_EXT_DEVT;
5269         mddev->gendisk = disk;
5270         /* As soon as we call add_disk(), another thread could get
5271          * through to md_open, so make sure it doesn't get too far
5272          */
5273         mutex_lock(&mddev->open_mutex);
5274         add_disk(disk);
5275
5276         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5277                                      &disk_to_dev(disk)->kobj, "%s", "md");
5278         if (error) {
5279                 /* This isn't possible, but as kobject_init_and_add is marked
5280                  * __must_check, we must do something with the result
5281                  */
5282                 pr_debug("md: cannot register %s/md - name in use\n",
5283                          disk->disk_name);
5284                 error = 0;
5285         }
5286         if (mddev->kobj.sd &&
5287             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5288                 pr_debug("pointless warning\n");
5289         mutex_unlock(&mddev->open_mutex);
5290  abort:
5291         mutex_unlock(&disks_mutex);
5292         if (!error && mddev->kobj.sd) {
5293                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5294                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5295         }
5296         mddev_put(mddev);
5297         return error;
5298 }
5299
5300 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5301 {
5302         if (create_on_open)
5303                 md_alloc(dev, NULL);
5304         return NULL;
5305 }
5306
5307 static int add_named_array(const char *val, struct kernel_param *kp)
5308 {
5309         /*
5310          * val must be "md_*" or "mdNNN".
5311          * For "md_*" we allocate an array with a large free minor number, and
5312          * set the name to val.  val must not already be an active name.
5313          * For "mdNNN" we allocate an array with the minor number NNN
5314          * which must not already be in use.
5315          */
5316         int len = strlen(val);
5317         char buf[DISK_NAME_LEN];
5318         unsigned long devnum;
5319
5320         while (len && val[len-1] == '\n')
5321                 len--;
5322         if (len >= DISK_NAME_LEN)
5323                 return -E2BIG;
5324         strlcpy(buf, val, len+1);
5325         if (strncmp(buf, "md_", 3) == 0)
5326                 return md_alloc(0, buf);
5327         if (strncmp(buf, "md", 2) == 0 &&
5328             isdigit(buf[2]) &&
5329             kstrtoul(buf+2, 10, &devnum) == 0 &&
5330             devnum <= MINORMASK)
5331                 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5332
5333         return -EINVAL;
5334 }
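
/*
 * Illustrative use of the new_array parameter handled above (names are
 * examples only):
 *
 *   echo md_home > /sys/module/md_mod/parameters/new_array   # named array
 *   echo md4 > /sys/module/md_mod/parameters/new_array       # numbered array
 */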
5335
5336 static void md_safemode_timeout(unsigned long data)
5337 {
5338         struct mddev *mddev = (struct mddev *) data;
5339
5340         mddev->safemode = 1;
5341         if (mddev->external)
5342                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5343
5344         md_wakeup_thread(mddev->thread);
5345 }
5346
5347 static int start_dirty_degraded;
5348
5349 int md_run(struct mddev *mddev)
5350 {
5351         int err;
5352         struct md_rdev *rdev;
5353         struct md_personality *pers;
5354
5355         if (list_empty(&mddev->disks))
5356                 /* cannot run an array with no devices */
5357                 return -EINVAL;
5358
5359         if (mddev->pers)
5360                 return -EBUSY;
5361         /* Cannot run until previous stop completes properly */
5362         if (mddev->sysfs_active)
5363                 return -EBUSY;
5364
5365         /*
5366          * Analyze all RAID superblock(s)
5367          */
5368         if (!mddev->raid_disks) {
5369                 if (!mddev->persistent)
5370                         return -EINVAL;
5371                 analyze_sbs(mddev);
5372         }
5373
5374         if (mddev->level != LEVEL_NONE)
5375                 request_module("md-level-%d", mddev->level);
5376         else if (mddev->clevel[0])
5377                 request_module("md-%s", mddev->clevel);
5378
5379         /*
5380          * Drop all container device buffers; from now on
5381          * the only valid external interface is through the md
5382          * device.
5383          */
5384         rdev_for_each(rdev, mddev) {
5385                 if (test_bit(Faulty, &rdev->flags))
5386                         continue;
5387                 sync_blockdev(rdev->bdev);
5388                 invalidate_bdev(rdev->bdev);
5389                 if (mddev->ro != 1 &&
5390                     (bdev_read_only(rdev->bdev) ||
5391                      bdev_read_only(rdev->meta_bdev))) {
5392                         mddev->ro = 1;
5393                         if (mddev->gendisk)
5394                                 set_disk_ro(mddev->gendisk, 1);
5395                 }
5396
5397                 /* Perform some consistency tests on the device.
5398                  * We don't want the data to overlap the metadata.
5399                  * Internal bitmap issues have been handled elsewhere.
5400                  */
5401                 if (rdev->meta_bdev) {
5402                         /* Nothing to check */;
5403                 } else if (rdev->data_offset < rdev->sb_start) {
5404                         if (mddev->dev_sectors &&
5405                             rdev->data_offset + mddev->dev_sectors
5406                             > rdev->sb_start) {
5407                                 pr_warn("md: %s: data overlaps metadata\n",
5408                                         mdname(mddev));
5409                                 return -EINVAL;
5410                         }
5411                 } else {
5412                         if (rdev->sb_start + rdev->sb_size/512
5413                             > rdev->data_offset) {
5414                                 pr_warn("md: %s: metadata overlaps data\n",
5415                                         mdname(mddev));
5416                                 return -EINVAL;
5417                         }
5418                 }
5419                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5420         }
5421
5422         if (mddev->bio_set == NULL) {
5423                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5424                 if (!mddev->bio_set)
5425                         return -ENOMEM;
5426         }
5427
5428         spin_lock(&pers_lock);
5429         pers = find_pers(mddev->level, mddev->clevel);
5430         if (!pers || !try_module_get(pers->owner)) {
5431                 spin_unlock(&pers_lock);
5432                 if (mddev->level != LEVEL_NONE)
5433                         pr_warn("md: personality for level %d is not loaded!\n",
5434                                 mddev->level);
5435                 else
5436                         pr_warn("md: personality for level %s is not loaded!\n",
5437                                 mddev->clevel);
5438                 return -EINVAL;
5439         }
5440         spin_unlock(&pers_lock);
5441         if (mddev->level != pers->level) {
5442                 mddev->level = pers->level;
5443                 mddev->new_level = pers->level;
5444         }
5445         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5446
5447         if (mddev->reshape_position != MaxSector &&
5448             pers->start_reshape == NULL) {
5449                 /* This personality cannot handle reshaping... */
5450                 module_put(pers->owner);
5451                 return -EINVAL;
5452         }
5453
5454         if (pers->sync_request) {
5455                 /* Warn if this is a potentially silly
5456                  * configuration.
5457                  */
5458                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5459                 struct md_rdev *rdev2;
5460                 int warned = 0;
5461
5462                 rdev_for_each(rdev, mddev)
5463                         rdev_for_each(rdev2, mddev) {
5464                                 if (rdev < rdev2 &&
5465                                     rdev->bdev->bd_contains ==
5466                                     rdev2->bdev->bd_contains) {
5467                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5468                                                 mdname(mddev),
5469                                                 bdevname(rdev->bdev,b),
5470                                                 bdevname(rdev2->bdev,b2));
5471                                         warned = 1;
5472                                 }
5473                         }
5474
5475                 if (warned)
5476                         pr_warn("True protection against single-disk failure might be compromised.\n");
5477         }
5478
5479         mddev->recovery = 0;
5480         /* may be overridden by the personality */
5481         mddev->resync_max_sectors = mddev->dev_sectors;
5482
5483         mddev->ok_start_degraded = start_dirty_degraded;
5484
5485         if (start_readonly && mddev->ro == 0)
5486                 mddev->ro = 2; /* read-only, but switch on first write */
5487
5488         /*
5489          * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
5490          * up mddev->thread. It is important to initialize critical
5491          * resources for mddev->thread BEFORE calling pers->run().
5492          */
5493         err = pers->run(mddev);
5494         if (err)
5495                 pr_warn("md: pers->run() failed ...\n");
5496         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5497                 WARN_ONCE(!mddev->external_size,
5498                           "%s: default size too small, but 'external_size' not in effect?\n",
5499                           __func__);
5500                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5501                         (unsigned long long)mddev->array_sectors / 2,
5502                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5503                 err = -EINVAL;
5504         }
5505         if (err == 0 && pers->sync_request &&
5506             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5507                 struct bitmap *bitmap;
5508
5509                 bitmap = bitmap_create(mddev, -1);
5510                 if (IS_ERR(bitmap)) {
5511                         err = PTR_ERR(bitmap);
5512                         pr_warn("%s: failed to create bitmap (%d)\n",
5513                                 mdname(mddev), err);
5514                 } else
5515                         mddev->bitmap = bitmap;
5517         }
5518         if (err) {
5519                 mddev_detach(mddev);
5520                 if (mddev->private)
5521                         pers->free(mddev, mddev->private);
5522                 mddev->private = NULL;
5523                 module_put(pers->owner);
5524                 bitmap_destroy(mddev);
5525                 return err;
5526         }
5527         if (mddev->queue) {
5528                 bool nonrot = true;
5529
5530                 rdev_for_each(rdev, mddev) {
5531                         if (rdev->raid_disk >= 0 &&
5532                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5533                                 nonrot = false;
5534                                 break;
5535                         }
5536                 }
5537                 if (mddev->degraded)
5538                         nonrot = false;
5539                 if (nonrot)
5540                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5541                 else
5542                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5543                 mddev->queue->backing_dev_info->congested_data = mddev;
5544                 mddev->queue->backing_dev_info->congested_fn = md_congested;
5545         }
5546         if (pers->sync_request) {
5547                 if (mddev->kobj.sd &&
5548                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5549                         pr_warn("md: cannot register extra attributes for %s\n",
5550                                 mdname(mddev));
5551                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5552         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5553                 mddev->ro = 0;
5554
5555         atomic_set(&mddev->max_corr_read_errors,
5556                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5557         mddev->safemode = 0;
5558         if (mddev_is_clustered(mddev))
5559                 mddev->safemode_delay = 0;
5560         else
5561                 mddev->safemode_delay = (200 * HZ) / 1000 + 1; /* 200 msec delay */
5562         mddev->in_sync = 1;
5563         smp_wmb();
5564         spin_lock(&mddev->lock);
5565         mddev->pers = pers;
5566         spin_unlock(&mddev->lock);
5567         rdev_for_each(rdev, mddev)
5568                 if (rdev->raid_disk >= 0)
5569                         if (sysfs_link_rdev(mddev, rdev))
5570                                 /* failure here is OK */;
5571
5572         if (mddev->degraded && !mddev->ro)
5573                 /* This ensures that recovering status is reported immediately
5574                  * via sysfs - until a lack of spares is confirmed.
5575                  */
5576                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5577         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5578
5579         if (mddev->sb_flags)
5580                 md_update_sb(mddev, 0);
5581
5582         md_new_event(mddev);
5583         sysfs_notify_dirent_safe(mddev->sysfs_state);
5584         sysfs_notify_dirent_safe(mddev->sysfs_action);
5585         sysfs_notify(&mddev->kobj, NULL, "degraded");
5586         return 0;
5587 }
5588 EXPORT_SYMBOL_GPL(md_run);
5589
5590 static int do_md_run(struct mddev *mddev)
5591 {
5592         int err;
5593
5594         err = md_run(mddev);
5595         if (err)
5596                 goto out;
5597         err = bitmap_load(mddev);
5598         if (err) {
5599                 bitmap_destroy(mddev);
5600                 goto out;
5601         }
5602
5603         if (mddev_is_clustered(mddev))
5604                 md_allow_write(mddev);
5605
5606         md_wakeup_thread(mddev->thread);
5607         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5608
5609         set_capacity(mddev->gendisk, mddev->array_sectors);
5610         revalidate_disk(mddev->gendisk);
5611         mddev->changed = 1;
5612         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5613 out:
5614         return err;
5615 }
5616
5617 static int restart_array(struct mddev *mddev)
5618 {
5619         struct gendisk *disk = mddev->gendisk;
5620         struct md_rdev *rdev;
5621         bool has_journal = false;
5622         bool has_readonly = false;
5623
5624         /* Complain if it has no devices */
5625         if (list_empty(&mddev->disks))
5626                 return -ENXIO;
5627         if (!mddev->pers)
5628                 return -EINVAL;
5629         if (!mddev->ro)
5630                 return -EBUSY;
5631
5632         rcu_read_lock();
5633         rdev_for_each_rcu(rdev, mddev) {
5634                 if (test_bit(Journal, &rdev->flags) &&
5635                     !test_bit(Faulty, &rdev->flags))
5636                         has_journal = true;
5637                 if (bdev_read_only(rdev->bdev))
5638                         has_readonly = true;
5639         }
5640         rcu_read_unlock();
5641         if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
5642                 /* Don't restart rw with journal missing/faulty */
5643                 return -EINVAL;
5644         if (has_readonly)
5645                 return -EROFS;
5646
5647         mddev->safemode = 0;
5648         mddev->ro = 0;
5649         set_disk_ro(disk, 0);
5650         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5651         /* Kick recovery or resync if necessary */
5652         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5653         md_wakeup_thread(mddev->thread);
5654         md_wakeup_thread(mddev->sync_thread);
5655         sysfs_notify_dirent_safe(mddev->sysfs_state);
5656         return 0;
5657 }
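/*
 * Illustrative only, not part of the driver: restart_array() is reached
 * through the RESTART_ARRAY_RW ioctl, so a minimal userspace sketch for
 * switching an assembled read-only array back to read-write (assuming
 * the array node is /dev/md0) might be:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, RESTART_ARRAY_RW, 0) != 0)
 *		perror("RESTART_ARRAY_RW");	// EBUSY if already writable
 */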
5658
5659 static void md_clean(struct mddev *mddev)
5660 {
5661         mddev->array_sectors = 0;
5662         mddev->external_size = 0;
5663         mddev->dev_sectors = 0;
5664         mddev->raid_disks = 0;
5665         mddev->recovery_cp = 0;
5666         mddev->resync_min = 0;
5667         mddev->resync_max = MaxSector;
5668         mddev->reshape_position = MaxSector;
5669         mddev->external = 0;
5670         mddev->persistent = 0;
5671         mddev->level = LEVEL_NONE;
5672         mddev->clevel[0] = 0;
5673         mddev->flags = 0;
5674         mddev->sb_flags = 0;
5675         mddev->ro = 0;
5676         mddev->metadata_type[0] = 0;
5677         mddev->chunk_sectors = 0;
5678         mddev->ctime = mddev->utime = 0;
5679         mddev->layout = 0;
5680         mddev->max_disks = 0;
5681         mddev->events = 0;
5682         mddev->can_decrease_events = 0;
5683         mddev->delta_disks = 0;
5684         mddev->reshape_backwards = 0;
5685         mddev->new_level = LEVEL_NONE;
5686         mddev->new_layout = 0;
5687         mddev->new_chunk_sectors = 0;
5688         mddev->curr_resync = 0;
5689         atomic64_set(&mddev->resync_mismatches, 0);
5690         mddev->suspend_lo = mddev->suspend_hi = 0;
5691         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5692         mddev->recovery = 0;
5693         mddev->in_sync = 0;
5694         mddev->changed = 0;
5695         mddev->degraded = 0;
5696         mddev->safemode = 0;
5697         mddev->private = NULL;
5698         mddev->cluster_info = NULL;
5699         mddev->bitmap_info.offset = 0;
5700         mddev->bitmap_info.default_offset = 0;
5701         mddev->bitmap_info.default_space = 0;
5702         mddev->bitmap_info.chunksize = 0;
5703         mddev->bitmap_info.daemon_sleep = 0;
5704         mddev->bitmap_info.max_write_behind = 0;
5705         mddev->bitmap_info.nodes = 0;
5706 }
5707
5708 static void __md_stop_writes(struct mddev *mddev)
5709 {
5710         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5711         flush_workqueue(md_misc_wq);
5712         if (mddev->sync_thread) {
5713                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5714                 md_reap_sync_thread(mddev);
5715         }
5716
5717         del_timer_sync(&mddev->safemode_timer);
5718
5719         if (mddev->pers && mddev->pers->quiesce) {
5720                 mddev->pers->quiesce(mddev, 1);
5721                 mddev->pers->quiesce(mddev, 0);
5722         }
5723         bitmap_flush(mddev);
5724
5725         if (mddev->ro == 0 &&
5726             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5727              mddev->sb_flags)) {
5728                 /* mark array as shutdown cleanly */
5729                 if (!mddev_is_clustered(mddev))
5730                         mddev->in_sync = 1;
5731                 md_update_sb(mddev, 1);
5732         }
5733 }
5734
5735 void md_stop_writes(struct mddev *mddev)
5736 {
5737         mddev_lock_nointr(mddev);
5738         __md_stop_writes(mddev);
5739         mddev_unlock(mddev);
5740 }
5741 EXPORT_SYMBOL_GPL(md_stop_writes);
5742
5743 static void mddev_detach(struct mddev *mddev)
5744 {
5745         bitmap_wait_behind_writes(mddev);
5746         if (mddev->pers && mddev->pers->quiesce) {
5747                 mddev->pers->quiesce(mddev, 1);
5748                 mddev->pers->quiesce(mddev, 0);
5749         }
5750         md_unregister_thread(&mddev->thread);
5751         if (mddev->queue)
5752                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5753 }
5754
5755 static void __md_stop(struct mddev *mddev)
5756 {
5757         struct md_personality *pers = mddev->pers;
5758         bitmap_destroy(mddev);
5759         mddev_detach(mddev);
5760         /* Ensure ->event_work is done */
5761         flush_workqueue(md_misc_wq);
5762         spin_lock(&mddev->lock);
5763         mddev->pers = NULL;
5764         spin_unlock(&mddev->lock);
5765         pers->free(mddev, mddev->private);
5766         mddev->private = NULL;
5767         if (pers->sync_request && mddev->to_remove == NULL)
5768                 mddev->to_remove = &md_redundancy_group;
5769         module_put(pers->owner);
5770         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5771 }
5772
5773 void md_stop(struct mddev *mddev)
5774 {
5775         /* stop the array and free any attached data structures.
5776          * This is called from dm-raid.
5777          */
5778         __md_stop(mddev);
5779         if (mddev->bio_set)
5780                 bioset_free(mddev->bio_set);
5781 }
5782
5783 EXPORT_SYMBOL_GPL(md_stop);
5784
5785 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5786 {
5787         int err = 0;
5788         int did_freeze = 0;
5789
5790         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5791                 did_freeze = 1;
5792                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5793                 md_wakeup_thread(mddev->thread);
5794         }
5795         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5796                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5797         if (mddev->sync_thread)
5798                 /* Thread might be blocked waiting for metadata update
5799                  * which will now never happen */
5800                 wake_up_process(mddev->sync_thread->tsk);
5801
5802         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
5803                 return -EBUSY;
5804         mddev_unlock(mddev);
5805         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5806                                           &mddev->recovery));
5807         wait_event(mddev->sb_wait,
5808                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
5809         mddev_lock_nointr(mddev);
5810
5811         mutex_lock(&mddev->open_mutex);
5812         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5813             mddev->sync_thread ||
5814             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5815                 pr_warn("md: %s still in use.\n", mdname(mddev));
5816                 if (did_freeze) {
5817                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5818                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5819                         md_wakeup_thread(mddev->thread);
5820                 }
5821                 err = -EBUSY;
5822                 goto out;
5823         }
5824         if (mddev->pers) {
5825                 __md_stop_writes(mddev);
5826
5827                 err = -ENXIO;
5828                 if (mddev->ro == 1)
5829                         goto out;
5830                 mddev->ro = 1;
5831                 set_disk_ro(mddev->gendisk, 1);
5832                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5833                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5834                 md_wakeup_thread(mddev->thread);
5835                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5836                 err = 0;
5837         }
5838 out:
5839         mutex_unlock(&mddev->open_mutex);
5840         return err;
5841 }
5842
5843 /* mode:
5844  *   0 - completely stop and disassemble array
5845  *   2 - stop but do not disassemble array
5846  */
5847 static int do_md_stop(struct mddev *mddev, int mode,
5848                       struct block_device *bdev)
5849 {
5850         struct gendisk *disk = mddev->gendisk;
5851         struct md_rdev *rdev;
5852         int did_freeze = 0;
5853
5854         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5855                 did_freeze = 1;
5856                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5857                 md_wakeup_thread(mddev->thread);
5858         }
5859         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5860                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5861         if (mddev->sync_thread)
5862                 /* Thread might be blocked waiting for metadata update
5863                  * which will now never happen */
5864                 wake_up_process(mddev->sync_thread->tsk);
5865
5866         mddev_unlock(mddev);
5867         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5868                                  !test_bit(MD_RECOVERY_RUNNING,
5869                                            &mddev->recovery)));
5870         mddev_lock_nointr(mddev);
5871
5872         mutex_lock(&mddev->open_mutex);
5873         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5874             mddev->sysfs_active ||
5875             mddev->sync_thread ||
5876             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5877                 pr_warn("md: %s still in use.\n", mdname(mddev));
5878                 mutex_unlock(&mddev->open_mutex);
5879                 if (did_freeze) {
5880                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5881                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5882                         md_wakeup_thread(mddev->thread);
5883                 }
5884                 return -EBUSY;
5885         }
5886         if (mddev->pers) {
5887                 if (mddev->ro)
5888                         set_disk_ro(disk, 0);
5889
5890                 __md_stop_writes(mddev);
5891                 __md_stop(mddev);
5892                 mddev->queue->backing_dev_info->congested_fn = NULL;
5893
5894                 /* tell userspace to handle 'inactive' */
5895                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5896
5897                 rdev_for_each(rdev, mddev)
5898                         if (rdev->raid_disk >= 0)
5899                                 sysfs_unlink_rdev(mddev, rdev);
5900
5901                 set_capacity(disk, 0);
5902                 mutex_unlock(&mddev->open_mutex);
5903                 mddev->changed = 1;
5904                 revalidate_disk(disk);
5905
5906                 if (mddev->ro)
5907                         mddev->ro = 0;
5908         } else
5909                 mutex_unlock(&mddev->open_mutex);
5910         /*
5911          * Free resources if final stop
5912          */
5913         if (mode == 0) {
5914                 pr_info("md: %s stopped.\n", mdname(mddev));
5915
5916                 if (mddev->bitmap_info.file) {
5917                         struct file *f = mddev->bitmap_info.file;
5918                         spin_lock(&mddev->lock);
5919                         mddev->bitmap_info.file = NULL;
5920                         spin_unlock(&mddev->lock);
5921                         fput(f);
5922                 }
5923                 mddev->bitmap_info.offset = 0;
5924
5925                 export_array(mddev);
5926
5927                 md_clean(mddev);
5928                 if (mddev->hold_active == UNTIL_STOP)
5929                         mddev->hold_active = 0;
5930         }
5931         md_new_event(mddev);
5932         sysfs_notify_dirent_safe(mddev->sysfs_state);
5933         return 0;
5934 }
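/*
 * Illustrative only: mode 0 above is what the STOP_ARRAY ioctl requests
 * (md_ioctl() calls do_md_stop(mddev, 0, bdev)), while mode 2 stops the
 * array without disassembling it. A minimal userspace sketch, assuming
 * no other opener holds /dev/md0:
 *
 *	int fd = open("/dev/md0", O_RDONLY | O_EXCL);
 *	if (fd >= 0 && ioctl(fd, STOP_ARRAY, 0) != 0)
 *		perror("STOP_ARRAY");	// EBUSY while the array is in use
 */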
5935
5936 #ifndef MODULE
5937 static void autorun_array(struct mddev *mddev)
5938 {
5939         struct md_rdev *rdev;
5940         int err;
5941
5942         if (list_empty(&mddev->disks))
5943                 return;
5944
5945         pr_info("md: running: ");
5946
5947         rdev_for_each(rdev, mddev) {
5948                 char b[BDEVNAME_SIZE];
5949                 pr_cont("<%s>", bdevname(rdev->bdev,b));
5950         }
5951         pr_cont("\n");
5952
5953         err = do_md_run(mddev);
5954         if (err) {
5955                 pr_warn("md: do_md_run() returned %d\n", err);
5956                 do_md_stop(mddev, 0, NULL);
5957         }
5958 }
5959
5960 /*
5961  * Let's try to run arrays based on all disks that have arrived
5962  * until now (those are in pending_raid_disks).
5963  *
5964  * The method: pick the first pending disk, collect all disks with
5965  * the same UUID, remove all from the pending list and put them into
5966  * the 'same_array' list. Then order this list based on superblock
5967  * update time (freshest comes first), kick out 'old' disks and
5968  * compare superblocks. If everything's fine then run it.
5969  *
5970  * If "unit" is allocated, then bump its reference count.
5971  */
5972 static void autorun_devices(int part)
5973 {
5974         struct md_rdev *rdev0, *rdev, *tmp;
5975         struct mddev *mddev;
5976         char b[BDEVNAME_SIZE];
5977
5978         pr_info("md: autorun ...\n");
5979         while (!list_empty(&pending_raid_disks)) {
5980                 int unit;
5981                 dev_t dev;
5982                 LIST_HEAD(candidates);
5983                 rdev0 = list_entry(pending_raid_disks.next,
5984                                          struct md_rdev, same_set);
5985
5986                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5987                 INIT_LIST_HEAD(&candidates);
5988                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5989                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5990                                 pr_debug("md:  adding %s ...\n",
5991                                          bdevname(rdev->bdev,b));
5992                                 list_move(&rdev->same_set, &candidates);
5993                         }
5994                 /*
5995                  * now we have a set of devices, with all of them having
5996                  * mostly sane superblocks. It's time to allocate the
5997                  * mddev.
5998                  */
5999                 if (part) {
6000                         dev = MKDEV(mdp_major,
6001                                     rdev0->preferred_minor << MdpMinorShift);
6002                         unit = MINOR(dev) >> MdpMinorShift;
6003                 } else {
6004                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6005                         unit = MINOR(dev);
6006                 }
6007                 if (rdev0->preferred_minor != unit) {
6008                         pr_warn("md: unit number in %s is bad: %d\n",
6009                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
6010                         break;
6011                 }
6012
6013                 md_probe(dev, NULL, NULL);
6014                 mddev = mddev_find(dev);
6015                 if (!mddev || !mddev->gendisk) {
6016                         if (mddev)
6017                                 mddev_put(mddev);
6018                         break;
6019                 }
6020                 if (mddev_lock(mddev))
6021                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6022                 else if (mddev->raid_disks || mddev->major_version
6023                          || !list_empty(&mddev->disks)) {
6024                         pr_warn("md: %s already running, cannot run %s\n",
6025                                 mdname(mddev), bdevname(rdev0->bdev,b));
6026                         mddev_unlock(mddev);
6027                 } else {
6028                         pr_debug("md: created %s\n", mdname(mddev));
6029                         mddev->persistent = 1;
6030                         rdev_for_each_list(rdev, tmp, &candidates) {
6031                                 list_del_init(&rdev->same_set);
6032                                 if (bind_rdev_to_array(rdev, mddev))
6033                                         export_rdev(rdev);
6034                         }
6035                         autorun_array(mddev);
6036                         mddev_unlock(mddev);
6037                 }
6038                 /* on success, candidates will be empty; on error
6039                  * it won't be.
6040                  */
6041                 rdev_for_each_list(rdev, tmp, &candidates) {
6042                         list_del_init(&rdev->same_set);
6043                         export_rdev(rdev);
6044                 }
6045                 mddev_put(mddev);
6046         }
6047         pr_info("md: ... autorun DONE.\n");
6048 }
6049 #endif /* !MODULE */
6050
6051 static int get_version(void __user *arg)
6052 {
6053         mdu_version_t ver;
6054
6055         ver.major = MD_MAJOR_VERSION;
6056         ver.minor = MD_MINOR_VERSION;
6057         ver.patchlevel = MD_PATCHLEVEL_VERSION;
6058
6059         if (copy_to_user(arg, &ver, sizeof(ver)))
6060                 return -EFAULT;
6061
6062         return 0;
6063 }
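/*
 * Illustrative only: get_version() backs the RAID_VERSION ioctl. A
 * minimal sketch of querying it from userspace:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 */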
6064
6065 static int get_array_info(struct mddev *mddev, void __user *arg)
6066 {
6067         mdu_array_info_t info;
6068         int nr, working, insync, failed, spare;
6069         struct md_rdev *rdev;
6070
6071         nr = working = insync = failed = spare = 0;
6072         rcu_read_lock();
6073         rdev_for_each_rcu(rdev, mddev) {
6074                 nr++;
6075                 if (test_bit(Faulty, &rdev->flags))
6076                         failed++;
6077                 else {
6078                         working++;
6079                         if (test_bit(In_sync, &rdev->flags))
6080                                 insync++;
6081                         else if (test_bit(Journal, &rdev->flags))
6082                                 /* TODO: add journal count to md_u.h */
6083                                 ;
6084                         else
6085                                 spare++;
6086                 }
6087         }
6088         rcu_read_unlock();
6089
6090         info.major_version = mddev->major_version;
6091         info.minor_version = mddev->minor_version;
6092         info.patch_version = MD_PATCHLEVEL_VERSION;
6093         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6094         info.level         = mddev->level;
6095         info.size          = mddev->dev_sectors / 2;
6096         if (info.size != mddev->dev_sectors / 2) /* overflow */
6097                 info.size = -1;
6098         info.nr_disks      = nr;
6099         info.raid_disks    = mddev->raid_disks;
6100         info.md_minor      = mddev->md_minor;
6101         info.not_persistent = !mddev->persistent;
6102
6103         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6104         info.state         = 0;
6105         if (mddev->in_sync)
6106                 info.state = (1<<MD_SB_CLEAN);
6107         if (mddev->bitmap && mddev->bitmap_info.offset)
6108                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6109         if (mddev_is_clustered(mddev))
6110                 info.state |= (1<<MD_SB_CLUSTERED);
6111         info.active_disks  = insync;
6112         info.working_disks = working;
6113         info.failed_disks  = failed;
6114         info.spare_disks   = spare;
6115
6116         info.layout        = mddev->layout;
6117         info.chunk_size    = mddev->chunk_sectors << 9;
6118
6119         if (copy_to_user(arg, &info, sizeof(info)))
6120                 return -EFAULT;
6121
6122         return 0;
6123 }
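/*
 * Illustrative only: the matching userspace call, reusing the fd opened
 * on the array node in the sketch above. Note that info.size is in KiB
 * (dev_sectors / 2), not sectors:
 *
 *	mdu_array_info_t info;
 *	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d/%d disks in sync, %d KiB per device\n",
 *		       info.level, info.active_disks, info.raid_disks,
 *		       info.size);
 */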
6124
6125 static int get_bitmap_file(struct mddev *mddev, void __user *arg)
6126 {
6127         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6128         char *ptr;
6129         int err;
6130
6131         file = kzalloc(sizeof(*file), GFP_NOIO);
6132         if (!file)
6133                 return -ENOMEM;
6134
6135         err = 0;
6136         spin_lock(&mddev->lock);
6137         /* bitmap enabled */
6138         if (mddev->bitmap_info.file) {
6139                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6140                                 sizeof(file->pathname));
6141                 if (IS_ERR(ptr))
6142                         err = PTR_ERR(ptr);
6143                 else
6144                         memmove(file->pathname, ptr,
6145                                 sizeof(file->pathname)-(ptr-file->pathname));
6146         }
6147         spin_unlock(&mddev->lock);
6148
6149         if (err == 0 &&
6150             copy_to_user(arg, file, sizeof(*file)))
6151                 err = -EFAULT;
6152
6153         kfree(file);
6154         return err;
6155 }
6156
6157 static int get_disk_info(struct mddev *mddev, void __user *arg)
6158 {
6159         mdu_disk_info_t info;
6160         struct md_rdev *rdev;
6161
6162         if (copy_from_user(&info, arg, sizeof(info)))
6163                 return -EFAULT;
6164
6165         rcu_read_lock();
6166         rdev = md_find_rdev_nr_rcu(mddev, info.number);
6167         if (rdev) {
6168                 info.major = MAJOR(rdev->bdev->bd_dev);
6169                 info.minor = MINOR(rdev->bdev->bd_dev);
6170                 info.raid_disk = rdev->raid_disk;
6171                 info.state = 0;
6172                 if (test_bit(Faulty, &rdev->flags))
6173                         info.state |= (1<<MD_DISK_FAULTY);
6174                 else if (test_bit(In_sync, &rdev->flags)) {
6175                         info.state |= (1<<MD_DISK_ACTIVE);
6176                         info.state |= (1<<MD_DISK_SYNC);
6177                 }
6178                 if (test_bit(Journal, &rdev->flags))
6179                         info.state |= (1<<MD_DISK_JOURNAL);
6180                 if (test_bit(WriteMostly, &rdev->flags))
6181                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
6182                 if (test_bit(FailFast, &rdev->flags))
6183                         info.state |= (1<<MD_DISK_FAILFAST);
6184         } else {
6185                 info.major = info.minor = 0;
6186                 info.raid_disk = -1;
6187                 info.state = (1<<MD_DISK_REMOVED);
6188         }
6189         rcu_read_unlock();
6190
6191         if (copy_to_user(arg, &info, sizeof(info)))
6192                 return -EFAULT;
6193
6194         return 0;
6195 }
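/*
 * Illustrative only: GET_DISK_INFO is a query by slot, so the caller
 * must fill in info.number first (that is what the copy_from_user()
 * above reads); everything else is output:
 *
 *	mdu_disk_info_t dinfo = { .number = 0 };
 *	if (ioctl(fd, GET_DISK_INFO, &dinfo) == 0 &&
 *	    !(dinfo.state & (1 << MD_DISK_REMOVED)))
 *		printf("slot %d is %d:%d, raid_disk %d\n", dinfo.number,
 *		       dinfo.major, dinfo.minor, dinfo.raid_disk);
 */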
6196
6197 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6198 {
6199         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6200         struct md_rdev *rdev;
6201         dev_t dev = MKDEV(info->major, info->minor);
6202
6203         if (mddev_is_clustered(mddev) &&
6204                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6205                 pr_warn("%s: Cannot add to clustered mddev.\n",
6206                         mdname(mddev));
6207                 return -EINVAL;
6208         }
6209
6210         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6211                 return -EOVERFLOW;
6212
6213         if (!mddev->raid_disks) {
6214                 int err;
6215                 /* expecting a device which has a superblock */
6216                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6217                 if (IS_ERR(rdev)) {
6218                         pr_warn("md: md_import_device returned %ld\n",
6219                                 PTR_ERR(rdev));
6220                         return PTR_ERR(rdev);
6221                 }
6222                 if (!list_empty(&mddev->disks)) {
6223                         struct md_rdev *rdev0
6224                                 = list_entry(mddev->disks.next,
6225                                              struct md_rdev, same_set);
6226                         err = super_types[mddev->major_version]
6227                                 .load_super(rdev, rdev0, mddev->minor_version);
6228                         if (err < 0) {
6229                                 pr_warn("md: %s has different UUID to %s\n",
6230                                         bdevname(rdev->bdev,b),
6231                                         bdevname(rdev0->bdev,b2));
6232                                 export_rdev(rdev);
6233                                 return -EINVAL;
6234                         }
6235                 }
6236                 err = bind_rdev_to_array(rdev, mddev);
6237                 if (err)
6238                         export_rdev(rdev);
6239                 return err;
6240         }
6241
6242         /*
6243          * add_new_disk can be used once the array is assembled
6244          * to add "hot spares".  They must already have a superblock
6245          * written
6246          */
6247         if (mddev->pers) {
6248                 int err;
6249                 if (!mddev->pers->hot_add_disk) {
6250                         pr_warn("%s: personality does not support diskops!\n",
6251                                 mdname(mddev));
6252                         return -EINVAL;
6253                 }
6254                 if (mddev->persistent)
6255                         rdev = md_import_device(dev, mddev->major_version,
6256                                                 mddev->minor_version);
6257                 else
6258                         rdev = md_import_device(dev, -1, -1);
6259                 if (IS_ERR(rdev)) {
6260                         pr_warn("md: md_import_device returned %ld\n",
6261                                 PTR_ERR(rdev));
6262                         return PTR_ERR(rdev);
6263                 }
6264                 /* set saved_raid_disk if appropriate */
6265                 if (!mddev->persistent) {
6266                         if (info->state & (1<<MD_DISK_SYNC)  &&
6267                             info->raid_disk < mddev->raid_disks) {
6268                                 rdev->raid_disk = info->raid_disk;
6269                                 set_bit(In_sync, &rdev->flags);
6270                                 clear_bit(Bitmap_sync, &rdev->flags);
6271                         } else
6272                                 rdev->raid_disk = -1;
6273                         rdev->saved_raid_disk = rdev->raid_disk;
6274                 } else
6275                         super_types[mddev->major_version].
6276                                 validate_super(mddev, rdev);
6277                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6278                      rdev->raid_disk != info->raid_disk) {
6279                         /* This was a hot-add request, but events doesn't
6280                          * match, so reject it.
6281                          */
6282                         export_rdev(rdev);
6283                         return -EINVAL;
6284                 }
6285
6286                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6287                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6288                         set_bit(WriteMostly, &rdev->flags);
6289                 else
6290                         clear_bit(WriteMostly, &rdev->flags);
6291                 if (info->state & (1<<MD_DISK_FAILFAST))
6292                         set_bit(FailFast, &rdev->flags);
6293                 else
6294                         clear_bit(FailFast, &rdev->flags);
6295
6296                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6297                         struct md_rdev *rdev2;
6298                         bool has_journal = false;
6299
6300                         /* make sure no existing journal disk */
6301                         rdev_for_each(rdev2, mddev) {
6302                                 if (test_bit(Journal, &rdev2->flags)) {
6303                                         has_journal = true;
6304                                         break;
6305                                 }
6306                         }
6307                         if (has_journal) {
6308                                 export_rdev(rdev);
6309                                 return -EBUSY;
6310                         }
6311                         set_bit(Journal, &rdev->flags);
6312                 }
6313                 /*
6314                  * check whether the device shows up in other nodes
6315                  */
6316                 if (mddev_is_clustered(mddev)) {
6317                         if (info->state & (1 << MD_DISK_CANDIDATE))
6318                                 set_bit(Candidate, &rdev->flags);
6319                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6320                                 /* --add initiated by this node */
6321                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6322                                 if (err) {
6323                                         export_rdev(rdev);
6324                                         return err;
6325                                 }
6326                         }
6327                 }
6328
6329                 rdev->raid_disk = -1;
6330                 err = bind_rdev_to_array(rdev, mddev);
6331
6332                 if (err)
6333                         export_rdev(rdev);
6334
6335                 if (mddev_is_clustered(mddev)) {
6336                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6337                                 if (!err) {
6338                                         err = md_cluster_ops->new_disk_ack(mddev,
6339                                                 err == 0);
6340                                         if (err)
6341                                                 md_kick_rdev_from_array(rdev);
6342                                 }
6343                         } else {
6344                                 if (err)
6345                                         md_cluster_ops->add_new_disk_cancel(mddev);
6346                                 else
6347                                         err = add_bound_rdev(rdev);
6348                         }
6349
6350                 } else if (!err)
6351                         err = add_bound_rdev(rdev);
6352
6353                 return err;
6354         }
6355
6356         /* otherwise, add_new_disk is only allowed
6357          * for major_version==0 superblocks
6358          */
6359         if (mddev->major_version != 0) {
6360                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6361                 return -EINVAL;
6362         }
6363
6364         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6365                 int err;
6366                 rdev = md_import_device(dev, -1, 0);
6367                 if (IS_ERR(rdev)) {
6368                         pr_warn("md: error, md_import_device() returned %ld\n",
6369                                 PTR_ERR(rdev));
6370                         return PTR_ERR(rdev);
6371                 }
6372                 rdev->desc_nr = info->number;
6373                 if (info->raid_disk < mddev->raid_disks)
6374                         rdev->raid_disk = info->raid_disk;
6375                 else
6376                         rdev->raid_disk = -1;
6377
6378                 if (rdev->raid_disk < mddev->raid_disks)
6379                         if (info->state & (1<<MD_DISK_SYNC))
6380                                 set_bit(In_sync, &rdev->flags);
6381
6382                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6383                         set_bit(WriteMostly, &rdev->flags);
6384                 if (info->state & (1<<MD_DISK_FAILFAST))
6385                         set_bit(FailFast, &rdev->flags);
6386
6387                 if (!mddev->persistent) {
6388                         pr_debug("md: nonpersistent superblock ...\n");
6389                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6390                 } else
6391                         rdev->sb_start = calc_dev_sboffset(rdev);
6392                 rdev->sectors = rdev->sb_start;
6393
6394                 err = bind_rdev_to_array(rdev, mddev);
6395                 if (err) {
6396                         export_rdev(rdev);
6397                         return err;
6398                 }
6399         }
6400
6401         return 0;
6402 }
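/*
 * Illustrative only: a hot-spare add through ADD_NEW_DISK on a running,
 * persistent array might look roughly like this (the device must
 * already carry a suitable superblock, as noted above; /dev/sdc1 is
 * just an example):
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	struct stat st;
 *	mdu_disk_info_t dinfo = { 0 };
 *
 *	if (stat("/dev/sdc1", &st) == 0) {
 *		dinfo.major = major(st.st_rdev);
 *		dinfo.minor = minor(st.st_rdev);
 *		if (ioctl(fd, ADD_NEW_DISK, &dinfo) != 0)
 *			perror("ADD_NEW_DISK");
 *	}
 */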
6403
6404 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6405 {
6406         char b[BDEVNAME_SIZE];
6407         struct md_rdev *rdev;
6408
6409         rdev = find_rdev(mddev, dev);
6410         if (!rdev)
6411                 return -ENXIO;
6412
6413         if (rdev->raid_disk < 0)
6414                 goto kick_rdev;
6415
6416         clear_bit(Blocked, &rdev->flags);
6417         remove_and_add_spares(mddev, rdev);
6418
6419         if (rdev->raid_disk >= 0)
6420                 goto busy;
6421
6422 kick_rdev:
6423         if (mddev_is_clustered(mddev))
6424                 md_cluster_ops->remove_disk(mddev, rdev);
6425
6426         md_kick_rdev_from_array(rdev);
6427         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6428         if (mddev->thread)
6429                 md_wakeup_thread(mddev->thread);
6430         else
6431                 md_update_sb(mddev, 1);
6432         md_new_event(mddev);
6433
6434         return 0;
6435 busy:
6436         pr_debug("md: cannot remove active disk %s from %s ...\n",
6437                  bdevname(rdev->bdev,b), mdname(mddev));
6438         return -EBUSY;
6439 }
6440
6441 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6442 {
6443         char b[BDEVNAME_SIZE];
6444         int err;
6445         struct md_rdev *rdev;
6446
6447         if (!mddev->pers)
6448                 return -ENODEV;
6449
6450         if (mddev->major_version != 0) {
6451                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6452                         mdname(mddev));
6453                 return -EINVAL;
6454         }
6455         if (!mddev->pers->hot_add_disk) {
6456                 pr_warn("%s: personality does not support diskops!\n",
6457                         mdname(mddev));
6458                 return -EINVAL;
6459         }
6460
6461         rdev = md_import_device(dev, -1, 0);
6462         if (IS_ERR(rdev)) {
6463                 pr_warn("md: error, md_import_device() returned %ld\n",
6464                         PTR_ERR(rdev));
6465                 return -EINVAL;
6466         }
6467
6468         if (mddev->persistent)
6469                 rdev->sb_start = calc_dev_sboffset(rdev);
6470         else
6471                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6472
6473         rdev->sectors = rdev->sb_start;
6474
6475         if (test_bit(Faulty, &rdev->flags)) {
6476                 pr_warn("md: cannot hot-add faulty %s disk to %s!\n",
6477                         bdevname(rdev->bdev,b), mdname(mddev));
6478                 err = -EINVAL;
6479                 goto abort_export;
6480         }
6481
6482         clear_bit(In_sync, &rdev->flags);
6483         rdev->desc_nr = -1;
6484         rdev->saved_raid_disk = -1;
6485         err = bind_rdev_to_array(rdev, mddev);
6486         if (err)
6487                 goto abort_export;
6488
6489         /*
6490          * The rest had better be atomic; we can have disk failures
6491          * noticed in interrupt contexts ...
6492          */
6493
6494         rdev->raid_disk = -1;
6495
6496         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6497         if (!mddev->thread)
6498                 md_update_sb(mddev, 1);
6499         /*
6500          * Kick recovery, maybe this spare has to be added to the
6501          * array immediately.
6502          */
6503         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6504         md_wakeup_thread(mddev->thread);
6505         md_new_event(mddev);
6506         return 0;
6507
6508 abort_export:
6509         export_rdev(rdev);
6510         return err;
6511 }
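/*
 * Illustrative only: unlike ADD_NEW_DISK, the HOT_ADD_DISK and
 * HOT_REMOVE_DISK ioctls take the device number itself as the argument
 * (md_ioctl() runs it through new_decode_dev()), not a pointer to a
 * struct. Assuming the usual glibc makedev() encoding and that 8:33 is
 * the partition in question:
 *
 *	#include <sys/sysmacros.h>
 *
 *	if (ioctl(fd, HOT_ADD_DISK, (unsigned long)makedev(8, 33)) != 0)
 *		perror("HOT_ADD_DISK");
 */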
6512
6513 static int set_bitmap_file(struct mddev *mddev, int fd)
6514 {
6515         int err = 0;
6516
6517         if (mddev->pers) {
6518                 if (!mddev->pers->quiesce || !mddev->thread)
6519                         return -EBUSY;
6520                 if (mddev->recovery || mddev->sync_thread)
6521                         return -EBUSY;
6522                 /* we should be able to change the bitmap. */
6523         }
6524
6525         if (fd >= 0) {
6526                 struct inode *inode;
6527                 struct file *f;
6528
6529                 if (mddev->bitmap || mddev->bitmap_info.file)
6530                         return -EEXIST; /* cannot add when bitmap is present */
6531                 f = fget(fd);
6532
6533                 if (f == NULL) {
6534                         pr_warn("%s: error: failed to get bitmap file\n",
6535                                 mdname(mddev));
6536                         return -EBADF;
6537                 }
6538
6539                 inode = f->f_mapping->host;
6540                 if (!S_ISREG(inode->i_mode)) {
6541                         pr_warn("%s: error: bitmap file must be a regular file\n",
6542                                 mdname(mddev));
6543                         err = -EBADF;
6544                 } else if (!(f->f_mode & FMODE_WRITE)) {
6545                         pr_warn("%s: error: bitmap file must be opened for write\n",
6546                                 mdname(mddev));
6547                         err = -EBADF;
6548                 } else if (atomic_read(&inode->i_writecount) != 1) {
6549                         pr_warn("%s: error: bitmap file is already in use\n",
6550                                 mdname(mddev));
6551                         err = -EBUSY;
6552                 }
6553                 if (err) {
6554                         fput(f);
6555                         return err;
6556                 }
6557                 mddev->bitmap_info.file = f;
6558                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6559         } else if (mddev->bitmap == NULL)
6560                 return -ENOENT; /* cannot remove what isn't there */
6561         err = 0;
6562         if (mddev->pers) {
6563                 mddev->pers->quiesce(mddev, 1);
6564                 if (fd >= 0) {
6565                         struct bitmap *bitmap;
6566
6567                         bitmap = bitmap_create(mddev, -1);
6568                         if (!IS_ERR(bitmap)) {
6569                                 mddev->bitmap = bitmap;
6570                                 err = bitmap_load(mddev);
6571                         } else
6572                                 err = PTR_ERR(bitmap);
6573                 }
6574                 if (fd < 0 || err) {
6575                         bitmap_destroy(mddev);
6576                         fd = -1; /* make sure to put the file */
6577                 }
6578                 mddev->pers->quiesce(mddev, 0);
6579         }
6580         if (fd < 0) {
6581                 struct file *f = mddev->bitmap_info.file;
6582                 if (f) {
6583                         spin_lock(&mddev->lock);
6584                         mddev->bitmap_info.file = NULL;
6585                         spin_unlock(&mddev->lock);
6586                         fput(f);
6587                 }
6588         }
6589
6590         return err;
6591 }
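/*
 * Illustrative only: SET_BITMAP_FILE passes a plain file descriptor as
 * the ioctl argument, and -1 removes the current file-backed bitmap.
 * The file must be a regular file, opened for write and not otherwise
 * in use, as checked above:
 *
 *	int bfd = open("/var/lib/md0-bitmap", O_RDWR);
 *	if (bfd >= 0 && ioctl(fd, SET_BITMAP_FILE, bfd) != 0)
 *		perror("SET_BITMAP_FILE");	// EEXIST, EBADF, EBUSY, ...
 */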
6592
6593 /*
6594  * set_array_info is used in two different ways.
6595  * The original usage is when creating a new array.
6596  * In this usage, raid_disks is > 0 and it together with
6597  *  level, size, not_persistent, layout and chunksize determines the
6598  *  shape of the array.
6599  *  This will always create an array with a type-0.90.0 superblock.
6600  * The newer usage is when assembling an array.
6601  *  In this case raid_disks will be 0, and the major_version field is
6602  *  used to determine which style super-blocks are to be found on the devices.
6603  *  The minor and patch _version numbers are also kept in case the
6604  *  super_block handler wishes to interpret them.
6605  */
6606 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6607 {
6608
6609         if (info->raid_disks == 0) {
6610                 /* just setting version number for superblock loading */
6611                 if (info->major_version < 0 ||
6612                     info->major_version >= ARRAY_SIZE(super_types) ||
6613                     super_types[info->major_version].name == NULL) {
6614                         /* maybe try to auto-load a module? */
6615                         pr_warn("md: superblock version %d not known\n",
6616                                 info->major_version);
6617                         return -EINVAL;
6618                 }
6619                 mddev->major_version = info->major_version;
6620                 mddev->minor_version = info->minor_version;
6621                 mddev->patch_version = info->patch_version;
6622                 mddev->persistent = !info->not_persistent;
6623                 /* ensure mddev_put doesn't delete this now that there
6624                  * is some minimal configuration.
6625                  */
6626                 mddev->ctime         = ktime_get_real_seconds();
6627                 return 0;
6628         }
6629         mddev->major_version = MD_MAJOR_VERSION;
6630         mddev->minor_version = MD_MINOR_VERSION;
6631         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6632         mddev->ctime         = ktime_get_real_seconds();
6633
6634         mddev->level         = info->level;
6635         mddev->clevel[0]     = 0;
6636         mddev->dev_sectors   = 2 * (sector_t)info->size;
6637         mddev->raid_disks    = info->raid_disks;
6638         /* don't set md_minor, it is determined by which /dev/md* was
6639          * opened
6640          */
6641         if (info->state & (1<<MD_SB_CLEAN))
6642                 mddev->recovery_cp = MaxSector;
6643         else
6644                 mddev->recovery_cp = 0;
6645         mddev->persistent    = !info->not_persistent;
6646         mddev->external      = 0;
6647
6648         mddev->layout        = info->layout;
6649         mddev->chunk_sectors = info->chunk_size >> 9;
6650
6651         if (mddev->persistent) {
6652                 mddev->max_disks = MD_SB_DISKS;
6653                 mddev->flags = 0;
6654                 mddev->sb_flags = 0;
6655         }
6656         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6657
6658         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6659         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6660         mddev->bitmap_info.offset = 0;
6661
6662         mddev->reshape_position = MaxSector;
6663
6664         /*
6665          * Generate a 128 bit UUID
6666          */
6667         get_random_bytes(mddev->uuid, 16);
6668
6669         mddev->new_level = mddev->level;
6670         mddev->new_chunk_sectors = mddev->chunk_sectors;
6671         mddev->new_layout = mddev->layout;
6672         mddev->delta_disks = 0;
6673         mddev->reshape_backwards = 0;
6674
6675         return 0;
6676 }
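/*
 * Illustrative only: the "assembly" usage described above sets just the
 * version fields and leaves raid_disks at 0, e.g. to prepare an mddev
 * for devices carrying v1.2 superblocks:
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	info.major_version = 1;
 *	info.minor_version = 2;
 *	if (ioctl(fd, SET_ARRAY_INFO, &info) != 0)
 *		perror("SET_ARRAY_INFO");
 */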
6677
6678 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6679 {
6680         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6681
6682         if (mddev->external_size)
6683                 return;
6684
6685         mddev->array_sectors = array_sectors;
6686 }
6687 EXPORT_SYMBOL(md_set_array_sectors);
6688
6689 static int update_size(struct mddev *mddev, sector_t num_sectors)
6690 {
6691         struct md_rdev *rdev;
6692         int rv;
6693         int fit = (num_sectors == 0);
6694         sector_t old_dev_sectors = mddev->dev_sectors;
6695
6696         if (mddev->pers->resize == NULL)
6697                 return -EINVAL;
6698         /* The "num_sectors" is the number of sectors of each device that
6699          * is used.  This can only make sense for arrays with redundancy.
6700          * linear and raid0 always use whatever space is available. We can only
6701          * consider changing this number if no resync or reconstruction is
6702          * happening, and if the new size is acceptable. It must fit before the
6703          * sb_start or, if that is <data_offset, it must fit before the size
6704          * of each device.  If num_sectors is zero, we find the largest size
6705          * that fits.
6706          */
6707         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6708             mddev->sync_thread)
6709                 return -EBUSY;
6710         if (mddev->ro)
6711                 return -EROFS;
6712
6713         rdev_for_each(rdev, mddev) {
6714                 sector_t avail = rdev->sectors;
6715
6716                 if (fit && (num_sectors == 0 || num_sectors > avail))
6717                         num_sectors = avail;
6718                 if (avail < num_sectors)
6719                         return -ENOSPC;
6720         }
6721         rv = mddev->pers->resize(mddev, num_sectors);
6722         if (!rv) {
6723                 if (mddev_is_clustered(mddev))
6724                         md_cluster_ops->update_size(mddev, old_dev_sectors);
6725                 else if (mddev->queue) {
6726                         set_capacity(mddev->gendisk, mddev->array_sectors);
6727                         revalidate_disk(mddev->gendisk);
6728                 }
6729         }
6730         return rv;
6731 }
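/*
 * Worked example of the sizing loop above: with three members whose
 * rdev->sectors are 1000, 1200 and 1100 and num_sectors == 0, "fit" is
 * set and num_sectors settles on 1000, the smallest device; an explicit
 * num_sectors larger than 1000 would instead fail with -ENOSPC.
 */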
6732
6733 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6734 {
6735         int rv;
6736         struct md_rdev *rdev;
6737         /* change the number of raid disks */
6738         if (mddev->pers->check_reshape == NULL)
6739                 return -EINVAL;
6740         if (mddev->ro)
6741                 return -EROFS;
6742         if (raid_disks <= 0 ||
6743             (mddev->max_disks && raid_disks >= mddev->max_disks))
6744                 return -EINVAL;
6745         if (mddev->sync_thread ||
6746             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6747             mddev->reshape_position != MaxSector)
6748                 return -EBUSY;
6749
6750         rdev_for_each(rdev, mddev) {
6751                 if (mddev->raid_disks < raid_disks &&
6752                     rdev->data_offset < rdev->new_data_offset)
6753                         return -EINVAL;
6754                 if (mddev->raid_disks > raid_disks &&
6755                     rdev->data_offset > rdev->new_data_offset)
6756                         return -EINVAL;
6757         }
6758
6759         mddev->delta_disks = raid_disks - mddev->raid_disks;
6760         if (mddev->delta_disks < 0)
6761                 mddev->reshape_backwards = 1;
6762         else if (mddev->delta_disks > 0)
6763                 mddev->reshape_backwards = 0;
6764
6765         rv = mddev->pers->check_reshape(mddev);
6766         if (rv < 0) {
6767                 mddev->delta_disks = 0;
6768                 mddev->reshape_backwards = 0;
6769         }
6770         return rv;
6771 }
6772
6773 /*
6774  * update_array_info is used to change the configuration of an
6775  * on-line array.
6776  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
6777  * fields in the info are checked against the array.
6778  * Any differences that cannot be handled will cause an error.
6779  * Normally, only one change can be managed at a time.
6780  */
6781 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6782 {
6783         int rv = 0;
6784         int cnt = 0;
6785         int state = 0;
6786
6787         /* calculate expected state, ignoring low bits */
6788         if (mddev->bitmap && mddev->bitmap_info.offset)
6789                 state |= (1 << MD_SB_BITMAP_PRESENT);
6790
6791         if (mddev->major_version != info->major_version ||
6792             mddev->minor_version != info->minor_version ||
6793 /*          mddev->patch_version != info->patch_version || */
6794             mddev->ctime         != info->ctime         ||
6795             mddev->level         != info->level         ||
6796 /*          mddev->layout        != info->layout        || */
6797             mddev->persistent    != !info->not_persistent ||
6798             mddev->chunk_sectors != info->chunk_size >> 9 ||
6799             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6800             ((state^info->state) & 0xfffffe00)
6801                 )
6802                 return -EINVAL;
6803         /* Check there is only one change */
6804         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6805                 cnt++;
6806         if (mddev->raid_disks != info->raid_disks)
6807                 cnt++;
6808         if (mddev->layout != info->layout)
6809                 cnt++;
6810         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6811                 cnt++;
6812         if (cnt == 0)
6813                 return 0;
6814         if (cnt > 1)
6815                 return -EINVAL;
6816
6817         if (mddev->layout != info->layout) {
6818                 /* Change layout
6819                  * we don't need to do anything at the md level, the
6820                  * personality will take care of it all.
6821                  */
6822                 if (mddev->pers->check_reshape == NULL)
6823                         return -EINVAL;
6824                 else {
6825                         mddev->new_layout = info->layout;
6826                         rv = mddev->pers->check_reshape(mddev);
6827                         if (rv)
6828                                 mddev->new_layout = mddev->layout;
6829                         return rv;
6830                 }
6831         }
6832         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6833                 rv = update_size(mddev, (sector_t)info->size * 2);
6834
6835         if (mddev->raid_disks    != info->raid_disks)
6836                 rv = update_raid_disks(mddev, info->raid_disks);
6837
6838         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6839                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6840                         rv = -EINVAL;
6841                         goto err;
6842                 }
6843                 if (mddev->recovery || mddev->sync_thread) {
6844                         rv = -EBUSY;
6845                         goto err;
6846                 }
6847                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6848                         struct bitmap *bitmap;
6849                         /* add the bitmap */
6850                         if (mddev->bitmap) {
6851                                 rv = -EEXIST;
6852                                 goto err;
6853                         }
6854                         if (mddev->bitmap_info.default_offset == 0) {
6855                                 rv = -EINVAL;
6856                                 goto err;
6857                         }
6858                         mddev->bitmap_info.offset =
6859                                 mddev->bitmap_info.default_offset;
6860                         mddev->bitmap_info.space =
6861                                 mddev->bitmap_info.default_space;
6862                         mddev->pers->quiesce(mddev, 1);
6863                         bitmap = bitmap_create(mddev, -1);
6864                         if (!IS_ERR(bitmap)) {
6865                                 mddev->bitmap = bitmap;
6866                                 rv = bitmap_load(mddev);
6867                         } else
6868                                 rv = PTR_ERR(bitmap);
6869                         if (rv)
6870                                 bitmap_destroy(mddev);
6871                         mddev->pers->quiesce(mddev, 0);
6872                 } else {
6873                         /* remove the bitmap */
6874                         if (!mddev->bitmap) {
6875                                 rv = -ENOENT;
6876                                 goto err;
6877                         }
6878                         if (mddev->bitmap->storage.file) {
6879                                 rv = -EINVAL;
6880                                 goto err;
6881                         }
6882                         if (mddev->bitmap_info.nodes) {
6883                                 /* hold PW on all the bitmap locks */
6884                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6885                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6886                                         rv = -EPERM;
6887                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6888                                         goto err;
6889                                 }
6890
6891                                 mddev->bitmap_info.nodes = 0;
6892                                 md_cluster_ops->leave(mddev);
6893                         }
6894                         mddev->pers->quiesce(mddev, 1);
6895                         bitmap_destroy(mddev);
6896                         mddev->pers->quiesce(mddev, 0);
6897                         mddev->bitmap_info.offset = 0;
6898                 }
6899         }
6900         md_update_sb(mddev, 1);
6901         return rv;
6902 err:
6903         return rv;
6904 }
6905
6906 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6907 {
6908         struct md_rdev *rdev;
6909         int err = 0;
6910
6911         if (mddev->pers == NULL)
6912                 return -ENODEV;
6913
6914         rcu_read_lock();
6915         rdev = find_rdev_rcu(mddev, dev);
6916         if (!rdev)
6917                 err = -ENODEV;
6918         else {
6919                 md_error(mddev, rdev);
6920                 if (!test_bit(Faulty, &rdev->flags))
6921                         err = -EBUSY;
6922         }
6923         rcu_read_unlock();
6924         return err;
6925 }
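/*
 * Illustrative only: SET_DISK_FAULTY, like the hot-add/remove ioctls,
 * takes an encoded device number directly (see new_decode_dev() in
 * md_ioctl() below). EBUSY here means md_error() declined to actually
 * fail the device:
 *
 *	if (ioctl(fd, SET_DISK_FAULTY, (unsigned long)makedev(8, 33)) != 0)
 *		perror("SET_DISK_FAULTY");
 */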
6926
6927 /*
6928  * We have a problem here: there is no easy way to give a CHS
6929  * virtual geometry. We currently pretend that we have a 2-head,
6930  * 4-sector geometry (with a BIG number of cylinders...). This
6931  * drives dosfs just mad... ;-)
6932  */
6933 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6934 {
6935         struct mddev *mddev = bdev->bd_disk->private_data;
6936
6937         geo->heads = 2;
6938         geo->sectors = 4;
6939         geo->cylinders = mddev->array_sectors / 8;
6940         return 0;
6941 }
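/*
 * With 2 heads and 4 sectors per track, one cylinder covers 2 * 4 = 8
 * sectors, hence the array_sectors / 8 above; e.g. a 1 TiB array
 * (2147483648 sectors) reports 268435456 cylinders.
 */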
6942
6943 static inline bool md_ioctl_valid(unsigned int cmd)
6944 {
6945         switch (cmd) {
6946         case ADD_NEW_DISK:
6947         case BLKROSET:
6948         case GET_ARRAY_INFO:
6949         case GET_BITMAP_FILE:
6950         case GET_DISK_INFO:
6951         case HOT_ADD_DISK:
6952         case HOT_REMOVE_DISK:
6953         case RAID_AUTORUN:
6954         case RAID_VERSION:
6955         case RESTART_ARRAY_RW:
6956         case RUN_ARRAY:
6957         case SET_ARRAY_INFO:
6958         case SET_BITMAP_FILE:
6959         case SET_DISK_FAULTY:
6960         case STOP_ARRAY:
6961         case STOP_ARRAY_RO:
6962         case CLUSTERED_DISK_NACK:
6963                 return true;
6964         default:
6965                 return false;
6966         }
6967 }
6968
6969 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6970                         unsigned int cmd, unsigned long arg)
6971 {
6972         int err = 0;
6973         void __user *argp = (void __user *)arg;
6974         struct mddev *mddev = NULL;
6975         int ro;
6976         bool did_set_md_closing = false;
6977
6978         if (!md_ioctl_valid(cmd))
6979                 return -ENOTTY;
6980
6981         switch (cmd) {
6982         case RAID_VERSION:
6983         case GET_ARRAY_INFO:
6984         case GET_DISK_INFO:
6985                 break;
6986         default:
6987                 if (!capable(CAP_SYS_ADMIN))
6988                         return -EACCES;
6989         }
6990
6991         /*
6992          * Commands dealing with the RAID driver but not any
6993          * particular array:
6994          */
6995         switch (cmd) {
6996         case RAID_VERSION:
6997                 err = get_version(argp);
6998                 goto out;
6999
7000 #ifndef MODULE
7001         case RAID_AUTORUN:
7002                 err = 0;
7003                 autostart_arrays(arg);
7004                 goto out;
7005 #endif
7006         default:;
7007         }
7008
7009         /*
7010          * Commands creating/starting a new array:
7011          */
7012
7013         mddev = bdev->bd_disk->private_data;
7014
7015         if (!mddev) {
7016                 BUG();
7017                 goto out;
7018         }
7019
7020         /* Some actions do not require the mutex */
7021         switch (cmd) {
7022         case GET_ARRAY_INFO:
7023                 if (!mddev->raid_disks && !mddev->external)
7024                         err = -ENODEV;
7025                 else
7026                         err = get_array_info(mddev, argp);
7027                 goto out;
7028
7029         case GET_DISK_INFO:
7030                 if (!mddev->raid_disks && !mddev->external)
7031                         err = -ENODEV;
7032                 else
7033                         err = get_disk_info(mddev, argp);
7034                 goto out;
7035
7036         case SET_DISK_FAULTY:
7037                 err = set_disk_faulty(mddev, new_decode_dev(arg));
7038                 goto out;
7039
7040         case GET_BITMAP_FILE:
7041                 err = get_bitmap_file(mddev, argp);
7042                 goto out;
7043
7044         }
7045
7046         if (cmd == ADD_NEW_DISK)
7047                 /* need to ensure md_delayed_delete() has completed */
7048                 flush_workqueue(md_misc_wq);
7049
7050         if (cmd == HOT_REMOVE_DISK)
7051                 /* need to ensure recovery thread has run */
7052                 wait_event_interruptible_timeout(mddev->sb_wait,
7053                                                  !test_bit(MD_RECOVERY_NEEDED,
7054                                                            &mddev->recovery),
7055                                                  msecs_to_jiffies(5000));
7056         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7057                 /* Need to flush page cache, and ensure no-one else opens
7058                  * and writes
7059                  */
7060                 mutex_lock(&mddev->open_mutex);
7061                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7062                         mutex_unlock(&mddev->open_mutex);
7063                         err = -EBUSY;
7064                         goto out;
7065                 }
7066                 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
7067                 set_bit(MD_CLOSING, &mddev->flags);
7068                 did_set_md_closing = true;
7069                 mutex_unlock(&mddev->open_mutex);
7070                 sync_blockdev(bdev);
7071         }
7072         err = mddev_lock(mddev);
7073         if (err) {
7074                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7075                          err, cmd);
7076                 goto out;
7077         }
7078
7079         if (cmd == SET_ARRAY_INFO) {
7080                 mdu_array_info_t info;
7081                 if (!arg)
7082                         memset(&info, 0, sizeof(info));
7083                 else if (copy_from_user(&info, argp, sizeof(info))) {
7084                         err = -EFAULT;
7085                         goto unlock;
7086                 }
7087                 if (mddev->pers) {
7088                         err = update_array_info(mddev, &info);
7089                         if (err) {
7090                                 pr_warn("md: couldn't update array info. %d\n", err);
7091                                 goto unlock;
7092                         }
7093                         goto unlock;
7094                 }
7095                 if (!list_empty(&mddev->disks)) {
7096                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
7097                         err = -EBUSY;
7098                         goto unlock;
7099                 }
7100                 if (mddev->raid_disks) {
7101                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
7102                         err = -EBUSY;
7103                         goto unlock;
7104                 }
7105                 err = set_array_info(mddev, &info);
7106                 if (err) {
7107                         pr_warn("md: couldn't set array info. %d\n", err);
7108                         goto unlock;
7109                 }
7110                 goto unlock;
7111         }
7112
7113         /*
7114          * Commands querying/configuring an existing array:
7115          */
7116         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7117          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7118         if ((!mddev->raid_disks && !mddev->external)
7119             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7120             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7121             && cmd != GET_BITMAP_FILE) {
7122                 err = -ENODEV;
7123                 goto unlock;
7124         }
7125
7126         /*
7127          * Commands even a read-only array can execute:
7128          */
7129         switch (cmd) {
7130         case RESTART_ARRAY_RW:
7131                 err = restart_array(mddev);
7132                 goto unlock;
7133
7134         case STOP_ARRAY:
7135                 err = do_md_stop(mddev, 0, bdev);
7136                 goto unlock;
7137
7138         case STOP_ARRAY_RO:
7139                 err = md_set_readonly(mddev, bdev);
7140                 goto unlock;
7141
7142         case HOT_REMOVE_DISK:
7143                 err = hot_remove_disk(mddev, new_decode_dev(arg));
7144                 goto unlock;
7145
7146         case ADD_NEW_DISK:
7147                 /* We can support ADD_NEW_DISK on read-only arrays
7148                  * only if we are re-adding a preexisting device.
7149                  * So require mddev->pers and MD_DISK_SYNC.
7150                  */
7151                 if (mddev->pers) {
7152                         mdu_disk_info_t info;
7153                         if (copy_from_user(&info, argp, sizeof(info)))
7154                                 err = -EFAULT;
7155                         else if (!(info.state & (1<<MD_DISK_SYNC)))
7156                                 /* Need to clear read-only for this */
7157                                 break;
7158                         else
7159                                 err = add_new_disk(mddev, &info);
7160                         goto unlock;
7161                 }
7162                 break;
7163
7164         case BLKROSET:
7165                 if (get_user(ro, (int __user *)(arg))) {
7166                         err = -EFAULT;
7167                         goto unlock;
7168                 }
7169                 err = -EINVAL;
7170
7171                 /* if the bdev is going readonly the value of mddev->ro
7172                  * does not matter, no writes are coming
7173                  */
7174                 if (ro)
7175                         goto unlock;
7176
7177                 /* are we already prepared for writes? */
7178                 if (mddev->ro != 1)
7179                         goto unlock;
7180
7181                 /* transitioning to auto-read-only (ro == 2) need only
7182                  * happen for arrays that call md_write_start
7183                  */
7184                 if (mddev->pers) {
7185                         err = restart_array(mddev);
7186                         if (err == 0) {
7187                                 mddev->ro = 2;
7188                                 set_disk_ro(mddev->gendisk, 0);
7189                         }
7190                 }
7191                 goto unlock;
7192         }
7193
7194         /*
7195          * The remaining ioctls are changing the state of the
7196          * superblock, so we do not allow them on read-only arrays.
7197          */
7198         if (mddev->ro && mddev->pers) {
7199                 if (mddev->ro == 2) {
7200                         mddev->ro = 0;
7201                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7202                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7203                         /* mddev_unlock will wake thread */
7204                         /* If a device failed while we were read-only, we
7205                          * need to make sure the metadata is updated now.
7206                          */
7207                         if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7208                                 mddev_unlock(mddev);
7209                                 wait_event(mddev->sb_wait,
7210                                            !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7211                                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7212                                 mddev_lock_nointr(mddev);
7213                         }
7214                 } else {
7215                         err = -EROFS;
7216                         goto unlock;
7217                 }
7218         }
7219
7220         switch (cmd) {
7221         case ADD_NEW_DISK:
7222         {
7223                 mdu_disk_info_t info;
7224                 if (copy_from_user(&info, argp, sizeof(info)))
7225                         err = -EFAULT;
7226                 else
7227                         err = add_new_disk(mddev, &info);
7228                 goto unlock;
7229         }
7230
7231         case CLUSTERED_DISK_NACK:
7232                 if (mddev_is_clustered(mddev))
7233                         md_cluster_ops->new_disk_ack(mddev, false);
7234                 else
7235                         err = -EINVAL;
7236                 goto unlock;
7237
7238         case HOT_ADD_DISK:
7239                 err = hot_add_disk(mddev, new_decode_dev(arg));
7240                 goto unlock;
7241
7242         case RUN_ARRAY:
7243                 err = do_md_run(mddev);
7244                 goto unlock;
7245
7246         case SET_BITMAP_FILE:
7247                 err = set_bitmap_file(mddev, (int)arg);
7248                 goto unlock;
7249
7250         default:
7251                 err = -EINVAL;
7252                 goto unlock;
7253         }
7254
7255 unlock:
7256         if (mddev->hold_active == UNTIL_IOCTL &&
7257             err != -EINVAL)
7258                 mddev->hold_active = 0;
7259         mddev_unlock(mddev);
7260 out:
7261         if (did_set_md_closing)
7262                 clear_bit(MD_CLOSING, &mddev->flags);
7263         return err;
7264 }
7265 #ifdef CONFIG_COMPAT
7266 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7267                     unsigned int cmd, unsigned long arg)
7268 {
7269         switch (cmd) {
7270         case HOT_REMOVE_DISK:
7271         case HOT_ADD_DISK:
7272         case SET_DISK_FAULTY:
7273         case SET_BITMAP_FILE:
7274                 /* These take an integer arg, do not convert */
7275                 break;
7276         default:
7277                 arg = (unsigned long)compat_ptr(arg);
7278                 break;
7279         }
7280
7281         return md_ioctl(bdev, mode, cmd, arg);
7282 }
7283 #endif /* CONFIG_COMPAT */
7284
7285 static int md_open(struct block_device *bdev, fmode_t mode)
7286 {
7287         /*
7288          * Succeed if we can lock the mddev, which confirms that
7289          * it isn't being stopped right now.
7290          */
7291         struct mddev *mddev = mddev_find(bdev->bd_dev);
7292         int err;
7293
7294         if (!mddev)
7295                 return -ENODEV;
7296
7297         if (mddev->gendisk != bdev->bd_disk) {
7298                 /* we are racing with mddev_put which is discarding this
7299                  * bd_disk.
7300                  */
7301                 mddev_put(mddev);
7302                 /* Wait until bdev->bd_disk is definitely gone */
7303                 flush_workqueue(md_misc_wq);
7304                 /* Then retry the open from the top */
7305                 return -ERESTARTSYS;
7306         }
7307         BUG_ON(mddev != bdev->bd_disk->private_data);
7308
7309         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7310                 goto out;
7311
7312         if (test_bit(MD_CLOSING, &mddev->flags)) {
7313                 mutex_unlock(&mddev->open_mutex);
7314                 err = -ENODEV;
7315                 goto out;
7316         }
7317
7318         err = 0;
7319         atomic_inc(&mddev->openers);
7320         mutex_unlock(&mddev->open_mutex);
7321
7322         check_disk_change(bdev);
7323  out:
7324         if (err)
7325                 mddev_put(mddev);
7326         return err;
7327 }
7328
7329 static void md_release(struct gendisk *disk, fmode_t mode)
7330 {
7331         struct mddev *mddev = disk->private_data;
7332
7333         BUG_ON(!mddev);
7334         atomic_dec(&mddev->openers);
7335         mddev_put(mddev);
7336 }
7337
7338 static int md_media_changed(struct gendisk *disk)
7339 {
7340         struct mddev *mddev = disk->private_data;
7341
7342         return mddev->changed;
7343 }
7344
7345 static int md_revalidate(struct gendisk *disk)
7346 {
7347         struct mddev *mddev = disk->private_data;
7348
7349         mddev->changed = 0;
7350         return 0;
7351 }
7352 static const struct block_device_operations md_fops =
7353 {
7354         .owner          = THIS_MODULE,
7355         .open           = md_open,
7356         .release        = md_release,
7357         .ioctl          = md_ioctl,
7358 #ifdef CONFIG_COMPAT
7359         .compat_ioctl   = md_compat_ioctl,
7360 #endif
7361         .getgeo         = md_getgeo,
7362         .media_changed  = md_media_changed,
7363         .revalidate_disk= md_revalidate,
7364 };
7365
7366 static int md_thread(void *arg)
7367 {
7368         struct md_thread *thread = arg;
7369
7370         /*
7371          * md_thread is a 'system-thread', its priority should be very
7372          * high. We avoid resource deadlocks individually in each
7373          * raid personality. (RAID5 does preallocation) We also use RR and
7374          * the very same RT priority as kswapd, thus we will never get
7375          * into a priority inversion deadlock.
7376          *
7377          * we definitely have to have equal or higher priority than
7378          * bdflush, otherwise bdflush will deadlock if there are too
7379          * many dirty RAID5 blocks.
7380          */
7381
7382         allow_signal(SIGKILL);
7383         while (!kthread_should_stop()) {
7384
7385                 /* We need to wait INTERRUPTIBLE so that
7386                  * we don't add to the load-average.
7387                  * That means we need to be sure no signals are
7388                  * pending
7389                  */
7390                 if (signal_pending(current))
7391                         flush_signals(current);
7392
7393                 wait_event_interruptible_timeout
7394                         (thread->wqueue,
7395                          test_bit(THREAD_WAKEUP, &thread->flags)
7396                          || kthread_should_stop() || kthread_should_park(),
7397                          thread->timeout);
7398
7399                 clear_bit(THREAD_WAKEUP, &thread->flags);
7400                 if (kthread_should_park())
7401                         kthread_parkme();
7402                 if (!kthread_should_stop())
7403                         thread->run(thread);
7404         }
7405
7406         return 0;
7407 }
7408
7409 void md_wakeup_thread(struct md_thread *thread)
7410 {
7411         if (thread) {
7412                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7413                 if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
7414                         wake_up(&thread->wqueue);
7415         }
7416 }
7417 EXPORT_SYMBOL(md_wakeup_thread);
7418
7419 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7420                 struct mddev *mddev, const char *name)
7421 {
7422         struct md_thread *thread;
7423
7424         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7425         if (!thread)
7426                 return NULL;
7427
7428         init_waitqueue_head(&thread->wqueue);
7429
7430         thread->run = run;
7431         thread->mddev = mddev;
7432         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7433         thread->tsk = kthread_run(md_thread, thread,
7434                                   "%s_%s",
7435                                   mdname(thread->mddev),
7436                                   name);
7437         if (IS_ERR(thread->tsk)) {
7438                 kfree(thread);
7439                 return NULL;
7440         }
7441         return thread;
7442 }
7443 EXPORT_SYMBOL(md_register_thread);
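/*
 * Typical use by a personality (a minimal sketch; raid1d_like() is a
 * hypothetical worker, real personalities pass their own, e.g. raid1d):
 *
 *	static void raid1d_like(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *		// drain this array's queued work, then return; md_thread()
 *		// sleeps until the next md_wakeup_thread()
 *	}
 *
 *	mddev->thread = md_register_thread(raid1d_like, mddev, "raid1");
 *	if (!mddev->thread)
 *		return -ENOMEM;
 */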
7444
7445 void md_unregister_thread(struct md_thread **threadp)
7446 {
7447         struct md_thread *thread = *threadp;
7448         if (!thread)
7449                 return;
7450         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7451         /* Locking ensures that mddev_unlock does not wake_up a
7452          * non-existent thread
7453          */
7454         spin_lock(&pers_lock);
7455         *threadp = NULL;
7456         spin_unlock(&pers_lock);
7457
7458         kthread_stop(thread->tsk);
7459         kfree(thread);
7460 }
7461 EXPORT_SYMBOL(md_unregister_thread);
7462
7463 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7464 {
7465         if (!rdev || test_bit(Faulty, &rdev->flags))
7466                 return;
7467
7468         if (!mddev->pers || !mddev->pers->error_handler)
7469                 return;
7470         mddev->pers->error_handler(mddev, rdev);
7471         if (mddev->degraded)
7472                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7473         sysfs_notify_dirent_safe(rdev->sysfs_state);
7474         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7475         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7476         md_wakeup_thread(mddev->thread);
7477         if (mddev->event_work.func)
7478                 queue_work(md_misc_wq, &mddev->event_work);
7479         md_new_event(mddev);
7480 }
7481 EXPORT_SYMBOL(md_error);
7482
7483 /* seq_file implementation for /proc/mdstat */
7484
7485 static void status_unused(struct seq_file *seq)
7486 {
7487         int i = 0;
7488         struct md_rdev *rdev;
7489
7490         seq_printf(seq, "unused devices: ");
7491
7492         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7493                 char b[BDEVNAME_SIZE];
7494                 i++;
7495                 seq_printf(seq, "%s ",
7496                               bdevname(rdev->bdev,b));
7497         }
7498         if (!i)
7499                 seq_printf(seq, "<none>");
7500
7501         seq_printf(seq, "\n");
7502 }
7503
7504 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7505 {
7506         sector_t max_sectors, resync, res;
7507         unsigned long dt, db;
7508         sector_t rt;
7509         int scale;
7510         unsigned int per_milli;
7511
7512         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7513             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7514                 max_sectors = mddev->resync_max_sectors;
7515         else
7516                 max_sectors = mddev->dev_sectors;
7517
7518         resync = mddev->curr_resync;
7519         if (resync <= 3) {
7520                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7521                         /* Still cleaning up */
7522                         resync = max_sectors;
7523         } else
7524                 resync -= atomic_read(&mddev->recovery_active);
7525
7526         if (resync == 0) {
7527                 if (mddev->recovery_cp < MaxSector) {
7528                         seq_printf(seq, "\tresync=PENDING");
7529                         return 1;
7530                 }
7531                 return 0;
7532         }
7533         if (resync < 3) {
7534                 seq_printf(seq, "\tresync=DELAYED");
7535                 return 1;
7536         }
7537
7538         WARN_ON(max_sectors == 0);
7539         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7540          * in a sector_t, and (max_sectors>>scale) will fit in a
7541          * u32, as those are the requirements for sector_div.
7542          * Thus 'scale' must be at least 10
7543          */
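        /*
         * Worked example (illustrative): with max_sectors = 2^33 (a 4 TiB
         * array) and resync = 2^31, scale stays at 10 because 2^32 does not
         * exceed 1ULL<<42, and res = (2^21 * 1000) / (2^23 + 1), i.e. about
         * 250 per-milli = 25.0%.  scale only grows past 10 for arrays
         * larger than roughly 4 PiB.
         */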
7544         scale = 10;
7545         if (sizeof(sector_t) > sizeof(unsigned long)) {
7546                 while (max_sectors/2 > (1ULL<<(scale+32)))
7547                         scale++;
7548         }
7549         res = (resync>>scale)*1000;
7550         sector_div(res, (u32)((max_sectors>>scale)+1));
7551
7552         per_milli = res;
7553         {
7554                 int i, x = per_milli/50, y = 20-x;
7555                 seq_printf(seq, "[");
7556                 for (i = 0; i < x; i++)
7557                         seq_printf(seq, "=");
7558                 seq_printf(seq, ">");
7559                 for (i = 0; i < y; i++)
7560                         seq_printf(seq, ".");
7561                 seq_printf(seq, "] ");
7562         }
7563         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7564                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7565                     "reshape" :
7566                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7567                      "check" :
7568                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7569                       "resync" : "recovery"))),
7570                    per_milli/10, per_milli % 10,
7571                    (unsigned long long) resync/2,
7572                    (unsigned long long) max_sectors/2);
7573
7574         /*
7575          * dt: time from mark until now
7576          * db: blocks written from mark until now
7577          * rt: remaining time
7578          *
7579          * rt is a sector_t, so could be 32bit or 64bit.
7580          * So we divide before multiply in case it is 32bit and close
7581          * to the limit.
7582          * We scale the divisor (db) by 32 to avoid losing precision
7583          * near the end of resync when the number of remaining sectors
7584          * is close to 'db'.
7585          * We then divide rt by 32 after multiplying by db to compensate.
7586          * The '+1' avoids division by zero if db is very small.
7587          */
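        /*
         * Worked example (illustrative numbers): with db = 20480 sectors
         * synced over dt = 3 seconds and 1,000,000 sectors remaining,
         * rt = 1000000 / (20480/32 + 1) = 1560, then multiplied by dt
         * gives 4680, and >>5 gives 146 seconds, shown as finish=2.4min;
         * the speed line prints db/2/dt = 3413K/sec.
         */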
7588         dt = ((jiffies - mddev->resync_mark) / HZ);
7589         if (!dt) dt++;
7590         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7591                 - mddev->resync_mark_cnt;
7592
7593         rt = max_sectors - resync;    /* number of remaining sectors */
7594         sector_div(rt, db/32+1);
7595         rt *= dt;
7596         rt >>= 5;
7597
7598         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7599                    ((unsigned long)rt % 60)/6);
7600
7601         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7602         return 1;
7603 }
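/*
 * For reference, the function above renders a /proc/mdstat progress line
 * of this general shape (all values made up):
 *
 *	[=========>...........]  resync = 45.0% (87890120/195310144)
 *	finish=12.4min speed=143210K/sec
 */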
7604
7605 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7606 {
7607         struct list_head *tmp;
7608         loff_t l = *pos;
7609         struct mddev *mddev;
7610
7611         if (l >= 0x10000)
7612                 return NULL;
7613         if (!l--)
7614                 /* header */
7615                 return (void*)1;
7616
7617         spin_lock(&all_mddevs_lock);
7618         list_for_each(tmp,&all_mddevs)
7619                 if (!l--) {
7620                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7621                         mddev_get(mddev);
7622                         spin_unlock(&all_mddevs_lock);
7623                         return mddev;
7624                 }
7625         spin_unlock(&all_mddevs_lock);
7626         if (!l--)
7627                 return (void*)2;/* tail */
7628         return NULL;
7629 }
7630
7631 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7632 {
7633         struct list_head *tmp;
7634         struct mddev *next_mddev, *mddev = v;
7635
7636         ++*pos;
7637         if (v == (void*)2)
7638                 return NULL;
7639
7640         spin_lock(&all_mddevs_lock);
7641         if (v == (void*)1)
7642                 tmp = all_mddevs.next;
7643         else
7644                 tmp = mddev->all_mddevs.next;
7645         if (tmp != &all_mddevs)
7646                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7647         else {
7648                 next_mddev = (void*)2;
7649                 *pos = 0x10000;
7650         }
7651         spin_unlock(&all_mddevs_lock);
7652
7653         if (v != (void*)1)
7654                 mddev_put(mddev);
7655         return next_mddev;
7656
7657 }
7658
7659 static void md_seq_stop(struct seq_file *seq, void *v)
7660 {
7661         struct mddev *mddev = v;
7662
7663         if (mddev && v != (void*)1 && v != (void*)2)
7664                 mddev_put(mddev);
7665 }
7666
7667 static int md_seq_show(struct seq_file *seq, void *v)
7668 {
7669         struct mddev *mddev = v;
7670         sector_t sectors;
7671         struct md_rdev *rdev;
7672
7673         if (v == (void*)1) {
7674                 struct md_personality *pers;
7675                 seq_printf(seq, "Personalities : ");
7676                 spin_lock(&pers_lock);
7677                 list_for_each_entry(pers, &pers_list, list)
7678                         seq_printf(seq, "[%s] ", pers->name);
7679
7680                 spin_unlock(&pers_lock);
7681                 seq_printf(seq, "\n");
7682                 seq->poll_event = atomic_read(&md_event_count);
7683                 return 0;
7684         }
7685         if (v == (void*)2) {
7686                 status_unused(seq);
7687                 return 0;
7688         }
7689
7690         spin_lock(&mddev->lock);
7691         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7692                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7693                                                 mddev->pers ? "" : "in");
7694                 if (mddev->pers) {
7695                         if (mddev->ro==1)
7696                                 seq_printf(seq, " (read-only)");
7697                         if (mddev->ro==2)
7698                                 seq_printf(seq, " (auto-read-only)");
7699                         seq_printf(seq, " %s", mddev->pers->name);
7700                 }
7701
7702                 sectors = 0;
7703                 rcu_read_lock();
7704                 rdev_for_each_rcu(rdev, mddev) {
7705                         char b[BDEVNAME_SIZE];
7706                         seq_printf(seq, " %s[%d]",
7707                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7708                         if (test_bit(WriteMostly, &rdev->flags))
7709                                 seq_printf(seq, "(W)");
7710                         if (test_bit(Journal, &rdev->flags))
7711                                 seq_printf(seq, "(J)");
7712                         if (test_bit(Faulty, &rdev->flags)) {
7713                                 seq_printf(seq, "(F)");
7714                                 continue;
7715                         }
7716                         if (rdev->raid_disk < 0)
7717                                 seq_printf(seq, "(S)"); /* spare */
7718                         if (test_bit(Replacement, &rdev->flags))
7719                                 seq_printf(seq, "(R)");
7720                         sectors += rdev->sectors;
7721                 }
7722                 rcu_read_unlock();
7723
7724                 if (!list_empty(&mddev->disks)) {
7725                         if (mddev->pers)
7726                                 seq_printf(seq, "\n      %llu blocks",
7727                                            (unsigned long long)
7728                                            mddev->array_sectors / 2);
7729                         else
7730                                 seq_printf(seq, "\n      %llu blocks",
7731                                            (unsigned long long)sectors / 2);
7732                 }
7733                 if (mddev->persistent) {
7734                         if (mddev->major_version != 0 ||
7735                             mddev->minor_version != 90) {
7736                                 seq_printf(seq," super %d.%d",
7737                                            mddev->major_version,
7738                                            mddev->minor_version);
7739                         }
7740                 } else if (mddev->external)
7741                         seq_printf(seq, " super external:%s",
7742                                    mddev->metadata_type);
7743                 else
7744                         seq_printf(seq, " super non-persistent");
7745
7746                 if (mddev->pers) {
7747                         mddev->pers->status(seq, mddev);
7748                         seq_printf(seq, "\n      ");
7749                         if (mddev->pers->sync_request) {
7750                                 if (status_resync(seq, mddev))
7751                                         seq_printf(seq, "\n      ");
7752                         }
7753                 } else
7754                         seq_printf(seq, "\n       ");
7755
7756                 bitmap_status(seq, mddev->bitmap);
7757
7758                 seq_printf(seq, "\n");
7759         }
7760         spin_unlock(&mddev->lock);
7761
7762         return 0;
7763 }
7764
7765 static const struct seq_operations md_seq_ops = {
7766         .start  = md_seq_start,
7767         .next   = md_seq_next,
7768         .stop   = md_seq_stop,
7769         .show   = md_seq_show,
7770 };
7771
7772 static int md_seq_open(struct inode *inode, struct file *file)
7773 {
7774         struct seq_file *seq;
7775         int error;
7776
7777         error = seq_open(file, &md_seq_ops);
7778         if (error)
7779                 return error;
7780
7781         seq = file->private_data;
7782         seq->poll_event = atomic_read(&md_event_count);
7783         return error;
7784 }
7785
7786 static int md_unloading;
7787 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7788 {
7789         struct seq_file *seq = filp->private_data;
7790         int mask;
7791
7792         if (md_unloading)
7793                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7794         poll_wait(filp, &md_event_waiters, wait);
7795
7796         /* always allow read */
7797         mask = POLLIN | POLLRDNORM;
7798
7799         if (seq->poll_event != atomic_read(&md_event_count))
7800                 mask |= POLLERR | POLLPRI;
7801         return mask;
7802 }
7803
7804 static const struct file_operations md_seq_fops = {
7805         .owner          = THIS_MODULE,
7806         .open           = md_seq_open,
7807         .read           = seq_read,
7808         .llseek         = seq_lseek,
7809         .release        = seq_release_private,
7810         .poll           = mdstat_poll,
7811 };
7812
7813 int register_md_personality(struct md_personality *p)
7814 {
7815         pr_debug("md: %s personality registered for level %d\n",
7816                  p->name, p->level);
7817         spin_lock(&pers_lock);
7818         list_add_tail(&p->list, &pers_list);
7819         spin_unlock(&pers_lock);
7820         return 0;
7821 }
7822 EXPORT_SYMBOL(register_md_personality);
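/*
 * Minimal sketch of how a personality module hooks in (the structure is
 * abridged; real personalities such as raid1 fill in many more methods):
 *
 *	static struct md_personality raid1_personality = {
 *		.name		= "raid1",
 *		.level		= 1,
 *		.owner		= THIS_MODULE,
 *		.make_request	= raid1_make_request,
 *		.run		= raid1_run,
 *		// ... remaining methods elided ...
 *	};
 *
 *	static int __init raid_init(void)
 *	{
 *		return register_md_personality(&raid1_personality);
 *	}
 *
 *	static void __exit raid_exit(void)
 *	{
 *		unregister_md_personality(&raid1_personality);
 *	}
 */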
7823
7824 int unregister_md_personality(struct md_personality *p)
7825 {
7826         pr_debug("md: %s personality unregistered\n", p->name);
7827         spin_lock(&pers_lock);
7828         list_del_init(&p->list);
7829         spin_unlock(&pers_lock);
7830         return 0;
7831 }
7832 EXPORT_SYMBOL(unregister_md_personality);
7833
7834 int register_md_cluster_operations(struct md_cluster_operations *ops,
7835                                    struct module *module)
7836 {
7837         int ret = 0;
7838         spin_lock(&pers_lock);
7839         if (md_cluster_ops != NULL)
7840                 ret = -EALREADY;
7841         else {
7842                 md_cluster_ops = ops;
7843                 md_cluster_mod = module;
7844         }
7845         spin_unlock(&pers_lock);
7846         return ret;
7847 }
7848 EXPORT_SYMBOL(register_md_cluster_operations);
7849
7850 int unregister_md_cluster_operations(void)
7851 {
7852         spin_lock(&pers_lock);
7853         md_cluster_ops = NULL;
7854         spin_unlock(&pers_lock);
7855         return 0;
7856 }
7857 EXPORT_SYMBOL(unregister_md_cluster_operations);
7858
7859 int md_setup_cluster(struct mddev *mddev, int nodes)
7860 {
7861         if (!md_cluster_ops)
7862                 request_module("md-cluster");
7863         spin_lock(&pers_lock);
7864         /* ensure module won't be unloaded */
7865         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7866                 pr_warn("can't find md-cluster module or get its reference.\n");
7867                 spin_unlock(&pers_lock);
7868                 return -ENOENT;
7869         }
7870         spin_unlock(&pers_lock);
7871
7872         return md_cluster_ops->join(mddev, nodes);
7873 }
7874
7875 void md_cluster_stop(struct mddev *mddev)
7876 {
7877         if (!md_cluster_ops)
7878                 return;
7879         md_cluster_ops->leave(mddev);
7880         module_put(md_cluster_mod);
7881 }
7882
7883 static int is_mddev_idle(struct mddev *mddev, int init)
7884 {
7885         struct md_rdev *rdev;
7886         int idle;
7887         int curr_events;
7888
7889         idle = 1;
7890         rcu_read_lock();
7891         rdev_for_each_rcu(rdev, mddev) {
7892                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7893                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7894                               (int)part_stat_read(&disk->part0, sectors[1]) -
7895                               atomic_read(&disk->sync_io);
7896                 /* sync IO will cause sync_io to increase ahead of disk_stats,
7897                  * as sync_io is counted when a request starts, and
7898                  * disk_stats is counted when it completes.
7899                  * So resync activity will cause curr_events to be smaller than
7900                  * when there was no such activity.
7901                  * non-sync IO will cause disk_stat to increase without
7902                  * increasing sync_io so curr_events will (eventually)
7903                  * be larger than it was before.  Once it becomes
7904                  * substantially larger, the test below will cause
7905                  * the array to appear non-idle, and resync will slow
7906                  * down.
7907                  * If there is a lot of outstanding resync activity when
7908                  * we set last_event to curr_events, then all that activity
7909                  * completing might cause the array to appear non-idle
7910                  * and resync will be slowed down even though there might
7911                  * not have been non-resync activity.  This will only
7912                  * happen once though.  'last_events' will soon reflect
7913                  * the state where there is little or no outstanding
7914                  * resync requests, and further resync activity will
7915                  * always make curr_events less than last_events.
7916                  *
7917                  */
7918                 if (init || curr_events - rdev->last_events > 64) {
7919                         rdev->last_events = curr_events;
7920                         idle = 0;
7921                 }
7922         }
7923         rcu_read_unlock();
7924         return idle;
7925 }
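/*
 * Worked example of the heuristic above (numbers made up): if a member
 * completed 10000 sectors of I/O since the last check and 9950 of them
 * were resync I/O (sync_io rose by 9950), curr_events advances by only
 * about 50 and stays within the 64-sector slack, so the array still
 * counts as idle.  A burst of normal I/O pushes curr_events past
 * rdev->last_events + 64 and the resync throttles back toward speed_min.
 */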
7926
7927 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7928 {
7929         /* another "blocks" (512-byte) blocks have been synced */
7930         atomic_sub(blocks, &mddev->recovery_active);
7931         wake_up(&mddev->recovery_wait);
7932         if (!ok) {
7933                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7934                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7935                 md_wakeup_thread(mddev->thread);
7936                 // stop recovery, signal do_sync ....
7937         }
7938 }
7939 EXPORT_SYMBOL(md_done_sync);
7940
7941 /* md_write_start(mddev, bi)
7942  * If we need to update some array metadata (e.g. 'active' flag
7943  * in superblock) before writing, schedule a superblock update
7944  * and wait for it to complete.
7945  */
7946 void md_write_start(struct mddev *mddev, struct bio *bi)
7947 {
7948         int did_change = 0;
7949         if (bio_data_dir(bi) != WRITE)
7950                 return;
7951
7952         BUG_ON(mddev->ro == 1);
7953         if (mddev->ro == 2) {
7954                 /* need to switch to read/write */
7955                 mddev->ro = 0;
7956                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7957                 md_wakeup_thread(mddev->thread);
7958                 md_wakeup_thread(mddev->sync_thread);
7959                 did_change = 1;
7960         }
7961         rcu_read_lock();
7962         percpu_ref_get(&mddev->writes_pending);
7963         smp_mb(); /* Match smp_mb in set_in_sync() */
7964         if (mddev->safemode == 1)
7965                 mddev->safemode = 0;
7966         /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
7967         if (mddev->in_sync || !mddev->sync_checkers) {
7968                 spin_lock(&mddev->lock);
7969                 if (mddev->in_sync) {
7970                         mddev->in_sync = 0;
7971                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
7972                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
7973                         md_wakeup_thread(mddev->thread);
7974                         did_change = 1;
7975                 }
7976                 spin_unlock(&mddev->lock);
7977         }
7978         rcu_read_unlock();
7979         if (did_change)
7980                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7981         wait_event(mddev->sb_wait,
7982                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7983 }
7984 EXPORT_SYMBOL(md_write_start);
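/*
 * Minimal sketch of the usual pairing in a personality's write path
 * (handle_write() is hypothetical):
 *
 *	static void handle_write(struct mddev *mddev, struct bio *bio)
 *	{
 *		md_write_start(mddev, bio);	// may block on a sb update
 *		// ... map and submit the write; the personality's endio
 *		// handler later calls md_write_end(mddev) exactly once
 *	}
 */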
7985
7986 /* md_write_inc can only be called when md_write_start() has
7987  * already been called at least once for the current request.
7988  * It increments the counter and is useful when a single request
7989  * is split into several parts.  Each part causes an increment and
7990  * so needs a matching md_write_end().
7991  * Unlike md_write_start(), it is safe to call md_write_inc() inside
7992  * a spinlocked region.
7993  */
7994 void md_write_inc(struct mddev *mddev, struct bio *bi)
7995 {
7996         if (bio_data_dir(bi) != WRITE)
7997                 return;
7998         WARN_ON_ONCE(mddev->in_sync || mddev->ro);
7999         percpu_ref_get(&mddev->writes_pending);
8000 }
8001 EXPORT_SYMBOL(md_write_inc);
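/*
 * Minimal sketch of the split case described above ('bs' stands in for
 * whatever bio_set the personality owns):
 *
 *	md_write_start(mddev, bio);		// first reference
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *	md_write_inc(mddev, bio);		// one more for the split
 *	// submit 'split' and 'bio'; each completion does md_write_end()
 */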
8002
8003 void md_write_end(struct mddev *mddev)
8004 {
8005         percpu_ref_put(&mddev->writes_pending);
8006
8007         if (mddev->safemode == 2)
8008                 md_wakeup_thread(mddev->thread);
8009         else if (mddev->safemode_delay)
8010                 /* The roundup() ensures this only performs locking once
8011                  * every ->safemode_delay jiffies
8012                  */
8013                 mod_timer(&mddev->safemode_timer,
8014                           roundup(jiffies, mddev->safemode_delay) +
8015                           mddev->safemode_delay);
8016 }
8017
8018 EXPORT_SYMBOL(md_write_end);
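/*
 * Worked example of the roundup() above (illustrative): with
 * safemode_delay = 200 jiffies and jiffies = 1234, roundup() yields 1400
 * and the timer is set for 1600.  Every md_write_end() while jiffies is
 * in 1201..1400 computes that same expiry, and mod_timer() returns early
 * when the expiry is unchanged, so the timer base lock is contended at
 * most once per 200-jiffy window.
 */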
8019
8020 /* md_allow_write(mddev)
8021  * Calling this ensures that the array is marked 'active' so that writes
8022  * may proceed without blocking.  It is important to call this before
8023  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8024  * Must be called with mddev_lock held.
8025  *
8026  * In the ->external case MD_SB_CHANGE_PENDING cannot be cleared until mddev->lock
8027  * is dropped, so return -EAGAIN after notifying userspace.
8028  */
8029 int md_allow_write(struct mddev *mddev)
8030 {
8031         if (!mddev->pers)
8032                 return 0;
8033         if (mddev->ro)
8034                 return 0;
8035         if (!mddev->pers->sync_request)
8036                 return 0;
8037
8038         spin_lock(&mddev->lock);
8039         if (mddev->in_sync) {
8040                 mddev->in_sync = 0;
8041                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8042                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8043                 if (mddev->safemode_delay &&
8044                     mddev->safemode == 0)
8045                         mddev->safemode = 1;
8046                 spin_unlock(&mddev->lock);
8047                 md_update_sb(mddev, 0);
8048                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8049         } else
8050                 spin_unlock(&mddev->lock);
8051
8052         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
8053                 return -EAGAIN;
8054         else
8055                 return 0;
8056 }
8057 EXPORT_SYMBOL_GPL(md_allow_write);
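/*
 * Minimal sketch of the intended calling pattern while holding the mddev
 * lock (the reconfiguration step is hypothetical; raid5 does something
 * similar around resizing its stripe cache):
 *
 *	err = md_allow_write(mddev);
 *	if (err)		// -EAGAIN: metadata write still pending
 *		return err;
 *	new = kzalloc(size, GFP_KERNEL);	// safe: array marked active
 */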
8058
8059 #define SYNC_MARKS      10
8060 #define SYNC_MARK_STEP  (3*HZ)
8061 #define UPDATE_FREQUENCY (5*60*HZ)
8062 void md_do_sync(struct md_thread *thread)
8063 {
8064         struct mddev *mddev = thread->mddev;
8065         struct mddev *mddev2;
8066         unsigned int currspeed = 0,
8067                  window;
8068         sector_t max_sectors,j, io_sectors, recovery_done;
8069         unsigned long mark[SYNC_MARKS];
8070         unsigned long update_time;
8071         sector_t mark_cnt[SYNC_MARKS];
8072         int last_mark,m;
8073         struct list_head *tmp;
8074         sector_t last_check;
8075         int skipped = 0;
8076         struct md_rdev *rdev;
8077         char *desc, *action = NULL;
8078         struct blk_plug plug;
8079         int ret;
8080
8081         /* just in case the thread restarts... */
8082         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8083                 return;
8084         if (mddev->ro) { /* never try to sync a read-only array */
8085                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8086                 return;
8087         }
8088
8089         if (mddev_is_clustered(mddev)) {
8090                 ret = md_cluster_ops->resync_start(mddev);
8091                 if (ret)
8092                         goto skip;
8093
8094                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8095                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8096                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8097                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8098                      && ((unsigned long long)mddev->curr_resync_completed
8099                          < (unsigned long long)mddev->resync_max_sectors))
8100                         goto skip;
8101         }
8102
8103         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8104                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8105                         desc = "data-check";
8106                         action = "check";
8107                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8108                         desc = "requested-resync";
8109                         action = "repair";
8110                 } else
8111                         desc = "resync";
8112         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8113                 desc = "reshape";
8114         else
8115                 desc = "recovery";
8116
8117         mddev->last_sync_action = action ?: desc;
8118
8119         /* we overload curr_resync somewhat here.
8120          * 0 == not engaged in resync at all
8121          * 2 == checking that there is no conflict with another sync
8122          * 1 == like 2, but have yielded to allow conflicting resync to
8123          *              commence
8124          * other == active in resync - this many blocks
8125          *
8126          * Before starting a resync we must have set curr_resync to
8127          * 2, and then checked that every "conflicting" array has curr_resync
8128          * less than ours.  When we find one that is the same or higher
8129          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8130          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
8131          * This will mean we have to start checking from the beginning again.
8132          *
8133          */
8134
8135         do {
8136                 int mddev2_minor = -1;
8137                 mddev->curr_resync = 2;
8138
8139         try_again:
8140                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8141                         goto skip;
8142                 for_each_mddev(mddev2, tmp) {
8143                         if (mddev2 == mddev)
8144                                 continue;
8145                         if (!mddev->parallel_resync
8146                         &&  mddev2->curr_resync
8147                         &&  match_mddev_units(mddev, mddev2)) {
8148                                 DEFINE_WAIT(wq);
8149                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
8150                                         /* arbitrarily yield */
8151                                         mddev->curr_resync = 1;
8152                                         wake_up(&resync_wait);
8153                                 }
8154                                 if (mddev > mddev2 && mddev->curr_resync == 1)
8155                                         /* no need to wait here, we can wait the next
8156                                          * time 'round when curr_resync == 2
8157                                          */
8158                                         continue;
8159                                 /* We need to wait 'interruptible' so as not to
8160                                  * contribute to the load average, and not to
8161                                  * be caught by 'softlockup'
8162                                  */
8163                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8164                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8165                                     mddev2->curr_resync >= mddev->curr_resync) {
8166                                         if (mddev2_minor != mddev2->md_minor) {
8167                                                 mddev2_minor = mddev2->md_minor;
8168                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8169                                                         desc, mdname(mddev),
8170                                                         mdname(mddev2));
8171                                         }
8172                                         mddev_put(mddev2);
8173                                         if (signal_pending(current))
8174                                                 flush_signals(current);
8175                                         schedule();
8176                                         finish_wait(&resync_wait, &wq);
8177                                         goto try_again;
8178                                 }
8179                                 finish_wait(&resync_wait, &wq);
8180                         }
8181                 }
8182         } while (mddev->curr_resync < 2);
8183
8184         j = 0;
8185         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8186                 /* resync follows the size requested by the personality,
8187                  * which defaults to physical size, but can be virtual size
8188                  */
8189                 max_sectors = mddev->resync_max_sectors;
8190                 atomic64_set(&mddev->resync_mismatches, 0);
8191                 /* we don't use the checkpoint if there's a bitmap */
8192                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8193                         j = mddev->resync_min;
8194                 else if (!mddev->bitmap)
8195                         j = mddev->recovery_cp;
8196
8197         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8198                 max_sectors = mddev->resync_max_sectors;
8199         else {
8200                 /* recovery follows the physical size of devices */
8201                 max_sectors = mddev->dev_sectors;
8202                 j = MaxSector;
8203                 rcu_read_lock();
8204                 rdev_for_each_rcu(rdev, mddev)
8205                         if (rdev->raid_disk >= 0 &&
8206                             !test_bit(Journal, &rdev->flags) &&
8207                             !test_bit(Faulty, &rdev->flags) &&
8208                             !test_bit(In_sync, &rdev->flags) &&
8209                             rdev->recovery_offset < j)
8210                                 j = rdev->recovery_offset;
8211                 rcu_read_unlock();
8212
8213                 /* If there is a bitmap, we need to make sure all
8214                  * writes that started before we added a spare
8215                  * complete before we start doing a recovery.
8216                  * Otherwise the write might complete and (via
8217                  * bitmap_endwrite) set a bit in the bitmap after the
8218                  * recovery has checked that bit and skipped that
8219                  * region.
8220                  */
8221                 if (mddev->bitmap) {
8222                         mddev->pers->quiesce(mddev, 1);
8223                         mddev->pers->quiesce(mddev, 0);
8224                 }
8225         }
8226
8227         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8228         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8229         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8230                  speed_max(mddev), desc);
8231
8232         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8233
8234         io_sectors = 0;
8235         for (m = 0; m < SYNC_MARKS; m++) {
8236                 mark[m] = jiffies;
8237                 mark_cnt[m] = io_sectors;
8238         }
8239         last_mark = 0;
8240         mddev->resync_mark = mark[last_mark];
8241         mddev->resync_mark_cnt = mark_cnt[last_mark];
8242
8243         /*
8244          * Tune reconstruction:
8245          */
8246         window = 32*(PAGE_SIZE/512);
8247         pr_debug("md: using %dk window, over a total of %lluk.\n",
8248                  window/2, (unsigned long long)max_sectors/2);
8249
8250         atomic_set(&mddev->recovery_active, 0);
8251         last_check = 0;
8252
8253         if (j>2) {
8254                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8255                          desc, mdname(mddev));
8256                 mddev->curr_resync = j;
8257         } else
8258                 mddev->curr_resync = 3; /* no longer delayed */
8259         mddev->curr_resync_completed = j;
8260         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8261         md_new_event(mddev);
8262         update_time = jiffies;
8263
8264         blk_start_plug(&plug);
8265         while (j < max_sectors) {
8266                 sector_t sectors;
8267
8268                 skipped = 0;
8269
8270                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8271                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8272                       (mddev->curr_resync - mddev->curr_resync_completed)
8273                       > (max_sectors >> 4)) ||
8274                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8275                      (j - mddev->curr_resync_completed)*2
8276                      >= mddev->resync_max - mddev->curr_resync_completed ||
8277                      mddev->curr_resync_completed > mddev->resync_max
8278                             )) {
8279                         /* time to update curr_resync_completed */
8280                         wait_event(mddev->recovery_wait,
8281                                    atomic_read(&mddev->recovery_active) == 0);
8282                         mddev->curr_resync_completed = j;
8283                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8284                             j > mddev->recovery_cp)
8285                                 mddev->recovery_cp = j;
8286                         update_time = jiffies;
8287                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8288                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8289                 }
8290
8291                 while (j >= mddev->resync_max &&
8292                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8293                         /* As this condition is controlled by user-space,
8294                          * we can block indefinitely, so use '_interruptible'
8295                          * to avoid triggering warnings.
8296                          */
8297                         flush_signals(current); /* just in case */
8298                         wait_event_interruptible(mddev->recovery_wait,
8299                                                  mddev->resync_max > j
8300                                                  || test_bit(MD_RECOVERY_INTR,
8301                                                              &mddev->recovery));
8302                 }
8303
8304                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8305                         break;
8306
8307                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8308                 if (sectors == 0) {
8309                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8310                         break;
8311                 }
8312
8313                 if (!skipped) { /* actual IO requested */
8314                         io_sectors += sectors;
8315                         atomic_add(sectors, &mddev->recovery_active);
8316                 }
8317
8318                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8319                         break;
8320
8321                 j += sectors;
8322                 if (j > max_sectors)
8323                         /* when skipping, extra large numbers can be returned. */
8324                         j = max_sectors;
8325                 if (j > 2)
8326                         mddev->curr_resync = j;
8327                 mddev->curr_mark_cnt = io_sectors;
8328                 if (last_check == 0)
8329                         /* this is the earliest that rebuild will be
8330                          * visible in /proc/mdstat
8331                          */
8332                         md_new_event(mddev);
8333
8334                 if (last_check + window > io_sectors || j == max_sectors)
8335                         continue;
8336
8337                 last_check = io_sectors;
8338         repeat:
8339                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8340                         /* step marks */
8341                         int next = (last_mark+1) % SYNC_MARKS;
8342
8343                         mddev->resync_mark = mark[next];
8344                         mddev->resync_mark_cnt = mark_cnt[next];
8345                         mark[next] = jiffies;
8346                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8347                         last_mark = next;
8348                 }
8349
8350                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8351                         break;
8352
8353                 /*
8354          * this loop exits only when we are slower than the
8355          * 'hard' speed limit, or the system was IO-idle for
8356                  * a jiffy.
8357                  * the system might be non-idle CPU-wise, but we only care
8358                  * about not overloading the IO subsystem. (things like an
8359                  * e2fsck being done on the RAID array should execute fast)
8360                  */
8361                 cond_resched();
8362
8363                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8364                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8365                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
8366
8367                 if (currspeed > speed_min(mddev)) {
8368                         if (currspeed > speed_max(mddev)) {
8369                                 msleep(500);
8370                                 goto repeat;
8371                         }
8372                         if (!is_mddev_idle(mddev, 0)) {
8373                                 /*
8374                                  * Give other IO more of a chance.
8375                                  * The faster the devices, the less we wait.
8376                                  */
8377                                 wait_event(mddev->recovery_wait,
8378                                            !atomic_read(&mddev->recovery_active));
8379                         }
8380                 }
8381         }
8382         pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
8383                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8384                 ? "interrupted" : "done");
8385         /*
8386          * this also signals 'finished resyncing' to md_stop
8387          */
8388         blk_finish_plug(&plug);
8389         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8390
8391         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8392             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8393             mddev->curr_resync > 3) {
8394                 mddev->curr_resync_completed = mddev->curr_resync;
8395                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8396         }
8397         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8398
8399         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8400             mddev->curr_resync > 3) {
8401                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8402                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8403                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8404                                         pr_debug("md: checkpointing %s of %s.\n",
8405                                                  desc, mdname(mddev));
8406                                         if (test_bit(MD_RECOVERY_ERROR,
8407                                                 &mddev->recovery))
8408                                                 mddev->recovery_cp =
8409                                                         mddev->curr_resync_completed;
8410                                         else
8411                                                 mddev->recovery_cp =
8412                                                         mddev->curr_resync;
8413                                 }
8414                         } else
8415                                 mddev->recovery_cp = MaxSector;
8416                 } else {
8417                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8418                                 mddev->curr_resync = MaxSector;
8419                         rcu_read_lock();
8420                         rdev_for_each_rcu(rdev, mddev)
8421                                 if (rdev->raid_disk >= 0 &&
8422                                     mddev->delta_disks >= 0 &&
8423                                     !test_bit(Journal, &rdev->flags) &&
8424                                     !test_bit(Faulty, &rdev->flags) &&
8425                                     !test_bit(In_sync, &rdev->flags) &&
8426                                     rdev->recovery_offset < mddev->curr_resync)
8427                                         rdev->recovery_offset = mddev->curr_resync;
8428                         rcu_read_unlock();
8429                 }
8430         }
8431  skip:
8432         /* set CHANGE_PENDING here since another update may still be
8433          * needed, so other nodes are informed. It should be harmless
8434          * for non-clustered raid */
8435         set_mask_bits(&mddev->sb_flags, 0,
8436                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8437
8438         spin_lock(&mddev->lock);
8439         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8440                 /* We completed so min/max setting can be forgotten if used. */
8441                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8442                         mddev->resync_min = 0;
8443                 mddev->resync_max = MaxSector;
8444         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8445                 mddev->resync_min = mddev->curr_resync_completed;
8446         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8447         mddev->curr_resync = 0;
8448         spin_unlock(&mddev->lock);
8449
8450         wake_up(&resync_wait);
8451         md_wakeup_thread(mddev->thread);
8452         return;
8453 }
8454 EXPORT_SYMBOL_GPL(md_do_sync);
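/*
 * Worked example for the throttle loop in md_do_sync() above (illustrative
 * numbers, not measured data): with recovery_done - resync_mark_cnt ==
 * 20480 sectors and (jiffies - resync_mark)/HZ == 4, the computation is
 *
 *	currspeed = (20480 / 2) / (4 + 1) + 1 = 2049 KiB/sec
 *
 * A result above speed_max() triggers the msleep(500) back-off; one above
 * speed_min() on a non-idle array waits for in-flight recovery IO to drain.
 */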
8455
8456 static int remove_and_add_spares(struct mddev *mddev,
8457                                  struct md_rdev *this)
8458 {
8459         struct md_rdev *rdev;
8460         int spares = 0;
8461         int removed = 0;
8462         bool remove_some = false;
8463
8464         rdev_for_each(rdev, mddev) {
8465                 if ((this == NULL || rdev == this) &&
8466                     rdev->raid_disk >= 0 &&
8467                     !test_bit(Blocked, &rdev->flags) &&
8468                     test_bit(Faulty, &rdev->flags) &&
8469                     atomic_read(&rdev->nr_pending)==0) {
8470                         /* Faulty non-Blocked devices with nr_pending == 0
8471                          * never get nr_pending incremented,
8472                          * never get Faulty cleared, and never get Blocked set.
8473                          * So we can synchronize_rcu now rather than once per device
8474                          */
8475                         remove_some = true;
8476                         set_bit(RemoveSynchronized, &rdev->flags);
8477                 }
8478         }
8479
8480         if (remove_some)
8481                 synchronize_rcu();
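        /* Second pass: with readers quiesced by the synchronize_rcu() above,
         * devices flagged RemoveSynchronized (or idle not-in-sync spares) can
         * safely be handed to the personality for removal.
         */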
8482         rdev_for_each(rdev, mddev) {
8483                 if ((this == NULL || rdev == this) &&
8484                     rdev->raid_disk >= 0 &&
8485                     !test_bit(Blocked, &rdev->flags) &&
8486                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8487                      (!test_bit(In_sync, &rdev->flags) &&
8488                       !test_bit(Journal, &rdev->flags))) &&
8489                     atomic_read(&rdev->nr_pending)==0)) {
8490                         if (mddev->pers->hot_remove_disk(
8491                                     mddev, rdev) == 0) {
8492                                 sysfs_unlink_rdev(mddev, rdev);
8493                                 rdev->raid_disk = -1;
8494                                 removed++;
8495                         }
8496                 }
8497                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8498                         clear_bit(RemoveSynchronized, &rdev->flags);
8499         }
8500
8501         if (removed && mddev->kobj.sd)
8502                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8503
8504         if (this && removed)
8505                 goto no_add;
8506
8507         rdev_for_each(rdev, mddev) {
8508                 if (this && this != rdev)
8509                         continue;
8510                 if (test_bit(Candidate, &rdev->flags))
8511                         continue;
8512                 if (rdev->raid_disk >= 0 &&
8513                     !test_bit(In_sync, &rdev->flags) &&
8514                     !test_bit(Journal, &rdev->flags) &&
8515                     !test_bit(Faulty, &rdev->flags))
8516                         spares++;
8517                 if (rdev->raid_disk >= 0)
8518                         continue;
8519                 if (test_bit(Faulty, &rdev->flags))
8520                         continue;
8521                 if (!test_bit(Journal, &rdev->flags)) {
8522                         if (mddev->ro &&
8523                             !(rdev->saved_raid_disk >= 0 &&
8524                               !test_bit(Bitmap_sync, &rdev->flags)))
8525                                 continue;
8526
8527                         rdev->recovery_offset = 0;
8528                 }
8529                 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
8531                         if (sysfs_link_rdev(mddev, rdev))
8532                                 /* failure here is OK */;
8533                         if (!test_bit(Journal, &rdev->flags))
8534                                 spares++;
8535                         md_new_event(mddev);
8536                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8537                 }
8538         }
8539 no_add:
8540         if (removed)
8541                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8542         return spares;
8543 }
8544
8545 static void md_start_sync(struct work_struct *ws)
8546 {
8547         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8548
8549         mddev->sync_thread = md_register_thread(md_do_sync,
8550                                                 mddev,
8551                                                 "resync");
8552         if (!mddev->sync_thread) {
8553                 pr_warn("%s: could not start resync thread...\n",
8554                         mdname(mddev));
8555                 /* leave the spares where they are, it shouldn't hurt */
8556                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8557                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8558                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8559                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8560                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8561                 wake_up(&resync_wait);
8562                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8563                                        &mddev->recovery))
8564                         if (mddev->sysfs_action)
8565                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8566         } else
8567                 md_wakeup_thread(mddev->sync_thread);
8568         sysfs_notify_dirent_safe(mddev->sysfs_action);
8569         md_new_event(mddev);
8570 }
8571
8572 /*
8573  * This routine is regularly called by all per-raid-array threads to
8574  * deal with generic issues like resync and super-block update.
8575  * Raid personalities that don't have a thread (linear/raid0) do not
8576  * need this as they never do any recovery or update the superblock.
8577  *
8578  * It does not do any resync itself, but rather "forks" off other threads
8579  * to do that as needed.
8580  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8581  * "->recovery" and create a thread at ->sync_thread.
8582  * When the thread finishes it sets MD_RECOVERY_DONE
8583  * and wakes up this thread, which will reap the thread and finish up.
8584  * This thread also removes any faulty devices (with nr_pending == 0).
8585  *
8586  * The overall approach is:
8587  *  1/ if the superblock needs updating, update it.
8588  *  2/ If a recovery thread is running, don't do anything else.
8589  *  3/ If recovery has finished, clean up, possibly marking spares active.
8590  *  4/ If there are any faulty devices, remove them.
8591  *  5/ If the array is degraded, try to add spare devices.
8592  *  6/ If array has spares or is not in-sync, start a resync thread.
8593  */
8594 void md_check_recovery(struct mddev *mddev)
8595 {
8596         if (mddev->suspended)
8597                 return;
8598
8599         if (mddev->bitmap)
8600                 bitmap_daemon_work(mddev);
8601
8602         if (signal_pending(current)) {
8603                 if (mddev->pers->sync_request && !mddev->external) {
8604                         pr_debug("md: %s in immediate safe mode\n",
8605                                  mdname(mddev));
8606                         mddev->safemode = 2;
8607                 }
8608                 flush_signals(current);
8609         }
8610
8611         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8612                 return;
8613         if (!(
8614                 (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) ||
8615                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8616                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8617                 (mddev->external == 0 && mddev->safemode == 1) ||
8618                 (mddev->safemode == 2 &&
8619                  !mddev->in_sync && mddev->recovery_cp == MaxSector)
8620                 ))
8621                 return;
8622
8623         if (mddev_trylock(mddev)) {
8624                 int spares = 0;
8625
8626                 if (mddev->ro) {
8627                         struct md_rdev *rdev;
8628                         if (!mddev->external && mddev->in_sync)
8629                                 /* 'Blocked' flag not needed as failed devices
8630                                  * will be recorded if array switched to read/write.
8631                                  * will be recorded if the array is switched to read/write.
8632                                  * from being removed.
8633                                  */
8634                                 rdev_for_each(rdev, mddev)
8635                                         clear_bit(Blocked, &rdev->flags);
8636                         /* On a read-only array we can:
8637                          * - remove failed devices
8638                          * - add already-in_sync devices if the array itself
8639                          *   is in-sync.
8640                          * As we only add devices that are already in-sync,
8641                          * we can activate the spares immediately.
8642                          */
8643                         remove_and_add_spares(mddev, NULL);
8644                         /* There is no thread, but we need to call
8645                          * ->spare_active and clear saved_raid_disk
8646                          */
8647                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8648                         md_reap_sync_thread(mddev);
8649                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8650                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8651                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8652                         goto unlock;
8653                 }
8654
8655                 if (mddev_is_clustered(mddev)) {
8656                         struct md_rdev *rdev;
8657                         /* kick the device if another node issued a
 * disk removal.
8659                          */
8660                         rdev_for_each(rdev, mddev) {
8661                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8662                                                 rdev->raid_disk < 0)
8663                                         md_kick_rdev_from_array(rdev);
8664                         }
8665                 }
8666
8667                 if (!mddev->external && !mddev->in_sync) {
8668                         spin_lock(&mddev->lock);
8669                         set_in_sync(mddev);
8670                         spin_unlock(&mddev->lock);
8671                 }
8672
8673                 if (mddev->sb_flags)
8674                         md_update_sb(mddev, 0);
8675
8676                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8677                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8678                         /* resync/recovery still happening */
8679                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8680                         goto unlock;
8681                 }
8682                 if (mddev->sync_thread) {
8683                         md_reap_sync_thread(mddev);
8684                         goto unlock;
8685                 }
8686                 /* Set RUNNING before clearing NEEDED to avoid
8687                  * any transients in the value of "sync_action".
8688                  */
8689                 mddev->curr_resync_completed = 0;
8690                 spin_lock(&mddev->lock);
8691                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8692                 spin_unlock(&mddev->lock);
8693                 /* Clear some bits that don't mean anything, but
8694                  * might be left set
8695                  */
8696                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8697                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8698
8699                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8700                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8701                         goto not_running;
8702                 /* no recovery is running.
8703                  * remove any failed drives, then
8704                  * add spares if possible.
8705                  * Spares are also removed and re-added, to allow
8706                  * the personality to fail the re-add.
8707                  */
8708
8709                 if (mddev->reshape_position != MaxSector) {
8710                         if (mddev->pers->check_reshape == NULL ||
8711                             mddev->pers->check_reshape(mddev) != 0)
8712                                 /* Cannot proceed */
8713                                 goto not_running;
8714                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8715                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8716                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8717                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8718                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8719                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8720                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8721                 } else if (mddev->recovery_cp < MaxSector) {
8722                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8723                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8724                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8725                         /* nothing to be done ... */
8726                         goto not_running;
8727
8728                 if (mddev->pers->sync_request) {
8729                         if (spares) {
8730                                 /* We are adding a device or devices to an array
8731                                  * which has the bitmap stored on all devices.
8732                                  * So make sure all bitmap pages get written
8733                                  */
8734                                 bitmap_write_all(mddev->bitmap);
8735                         }
8736                         INIT_WORK(&mddev->del_work, md_start_sync);
8737                         queue_work(md_misc_wq, &mddev->del_work);
8738                         goto unlock;
8739                 }
8740         not_running:
8741                 if (!mddev->sync_thread) {
8742                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8743                         wake_up(&resync_wait);
8744                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8745                                                &mddev->recovery))
8746                                 if (mddev->sysfs_action)
8747                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8748                 }
8749         unlock:
8750                 wake_up(&mddev->sb_wait);
8751                 mddev_unlock(mddev);
8752         }
8753 }
8754 EXPORT_SYMBOL(md_check_recovery);
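/*
 * For reference, md_check_recovery() is driven from each personality's
 * daemon thread.  A minimal sketch of such a thread (the "exampled" name
 * and its helper are hypothetical; see raid1d() or raid5d() for real
 * instances) might look like:
 *
 *	static void exampled(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);
 *		handle_personality_io(mddev);
 *	}
 */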
8755
8756 void md_reap_sync_thread(struct mddev *mddev)
8757 {
8758         struct md_rdev *rdev;
8759
8760         /* resync has finished, collect result */
8761         md_unregister_thread(&mddev->sync_thread);
8762         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8763             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8764                 /* success...*/
8765                 /* activate any spares */
8766                 if (mddev->pers->spare_active(mddev)) {
8767                         sysfs_notify(&mddev->kobj, NULL,
8768                                      "degraded");
8769                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8770                 }
8771         }
8772         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8773             mddev->pers->finish_reshape)
8774                 mddev->pers->finish_reshape(mddev);
8775
8776         /* If the array is no longer degraded, then any saved_raid_disk
8777          * information must be scrapped.
8778          */
8779         if (!mddev->degraded)
8780                 rdev_for_each(rdev, mddev)
8781                         rdev->saved_raid_disk = -1;
8782
8783         md_update_sb(mddev, 1);
8784         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
8785          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8786          * clustered raid */
8787         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8788                 md_cluster_ops->resync_finish(mddev);
8789         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8790         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8791         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8792         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8793         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8794         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8795         wake_up(&resync_wait);
8796         /* flag recovery needed just to double check */
8797         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8798         sysfs_notify_dirent_safe(mddev->sysfs_action);
8799         md_new_event(mddev);
8800         if (mddev->event_work.func)
8801                 queue_work(md_misc_wq, &mddev->event_work);
8802 }
8803 EXPORT_SYMBOL(md_reap_sync_thread);
8804
8805 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8806 {
8807         sysfs_notify_dirent_safe(rdev->sysfs_state);
8808         wait_event_timeout(rdev->blocked_wait,
8809                            !test_bit(Blocked, &rdev->flags) &&
8810                            !test_bit(BlockedBadBlocks, &rdev->flags),
8811                            msecs_to_jiffies(5000));
8812         rdev_dec_pending(rdev, mddev);
8813 }
8814 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
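/*
 * Since md_wait_for_blocked_rdev() drops one nr_pending reference on return,
 * callers take a reference first.  The typical write-path pattern in a
 * personality looks roughly like this (a sketch, cf. the raid1/raid10
 * error paths):
 *
 *	if (unlikely(test_bit(Blocked, &rdev->flags))) {
 *		atomic_inc(&rdev->nr_pending);
 *		md_wait_for_blocked_rdev(rdev, mddev);
 *	}
 */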
8815
8816 void md_finish_reshape(struct mddev *mddev)
8817 {
8818         /* called by the personality module when a reshape completes. */
8819         struct md_rdev *rdev;
8820
8821         rdev_for_each(rdev, mddev) {
8822                 if (rdev->data_offset > rdev->new_data_offset)
8823                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8824                 else
8825                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8826                 rdev->data_offset = rdev->new_data_offset;
8827         }
8828 }
8829 EXPORT_SYMBOL(md_finish_reshape);
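/*
 * Worked example for md_finish_reshape() above: if a reshape moved
 * data_offset from 2048 down to a new_data_offset of 1024, the device gains
 * 2048 - 1024 = 1024 sectors of usable space, so rdev->sectors grows by
 * 1024 before data_offset is updated to the new value.
 */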
8830
8831 /* Bad block management */
8832
8833 /* Returns 1 on success, 0 on failure */
8834 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8835                        int is_new)
8836 {
8837         struct mddev *mddev = rdev->mddev;
8838         int rv;
8839         if (is_new)
8840                 s += rdev->new_data_offset;
8841         else
8842                 s += rdev->data_offset;
8843         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8844         if (rv == 0) {
8845                 /* Make sure they get written out promptly */
8846                 if (test_bit(ExternalBbl, &rdev->flags))
8847                         sysfs_notify(&rdev->kobj, NULL,
8848                                      "unacknowledged_bad_blocks");
8849                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8850                 set_mask_bits(&mddev->sb_flags, 0,
8851                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
8852                 md_wakeup_thread(rdev->mddev->thread);
8853                 return 1;
8854         } else
8855                 return 0;
8856 }
8857 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
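/*
 * When a bad block cannot be recorded, a personality usually has no option
 * left but to fail the whole device; the common pattern (a sketch, cf. the
 * raid1/raid10 end-IO paths) is:
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		md_error(mddev, rdev);
 */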
8858
8859 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8860                          int is_new)
8861 {
8862         int rv;
8863         if (is_new)
8864                 s += rdev->new_data_offset;
8865         else
8866                 s += rdev->data_offset;
8867         rv = badblocks_clear(&rdev->badblocks, s, sectors);
8868         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8869                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8870         return rv;
8871 }
8872 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8873
8874 static int md_notify_reboot(struct notifier_block *this,
8875                             unsigned long code, void *x)
8876 {
8877         struct list_head *tmp;
8878         struct mddev *mddev;
8879         int need_delay = 0;
8880
8881         for_each_mddev(mddev, tmp) {
8882                 if (mddev_trylock(mddev)) {
8883                         if (mddev->pers)
8884                                 __md_stop_writes(mddev);
8885                         if (mddev->persistent)
8886                                 mddev->safemode = 2;
8887                         mddev_unlock(mddev);
8888                 }
8889                 need_delay = 1;
8890         }
8891         /*
8892          * certain more exotic SCSI devices are known to be
8893          * volatile with respect to too-early system reboots. While the
8894          * right place to handle this issue is the given
8895          * driver, we do want to have a safe RAID driver ...
8896          */
8897         if (need_delay)
8898                 mdelay(1000);
8899
8900         return NOTIFY_DONE;
8901 }
8902
8903 static struct notifier_block md_notifier = {
8904         .notifier_call  = md_notify_reboot,
8905         .next           = NULL,
8906         .priority       = INT_MAX, /* before any real devices */
8907 };
8908
8909 static void md_geninit(void)
8910 {
8911         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8912
8913         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8914 }
8915
8916 static int __init md_init(void)
8917 {
8918         int ret = -ENOMEM;
8919
8920         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8921         if (!md_wq)
8922                 goto err_wq;
8923
8924         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8925         if (!md_misc_wq)
8926                 goto err_misc_wq;
8927
8928         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8929                 goto err_md;
8930
8931         if ((ret = register_blkdev(0, "mdp")) < 0)
8932                 goto err_mdp;
8933         mdp_major = ret;
8934
8935         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8936                             md_probe, NULL, NULL);
8937         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8938                             md_probe, NULL, NULL);
8939
8940         register_reboot_notifier(&md_notifier);
8941         raid_table_header = register_sysctl_table(raid_root_table);
8942
8943         md_geninit();
8944         return 0;
8945
8946 err_mdp:
8947         unregister_blkdev(MD_MAJOR, "md");
8948 err_md:
8949         destroy_workqueue(md_misc_wq);
8950 err_misc_wq:
8951         destroy_workqueue(md_wq);
8952 err_wq:
8953         return ret;
8954 }
8955
8956 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8957 {
8958         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8959         struct md_rdev *rdev2;
8960         int role, ret;
8961         char b[BDEVNAME_SIZE];
8962
8963         /*
8964          * If the size was changed on another node, we need to
8965          * resize here as well.
8966          */
8967         if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
8968                 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
8969                 if (ret)
8970                         pr_info("md-cluster: resize failed\n");
8971                 else
8972                         bitmap_update_sb(mddev->bitmap);
8973         }
8974
8975         /* Check for change of roles in the active devices */
8976         rdev_for_each(rdev2, mddev) {
8977                 if (test_bit(Faulty, &rdev2->flags))
8978                         continue;
8979
8980                 /* Check if the roles changed */
8981                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8982
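                /* On-disk role values (see md_p.h): 0xffff means spare,
                 * 0xfffe faulty and 0xfffd journal; anything else is the
                 * slot number of an active device.
                 */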
8983                 if (test_bit(Candidate, &rdev2->flags)) {
8984                         if (role == 0xfffe) {
8985                                 pr_info("md: Removing Candidate device %s because add failed\n",
8986                                         bdevname(rdev2->bdev, b));
8987                                 md_kick_rdev_from_array(rdev2);
8988                                 continue;
8989                         } else
8990                                 clear_bit(Candidate, &rdev2->flags);
8991                 }
8992
8993                 if (role != rdev2->raid_disk) {
8994                         /* got activated */
8995                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8996                                 rdev2->saved_raid_disk = role;
8997                                 ret = remove_and_add_spares(mddev, rdev2);
8998                                 pr_info("Activated spare: %s\n",
8999                                         bdevname(rdev2->bdev, b));
9000                                 /* wake up mddev->thread here so the array
9001                                  * can resync with the newly activated disk */
9002                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9003                                 md_wakeup_thread(mddev->thread);
9004
9005                         }
9006                         /* device faulty
9007                          * We just want to do the minimum to mark the disk
9008                          * as faulty. The recovery is performed by the
9009                          * node that initiated the error.
9010                          */
9011                         if ((role == 0xfffe) || (role == 0xfffd)) {
9012                                 md_error(mddev, rdev2);
9013                                 clear_bit(Blocked, &rdev2->flags);
9014                         }
9015                 }
9016         }
9017
9018         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
9019                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9020
9021         /* Finally set the event to be up to date */
9022         mddev->events = le64_to_cpu(sb->events);
9023 }
9024
9025 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9026 {
9027         int err;
9028         struct page *swapout = rdev->sb_page;
9029         struct mdp_superblock_1 *sb;
9030
9031         /* Stash the rdev's current sb page in the swapout temporary
9032          * variable so it can be restored if the reload fails
9033          */
9034         rdev->sb_page = NULL;
9035         err = alloc_disk_sb(rdev);
9036         if (err == 0) {
9037                 ClearPageUptodate(rdev->sb_page);
9038                 rdev->sb_loaded = 0;
9039                 err = super_types[mddev->major_version].
9040                         load_super(rdev, NULL, mddev->minor_version);
9041         }
9042         if (err < 0) {
9043                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9044                                 __func__, __LINE__, rdev->desc_nr, err);
9045                 if (rdev->sb_page)
9046                         put_page(rdev->sb_page);
9047                 rdev->sb_page = swapout;
9048                 rdev->sb_loaded = 1;
9049                 return err;
9050         }
9051
9052         sb = page_address(rdev->sb_page);
9053         /* Only restore recovery_offset when the superblock advertises
9054          * MD_FEATURE_RECOVERY_OFFSET
9055          */
9056
9057         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9058                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9059
9060         /* The other node finished recovery, call spare_active to set
9061          * the device In_sync and update mddev->degraded
9062          */
9063         if (rdev->recovery_offset == MaxSector &&
9064             !test_bit(In_sync, &rdev->flags) &&
9065             mddev->pers->spare_active(mddev))
9066                 sysfs_notify(&mddev->kobj, NULL, "degraded");
9067
9068         put_page(swapout);
9069         return 0;
9070 }
9071
9072 void md_reload_sb(struct mddev *mddev, int nr)
9073 {
9074         struct md_rdev *rdev;
9075         int err;
9076
9077         /* Find the rdev */
9078         rdev_for_each_rcu(rdev, mddev) {
9079                 if (rdev->desc_nr == nr)
9080                         break;
9081         }
9082
9083         if (!rdev || rdev->desc_nr != nr) {
9084                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9085                 return;
9086         }
9087
9088         err = read_rdev(mddev, rdev);
9089         if (err < 0)
9090                 return;
9091
9092         check_sb_changes(mddev, rdev);
9093
9094         /* Read all rdevs to update recovery_offset */
9095         rdev_for_each_rcu(rdev, mddev)
9096                 read_rdev(mddev, rdev);
9097 }
9098 EXPORT_SYMBOL(md_reload_sb);
9099
9100 #ifndef MODULE
9101
9102 /*
9103  * Searches all registered partitions for autorun RAID arrays
9104  * at boot time.
9105  */
9106
9107 static DEFINE_MUTEX(detected_devices_mutex);
9108 static LIST_HEAD(all_detected_devices);
9109 struct detected_devices_node {
9110         struct list_head list;
9111         dev_t dev;
9112 };
9113
9114 void md_autodetect_dev(dev_t dev)
9115 {
9116         struct detected_devices_node *node_detected_dev;
9117
9118         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9119         if (node_detected_dev) {
9120                 node_detected_dev->dev = dev;
9121                 mutex_lock(&detected_devices_mutex);
9122                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9123                 mutex_unlock(&detected_devices_mutex);
9124         }
9125 }
9126
9127 static void autostart_arrays(int part)
9128 {
9129         struct md_rdev *rdev;
9130         struct detected_devices_node *node_detected_dev;
9131         dev_t dev;
9132         int i_scanned, i_passed;
9133
9134         i_scanned = 0;
9135         i_passed = 0;
9136
9137         pr_info("md: Autodetecting RAID arrays.\n");
9138
9139         mutex_lock(&detected_devices_mutex);
9140         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9141                 i_scanned++;
9142                 node_detected_dev = list_entry(all_detected_devices.next,
9143                                         struct detected_devices_node, list);
9144                 list_del(&node_detected_dev->list);
9145                 dev = node_detected_dev->dev;
9146                 kfree(node_detected_dev);
9147                 mutex_unlock(&detected_devices_mutex);
9148                 rdev = md_import_device(dev, 0, 90);
9149                 mutex_lock(&detected_devices_mutex);
9150                 if (IS_ERR(rdev))
9151                         continue;
9152
9153                 if (test_bit(Faulty, &rdev->flags))
9154                         continue;
9155
9156                 set_bit(AutoDetected, &rdev->flags);
9157                 list_add(&rdev->same_set, &pending_raid_disks);
9158                 i_passed++;
9159         }
9160         mutex_unlock(&detected_devices_mutex);
9161
9162         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9163
9164         autorun_devices(part);
9165 }
9166
9167 #endif /* !MODULE */
9168
9169 static __exit void md_exit(void)
9170 {
9171         struct mddev *mddev;
9172         struct list_head *tmp;
9173         int delay = 1;
9174
9175         blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
9176         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
9177
9178         unregister_blkdev(MD_MAJOR, "md");
9179         unregister_blkdev(mdp_major, "mdp");
9180         unregister_reboot_notifier(&md_notifier);
9181         unregister_sysctl_table(raid_table_header);
9182
9183         /* We cannot unload the modules while some process is
9184          * waiting for us in select() or poll() - wake them up
9185          */
9186         md_unloading = 1;
9187         while (waitqueue_active(&md_event_waiters)) {
9188                 /* not safe to leave yet */
9189                 wake_up(&md_event_waiters);
9190                 msleep(delay);
9191                 delay += delay;
9192         }
9193         remove_proc_entry("mdstat", NULL);
9194
9195         for_each_mddev(mddev, tmp) {
9196                 export_array(mddev);
9197                 mddev->ctime = 0;
9198                 mddev->hold_active = 0;
9199                 /*
9200                  * for_each_mddev() will call mddev_put() at the end of each
9201                  * iteration.  As the mddev is now fully clear, this will
9202                  * schedule the mddev for destruction by a workqueue, and the
9203                  * destroy_workqueue() below will wait for that to complete.
9204                  */
9205         }
9206         destroy_workqueue(md_misc_wq);
9207         destroy_workqueue(md_wq);
9208 }
9209
9210 subsys_initcall(md_init);
9211 module_exit(md_exit)
9212
9213 static int get_ro(char *buffer, struct kernel_param *kp)
9214 {
9215         return sprintf(buffer, "%d", start_readonly);
9216 }
9217 static int set_ro(const char *val, struct kernel_param *kp)
9218 {
9219         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9220 }
9221
9222 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9223 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9224 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9225 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
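/*
 * Example usage (an assumed typical invocation, not taken from this file):
 * arrays can be kept "auto-read-only" until the first write by loading the
 * module with
 *
 *	modprobe md_mod start_ro=1
 *
 * or by writing 1 to /sys/module/md_mod/parameters/start_ro at runtime.
 */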
9226
9227 MODULE_LICENSE("GPL");
9228 MODULE_DESCRIPTION("MD RAID framework");
9229 MODULE_ALIAS("md");
9230 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);