/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * This works because the mtd field is the first member of
 * struct mtd_concat, so both share the same address.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
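
/*
 * For example (illustrative numbers): with two 4 MiB subdevices
 * concatenated, a read at offset 6 MiB of the combined device is
 * routed to the second subdevice at offset 2 MiB within it.
 */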

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

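		/*
		 * Advance entry_high past all kvecs that fit entirely
		 * into this subdevice; it ends up at the vector that
		 * straddles the subdevice boundary (or at the last one).
		 */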
		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

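		/*
		 * The boundary vector was truncated to fit this subdevice;
		 * restore its remaining length and advance its base so the
		 * next iteration writes the rest to the following subdevice.
		 */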
		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
					(erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->lock) {
			err = subdev->lock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->unlock) {
			err = subdev->unlock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		/* we've found the subdev over which the mapping will reside */
		if (offset + len > subdev->size)
			return (unsigned long) -EINVAL;

		if (subdev->get_unmapped_area)
			return subdev->get_unmapped_area(subdev, len, offset,
							 flags);

		break;
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success (NULL on failure). This function does _not_
 * register any devices: this is the caller's responsibility.
 */
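
/*
 * Illustrative usage sketch (not from this file): "flash0" and
 * "flash1" are hypothetical subdevices already probed elsewhere;
 * registering the result, e.g. via add_mtd_device() or the
 * partitioning code, is still up to the caller.
 *
 *	struct mtd_info *parts[2] = { flash0, flash1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "flash-concat");
 *	if (combined)
 *		add_mtd_device(combined);
 */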
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
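	/* the subdev pointer array lives directly behind the struct itself */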
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;
	concat->mtd.get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
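	/*
	 * For example (illustrative): a chip with uniform 64 KiB blocks
	 * followed by one with uniform 128 KiB blocks gives two erase
	 * regions on the combined device and max_erasesize = 128 KiB.
	 */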
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof(struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase region list"
			       " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 *  fill in an mtd_erase_region_info structure for the area
					 *  we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].numblocks *
					    (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");