/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_chip	chip;

	struct device		*dev;
	struct clk		*clk;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	uint32_t		ioaddr;
	uint32_t		current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * Access to hardware control lines:  ALE, CLE, secondary chipselect.
 */

static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
				   unsigned int ctrl)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->current_cs;
	struct nand_chip		*nand = mtd_to_nand(mtd);

	/* Did the control lines change? */
	if (ctrl & NAND_CTRL_CHANGE) {
		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
			addr |= info->mask_cle;
		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
			addr |= info->mask_ale;

		nand->IO_ADDR_W = (void __iomem __force *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		iowrite8(cmd, nand->IO_ADDR_W);
}

static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->ioaddr;

	/* maybe kick in a second chipselect */
	if (chip > 0)
		addr |= info->mask_chipsel;
	info->current_cs = addr;

	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
				      const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}

static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

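	/*
	 * Standard Hamming-style check:  for a single-bit error in the data,
	 * the two 12-bit halves of the parity difference are bitwise
	 * complements of each other.  The upper half then encodes the error
	 * position:  its low three bits select the bit within the byte, the
	 * remaining bits the byte offset.
	 */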
	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}

	}
	return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned long flags;
	u32 val;

	/* Reset ECC hardware */
	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
		const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
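		/*
		 * Error positions appear to be reported counting down from
		 * the end of the data area; remap them to ascending byte
		 * offsets.  Positions that fall outside the 512 data bytes
		 * (errors in the stored ECC itself) need no correction of
		 * the data and are skipped below.
		 */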
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

/*----------------------------------------------------------------------*/

/*
 * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
 * how these chips are normally wired.  This translates to both 8 and 16
 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
 *
 * For now we assume that configuration, or any other one which ignores
 * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
 * and have that transparently morphed into multiple NAND operations.
 */
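/*
 * With those default masks, for example, a command byte issued through
 * nand_davinci_hwcontrol() is written to (chipselect base | 0x10) and an
 * address byte to (chipselect base | 0x08); data cycles use the chipselect
 * base address itself.
 */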
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		iowrite32_rep(chip->IO_ADDR_W, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		iowrite16_rep(chip->IO_ADDR_W, buf, len >> 1);
	else
		iowrite8_rep(chip->IO_ADDR_W, buf, len);
}

/*
 * Check hardware register for wait status. Returns 1 if device is ready,
 * 0 if it is still busy.
 */
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};
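
/*
 * The callbacks above yield, for a 16-byte OOB:  ECC in bytes 0-4, 6-7 and
 * 13-15 (ten bytes total), byte 5 left untouched for the manufacturer's bad
 * block marker, and bytes 8-12 (plus anything past byte 15 on larger OOBs)
 * kept free.
 */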

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdev->id = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->ecc_mode = NAND_ECC_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->ecc_mode = NAND_ECC_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->ecc_mode = NAND_ECC_HW;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled. If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif
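
/*
 * Illustrative (not authoritative) device tree fragment showing the
 * properties parsed by nand_davinci_get_pdata() above; the values here are
 * hypothetical examples, not a canonical binding:
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0x8>;
 *		ti,davinci-mask-cle = <0x10>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */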

static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	struct mtd_info			*mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdev->id < 0 || pdev->id > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This register range is used to set up the NAND settings.  When the
	 * TI AEMIF driver is in use, the same memory address range has
	 * already been requested by AEMIF, so we cannot request it a second
	 * time; just ioremap it.  The AEMIF and NAND drivers do not use the
	 * same registers within this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->dev		= &pdev->dev;
	info->base		= base;
	info->vaddr		= vaddr;

	mtd			= nand_to_mtd(&info->chip);
	mtd->dev.parent		= &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	info->chip.IO_ADDR_R	= vaddr;
	info->chip.IO_ADDR_W	= vaddr;
	info->chip.chip_delay	= 0;
	info->chip.select_chip	= nand_davinci_select_chip;

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;
	info->timing		= pdata->timing;

	info->ioaddr		= (uint32_t __force) vaddr;

	info->current_cs	= info->ioaddr;
	info->core_chipsel	= pdev->id;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	/* Set address of hardware control function */
	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
	info->chip.dev_ready	= nand_davinci_dev_ready;

	/* Speed up buffer I/O */
	info->chip.read_buf	= nand_davinci_read_buf;
	info->chip.write_buf	= nand_davinci_write_buf;

	/* Use board-specific ECC config */
	info->chip.ecc.mode	= pdata->ecc_mode;

	ret = -EINVAL;

	info->clk = devm_clk_get(&pdev->dev, "aemif");
	if (IS_ERR(info->clk)) {
		ret = PTR_ERR(info->clk);
		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(info->clk);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
			ret);
		goto err_clk_enable;
	}

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		goto err;
	}

	switch (info->chip.ecc.mode) {
	case NAND_ECC_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when ecc_mode is set
		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
		 * avoid adding an extra ->ecc_algo field to
		 * davinci_nand_pdata.
		 */
		info->chip.ecc.algo = NAND_ECC_HAMMING;
		break;
	case NAND_ECC_HW:
		if (pdata->ecc_bits == 4) {
			/* No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			/* bail out via the error path so the clock is released */
			if (ret == -EBUSY)
				goto err;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
		} else {
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
		}
		info->chip.ecc.size = 512;
		info->chip.ecc.strength = pdata->ecc_bits;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
	 * is OK, but it allocates 6 bytes when only 3 are needed (for
	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
	 * usable:  10 bytes are needed, not 6.
	 */
	if (pdata->ecc_bits == 4) {
		int	chunks = mtd->writesize / 512;

		if (!chunks || mtd->oobsize < 16) {
			dev_dbg(&pdev->dev, "too small\n");
			ret = -EINVAL;
			goto err;
		}

		/* For small page chips, preserve the manufacturer's
		 * badblock marking data ... and make sure a flash BBT
		 * table marker fits in the free bytes.
		 */
		if (chunks == 1) {
			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
		} else if (chunks == 4 || chunks == 8) {
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
		} else {
			ret = -EIO;
			goto err;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret < 0)
		goto err;

	if (pdata->parts)
		ret = mtd_device_parse_register(mtd, NULL, NULL,
					pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
	       (val >> 8) & 0xff, val & 0xff);

	return 0;

err:
	clk_disable_unprepare(info->clk);

err_clk_enable:
	spin_lock_irq(&davinci_nand_lock);
	/* only the 4-bit ECC path (ten ECC bytes) claims the shared engine */
	if (info->chip.ecc.bytes == 10)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);
	return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);

	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.bytes == 10)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	nand_release(nand_to_mtd(&info->chip));

	clk_disable_unprepare(info->clk);

	return 0;
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove		= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");