1 // SPDX-License-Identifier: GPL-2.0
3 * NAND Flash Controller Device Driver
4 * Copyright © 2009-2010, Intel Corporation and its suppliers.
6 * Copyright (c) 2017 Socionext Inc.
7 * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
10 #include <linux/bitfield.h>
11 #include <linux/completion.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
23 #define DENALI_NAND_NAME "denali-nand"
24 #define DENALI_DEFAULT_OOB_SKIP_BYTES 8
26 /* for Indexed Addressing */
27 #define DENALI_INDEXED_CTRL 0x00
28 #define DENALI_INDEXED_DATA 0x10
30 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
31 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
32 #define DENALI_MAP10 (2 << 26) /* high-level control plane */
33 #define DENALI_MAP11 (3 << 26) /* direct controller access */
35 /* MAP11 access cycle type */
36 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
37 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
38 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
41 #define DENALI_ERASE 0x01
43 #define DENALI_BANK(denali) ((denali)->active_bank << 24)
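/* The active bank (chip select) number is encoded in the host address starting at bit 24. */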
45 #define DENALI_INVALID_BANK -1
46 #define DENALI_NR_BANKS 4
48 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
50 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
* Direct Addressing - the slave address forms the control information (command
* type, bank, block, and page address). The slave data is the actual data to
* be transferred. This mode requires a 28-bit address region to be allocated.
58 static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
60 return ioread32(denali->host + addr);
63 static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
66 iowrite32(data, denali->host + addr);
* Indexed Addressing - an address translation module intervenes in passing
* the control information. This mode reduces the required address range.
* The control information and the transferred data are latched by registers
* in the translation module.
75 static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
77 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
78 return ioread32(denali->host + DENALI_INDEXED_DATA);
81 static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
84 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
85 iowrite32(data, denali->host + DENALI_INDEXED_DATA);
89 * Use the configuration feature register to determine the maximum number of
90 * banks that the hardware supports.
92 static void denali_detect_max_banks(struct denali_nand_info *denali)
94 uint32_t features = ioread32(denali->reg + FEATURES);
96 denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
98 /* the encoding changed from rev 5.0 to 5.1 */
99 if (denali->revision < 0x0501)
100 denali->max_banks <<= 1;
103 static void denali_enable_irq(struct denali_nand_info *denali)
107 for (i = 0; i < DENALI_NR_BANKS; i++)
108 iowrite32(U32_MAX, denali->reg + INTR_EN(i));
109 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
112 static void denali_disable_irq(struct denali_nand_info *denali)
116 for (i = 0; i < DENALI_NR_BANKS; i++)
117 iowrite32(0, denali->reg + INTR_EN(i));
118 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
121 static void denali_clear_irq(struct denali_nand_info *denali,
122 int bank, uint32_t irq_status)
124 /* write one to clear bits */
125 iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
128 static void denali_clear_irq_all(struct denali_nand_info *denali)
132 for (i = 0; i < DENALI_NR_BANKS; i++)
133 denali_clear_irq(denali, i, U32_MAX);
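/*
 * Interrupt handler: acknowledge the interrupt status of every bank, but
 * accumulate it only for the active bank, and complete any waiter once the
 * bits it asked for have arrived.
 */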
136 static irqreturn_t denali_isr(int irq, void *dev_id)
138 struct denali_nand_info *denali = dev_id;
139 irqreturn_t ret = IRQ_NONE;
143 spin_lock(&denali->irq_lock);
145 for (i = 0; i < DENALI_NR_BANKS; i++) {
146 irq_status = ioread32(denali->reg + INTR_STATUS(i));
150 denali_clear_irq(denali, i, irq_status);
152 if (i != denali->active_bank)
155 denali->irq_status |= irq_status;
157 if (denali->irq_status & denali->irq_mask)
158 complete(&denali->complete);
161 spin_unlock(&denali->irq_lock);
166 static void denali_reset_irq(struct denali_nand_info *denali)
170 spin_lock_irqsave(&denali->irq_lock, flags);
171 denali->irq_status = 0;
172 denali->irq_mask = 0;
173 spin_unlock_irqrestore(&denali->irq_lock, flags);
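/*
 * Wait up to one second for any interrupt in irq_mask on the active bank.
 * Returns the accumulated interrupt status; the caller checks it against
 * irq_mask to detect a timeout.
 */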
176 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
179 unsigned long time_left, flags;
182 spin_lock_irqsave(&denali->irq_lock, flags);
184 irq_status = denali->irq_status;
186 if (irq_mask & irq_status) {
187 /* return immediately if the IRQ has already happened. */
188 spin_unlock_irqrestore(&denali->irq_lock, flags);
192 denali->irq_mask = irq_mask;
193 reinit_completion(&denali->complete);
194 spin_unlock_irqrestore(&denali->irq_lock, flags);
196 time_left = wait_for_completion_timeout(&denali->complete,
197 msecs_to_jiffies(1000));
199 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
204 return denali->irq_status;
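/*
 * Legacy PIO accessors: data moves one byte (or one 16-bit word) at a time
 * through the MAP11 data window of the active bank.
 */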
207 static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
209 struct mtd_info *mtd = nand_to_mtd(chip);
210 struct denali_nand_info *denali = mtd_to_denali(mtd);
211 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
214 for (i = 0; i < len; i++)
215 buf[i] = denali->host_read(denali, addr);
218 static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
221 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
222 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
225 for (i = 0; i < len; i++)
226 denali->host_write(denali, addr, buf[i]);
229 static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
231 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
232 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
233 uint16_t *buf16 = (uint16_t *)buf;
236 for (i = 0; i < len / 2; i++)
237 buf16[i] = denali->host_read(denali, addr);
240 static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
243 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
244 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
245 const uint16_t *buf16 = (const uint16_t *)buf;
248 for (i = 0; i < len / 2; i++)
249 denali->host_write(denali, addr, buf16[i]);
252 static uint8_t denali_read_byte(struct nand_chip *chip)
256 denali_read_buf(chip, &byte, 1);
261 static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
263 denali_write_buf(chip, &byte, 1);
266 static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
268 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
272 type = DENALI_MAP11_CMD;
273 else if (ctrl & NAND_ALE)
274 type = DENALI_MAP11_ADDR;
279 * Some commands are followed by chip->legacy.waitfunc.
280 * irq_status must be cleared here to catch the R/B# interrupt later.
282 if (ctrl & NAND_CTRL_CHANGE)
283 denali_reset_irq(denali);
285 denali->host_write(denali, DENALI_BANK(denali) | type, dat);
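/*
 * Re-check every sector flagged as uncorrectable against the erased
 * (all 0xff) pattern; genuinely bad sectors bump ecc_stats.failed, while
 * erased ones only contribute their bitflip count.
 */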
288 static int denali_check_erased_page(struct mtd_info *mtd,
289 struct nand_chip *chip, uint8_t *buf,
290 unsigned long uncor_ecc_flags,
291 unsigned int max_bitflips)
293 struct denali_nand_info *denali = mtd_to_denali(mtd);
294 uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
295 int ecc_steps = chip->ecc.steps;
296 int ecc_size = chip->ecc.size;
297 int ecc_bytes = chip->ecc.bytes;
300 for (i = 0; i < ecc_steps; i++) {
301 if (!(uncor_ecc_flags & BIT(i)))
304 stat = nand_check_erased_ecc_chunk(buf, ecc_size,
309 mtd->ecc_stats.failed++;
311 mtd->ecc_stats.corrected += stat;
312 max_bitflips = max_t(unsigned int, max_bitflips, stat);
316 ecc_code += ecc_bytes;
322 static int denali_hw_ecc_fixup(struct mtd_info *mtd,
323 struct denali_nand_info *denali,
324 unsigned long *uncor_ecc_flags)
326 struct nand_chip *chip = mtd_to_nand(mtd);
327 int bank = denali->active_bank;
329 unsigned int max_bitflips;
331 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
332 ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
334 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
* This flag is set when an uncorrectable error occurs in at least one
* ECC sector. We cannot tell how many sectors, or which sector(s), are
* affected, so the erased-page check must be run on every sector.
340 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
344 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
* The register holds the maximum of per-sector corrected bitflips,
* which is suitable for the return value of the ->read_page() callback.
* Unfortunately, we cannot know the total number of corrected bits in
* the page, so increase the stats by max_bitflips as a compromise.
352 mtd->ecc_stats.corrected += max_bitflips;
357 static int denali_sw_ecc_fixup(struct mtd_info *mtd,
358 struct denali_nand_info *denali,
359 unsigned long *uncor_ecc_flags, uint8_t *buf)
361 unsigned int ecc_size = denali->nand.ecc.size;
362 unsigned int bitflips = 0;
363 unsigned int max_bitflips = 0;
364 uint32_t err_addr, err_cor_info;
365 unsigned int err_byte, err_sector, err_device;
366 uint8_t err_cor_value;
367 unsigned int prev_sector = 0;
370 denali_reset_irq(denali);
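/* Walk the controller's error reports one at a time until the LAST_ERR flag is set. */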
373 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
374 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
375 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
377 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
378 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
380 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
/* reset the bitflip counter when crossing an ECC sector boundary */
384 if (err_sector != prev_sector)
387 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
* Check later if this is a real ECC error, or an erased sector.
392 *uncor_ecc_flags |= BIT(err_sector);
393 } else if (err_byte < ecc_size) {
* If err_byte is equal to or larger than ecc_size, the error happened
* in the OOB area, so there is no need to correct it. err_device tells
* which NAND device the error bits belong to when more than one NAND
* chip is connected in parallel.
402 unsigned int flips_in_byte;
404 offset = (err_sector * ecc_size + err_byte) *
405 denali->devs_per_cs + err_device;
407 /* correct the ECC error */
408 flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
409 buf[offset] ^= err_cor_value;
410 mtd->ecc_stats.corrected += flips_in_byte;
411 bitflips += flips_in_byte;
413 max_bitflips = max(max_bitflips, bitflips);
416 prev_sector = err_sector;
417 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
* Once all ECC errors have been handled, the controller triggers an
* ECC_TRANSACTION_DONE interrupt.
423 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
424 if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
430 static void denali_setup_dma64(struct denali_nand_info *denali,
431 dma_addr_t dma_addr, int page, int write)
434 const int page_count = 1;
436 mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
438 /* DMA is a three step process */
441 * 1. setup transfer type, interrupt when complete,
442 * burst len = 64 bytes, the number of pages
444 denali->host_write(denali, mode,
445 0x01002000 | (64 << 16) | (write << 8) | page_count);
447 /* 2. set memory low address */
448 denali->host_write(denali, mode, lower_32_bits(dma_addr));
450 /* 3. set memory high address */
451 denali->host_write(denali, mode, upper_32_bits(dma_addr));
454 static void denali_setup_dma32(struct denali_nand_info *denali,
455 dma_addr_t dma_addr, int page, int write)
458 const int page_count = 1;
460 mode = DENALI_MAP10 | DENALI_BANK(denali);
462 /* DMA is a four step process */
464 /* 1. setup transfer type and # of pages */
465 denali->host_write(denali, mode | page,
466 0x2000 | (write << 8) | page_count);
468 /* 2. set memory high address bits 23:8 */
469 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
471 /* 3. set memory low address bits 23:8 */
472 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
474 /* 4. interrupt when complete, burst len = 64 bytes */
475 denali->host_write(denali, mode | 0x14000, 0x2400);
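/*
 * PIO read: pull the page through the MAP01 window 32 bits at a time,
 * then check the transfer-complete and ECC error interrupt bits.
 */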
478 static int denali_pio_read(struct denali_nand_info *denali, void *buf,
479 size_t size, int page, int raw)
481 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
482 uint32_t *buf32 = (uint32_t *)buf;
483 uint32_t irq_status, ecc_err_mask;
486 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
487 ecc_err_mask = INTR__ECC_UNCOR_ERR;
489 ecc_err_mask = INTR__ECC_ERR;
491 denali_reset_irq(denali);
493 for (i = 0; i < size / 4; i++)
494 *buf32++ = denali->host_read(denali, addr);
496 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
497 if (!(irq_status & INTR__PAGE_XFER_INC))
500 if (irq_status & INTR__ERASED_PAGE)
501 memset(buf, 0xff, size);
503 return irq_status & ecc_err_mask ? -EBADMSG : 0;
506 static int denali_pio_write(struct denali_nand_info *denali,
507 const void *buf, size_t size, int page, int raw)
509 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
510 const uint32_t *buf32 = (uint32_t *)buf;
514 denali_reset_irq(denali);
516 for (i = 0; i < size / 4; i++)
517 denali->host_write(denali, addr, *buf32++);
519 irq_status = denali_wait_for_irq(denali,
520 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
521 if (!(irq_status & INTR__PROGRAM_COMP))
527 static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
528 size_t size, int page, int raw, int write)
531 return denali_pio_write(denali, buf, size, page, raw);
533 return denali_pio_read(denali, buf, size, page, raw);
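/*
 * DMA transfer of a whole page. If the buffer cannot be DMA-mapped,
 * fall back to PIO.
 */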
536 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
537 size_t size, int page, int raw, int write)
540 uint32_t irq_mask, irq_status, ecc_err_mask;
541 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
544 dma_addr = dma_map_single(denali->dev, buf, size, dir);
545 if (dma_mapping_error(denali->dev, dma_addr)) {
546 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
547 return denali_pio_xfer(denali, buf, size, page, raw, write);
552 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
553 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
554 * when the page program is completed.
556 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
558 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
559 irq_mask = INTR__DMA_CMD_COMP;
560 ecc_err_mask = INTR__ECC_UNCOR_ERR;
562 irq_mask = INTR__DMA_CMD_COMP;
563 ecc_err_mask = INTR__ECC_ERR;
566 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
568 * The ->setup_dma() hook kicks DMA by using the data/command
569 * interface, which belongs to a different AXI port from the
570 * register interface. Read back the register to avoid a race.
572 ioread32(denali->reg + DMA_ENABLE);
574 denali_reset_irq(denali);
575 denali->setup_dma(denali, dma_addr, page, write);
577 irq_status = denali_wait_for_irq(denali, irq_mask);
578 if (!(irq_status & INTR__DMA_CMD_COMP))
580 else if (irq_status & ecc_err_mask)
583 iowrite32(0, denali->reg + DMA_ENABLE);
585 dma_unmap_single(denali->dev, dma_addr, size, dir);
587 if (irq_status & INTR__ERASED_PAGE)
588 memset(buf, 0xff, size);
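/*
 * Common page transfer entry point: program the ECC and spare-area flags
 * for raw vs. ECC-protected access, then use DMA when available, PIO
 * otherwise.
 */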
593 static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
594 size_t size, int page, int raw, int write)
596 iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
597 iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
598 denali->reg + TRANSFER_SPARE_REG);
600 if (denali->dma_avail)
601 return denali_dma_xfer(denali, buf, size, page, raw, write);
603 return denali_pio_xfer(denali, buf, size, page, raw, write);
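/*
 * Transfer only the OOB area, following the controller's on-flash layout:
 * oob_skip_bytes (the BBM area) first, then the ECC bytes interleaved
 * after each data chunk, and finally the remaining free OOB bytes at the
 * end of the page.
 */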
606 static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
609 struct denali_nand_info *denali = mtd_to_denali(mtd);
610 int writesize = mtd->writesize;
611 int oobsize = mtd->oobsize;
612 uint8_t *bufpoi = chip->oob_poi;
613 int ecc_steps = chip->ecc.steps;
614 int ecc_size = chip->ecc.size;
615 int ecc_bytes = chip->ecc.bytes;
616 int oob_skip = denali->oob_skip_bytes;
617 size_t size = writesize + oobsize;
620 /* BBM at the beginning of the OOB area */
622 nand_prog_page_begin_op(chip, page, writesize, bufpoi,
625 nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
629 for (i = 0; i < ecc_steps; i++) {
630 pos = ecc_size + i * (ecc_size + ecc_bytes);
633 if (pos >= writesize)
635 else if (pos + len > writesize)
636 len = writesize - pos;
639 nand_change_write_column_op(chip, pos, bufpoi, len,
642 nand_change_read_column_op(chip, pos, bufpoi, len,
645 if (len < ecc_bytes) {
646 len = ecc_bytes - len;
648 nand_change_write_column_op(chip, writesize +
652 nand_change_read_column_op(chip, writesize +
660 len = oobsize - (bufpoi - chip->oob_poi);
662 nand_change_write_column_op(chip, size - len, bufpoi, len,
665 nand_change_read_column_op(chip, size - len, bufpoi, len,
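/*
 * Raw page read: transfer the page exactly as stored (syndrome layout,
 * ECC off) and rearrange it into the caller's payload buffer and
 * chip->oob_poi.
 */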
669 static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
670 int oob_required, int page)
672 struct mtd_info *mtd = nand_to_mtd(chip);
673 struct denali_nand_info *denali = mtd_to_denali(mtd);
674 int writesize = mtd->writesize;
675 int oobsize = mtd->oobsize;
676 int ecc_steps = chip->ecc.steps;
677 int ecc_size = chip->ecc.size;
678 int ecc_bytes = chip->ecc.bytes;
679 void *tmp_buf = denali->buf;
680 int oob_skip = denali->oob_skip_bytes;
681 size_t size = writesize + oobsize;
682 int ret, i, pos, len;
684 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
688 /* Arrange the buffer for syndrome payload/ecc layout */
690 for (i = 0; i < ecc_steps; i++) {
691 pos = i * (ecc_size + ecc_bytes);
694 if (pos >= writesize)
696 else if (pos + len > writesize)
697 len = writesize - pos;
699 memcpy(buf, tmp_buf + pos, len);
701 if (len < ecc_size) {
702 len = ecc_size - len;
703 memcpy(buf, tmp_buf + writesize + oob_skip,
711 uint8_t *oob = chip->oob_poi;
713 /* BBM at the beginning of the OOB area */
714 memcpy(oob, tmp_buf + writesize, oob_skip);
718 for (i = 0; i < ecc_steps; i++) {
719 pos = ecc_size + i * (ecc_size + ecc_bytes);
722 if (pos >= writesize)
724 else if (pos + len > writesize)
725 len = writesize - pos;
727 memcpy(oob, tmp_buf + pos, len);
729 if (len < ecc_bytes) {
730 len = ecc_bytes - len;
731 memcpy(oob, tmp_buf + writesize + oob_skip,
738 len = oobsize - (oob - chip->oob_poi);
739 memcpy(oob, tmp_buf + size - len, len);
745 static int denali_read_oob(struct nand_chip *chip, int page)
747 struct mtd_info *mtd = nand_to_mtd(chip);
749 denali_oob_xfer(mtd, chip, page, 0);
754 static int denali_write_oob(struct nand_chip *chip, int page)
756 struct mtd_info *mtd = nand_to_mtd(chip);
757 struct denali_nand_info *denali = mtd_to_denali(mtd);
759 denali_reset_irq(denali);
761 denali_oob_xfer(mtd, chip, page, 1);
763 return nand_prog_page_end_op(chip);
766 static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
767 int oob_required, int page)
769 struct mtd_info *mtd = nand_to_mtd(chip);
770 struct denali_nand_info *denali = mtd_to_denali(mtd);
771 unsigned long uncor_ecc_flags = 0;
775 ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
776 if (ret && ret != -EBADMSG)
779 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
780 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
781 else if (ret == -EBADMSG)
782 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
787 if (uncor_ecc_flags) {
788 ret = denali_read_oob(chip, page);
792 stat = denali_check_erased_page(mtd, chip, buf,
793 uncor_ecc_flags, stat);
799 static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
800 int oob_required, int page)
802 struct mtd_info *mtd = nand_to_mtd(chip);
803 struct denali_nand_info *denali = mtd_to_denali(mtd);
804 int writesize = mtd->writesize;
805 int oobsize = mtd->oobsize;
806 int ecc_steps = chip->ecc.steps;
807 int ecc_size = chip->ecc.size;
808 int ecc_bytes = chip->ecc.bytes;
809 void *tmp_buf = denali->buf;
810 int oob_skip = denali->oob_skip_bytes;
811 size_t size = writesize + oobsize;
* Fill the buffer with 0xff first unless this is a full-page transfer
* (both payload and OOB supplied). This simplifies the logic below.
818 if (!buf || !oob_required)
819 memset(tmp_buf, 0xff, size);
821 /* Arrange the buffer for syndrome payload/ecc layout */
823 for (i = 0; i < ecc_steps; i++) {
824 pos = i * (ecc_size + ecc_bytes);
827 if (pos >= writesize)
829 else if (pos + len > writesize)
830 len = writesize - pos;
832 memcpy(tmp_buf + pos, buf, len);
834 if (len < ecc_size) {
835 len = ecc_size - len;
836 memcpy(tmp_buf + writesize + oob_skip, buf,
844 const uint8_t *oob = chip->oob_poi;
846 /* BBM at the beginning of the OOB area */
847 memcpy(tmp_buf + writesize, oob, oob_skip);
851 for (i = 0; i < ecc_steps; i++) {
852 pos = ecc_size + i * (ecc_size + ecc_bytes);
855 if (pos >= writesize)
857 else if (pos + len > writesize)
858 len = writesize - pos;
860 memcpy(tmp_buf + pos, oob, len);
862 if (len < ecc_bytes) {
863 len = ecc_bytes - len;
864 memcpy(tmp_buf + writesize + oob_skip, oob,
871 len = oobsize - (oob - chip->oob_poi);
872 memcpy(tmp_buf + size - len, oob, len);
875 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
878 static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
879 int oob_required, int page)
881 struct mtd_info *mtd = nand_to_mtd(chip);
882 struct denali_nand_info *denali = mtd_to_denali(mtd);
884 return denali_data_xfer(denali, (void *)buf, mtd->writesize,
888 static void denali_select_chip(struct nand_chip *chip, int cs)
890 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
892 denali->active_bank = cs;
895 static int denali_waitfunc(struct nand_chip *chip)
897 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
900 /* R/B# pin transitioned from low to high? */
901 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
903 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
906 static int denali_erase(struct nand_chip *chip, int page)
908 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
911 denali_reset_irq(denali);
913 denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
916 /* wait for erase to complete or failure to occur */
917 irq_status = denali_wait_for_irq(denali,
918 INTR__ERASE_COMP | INTR__ERASE_FAIL);
920 return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
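/*
 * Convert the SDR timing parameters into controller register values,
 * expressed in clk_x cycles.
 */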
923 static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
924 const struct nand_data_interface *conf)
926 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
927 const struct nand_sdr_timings *timings;
928 unsigned long t_x, mult_x;
929 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
930 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
931 int addr_2_data_mask;
934 timings = nand_get_sdr_timings(conf);
936 return PTR_ERR(timings);
938 /* clk_x period in picoseconds */
939 t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
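/* e.g. clk_x_rate = 200 MHz gives t_x = 5000 ps */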
* The bus interface clock, clk_x, is phase-aligned with the core clock.
* clk_x is an integral multiple N of the core clock. The value of N is
* configured at IP delivery time, and its possible values are 4, 5, and 6.
948 mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
949 if (mult_x < 4 || mult_x > 6)
952 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
955 /* tREA -> ACC_CLKS */
956 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
957 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
959 tmp = ioread32(denali->reg + ACC_CLKS);
960 tmp &= ~ACC_CLKS__VALUE;
961 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
962 iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRHW -> RE_2_WE */
965 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
966 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
968 tmp = ioread32(denali->reg + RE_2_WE);
969 tmp &= ~RE_2_WE__VALUE;
970 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
971 iowrite32(tmp, denali->reg + RE_2_WE);
973 /* tRHZ -> RE_2_RE */
974 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
975 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
977 tmp = ioread32(denali->reg + RE_2_RE);
978 tmp &= ~RE_2_RE__VALUE;
979 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
980 iowrite32(tmp, denali->reg + RE_2_RE);
983 * tCCS, tWHR -> WE_2_RE
985 * With WE_2_RE properly set, the Denali controller automatically takes
986 * care of the delay; the driver need not set NAND_WAIT_TCCS.
988 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
989 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
991 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
992 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
993 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
994 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
996 /* tADL -> ADDR_2_DATA */
/* in older versions, ADDR_2_DATA is only 6 bits wide */
999 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
1000 if (denali->revision < 0x0501)
1001 addr_2_data_mask >>= 1;
1003 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
1004 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
1006 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
1007 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
1008 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
1009 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
1011 /* tREH, tWH -> RDWR_EN_HI_CNT */
1012 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
1014 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
1016 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
1017 tmp &= ~RDWR_EN_HI_CNT__VALUE;
1018 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
1019 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
1021 /* tRP, tWP -> RDWR_EN_LO_CNT */
1022 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
1023 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
1025 rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
1026 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
1027 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
1029 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
1030 tmp &= ~RDWR_EN_LO_CNT__VALUE;
1031 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
1032 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
1034 /* tCS, tCEA -> CS_SETUP_CNT */
1035 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
1036 (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
1038 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
1040 tmp = ioread32(denali->reg + CS_SETUP_CNT);
1041 tmp &= ~CS_SETUP_CNT__VALUE;
1042 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
1043 iowrite32(tmp, denali->reg + CS_SETUP_CNT);
1048 static void denali_hw_init(struct denali_nand_info *denali)
* The REVISION register may not be reliable. Platforms are allowed to
* override it.
1054 if (!denali->revision)
1055 denali->revision = swab16(ioread32(denali->reg + REVISION));
1058 * Set how many bytes should be skipped before writing data in OOB.
1059 * If a non-zero value has already been set (by firmware or something),
1060 * just use it. Otherwise, set the driver default.
1062 denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
1063 if (!denali->oob_skip_bytes) {
1064 denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
1065 iowrite32(denali->oob_skip_bytes,
1066 denali->reg + SPARE_AREA_SKIP_BYTES);
1069 denali_detect_max_banks(denali);
1070 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
1071 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
1073 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
1076 int denali_calc_ecc_bytes(int step_size, int strength)
/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
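/* e.g. step_size = 512, strength = 8: fls(4096) = 13, DIV_ROUND_UP(8 * 13, 16) * 2 = 14 bytes */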
1079 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
1081 EXPORT_SYMBOL(denali_calc_ecc_bytes);
1083 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
1084 struct mtd_oob_region *oobregion)
1086 struct denali_nand_info *denali = mtd_to_denali(mtd);
1087 struct nand_chip *chip = mtd_to_nand(mtd);
1092 oobregion->offset = denali->oob_skip_bytes;
1093 oobregion->length = chip->ecc.total;
1098 static int denali_ooblayout_free(struct mtd_info *mtd, int section,
1099 struct mtd_oob_region *oobregion)
1101 struct denali_nand_info *denali = mtd_to_denali(mtd);
1102 struct nand_chip *chip = mtd_to_nand(mtd);
1107 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
1108 oobregion->length = mtd->oobsize - oobregion->offset;
1113 static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
1114 .ecc = denali_ooblayout_ecc,
1115 .free = denali_ooblayout_free,
1118 static int denali_multidev_fixup(struct denali_nand_info *denali)
1120 struct nand_chip *chip = &denali->nand;
1121 struct mtd_info *mtd = nand_to_mtd(chip);
1124 * Support for multi device:
1125 * When the IP configuration is x16 capable and two x8 chips are
1126 * connected in parallel, DEVICES_CONNECTED should be set to 2.
* In this case, the core framework knows nothing about this fact,
* so we must tell it the _logical_ pagesize and the other doubled
* geometry parameters.
1130 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
* On some SoCs, DEVICES_CONNECTED is not auto-detected.
* For those, DEVICES_CONNECTED is left at 0; in that case, set it to 1.
1136 if (denali->devs_per_cs == 0) {
1137 denali->devs_per_cs = 1;
1138 iowrite32(1, denali->reg + DEVICES_CONNECTED);
1141 if (denali->devs_per_cs == 1)
1144 if (denali->devs_per_cs != 2) {
1145 dev_err(denali->dev, "unsupported number of devices %d\n",
1146 denali->devs_per_cs);
1150 /* 2 chips in parallel */
1152 mtd->erasesize <<= 1;
1153 mtd->writesize <<= 1;
1155 chip->chipsize <<= 1;
1156 chip->page_shift += 1;
1157 chip->phys_erase_shift += 1;
1158 chip->bbt_erase_shift += 1;
1159 chip->chip_shift += 1;
1160 chip->pagemask <<= 1;
1161 chip->ecc.size <<= 1;
1162 chip->ecc.bytes <<= 1;
1163 chip->ecc.strength <<= 1;
1164 denali->oob_skip_bytes <<= 1;
1169 static int denali_attach_chip(struct nand_chip *chip)
1171 struct mtd_info *mtd = nand_to_mtd(chip);
1172 struct denali_nand_info *denali = mtd_to_denali(mtd);
1175 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
1176 denali->dma_avail = 1;
1178 if (denali->dma_avail) {
1179 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
1181 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
1183 dev_info(denali->dev,
1184 "Failed to set DMA mask. Disabling DMA.\n");
1185 denali->dma_avail = 0;
1189 if (denali->dma_avail) {
1190 chip->options |= NAND_USE_BOUNCE_BUFFER;
1191 chip->buf_align = 16;
1192 if (denali->caps & DENALI_CAP_DMA_64BIT)
1193 denali->setup_dma = denali_setup_dma64;
1195 denali->setup_dma = denali_setup_dma32;
1198 chip->bbt_options |= NAND_BBT_USE_FLASH;
1199 chip->bbt_options |= NAND_BBT_NO_OOB;
1200 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1201 chip->options |= NAND_NO_SUBPAGE_WRITE;
1203 ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
1204 mtd->oobsize - denali->oob_skip_bytes);
1206 dev_err(denali->dev, "Failed to setup ECC settings.\n");
1210 dev_dbg(denali->dev,
1211 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1212 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1214 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
1215 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
1216 denali->reg + ECC_CORRECTION);
1217 iowrite32(mtd->erasesize / mtd->writesize,
1218 denali->reg + PAGES_PER_BLOCK);
1219 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
1220 denali->reg + DEVICE_WIDTH);
1221 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
1222 denali->reg + TWO_ROW_ADDR_CYCLES);
1223 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
1224 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
1226 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
1227 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
1228 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1229 iowrite32(mtd->writesize / chip->ecc.size,
1230 denali->reg + CFG_NUM_DATA_BLOCKS);
1232 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
1234 if (chip->options & NAND_BUSWIDTH_16) {
1235 chip->legacy.read_buf = denali_read_buf16;
1236 chip->legacy.write_buf = denali_write_buf16;
1238 chip->legacy.read_buf = denali_read_buf;
1239 chip->legacy.write_buf = denali_write_buf;
1241 chip->ecc.read_page = denali_read_page;
1242 chip->ecc.read_page_raw = denali_read_page_raw;
1243 chip->ecc.write_page = denali_write_page;
1244 chip->ecc.write_page_raw = denali_write_page_raw;
1245 chip->ecc.read_oob = denali_read_oob;
1246 chip->ecc.write_oob = denali_write_oob;
1247 chip->legacy.erase = denali_erase;
1249 ret = denali_multidev_fixup(denali);
1254 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
1255 * use devm_kmalloc() because the memory allocated by devm_ does not
1256 * guarantee DMA-safe alignment.
1258 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
1265 static void denali_detach_chip(struct nand_chip *chip)
1267 struct mtd_info *mtd = nand_to_mtd(chip);
1268 struct denali_nand_info *denali = mtd_to_denali(mtd);
1273 static const struct nand_controller_ops denali_controller_ops = {
1274 .attach_chip = denali_attach_chip,
1275 .detach_chip = denali_detach_chip,
1276 .setup_data_interface = denali_setup_data_interface,
1279 int denali_init(struct denali_nand_info *denali)
1281 struct nand_chip *chip = &denali->nand;
1282 struct mtd_info *mtd = nand_to_mtd(chip);
1283 u32 features = ioread32(denali->reg + FEATURES);
1286 mtd->dev.parent = denali->dev;
1287 denali_hw_init(denali);
1289 init_completion(&denali->complete);
1290 spin_lock_init(&denali->irq_lock);
1292 denali_clear_irq_all(denali);
1294 ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
1295 IRQF_SHARED, DENALI_NAND_NAME, denali);
1297 dev_err(denali->dev, "Unable to request IRQ\n");
1301 denali_enable_irq(denali);
1303 denali->active_bank = DENALI_INVALID_BANK;
1305 nand_set_flash_node(chip, denali->dev->of_node);
/* Fall back to the default name if DT did not provide a "label" property */
1308 mtd->name = "denali-nand";
1310 chip->legacy.select_chip = denali_select_chip;
1311 chip->legacy.read_byte = denali_read_byte;
1312 chip->legacy.write_byte = denali_write_byte;
1313 chip->legacy.cmd_ctrl = denali_cmd_ctrl;
1314 chip->legacy.waitfunc = denali_waitfunc;
1316 if (features & FEATURES__INDEX_ADDR) {
1317 denali->host_read = denali_indexed_read;
1318 denali->host_write = denali_indexed_write;
1320 denali->host_read = denali_direct_read;
1321 denali->host_write = denali_direct_write;
1324 /* clk rate info is needed for setup_data_interface */
1325 if (!denali->clk_rate || !denali->clk_x_rate)
1326 chip->options |= NAND_KEEP_TIMINGS;
1328 chip->legacy.dummy_controller.ops = &denali_controller_ops;
1329 ret = nand_scan(chip, denali->max_banks);
1333 ret = mtd_device_register(mtd, NULL, 0);
1335 dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
1344 denali_disable_irq(denali);
1348 EXPORT_SYMBOL(denali_init);
1350 void denali_remove(struct denali_nand_info *denali)
1352 nand_release(&denali->nand);
1353 denali_disable_irq(denali);
1355 EXPORT_SYMBOL(denali_remove);
1357 MODULE_DESCRIPTION("Driver core for Denali NAND controller");
1358 MODULE_AUTHOR("Intel Corporation and its suppliers");
1359 MODULE_LICENSE("GPL v2");