Merge branch 'ipvlan-Fix-insufficient-skb-linear-check'
[sfrench/cifs-2.6.git] / drivers / mtd / nand / qcom_nandc.c
1 /*
2  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  */
13
14 #include <linux/clk.h>
15 #include <linux/slab.h>
16 #include <linux/bitops.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dmaengine.h>
19 #include <linux/module.h>
20 #include <linux/mtd/rawnand.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/delay.h>
25
26 /* NANDc reg offsets */
27 #define NAND_FLASH_CMD                  0x00
28 #define NAND_ADDR0                      0x04
29 #define NAND_ADDR1                      0x08
30 #define NAND_FLASH_CHIP_SELECT          0x0c
31 #define NAND_EXEC_CMD                   0x10
32 #define NAND_FLASH_STATUS               0x14
33 #define NAND_BUFFER_STATUS              0x18
34 #define NAND_DEV0_CFG0                  0x20
35 #define NAND_DEV0_CFG1                  0x24
36 #define NAND_DEV0_ECC_CFG               0x28
37 #define NAND_DEV1_ECC_CFG               0x2c
38 #define NAND_DEV1_CFG0                  0x30
39 #define NAND_DEV1_CFG1                  0x34
40 #define NAND_READ_ID                    0x40
41 #define NAND_READ_STATUS                0x44
42 #define NAND_DEV_CMD0                   0xa0
43 #define NAND_DEV_CMD1                   0xa4
44 #define NAND_DEV_CMD2                   0xa8
45 #define NAND_DEV_CMD_VLD                0xac
46 #define SFLASHC_BURST_CFG               0xe0
47 #define NAND_ERASED_CW_DETECT_CFG       0xe8
48 #define NAND_ERASED_CW_DETECT_STATUS    0xec
49 #define NAND_EBI2_ECC_BUF_CFG           0xf0
50 #define FLASH_BUF_ACC                   0x100
51
52 #define NAND_CTRL                       0xf00
53 #define NAND_VERSION                    0xf08
54 #define NAND_READ_LOCATION_0            0xf20
55 #define NAND_READ_LOCATION_1            0xf24
56 #define NAND_READ_LOCATION_2            0xf28
57 #define NAND_READ_LOCATION_3            0xf2c
58
59 /* dummy register offsets, used by write_reg_dma */
60 #define NAND_DEV_CMD1_RESTORE           0xdead
61 #define NAND_DEV_CMD_VLD_RESTORE        0xbeef
62
63 /* NAND_FLASH_CMD bits */
64 #define PAGE_ACC                        BIT(4)
65 #define LAST_PAGE                       BIT(5)
66
67 /* NAND_FLASH_CHIP_SELECT bits */
68 #define NAND_DEV_SEL                    0
69 #define DM_EN                           BIT(2)
70
71 /* NAND_FLASH_STATUS bits */
72 #define FS_OP_ERR                       BIT(4)
73 #define FS_READY_BSY_N                  BIT(5)
74 #define FS_MPU_ERR                      BIT(8)
75 #define FS_DEVICE_STS_ERR               BIT(16)
76 #define FS_DEVICE_WP                    BIT(23)
77
78 /* NAND_BUFFER_STATUS bits */
79 #define BS_UNCORRECTABLE_BIT            BIT(8)
80 #define BS_CORRECTABLE_ERR_MSK          0x1f
81
82 /* NAND_DEVn_CFG0 bits */
83 #define DISABLE_STATUS_AFTER_WRITE      4
84 #define CW_PER_PAGE                     6
85 #define UD_SIZE_BYTES                   9
86 #define ECC_PARITY_SIZE_BYTES_RS        19
87 #define SPARE_SIZE_BYTES                23
88 #define NUM_ADDR_CYCLES                 27
89 #define STATUS_BFR_READ                 30
90 #define SET_RD_MODE_AFTER_STATUS        31
91
92 /* NAND_DEVn_CFG1 bits */
93 #define DEV0_CFG1_ECC_DISABLE           0
94 #define WIDE_FLASH                      1
95 #define NAND_RECOVERY_CYCLES            2
96 #define CS_ACTIVE_BSY                   5
97 #define BAD_BLOCK_BYTE_NUM              6
98 #define BAD_BLOCK_IN_SPARE_AREA         16
99 #define WR_RD_BSY_GAP                   17
100 #define ENABLE_BCH_ECC                  27
101
102 /* NAND_DEV0_ECC_CFG bits */
103 #define ECC_CFG_ECC_DISABLE             0
104 #define ECC_SW_RESET                    1
105 #define ECC_MODE                        4
106 #define ECC_PARITY_SIZE_BYTES_BCH       8
107 #define ECC_NUM_DATA_BYTES              16
108 #define ECC_FORCE_CLK_OPEN              30
109
110 /* NAND_DEV_CMD1 bits */
111 #define READ_ADDR                       0
112
113 /* NAND_DEV_CMD_VLD bits */
114 #define READ_START_VLD                  BIT(0)
115 #define READ_STOP_VLD                   BIT(1)
116 #define WRITE_START_VLD                 BIT(2)
117 #define ERASE_START_VLD                 BIT(3)
118 #define SEQ_READ_START_VLD              BIT(4)
119
120 /* NAND_EBI2_ECC_BUF_CFG bits */
121 #define NUM_STEPS                       0
122
123 /* NAND_ERASED_CW_DETECT_CFG bits */
124 #define ERASED_CW_ECC_MASK              1
125 #define AUTO_DETECT_RES                 0
126 #define MASK_ECC                        (1 << ERASED_CW_ECC_MASK)
127 #define RESET_ERASED_DET                (1 << AUTO_DETECT_RES)
128 #define ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
129 #define CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
130 #define SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
131
132 /* NAND_ERASED_CW_DETECT_STATUS bits */
133 #define PAGE_ALL_ERASED                 BIT(7)
134 #define CODEWORD_ALL_ERASED             BIT(6)
135 #define PAGE_ERASED                     BIT(5)
136 #define CODEWORD_ERASED                 BIT(4)
137 #define ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
138 #define ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
139
140 /* NAND_READ_LOCATION_n bits */
141 #define READ_LOCATION_OFFSET            0
142 #define READ_LOCATION_SIZE              16
143 #define READ_LOCATION_LAST              31
144
145 /* Version Mask */
146 #define NAND_VERSION_MAJOR_MASK         0xf0000000
147 #define NAND_VERSION_MAJOR_SHIFT        28
148 #define NAND_VERSION_MINOR_MASK         0x0fff0000
149 #define NAND_VERSION_MINOR_SHIFT        16
150
151 /* NAND OP_CMDs */
152 #define PAGE_READ                       0x2
153 #define PAGE_READ_WITH_ECC              0x3
154 #define PAGE_READ_WITH_ECC_SPARE        0x4
155 #define PROGRAM_PAGE                    0x6
156 #define PAGE_PROGRAM_WITH_ECC           0x7
157 #define PROGRAM_PAGE_SPARE              0x9
158 #define BLOCK_ERASE                     0xa
159 #define FETCH_ID                        0xb
160 #define RESET_DEVICE                    0xd
161
162 /* Default Value for NAND_DEV_CMD_VLD */
163 #define NAND_DEV_CMD_VLD_VAL            (READ_START_VLD | WRITE_START_VLD | \
164                                          ERASE_START_VLD | SEQ_READ_START_VLD)
165
166 /* NAND_CTRL bits */
167 #define BAM_MODE_EN                     BIT(0)
168
169 /*
170  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
171  * the driver calls the chunks 'step' or 'codeword' interchangeably
172  */
173 #define NANDC_STEP_SIZE                 512
174
175 /*
176  * the largest page size we support is 8K, this will have 16 steps/codewords
177  * of 512 bytes each
178  */
179 #define MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
180
181 /* we read at most 3 registers per codeword scan */
182 #define MAX_REG_RD                      (3 * MAX_NUM_STEPS)
183
184 /* ECC modes supported by the controller */
185 #define ECC_NONE        BIT(0)
186 #define ECC_RS_4BIT     BIT(1)
187 #define ECC_BCH_4BIT    BIT(2)
188 #define ECC_BCH_8BIT    BIT(3)
189
190 #define nandc_set_read_loc(nandc, reg, offset, size, is_last)   \
191 nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                  \
192               ((offset) << READ_LOCATION_OFFSET) |              \
193               ((size) << READ_LOCATION_SIZE) |                  \
194               ((is_last) << READ_LOCATION_LAST))
195
196 /*
197  * Returns the actual register address for all NAND_DEV_ registers
198  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
199  */
200 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
201
202 #define QPIC_PER_CW_CMD_SGL             32
203 #define QPIC_PER_CW_DATA_SGL            8
204
205 /*
206  * Flags used in DMA descriptor preparation helper functions
207  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
208  */
209 /* Don't set the EOT in current tx BAM sgl */
210 #define NAND_BAM_NO_EOT                 BIT(0)
211 /* Set the NWD flag in current BAM sgl */
212 #define NAND_BAM_NWD                    BIT(1)
213 /* Finish writing in the current BAM sgl and start writing in another BAM sgl */
214 #define NAND_BAM_NEXT_SGL               BIT(2)
215 /*
216  * Erased codeword status is being used two times in single transfer so this
217  * flag will determine the current value of erased codeword status register
218  */
219 #define NAND_ERASED_CW_SET              BIT(4)
220
221 /*
222  * This data type corresponds to the BAM transaction which will be used for all
223  * NAND transfers.
224  * @cmd_sgl - sgl for NAND BAM command pipe
225  * @data_sgl - sgl for NAND BAM consumer/producer pipe
226  * @cmd_sgl_pos - current index in command sgl.
227  * @cmd_sgl_start - start index in command sgl.
228  * @tx_sgl_pos - current index in data sgl for tx.
229  * @tx_sgl_start - start index in data sgl for tx.
230  * @rx_sgl_pos - current index in data sgl for rx.
231  * @rx_sgl_start - start index in data sgl for rx.
232  */
233 struct bam_transaction {
234         struct scatterlist *cmd_sgl;
235         struct scatterlist *data_sgl;
236         u32 cmd_sgl_pos;
237         u32 cmd_sgl_start;
238         u32 tx_sgl_pos;
239         u32 tx_sgl_start;
240         u32 rx_sgl_pos;
241         u32 rx_sgl_start;
242 };
243
244 /*
245  * This data type corresponds to the nand dma descriptor
246  * @list - list for desc_info
247  * @dir - DMA transfer direction
248  * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
249  *            ADM
250  * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
251  * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
252  * @dma_desc - low level DMA engine descriptor
253  */
254 struct desc_info {
255         struct list_head node;
256
257         enum dma_data_direction dir;
258         union {
259                 struct scatterlist adm_sgl;
260                 struct {
261                         struct scatterlist *bam_sgl;
262                         int sgl_cnt;
263                 };
264         };
265         struct dma_async_tx_descriptor *dma_desc;
266 };
267
268 /*
269  * holds the current register values that we want to write. acts as a contiguous
270  * chunk of memory which we use to write the controller registers through DMA.
271  */
272 struct nandc_regs {
273         __le32 cmd;
274         __le32 addr0;
275         __le32 addr1;
276         __le32 chip_sel;
277         __le32 exec;
278
279         __le32 cfg0;
280         __le32 cfg1;
281         __le32 ecc_bch_cfg;
282
283         __le32 clrflashstatus;
284         __le32 clrreadstatus;
285
286         __le32 cmd1;
287         __le32 vld;
288
289         __le32 orig_cmd1;
290         __le32 orig_vld;
291
292         __le32 ecc_buf_cfg;
293         __le32 read_location0;
294         __le32 read_location1;
295         __le32 read_location2;
296         __le32 read_location3;
297
298         __le32 erased_cw_detect_cfg_clr;
299         __le32 erased_cw_detect_cfg_set;
300 };
301
302 /*
303  * NAND controller data struct
304  *
305  * @controller:                 base controller structure
306  * @host_list:                  list containing all the chips attached to the
307  *                              controller
308  * @dev:                        parent device
309  * @base:                       MMIO base
310  * @base_dma:                   physical base address of controller registers
311  * @core_clk:                   controller clock
312  * @aon_clk:                    another controller clock
313  *
314  * @chan:                       dma channel
315  * @cmd_crci:                   ADM DMA CRCI for command flow control
316  * @data_crci:                  ADM DMA CRCI for data flow control
317  * @desc_list:                  DMA descriptor list (list of desc_infos)
318  *
319  * @data_buffer:                our local DMA buffer for page read/writes,
320  *                              used when we can't use the buffer provided
321  *                              by upper layers directly
322  * @buf_size/count/start:       markers for chip->read_buf/write_buf functions
323  * @reg_read_buf:               local buffer for reading back registers via DMA
324  * @reg_read_dma:               contains dma address for register read buffer
325  * @reg_read_pos:               marker for data read in reg_read_buf
326  *
327  * @regs:                       a contiguous chunk of memory for DMA register
328  *                              writes. contains the register values to be
329  *                              written to controller
330  * @cmd1/vld:                   some fixed controller register values
331  * @props:                      properties of current NAND controller,
332  *                              initialized via DT match data
333  * @max_cwperpage:              maximum QPIC codewords required. calculated
334  *                              from all connected NAND devices pagesize
335  */
336 struct qcom_nand_controller {
337         struct nand_hw_control controller;
338         struct list_head host_list;
339
340         struct device *dev;
341
342         void __iomem *base;
343         dma_addr_t base_dma;
344
345         struct clk *core_clk;
346         struct clk *aon_clk;
347
348         union {
349                 /* will be used only by QPIC for BAM DMA */
350                 struct {
351                         struct dma_chan *tx_chan;
352                         struct dma_chan *rx_chan;
353                         struct dma_chan *cmd_chan;
354                 };
355
356                 /* will be used only by EBI2 for ADM DMA */
357                 struct {
358                         struct dma_chan *chan;
359                         unsigned int cmd_crci;
360                         unsigned int data_crci;
361                 };
362         };
363
364         struct list_head desc_list;
365         struct bam_transaction *bam_txn;
366
367         u8              *data_buffer;
368         int             buf_size;
369         int             buf_count;
370         int             buf_start;
371         unsigned int    max_cwperpage;
372
373         __le32 *reg_read_buf;
374         dma_addr_t reg_read_dma;
375         int reg_read_pos;
376
377         struct nandc_regs *regs;
378
379         u32 cmd1, vld;
380         const struct qcom_nandc_props *props;
381 };
382
383 /*
384  * NAND chip structure
385  *
386  * @chip:                       base NAND chip structure
387  * @node:                       list node to add itself to host_list in
388  *                              qcom_nand_controller
389  *
390  * @cs:                         chip select value for this chip
391  * @cw_size:                    the number of bytes in a single step/codeword
392  *                              of a page, consisting of all data, ecc, spare
393  *                              and reserved bytes
394  * @cw_data:                    the number of bytes within a codeword protected
395  *                              by ECC
396  * @use_ecc:                    request the controller to use ECC for the
397  *                              upcoming read/write
398  * @bch_enabled:                flag to tell whether BCH ECC mode is used
399  * @ecc_bytes_hw:               ECC bytes used by controller hardware for this
400  *                              chip
401  * @status:                     value to be returned if NAND_CMD_STATUS command
402  *                              is executed
403  * @last_command:               keeps track of last command on this chip. used
404  *                              for reading correct status
405  *
406  * @cfg0, cfg1, cfg0_raw..:     NANDc register configurations needed for
407  *                              ecc/non-ecc mode for the current nand flash
408  *                              device
409  */
410 struct qcom_nand_host {
411         struct nand_chip chip;
412         struct list_head node;
413
414         int cs;
415         int cw_size;
416         int cw_data;
417         bool use_ecc;
418         bool bch_enabled;
419         int ecc_bytes_hw;
420         int spare_bytes;
421         int bbm_size;
422         u8 status;
423         int last_command;
424
425         u32 cfg0, cfg1;
426         u32 cfg0_raw, cfg1_raw;
427         u32 ecc_buf_cfg;
428         u32 ecc_bch_cfg;
429         u32 clrflashstatus;
430         u32 clrreadstatus;
431 };
432
433 /*
434  * This data type corresponds to the NAND controller properties which varies
435  * among different NAND controllers.
436  * @ecc_modes - ecc mode for NAND
437  * @is_bam - whether NAND controller is using BAM
438  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
439  */
440 struct qcom_nandc_props {
441         u32 ecc_modes;
442         bool is_bam;
443         u32 dev_cmd_reg_start;
444 };
445
446 /* Frees the BAM transaction memory */
447 static void free_bam_transaction(struct qcom_nand_controller *nandc)
448 {
449         struct bam_transaction *bam_txn = nandc->bam_txn;
450
451         devm_kfree(nandc->dev, bam_txn);
452 }
453
454 /* Allocates and Initializes the BAM transaction */
455 static struct bam_transaction *
456 alloc_bam_transaction(struct qcom_nand_controller *nandc)
457 {
458         struct bam_transaction *bam_txn;
459         size_t bam_txn_size;
460         unsigned int num_cw = nandc->max_cwperpage;
461         void *bam_txn_buf;
462
463         bam_txn_size =
464                 sizeof(*bam_txn) + num_cw *
465                 ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
466                 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
467
468         bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
469         if (!bam_txn_buf)
470                 return NULL;
471
472         bam_txn = bam_txn_buf;
473         bam_txn_buf += sizeof(*bam_txn);
474
475         bam_txn->cmd_sgl = bam_txn_buf;
476         bam_txn_buf +=
477                 sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
478
479         bam_txn->data_sgl = bam_txn_buf;
480
481         return bam_txn;
482 }
483
484 /* Clears the BAM transaction indexes */
485 static void clear_bam_transaction(struct qcom_nand_controller *nandc)
486 {
487         struct bam_transaction *bam_txn = nandc->bam_txn;
488
489         if (!nandc->props->is_bam)
490                 return;
491
492         bam_txn->cmd_sgl_pos = 0;
493         bam_txn->cmd_sgl_start = 0;
494         bam_txn->tx_sgl_pos = 0;
495         bam_txn->tx_sgl_start = 0;
496         bam_txn->rx_sgl_pos = 0;
497         bam_txn->rx_sgl_start = 0;
498
499         sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
500                       QPIC_PER_CW_CMD_SGL);
501         sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
502                       QPIC_PER_CW_DATA_SGL);
503 }
504
/* Convert a generic nand_chip pointer to its enclosing qcom_nand_host */
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}
509
/*
 * Retrieve the qcom_nand_controller embedding chip->controller; all chips
 * on this controller share the same embedded nand_hw_control.
 */
static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}
516
/* MMIO read of a controller register at the given byte offset */
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}
521
/* MMIO write of a controller register at the given byte offset */
static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
527
528 static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
529                                           bool is_cpu)
530 {
531         if (!nandc->props->is_bam)
532                 return;
533
534         if (is_cpu)
535                 dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
536                                         MAX_REG_RD *
537                                         sizeof(*nandc->reg_read_buf),
538                                         DMA_FROM_DEVICE);
539         else
540                 dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
541                                            MAX_REG_RD *
542                                            sizeof(*nandc->reg_read_buf),
543                                            DMA_FROM_DEVICE);
544 }
545
/*
 * Map a controller register offset to its shadow copy inside struct
 * nandc_regs (the contiguous chunk written to the hardware via DMA).
 * Returns NULL for offsets that have no shadow register.
 */
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		/* writes to FLASH_STATUS are used to clear sticky bits */
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		/* dummy offset: shadow holding the original CMD1 value */
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		/* dummy offset: shadow holding the original CMD_VLD value */
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}
591
592 static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
593                           u32 val)
594 {
595         struct nandc_regs *regs = nandc->regs;
596         __le32 *reg;
597
598         reg = offset_to_nandc_reg(regs, offset);
599
600         if (reg)
601                 *reg = cpu_to_le32(val);
602 }
603
/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* x16 devices are addressed in 16-bit words, so halve the byte column */
	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	/* ADDR0 = low 16 page bits | column; ADDR1 = next 8 page bits */
	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
616
617 /*
618  * update_rw_regs:      set up read/write register values, these will be
619  *                      written to the NAND controller registers via DMA
620  *
621  * @num_cw:             number of steps for the read/write operation
622  * @read:               read or write operation
623  */
624 static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
625 {
626         struct nand_chip *chip = &host->chip;
627         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
628         u32 cmd, cfg0, cfg1, ecc_bch_cfg;
629
630         if (read) {
631                 if (host->use_ecc)
632                         cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
633                 else
634                         cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
635         } else {
636                         cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
637         }
638
639         if (host->use_ecc) {
640                 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
641                                 (num_cw - 1) << CW_PER_PAGE;
642
643                 cfg1 = host->cfg1;
644                 ecc_bch_cfg = host->ecc_bch_cfg;
645         } else {
646                 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
647                                 (num_cw - 1) << CW_PER_PAGE;
648
649                 cfg1 = host->cfg1_raw;
650                 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
651         }
652
653         nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
654         nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
655         nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
656         nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
657         nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
658         nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
659         nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
660         nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
661
662         if (read)
663                 nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
664                                    host->cw_data : host->cw_size, 1);
665 }
666
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/*
	 * the descriptor consumes the sgl entries accumulated since the last
	 * call on this channel: [*_sgl_start, *_sgl_pos). Advancing *_sgl_start
	 * to *_sgl_pos here marks them consumed for the next descriptor.
	 */
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	/* terminate the sgl chain at the last consumed entry before mapping */
	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* queue for submission; descriptors are submitted/freed later */
	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
735
736 /*
737  * Prepares the data descriptor for BAM DMA which will be used for NAND
738  * data reads and writes.
739  */
740 static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
741                                   const void *vaddr,
742                                   int size, unsigned int flags)
743 {
744         int ret;
745         struct bam_transaction *bam_txn = nandc->bam_txn;
746
747         if (read) {
748                 sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
749                            vaddr, size);
750                 bam_txn->rx_sgl_pos++;
751         } else {
752                 sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
753                            vaddr, size);
754                 bam_txn->tx_sgl_pos++;
755
756                 /*
757                  * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
758                  * is not set, form the DMA descriptor
759                  */
760                 if (!(flags & NAND_BAM_NO_EOT)) {
761                         ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
762                                                      DMA_PREP_INTERRUPT);
763                         if (ret)
764                                 return ret;
765                 }
766         }
767
768         return 0;
769 }
770
/*
 * Prepare a single-sgl ADM DMA descriptor transferring @size bytes between
 * @vaddr and the controller register/FIFO at @reg_off. @flow_control enables
 * CRCI-based flow control (required for reads of status-type registers).
 */
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* ADM uses the single embedded sgl, unlike the BAM array variant */
	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		/* device-to-memory: source is the controller register */
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		/* memory-to-device: destination is the controller register */
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	/* queue for later submission along with the other descriptors */
	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
840
841 /*
842  * read_reg_dma:        prepares a descriptor to read a given number of
843  *                      contiguous registers to the reg_read_buf pointer
844  *
845  * @first:              offset of the first register in the contiguous block
846  * @num_regs:           number of registers to read
847  * @flags:              flags to control DMA descriptor preparation
848  */
849 static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
850                         int num_regs, unsigned int flags)
851 {
852         bool flow_control = false;
853         void *vaddr;
854         int size;
855
856         if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
857                 flow_control = true;
858
859         if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
860                 first = dev_cmd_reg_addr(nandc, first);
861
862         size = num_regs * sizeof(u32);
863         vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
864         nandc->reg_read_pos += num_regs;
865
866         return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
867 }
868
/*
 * write_reg_dma:       prepares a descriptor to write a given number of
 *                      contiguous registers
 *
 * @first:              offset of the first register in the contiguous block
 * @num_regs:           number of registers to write
 * @flags:              flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;
	int size;

	/*
	 * look up the shadow register value BEFORE 'first' is possibly
	 * remapped below: the remapping only changes the bus address we DMA
	 * to, not which shadow value is sent
	 */
	vaddr = offset_to_nandc_reg(regs, first);

	/* writes to NAND_FLASH_CMD need CRCI flow control */
	if (first == NAND_FLASH_CMD)
		flow_control = true;

	/* ERASED_CW_DETECT_CFG keeps separate "set" and "clear" shadows */
	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	/*
	 * CMD1/VLD (and their *_RESTORE twins) map to version-specific
	 * register offsets
	 */
	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	size = num_regs * sizeof(u32);

	return prep_adm_dma_desc(nandc, false, first, vaddr, size,
				 flow_control);
}
911
912 /*
913  * read_data_dma:       prepares a DMA descriptor to transfer data from the
914  *                      controller's internal buffer to the buffer 'vaddr'
915  *
916  * @reg_off:            offset within the controller's data buffer
917  * @vaddr:              virtual address of the buffer we want to write to
918  * @size:               DMA transaction size in bytes
919  * @flags:              flags to control DMA descriptor preparation
920  */
921 static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
922                          const u8 *vaddr, int size, unsigned int flags)
923 {
924         if (nandc->props->is_bam)
925                 return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
926
927         return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
928 }
929
930 /*
931  * write_data_dma:      prepares a DMA descriptor to transfer data from
932  *                      'vaddr' to the controller's internal buffer
933  *
934  * @reg_off:            offset within the controller's data buffer
935  * @vaddr:              virtual address of the buffer we want to read from
936  * @size:               DMA transaction size in bytes
937  * @flags:              flags to control DMA descriptor preparation
938  */
939 static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
940                           const u8 *vaddr, int size, unsigned int flags)
941 {
942         if (nandc->props->is_bam)
943                 return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
944
945         return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
946 }
947
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	/* ADDR0/ADDR1, then CFG0/CFG1/ECC_CFG, then the ECC buffer config */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	/*
	 * write the "clear" and then the "set" shadow of
	 * ERASED_CW_DETECT_CFG (write_reg_dma picks the shadow based on
	 * the NAND_ERASED_CW_SET flag)
	 */
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
961
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	/* BAM controllers also need the per-codeword read locations */
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	/* issue the command and kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back FLASH_STATUS/BUFFER_STATUS plus erased-CW detection */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}
979
/*
 * Helper to prepare dma descriptors to configure registers needed for reading
 * a single codeword in page: the full page setup followed by one
 * codeword-read sequence.
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}
989
/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	/* ADDR0/ADDR1, then CFG0/CFG1/ECC_CFG, then the ECC buffer config */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
1001
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	/* issue the command and kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the operation status */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* clear sticky status bits for the next operation */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
1016
1017 /*
1018  * the following functions are used within chip->cmdfunc() to perform different
1019  * NAND_CMD_* commands
1020  */
1021
/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* one codeword of 512 data bytes, no spare bytes, ECC disabled */
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stash the original CMD1/VLD values so they can be restored below */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	/* pull the 512 parameter-page bytes into our local buffer */
	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1079
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* erase operates on a whole block: mask out the codewords-per-page */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	/* command + addresses, config, then kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the result, then clear the sticky status bits */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1108
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* nothing to do for the dummy address phase */
	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* only ADM controllers set the DM_EN bit here */
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* CMD + ADDR0/ADDR1 + CHIP_SELECT, then kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1132
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* issue the reset and kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back status so completion/errors can be checked later */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1149
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		/*
		 * roll any scatterlist entries that have not yet been
		 * turned into an async descriptor into one, per channel
		 */
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
			if (r)
				return r;
		}
	}

	/* submit in list order; 'cookie' holds the last submission */
	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		/*
		 * NOTE(review): only the cmd channel is waited on here —
		 * assumes the last submitted cookie belongs to cmd_chan and
		 * that its completion implies tx/rx completion; confirm
		 */
		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
1195
1196 static void free_descs(struct qcom_nand_controller *nandc)
1197 {
1198         struct desc_info *desc, *n;
1199
1200         list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
1201                 list_del(&desc->node);
1202
1203                 if (nandc->props->is_bam)
1204                         dma_unmap_sg(nandc->dev, desc->bam_sgl,
1205                                      desc->sgl_cnt, desc->dir);
1206                 else
1207                         dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
1208                                      desc->dir);
1209
1210                 kfree(desc);
1211         }
1212 }
1213
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
1220
1221 static void pre_command(struct qcom_nand_host *host, int command)
1222 {
1223         struct nand_chip *chip = &host->chip;
1224         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1225
1226         nandc->buf_count = 0;
1227         nandc->buf_start = 0;
1228         host->use_ecc = false;
1229         host->last_command = command;
1230
1231         clear_read_regs(nandc);
1232
1233         if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
1234             command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
1235                 clear_bam_transaction(nandc);
1236 }
1237
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* a page program issued one codeword per ECC step; erase just one */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		/* an MPU error indicates the device is write protected */
		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/*
		 * fail on any per-codeword operation error; the device
		 * status error is only valid on the last codeword
		 */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
1266
1267 static void post_command(struct qcom_nand_host *host, int command)
1268 {
1269         struct nand_chip *chip = &host->chip;
1270         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1271
1272         switch (command) {
1273         case NAND_CMD_READID:
1274                 nandc_read_buffer_sync(nandc, true);
1275                 memcpy(nandc->data_buffer, nandc->reg_read_buf,
1276                        nandc->buf_count);
1277                 break;
1278         case NAND_CMD_PAGEPROG:
1279         case NAND_CMD_ERASE1:
1280                 parse_erase_write_errors(host, command);
1281                 break;
1282         default:
1283                 break;
1284         }
1285 }
1286
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;	/* set when descs must be submitted now */
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		/* only record the address; data arrives via write_buf */
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	/* descriptors are always freed, whether submitted or not */
	free_descs(nandc);

	post_command(host, command);
}
1367
1368 /*
1369  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
1370  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1371  *
1372  * when using RS ECC, the HW reports the same erros when reading an erased CW,
1373  * but it notifies that it is an erased CW by placing special characters at
1374  * certain offsets in the buffer.
1375  *
1376  * verify if the page is erased or not, and fix up the page for RS ECC by
1377  * replacing the special characters with 0xff.
1378  */
1379 static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1380 {
1381         u8 empty1, empty2;
1382
1383         /*
1384          * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1385          * is erased by looking for 0x54s at offsets 3 and 175 from the
1386          * beginning of each codeword
1387          */
1388
1389         empty1 = data_buf[3];
1390         empty2 = data_buf[175];
1391
1392         /*
1393          * if the erased codework markers, if they exist override them with
1394          * 0xffs
1395          */
1396         if ((empty1 == 0x54 && empty2 == 0xff) ||
1397             (empty1 == 0xff && empty2 == 0x54)) {
1398                 data_buf[3] = 0xff;
1399                 data_buf[175] = 0xff;
1400         }
1401
1402         /*
1403          * check if the entire chunk contains 0xffs or not. if it doesn't, then
1404          * restore the original values at the special offsets
1405          */
1406         if (memchr_inv(data_buf, 0xff, data_len)) {
1407                 data_buf[3] = empty1;
1408                 data_buf[175] = empty2;
1409
1410                 return false;
1411         }
1412
1413         return true;
1414 }
1415
/*
 * per-codeword status words captured into reg_read_buf after each codeword
 * read (see config_nand_cw_read)
 */
struct read_stats {
	__le32 flash;		/* NAND_FLASH_STATUS */
	__le32 buffer;		/* NAND_BUFFER_STATUS */
	__le32 erased_cw;	/* NAND_ERASED_CW_DETECT_STATUS */
};
1421
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * Returns the maximum number of bitflips seen in any codeword, and updates
 * mtd->ecc_stats accordingly.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* one read_stats triple per codeword, in read order */
	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword carries less data and all the free OOB */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: the HW reports erased CWs directly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect (and fix up) marker bytes */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* no error: account the corrected bitflip count */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		/* advance to the next codeword's data/oob regions */
		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1513
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob(). Either of data_buf/oob_buf may be NULL to skip that part
 * of the transfer.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* last codeword: less data, plus the free OOB + ECC/spare */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* BAM needs explicit read locations per codeword */
		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1591
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* raw reads transfer the full codeword including ECC bytes */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* point the controller at the last codeword of the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1626
1627 /* implements ecc->read_page() */
1628 static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1629                                 uint8_t *buf, int oob_required, int page)
1630 {
1631         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1632         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1633         u8 *data_buf, *oob_buf = NULL;
1634         int ret;
1635
1636         data_buf = buf;
1637         oob_buf = oob_required ? chip->oob_poi : NULL;
1638
1639         clear_bam_transaction(nandc);
1640         ret = read_page_ecc(host, data_buf, oob_buf);
1641         if (ret) {
1642                 dev_err(nandc->dev, "failure to read page\n");
1643                 return ret;
1644         }
1645
1646         return parse_read_errors(host, data_buf, oob_buf);
1647 }
1648
1649 /* implements ecc->read_page_raw() */
1650 static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1651                                     struct nand_chip *chip, uint8_t *buf,
1652                                     int oob_required, int page)
1653 {
1654         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1655         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1656         u8 *data_buf, *oob_buf;
1657         struct nand_ecc_ctrl *ecc = &chip->ecc;
1658         int i, ret;
1659         int read_loc;
1660
1661         data_buf = buf;
1662         oob_buf = chip->oob_poi;
1663
1664         host->use_ecc = false;
1665
1666         clear_bam_transaction(nandc);
1667         update_rw_regs(host, ecc->steps, true);
1668         config_nand_page_read(nandc);
1669
1670         for (i = 0; i < ecc->steps; i++) {
1671                 int data_size1, data_size2, oob_size1, oob_size2;
1672                 int reg_off = FLASH_BUF_ACC;
1673
1674                 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1675                 oob_size1 = host->bbm_size;
1676
1677                 if (i == (ecc->steps - 1)) {
1678                         data_size2 = ecc->size - data_size1 -
1679                                      ((ecc->steps - 1) << 2);
1680                         oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1681                                     host->spare_bytes;
1682                 } else {
1683                         data_size2 = host->cw_data - data_size1;
1684                         oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1685                 }
1686
1687                 if (nandc->props->is_bam) {
1688                         read_loc = 0;
1689                         nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1690                         read_loc += data_size1;
1691
1692                         nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1693                         read_loc += oob_size1;
1694
1695                         nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1696                         read_loc += data_size2;
1697
1698                         nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1699                 }
1700
1701                 config_nand_cw_read(nandc);
1702
1703                 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1704                 reg_off += data_size1;
1705                 data_buf += data_size1;
1706
1707                 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1708                 reg_off += oob_size1;
1709                 oob_buf += oob_size1;
1710
1711                 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
1712                 reg_off += data_size2;
1713                 data_buf += data_size2;
1714
1715                 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
1716                 oob_buf += oob_size2;
1717         }
1718
1719         ret = submit_descs(nandc);
1720         if (ret)
1721                 dev_err(nandc->dev, "failure to read raw page\n");
1722
1723         free_descs(nandc);
1724
1725         return 0;
1726 }
1727
1728 /* implements ecc->read_oob() */
1729 static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1730                                int page)
1731 {
1732         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1733         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1734         struct nand_ecc_ctrl *ecc = &chip->ecc;
1735         int ret;
1736
1737         clear_read_regs(nandc);
1738         clear_bam_transaction(nandc);
1739
1740         host->use_ecc = true;
1741         set_address(host, 0, page);
1742         update_rw_regs(host, ecc->steps, true);
1743
1744         ret = read_page_ecc(host, NULL, chip->oob_poi);
1745         if (ret)
1746                 dev_err(nandc->dev, "failure to read oob\n");
1747
1748         return ret;
1749 }
1750
1751 /* implements ecc->write_page() */
1752 static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1753                                  const uint8_t *buf, int oob_required, int page)
1754 {
1755         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1756         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1757         struct nand_ecc_ctrl *ecc = &chip->ecc;
1758         u8 *data_buf, *oob_buf;
1759         int i, ret;
1760
1761         clear_read_regs(nandc);
1762         clear_bam_transaction(nandc);
1763
1764         data_buf = (u8 *)buf;
1765         oob_buf = chip->oob_poi;
1766
1767         host->use_ecc = true;
1768         update_rw_regs(host, ecc->steps, false);
1769         config_nand_page_write(nandc);
1770
1771         for (i = 0; i < ecc->steps; i++) {
1772                 int data_size, oob_size;
1773
1774                 if (i == (ecc->steps - 1)) {
1775                         data_size = ecc->size - ((ecc->steps - 1) << 2);
1776                         oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1777                                    host->spare_bytes;
1778                 } else {
1779                         data_size = host->cw_data;
1780                         oob_size = ecc->bytes;
1781                 }
1782
1783
1784                 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
1785                                i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
1786
1787                 /*
1788                  * when ECC is enabled, we don't really need to write anything
1789                  * to oob for the first n - 1 codewords since these oob regions
1790                  * just contain ECC bytes that's written by the controller
1791                  * itself. For the last codeword, we skip the bbm positions and
1792                  * write to the free oob area.
1793                  */
1794                 if (i == (ecc->steps - 1)) {
1795                         oob_buf += host->bbm_size;
1796
1797                         write_data_dma(nandc, FLASH_BUF_ACC + data_size,
1798                                        oob_buf, oob_size, 0);
1799                 }
1800
1801                 config_nand_cw_write(nandc);
1802
1803                 data_buf += data_size;
1804                 oob_buf += oob_size;
1805         }
1806
1807         ret = submit_descs(nandc);
1808         if (ret)
1809                 dev_err(nandc->dev, "failure to write page\n");
1810
1811         free_descs(nandc);
1812
1813         return ret;
1814 }
1815
/*
 * implements ecc->write_page_raw()
 *
 * writes the page with ECC disabled. mirrors the raw read path: each
 * codeword is written in two data chunks split around the bad block
 * marker position, followed by the remaining oob bytes.
 */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* data bytes before the BBM position, then the BBM bytes */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			/* last codeword also carries the free/spare oob */
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		/*
		 * NAND_BAM_NO_EOT keeps the BAM transfer open; only the
		 * final chunk of the codeword ends the transfer
		 */
		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
1883
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword(data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	clear_bam_transaction(nandc);
	/* stage the current last codeword (data + oob) in data_buffer */
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	/* write the merged codeword back to its original offset */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* commit the page program and wait for the chip's status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1944
1945 static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
1946 {
1947         struct nand_chip *chip = mtd_to_nand(mtd);
1948         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1949         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1950         struct nand_ecc_ctrl *ecc = &chip->ecc;
1951         int page, ret, bbpos, bad = 0;
1952         u32 flash_status;
1953
1954         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1955
1956         /*
1957          * configure registers for a raw sub page read, the address is set to
1958          * the beginning of the last codeword, we don't care about reading ecc
1959          * portion of oob. we just want the first few bytes from this codeword
1960          * that contains the BBM
1961          */
1962         host->use_ecc = false;
1963
1964         clear_bam_transaction(nandc);
1965         ret = copy_last_cw(host, page);
1966         if (ret)
1967                 goto err;
1968
1969         flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
1970
1971         if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
1972                 dev_warn(nandc->dev, "error when trying to read BBM\n");
1973                 goto err;
1974         }
1975
1976         bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
1977
1978         bad = nandc->data_buffer[bbpos] != 0xff;
1979
1980         if (chip->options & NAND_BUSWIDTH_16)
1981                 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
1982 err:
1983         return bad;
1984 }
1985
1986 static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
1987 {
1988         struct nand_chip *chip = mtd_to_nand(mtd);
1989         struct qcom_nand_host *host = to_qcom_nand_host(chip);
1990         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1991         struct nand_ecc_ctrl *ecc = &chip->ecc;
1992         int page, ret, status = 0;
1993
1994         clear_read_regs(nandc);
1995         clear_bam_transaction(nandc);
1996
1997         /*
1998          * to mark the BBM as bad, we flash the entire last codeword with 0s.
1999          * we don't care about the rest of the content in the codeword since
2000          * we aren't going to use this block again
2001          */
2002         memset(nandc->data_buffer, 0x00, host->cw_size);
2003
2004         page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2005
2006         /* prepare write */
2007         host->use_ecc = false;
2008         set_address(host, host->cw_size * (ecc->steps - 1), page);
2009         update_rw_regs(host, 1, false);
2010
2011         config_nand_page_write(nandc);
2012         write_data_dma(nandc, FLASH_BUF_ACC,
2013                        nandc->data_buffer, host->cw_size, 0);
2014         config_nand_cw_write(nandc);
2015
2016         ret = submit_descs(nandc);
2017
2018         free_descs(nandc);
2019
2020         if (ret) {
2021                 dev_err(nandc->dev, "failure to update BBM\n");
2022                 return -EIO;
2023         }
2024
2025         chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2026
2027         status = chip->waitfunc(mtd, chip);
2028
2029         return status & NAND_STATUS_FAIL ? -EIO : 0;
2030 }
2031
2032 /*
2033  * the three functions below implement chip->read_byte(), chip->read_buf()
2034  * and chip->write_buf() respectively. these aren't used for
2035  * reading/writing page data, they are used for smaller data like reading
2036  * id, status etc
2037  */
2038 static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2039 {
2040         struct nand_chip *chip = mtd_to_nand(mtd);
2041         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2042         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2043         u8 *buf = nandc->data_buffer;
2044         u8 ret = 0x0;
2045
2046         if (host->last_command == NAND_CMD_STATUS) {
2047                 ret = host->status;
2048
2049                 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2050
2051                 return ret;
2052         }
2053
2054         if (nandc->buf_start < nandc->buf_count)
2055                 ret = buf[nandc->buf_start++];
2056
2057         return ret;
2058 }
2059
2060 static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2061 {
2062         struct nand_chip *chip = mtd_to_nand(mtd);
2063         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2064         int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2065
2066         memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2067         nandc->buf_start += real_len;
2068 }
2069
2070 static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2071                                  int len)
2072 {
2073         struct nand_chip *chip = mtd_to_nand(mtd);
2074         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2075         int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2076
2077         memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2078
2079         nandc->buf_start += real_len;
2080 }
2081
2082 /* we support only one external chip for now */
2083 static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2084 {
2085         struct nand_chip *chip = mtd_to_nand(mtd);
2086         struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2087
2088         if (chipnr <= 0)
2089                 return;
2090
2091         dev_warn(nandc->dev, "invalid chip select\n");
2092 }
2093
2094 /*
2095  * NAND controller page layout info
2096  *
2097  * Layout with ECC enabled:
2098  *
2099  * |----------------------|  |---------------------------------|
2100  * |           xx.......yy|  |             *********xx.......yy|
2101  * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2102  * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2103  * |           xx.......yy|  |             *********xx.......yy|
2104  * |----------------------|  |---------------------------------|
2105  *     codeword 1,2..n-1                  codeword n
2106  *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2107  *
2108  * n = Number of codewords in the page
2109  * . = ECC bytes
2110  * * = Spare/free bytes
2111  * x = Unused byte(s)
2112  * y = Reserved byte(s)
2113  *
2114  * 2K page: n = 4, spare = 16 bytes
2115  * 4K page: n = 8, spare = 32 bytes
2116  * 8K page: n = 16, spare = 64 bytes
2117  *
2118  * the qcom nand controller operates at a sub page/codeword level. each
2119  * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2120  * the number of ECC bytes vary based on the ECC strength and the bus width.
2121  *
2122  * the first n - 1 codewords contains 516 bytes of user data, the remaining
2123  * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2124  * both user data and spare(oobavail) bytes that sum up to 516 bytes.
2125  *
2126  * When we access a page with ECC enabled, the reserved bytes(s) are not
2127  * accessible at all. When reading, we fill up these unreadable positions
2128  * with 0xffs. When writing, the controller skips writing the inaccessible
2129  * bytes.
2130  *
2131  * Layout with ECC disabled:
2132  *
2133  * |------------------------------|  |---------------------------------------|
2134  * |         yy          xx.......|  |         bb          *********xx.......|
2135  * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2136  * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2137  * |         yy          xx.......|  |         bb          *********xx.......|
2138  * |------------------------------|  |---------------------------------------|
2139  *         codeword 1,2..n-1                        codeword n
2140  *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2141  *
2142  * n = Number of codewords in the page
2143  * . = ECC bytes
2144  * * = Spare/free bytes
2145  * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
2147  * b = Real Bad Block byte(s)
2148  * size1/size2 = function of codeword size and 'n'
2149  *
2150  * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2151  * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2152  * Block Markers. In the last codeword, this position contains the real BBM
2153  *
2154  * In order to have a consistent layout between RAW and ECC modes, we assume
2155  * the following OOB layout arrangement:
2156  *
2157  * |-----------|  |--------------------|
2158  * |yyxx.......|  |bb*********xx.......|
2159  * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2160  * |yyxx.......|  |bb*********xx.......|
2161  * |yyxx.......|  |bb*********xx.......|
2162  * |-----------|  |--------------------|
2163  *  first n - 1       nth OOB region
2164  *  OOB regions
2165  *
2166  * n = Number of codewords in the page
2167  * . = ECC bytes
2168  * * = FREE OOB bytes
2169  * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2170  * x = Unused byte(s)
2171  * b = Real bad block byte(s) (inaccessible when ECC enabled)
2172  *
2173  * This layout is read as is when ECC is disabled. When ECC is enabled, the
2174  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2175  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2176  * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2177  * the sum of the three).
2178  */
2179 static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2180                                    struct mtd_oob_region *oobregion)
2181 {
2182         struct nand_chip *chip = mtd_to_nand(mtd);
2183         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2184         struct nand_ecc_ctrl *ecc = &chip->ecc;
2185
2186         if (section > 1)
2187                 return -ERANGE;
2188
2189         if (!section) {
2190                 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2191                                     host->bbm_size;
2192                 oobregion->offset = 0;
2193         } else {
2194                 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2195                 oobregion->offset = mtd->oobsize - oobregion->length;
2196         }
2197
2198         return 0;
2199 }
2200
2201 static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2202                                      struct mtd_oob_region *oobregion)
2203 {
2204         struct nand_chip *chip = mtd_to_nand(mtd);
2205         struct qcom_nand_host *host = to_qcom_nand_host(chip);
2206         struct nand_ecc_ctrl *ecc = &chip->ecc;
2207
2208         if (section)
2209                 return -ERANGE;
2210
2211         oobregion->length = ecc->steps * 4;
2212         oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2213
2214         return 0;
2215 }
2216
/* ooblayout ops exposing the ECC/free oob split described above */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
2221
/*
 * derive the per-host ECC geometry (BCH vs RS, ecc/spare/bbm byte counts)
 * from the requested strength and bus width, wire up the ecc callbacks,
 * and precompute the CFG0/CFG1/ECC register values used for ECC and raw
 * accesses. returns 0 on success or a negative errno for bad DT config.
 */
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte;
	bool wide_bus;
	int ecc_mode = 1;

	/*
	 * the controller requires each step consists of 512 bytes of data.
	 * bail out if DT has populated a wrong step size.
	 */
	if (ecc->size != NANDC_STEP_SIZE) {
		dev_err(nandc->dev, "invalid ecc size\n");
		return -EINVAL;
	}

	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses lesser bytes for ECC. If RS is used, the ECC bytes is
		 * always 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	/* hook up the page/oob accessors implemented above */
	ecc->read_page		= qcom_nandc_read_page;
	ecc->read_page_raw	= qcom_nandc_read_page_raw;
	ecc->read_oob		= qcom_nandc_read_oob;
	ecc->write_page		= qcom_nandc_write_page;
	ecc->write_page_raw	= qcom_nandc_write_page_raw;
	ecc->write_oob		= qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	/* track the largest codewords-per-page across all attached hosts */
	cwperpage = mtd->writesize / ecc->size;
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command protects
	 * spare data with ECC too. We protect spare data by default, so we set
	 * it to main + spare data, which are 512 and 4 bytes respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;

	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
		return -EINVAL;
	}

	/* 1-based byte position of the BBM within the last codeword */
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	/* CFG0/CFG1 for ECC-protected accesses */
	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 <<  CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	/* CFG0/CFG1 for raw (ECC disabled) accesses */
	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
2388
/*
 * Allocate everything the controller needs for DMA: the internal bounce
 * buffer, the register block, the register read-back buffer, and the DMA
 * channel(s) (three channels in BAM mode, one in ADM mode).
 *
 * On failure, resources acquired earlier in this function are NOT released
 * here; the caller (qcom_nandc_probe()) invokes qcom_nandc_unalloc(), which
 * checks each resource individually before releasing it, so a partially
 * initialized controller is tolerated. The devm_kzalloc() buffers are
 * device-managed and need no explicit release at all.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	/* the controller DMA-addresses at most 32 bits */
	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
					GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	/* buffer the controller DMAs register read results into */
	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
				GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		/* mapped once for the device lifetime; unmapped in unalloc */
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		/*
		 * NOTE(review): dma_request_slave_channel() returns NULL on
		 * any failure and cannot propagate -EPROBE_DEFER, so probe
		 * fails hard if the BAM DMA driver is not yet bound;
		 * dma_request_chan() would allow deferral — confirm before
		 * changing, as qcom_nandc_unalloc() relies on NULL checks.
		 */
		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		/* ADM mode uses a single bidirectional channel */
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}
2481
2482 static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2483 {
2484         if (nandc->props->is_bam) {
2485                 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2486                         dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2487                                          MAX_REG_RD *
2488                                          sizeof(*nandc->reg_read_buf),
2489                                          DMA_FROM_DEVICE);
2490
2491                 if (nandc->tx_chan)
2492                         dma_release_channel(nandc->tx_chan);
2493
2494                 if (nandc->rx_chan)
2495                         dma_release_channel(nandc->rx_chan);
2496
2497                 if (nandc->cmd_chan)
2498                         dma_release_channel(nandc->cmd_chan);
2499         } else {
2500                 if (nandc->chan)
2501                         dma_release_channel(nandc->chan);
2502         }
2503 }
2504
/*
 * One time setup of a few nand controller registers: disable the OneNAND
 * path, select the DMA engine mode, and latch the register defaults that
 * the command path restores later. The write order below is deliberate —
 * do not reorder.
 */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	/* program the command-valid register with its expected default */
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		/* BAM enable is a read-modify-write: preserve other bits */
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
2529
2530 static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2531                                struct qcom_nand_host *host,
2532                                struct device_node *dn)
2533 {
2534         struct nand_chip *chip = &host->chip;
2535         struct mtd_info *mtd = nand_to_mtd(chip);
2536         struct device *dev = nandc->dev;
2537         int ret;
2538
2539         ret = of_property_read_u32(dn, "reg", &host->cs);
2540         if (ret) {
2541                 dev_err(dev, "can't get chip-select\n");
2542                 return -ENXIO;
2543         }
2544
2545         nand_set_flash_node(chip, dn);
2546         mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2547         mtd->owner = THIS_MODULE;
2548         mtd->dev.parent = dev;
2549
2550         chip->cmdfunc           = qcom_nandc_command;
2551         chip->select_chip       = qcom_nandc_select_chip;
2552         chip->read_byte         = qcom_nandc_read_byte;
2553         chip->read_buf          = qcom_nandc_read_buf;
2554         chip->write_buf         = qcom_nandc_write_buf;
2555         chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2556         chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
2557
2558         /*
2559          * the bad block marker is readable only when we read the last codeword
2560          * of a page with ECC disabled. currently, the nand_base and nand_bbt
2561          * helpers don't allow us to read BB from a nand chip with ECC
2562          * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2563          * and block_markbad helpers until we permanently switch to using
2564          * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2565          */
2566         chip->block_bad         = qcom_nandc_block_bad;
2567         chip->block_markbad     = qcom_nandc_block_markbad;
2568
2569         chip->controller = &nandc->controller;
2570         chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2571                          NAND_SKIP_BBTSCAN;
2572
2573         /* set up initial status value */
2574         host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2575
2576         ret = nand_scan_ident(mtd, 1, NULL);
2577         if (ret)
2578                 return ret;
2579
2580         ret = qcom_nand_host_setup(host);
2581
2582         return ret;
2583 }
2584
2585 static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2586                                   struct qcom_nand_host *host,
2587                                   struct device_node *dn)
2588 {
2589         struct nand_chip *chip = &host->chip;
2590         struct mtd_info *mtd = nand_to_mtd(chip);
2591         int ret;
2592
2593         ret = nand_scan_tail(mtd);
2594         if (ret)
2595                 return ret;
2596
2597         ret = mtd_device_register(mtd, NULL, 0);
2598         if (ret)
2599                 nand_cleanup(mtd_to_nand(mtd));
2600
2601         return ret;
2602 }
2603
2604 static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2605 {
2606         struct device *dev = nandc->dev;
2607         struct device_node *dn = dev->of_node, *child;
2608         struct qcom_nand_host *host, *tmp;
2609         int ret;
2610
2611         for_each_available_child_of_node(dn, child) {
2612                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2613                 if (!host) {
2614                         of_node_put(child);
2615                         return -ENOMEM;
2616                 }
2617
2618                 ret = qcom_nand_host_init(nandc, host, child);
2619                 if (ret) {
2620                         devm_kfree(dev, host);
2621                         continue;
2622                 }
2623
2624                 list_add_tail(&host->node, &nandc->host_list);
2625         }
2626
2627         if (list_empty(&nandc->host_list))
2628                 return -ENODEV;
2629
2630         if (nandc->props->is_bam) {
2631                 free_bam_transaction(nandc);
2632                 nandc->bam_txn = alloc_bam_transaction(nandc);
2633                 if (!nandc->bam_txn) {
2634                         dev_err(nandc->dev,
2635                                 "failed to allocate bam transaction\n");
2636                         return -ENOMEM;
2637                 }
2638         }
2639
2640         list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
2641                 ret = qcom_nand_mtd_register(nandc, host, child);
2642                 if (ret) {
2643                         list_del(&host->node);
2644                         devm_kfree(dev, host);
2645                 }
2646         }
2647
2648         if (list_empty(&nandc->host_list))
2649                 return -ENODEV;
2650
2651         return 0;
2652 }
2653
2654 /* parse custom DT properties here */
2655 static int qcom_nandc_parse_dt(struct platform_device *pdev)
2656 {
2657         struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2658         struct device_node *np = nandc->dev->of_node;
2659         int ret;
2660
2661         if (!nandc->props->is_bam) {
2662                 ret = of_property_read_u32(np, "qcom,cmd-crci",
2663                                            &nandc->cmd_crci);
2664                 if (ret) {
2665                         dev_err(nandc->dev, "command CRCI unspecified\n");
2666                         return ret;
2667                 }
2668
2669                 ret = of_property_read_u32(np, "qcom,data-crci",
2670                                            &nandc->data_crci);
2671                 if (ret) {
2672                         dev_err(nandc->dev, "data CRCI unspecified\n");
2673                         return ret;
2674                 }
2675         }
2676
2677         return 0;
2678 }
2679
/*
 * Probe the NAND controller: map registers, look up clocks, allocate DMA
 * resources, enable clocks, configure the controller, and finally scan
 * and register the attached NAND devices. The goto labels unwind in
 * reverse acquisition order; note that the label names refer to the step
 * that FAILED, so err_core_clk both skips the clock disables and runs
 * qcom_nandc_unalloc() (which tolerates partial allocation).
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* per-variant properties from the of_match table */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/*
	 * NOTE(review): phys_to_dma() is normally reserved for DMA-mapping
	 * internals; a dma_map_resource()-style API may be more appropriate
	 * for obtaining the register block's DMA address — confirm.
	 */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2753
/*
 * Undo qcom_nandc_probe(): release every registered NAND chip first, then
 * the DMA resources, then the clocks — strict reverse of bring-up order.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	/* nand_release() both unregisters the mtd and cleans up the chip */
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
2769
/* IPQ806x: ADM DMA engine; 4-bit Reed-Solomon or 8-bit BCH ECC */
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};
2775
/* IPQ4019: BAM DMA engine; 4-bit or 8-bit BCH ECC */
static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};
2781
/* IPQ8074: BAM DMA engine; DEV_CMD registers live at offset 0x7000 */
static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};
2787
2788 /*
2789  * data will hold a struct pointer containing more differences once we support
2790  * more controller variants
2791  */
2792 static const struct of_device_id qcom_nandc_of_match[] = {
2793         {
2794                 .compatible = "qcom,ipq806x-nand",
2795                 .data = &ipq806x_nandc_props,
2796         },
2797         {
2798                 .compatible = "qcom,ipq4019-nand",
2799                 .data = &ipq4019_nandc_props,
2800         },
2801         {
2802                 .compatible = "qcom,ipq8074-nand",
2803                 .data = &ipq8074_nandc_props,
2804         },
2805         {}
2806 };
2807 MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2808
/* platform driver glue; matched against the of_match table above */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");