drivers/spi/spi-s3c64xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 //      Jaswinder Singh <jassi.brar@samsung.com>
5
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/interrupt.h>
9 #include <linux/delay.h>
10 #include <linux/clk.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/spi/spi.h>
16 #include <linux/gpio.h>
17 #include <linux/of.h>
18 #include <linux/of_gpio.h>
19
20 #include <linux/platform_data/spi-s3c64xx.h>
21
22 #define MAX_SPI_PORTS           6
23 #define S3C64XX_SPI_QUIRK_POLL          (1 << 0)
24 #define S3C64XX_SPI_QUIRK_CS_AUTO       (1 << 1)
25 #define AUTOSUSPEND_TIMEOUT     2000
26
27 /* Registers and bit-fields */
28
29 #define S3C64XX_SPI_CH_CFG              0x00
30 #define S3C64XX_SPI_CLK_CFG             0x04
31 #define S3C64XX_SPI_MODE_CFG            0x08
32 #define S3C64XX_SPI_SLAVE_SEL           0x0C
33 #define S3C64XX_SPI_INT_EN              0x10
34 #define S3C64XX_SPI_STATUS              0x14
35 #define S3C64XX_SPI_TX_DATA             0x18
36 #define S3C64XX_SPI_RX_DATA             0x1C
37 #define S3C64XX_SPI_PACKET_CNT          0x20
38 #define S3C64XX_SPI_PENDING_CLR         0x24
39 #define S3C64XX_SPI_SWAP_CFG            0x28
40 #define S3C64XX_SPI_FB_CLK              0x2C
41
42 #define S3C64XX_SPI_CH_HS_EN            (1<<6)  /* High Speed Enable */
43 #define S3C64XX_SPI_CH_SW_RST           (1<<5)
44 #define S3C64XX_SPI_CH_SLAVE            (1<<4)
45 #define S3C64XX_SPI_CPOL_L              (1<<3)
46 #define S3C64XX_SPI_CPHA_B              (1<<2)
47 #define S3C64XX_SPI_CH_RXCH_ON          (1<<1)
48 #define S3C64XX_SPI_CH_TXCH_ON          (1<<0)
49
50 #define S3C64XX_SPI_CLKSEL_SRCMSK       (3<<9)
51 #define S3C64XX_SPI_CLKSEL_SRCSHFT      9
52 #define S3C64XX_SPI_ENCLK_ENABLE        (1<<8)
53 #define S3C64XX_SPI_PSR_MASK            0xff
54
55 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE            (0<<29)
56 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD        (1<<29)
57 #define S3C64XX_SPI_MODE_CH_TSZ_WORD            (2<<29)
58 #define S3C64XX_SPI_MODE_CH_TSZ_MASK            (3<<29)
59 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE           (0<<17)
60 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD       (1<<17)
61 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD           (2<<17)
62 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK           (3<<17)
63 #define S3C64XX_SPI_MODE_RXDMA_ON               (1<<2)
64 #define S3C64XX_SPI_MODE_TXDMA_ON               (1<<1)
65 #define S3C64XX_SPI_MODE_4BURST                 (1<<0)
66
67 #define S3C64XX_SPI_SLAVE_AUTO                  (1<<1)
68 #define S3C64XX_SPI_SLAVE_SIG_INACT             (1<<0)
69 #define S3C64XX_SPI_SLAVE_NSC_CNT_2             (2<<4)
70
71 #define S3C64XX_SPI_INT_TRAILING_EN             (1<<6)
72 #define S3C64XX_SPI_INT_RX_OVERRUN_EN           (1<<5)
73 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN          (1<<4)
74 #define S3C64XX_SPI_INT_TX_OVERRUN_EN           (1<<3)
75 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN          (1<<2)
76 #define S3C64XX_SPI_INT_RX_FIFORDY_EN           (1<<1)
77 #define S3C64XX_SPI_INT_TX_FIFORDY_EN           (1<<0)
78
79 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR           (1<<5)
80 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR          (1<<4)
81 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR           (1<<3)
82 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR          (1<<2)
83 #define S3C64XX_SPI_ST_RX_FIFORDY               (1<<1)
84 #define S3C64XX_SPI_ST_TX_FIFORDY               (1<<0)
85
86 #define S3C64XX_SPI_PACKET_CNT_EN               (1<<16)
87
88 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR         (1<<4)
89 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR          (1<<3)
90 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR         (1<<2)
91 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR          (1<<1)
92 #define S3C64XX_SPI_PND_TRAILING_CLR            (1<<0)
93
94 #define S3C64XX_SPI_SWAP_RX_HALF_WORD           (1<<7)
95 #define S3C64XX_SPI_SWAP_RX_BYTE                (1<<6)
96 #define S3C64XX_SPI_SWAP_RX_BIT                 (1<<5)
97 #define S3C64XX_SPI_SWAP_RX_EN                  (1<<4)
98 #define S3C64XX_SPI_SWAP_TX_HALF_WORD           (1<<3)
99 #define S3C64XX_SPI_SWAP_TX_BYTE                (1<<2)
100 #define S3C64XX_SPI_SWAP_TX_BIT                 (1<<1)
101 #define S3C64XX_SPI_SWAP_TX_EN                  (1<<0)
102
103 #define S3C64XX_SPI_FBCLK_MSK                   (3<<0)
104
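/* Helpers to extract the per-port FIFO level and TX_DONE fields from the
 * SPI_STATUS register; the mask, offset and done-bit position come from
 * the per-SoC port configuration.
 */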
105 #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
106 #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
107                                 (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
108 #define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
109 #define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
110                                         FIFO_LVL_MASK(i))
111
112 #define S3C64XX_SPI_MAX_TRAILCNT        0x3ff
113 #define S3C64XX_SPI_TRAILCNT_OFF        19
114
115 #define S3C64XX_SPI_TRAILCNT            S3C64XX_SPI_MAX_TRAILCNT
116
117 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
118 #define is_polling(x)   (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
119
120 #define RXBUSY    (1<<2)
121 #define TXBUSY    (1<<3)
122
123 struct s3c64xx_spi_dma_data {
124         struct dma_chan *ch;
125         enum dma_transfer_direction direction;
126 };
127
128 /**
129  * struct s3c64xx_spi_port_config - SPI Controller hardware info
130  * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
131  * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
132  * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
133  * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
134  * @clk_from_cmu: True, if the controller does not include a clock mux and
135  *      prescaler unit.
136  *
137  * The Samsung s3c64xx SPI controller is used on various Samsung SoCs but
138  * differs in some aspects such as FIFO size and SPI bus clock setup. Such
139  * differences are described to the driver using this structure, which is
140  * provided as driver data to the driver.
141  */
142 struct s3c64xx_spi_port_config {
143         int     fifo_lvl_mask[MAX_SPI_PORTS];
144         int     rx_lvl_offset;
145         int     tx_st_done;
146         int     quirks;
147         bool    high_speed;
148         bool    clk_from_cmu;
149         bool    clk_ioclk;
150 };
151
152 /**
153  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
154  * @clk: Pointer to the spi clock.
155  * @src_clk: Pointer to the clock used to generate SPI signals.
156  * @ioclk: Pointer to the i/o clock between master and slave
157  * @master: Pointer to the SPI Protocol master.
158  * @cntrlr_info: Platform specific data for the controller this driver manages.
159  * @lock: Controller specific lock.
160  * @state: Set of FLAGS to indicate status.
161  * @rx_dma: DMA channel and direction used for Rx.
162  * @tx_dma: DMA channel and direction used for Tx.
163  * @sfr_start: BUS address of SPI controller regs.
164  * @regs: Pointer to ioremap'ed controller registers.
165  * @pdev: Pointer to the owning platform device.
166  * @xfer_completion: To indicate completion of xfer task.
167  * @cur_mode: Stores the active configuration of the controller.
168  * @cur_bpw: Stores the active bits per word settings.
169  * @cur_speed: Stores the active xfer clock speed.
170  */
171 struct s3c64xx_spi_driver_data {
172         void __iomem                    *regs;
173         struct clk                      *clk;
174         struct clk                      *src_clk;
175         struct clk                      *ioclk;
176         struct platform_device          *pdev;
177         struct spi_master               *master;
178         struct s3c64xx_spi_info  *cntrlr_info;
179         spinlock_t                      lock;
180         unsigned long                   sfr_start;
181         struct completion               xfer_completion;
182         unsigned                        state;
183         unsigned                        cur_mode, cur_bpw;
184         unsigned                        cur_speed;
185         struct s3c64xx_spi_dma_data     rx_dma;
186         struct s3c64xx_spi_dma_data     tx_dma;
187         struct s3c64xx_spi_port_config  *port_conf;
188         unsigned int                    port_id;
189 };
190
191 static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
192 {
193         void __iomem *regs = sdd->regs;
194         unsigned long loops;
195         u32 val;
196
197         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
198
199         val = readl(regs + S3C64XX_SPI_CH_CFG);
200         val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
201         writel(val, regs + S3C64XX_SPI_CH_CFG);
202
203         val = readl(regs + S3C64XX_SPI_CH_CFG);
204         val |= S3C64XX_SPI_CH_SW_RST;
205         val &= ~S3C64XX_SPI_CH_HS_EN;
206         writel(val, regs + S3C64XX_SPI_CH_CFG);
207
208         /* Flush TxFIFO */
209         loops = msecs_to_loops(1);
210         do {
211                 val = readl(regs + S3C64XX_SPI_STATUS);
212         } while (TX_FIFO_LVL(val, sdd) && loops--);
213
214         if (loops == 0)
215                 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
216
217         /* Flush RxFIFO */
218         loops = msecs_to_loops(1);
219         do {
220                 val = readl(regs + S3C64XX_SPI_STATUS);
221                 if (RX_FIFO_LVL(val, sdd))
222                         readl(regs + S3C64XX_SPI_RX_DATA);
223                 else
224                         break;
225         } while (loops--);
226
227         if (loops == 0)
228                 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
229
230         val = readl(regs + S3C64XX_SPI_CH_CFG);
231         val &= ~S3C64XX_SPI_CH_SW_RST;
232         writel(val, regs + S3C64XX_SPI_CH_CFG);
233
234         val = readl(regs + S3C64XX_SPI_MODE_CFG);
235         val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
236         writel(val, regs + S3C64XX_SPI_MODE_CFG);
237 }
238
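/*
 * DMA completion callback: clear the busy flag for the finished direction
 * and complete the transfer once neither Rx nor Tx is still busy.
 */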
239 static void s3c64xx_spi_dmacb(void *data)
240 {
241         struct s3c64xx_spi_driver_data *sdd;
242         struct s3c64xx_spi_dma_data *dma = data;
243         unsigned long flags;
244
245         if (dma->direction == DMA_DEV_TO_MEM)
246                 sdd = container_of(data,
247                         struct s3c64xx_spi_driver_data, rx_dma);
248         else
249                 sdd = container_of(data,
250                         struct s3c64xx_spi_driver_data, tx_dma);
251
252         spin_lock_irqsave(&sdd->lock, flags);
253
254         if (dma->direction == DMA_DEV_TO_MEM) {
255                 sdd->state &= ~RXBUSY;
256                 if (!(sdd->state & TXBUSY))
257                         complete(&sdd->xfer_completion);
258         } else {
259                 sdd->state &= ~TXBUSY;
260                 if (!(sdd->state & RXBUSY))
261                         complete(&sdd->xfer_completion);
262         }
263
264         spin_unlock_irqrestore(&sdd->lock, flags);
265 }
266
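/*
 * Configure the slave DMA channel for this direction and submit the
 * scatter-gather transfer; s3c64xx_spi_dmacb() is invoked on completion.
 */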
267 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
268                         struct sg_table *sgt)
269 {
270         struct s3c64xx_spi_driver_data *sdd;
271         struct dma_slave_config config;
272         struct dma_async_tx_descriptor *desc;
273
274         memset(&config, 0, sizeof(config));
275
276         if (dma->direction == DMA_DEV_TO_MEM) {
277                 sdd = container_of((void *)dma,
278                         struct s3c64xx_spi_driver_data, rx_dma);
279                 config.direction = dma->direction;
280                 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
281                 config.src_addr_width = sdd->cur_bpw / 8;
282                 config.src_maxburst = 1;
283                 dmaengine_slave_config(dma->ch, &config);
284         } else {
285                 sdd = container_of((void *)dma,
286                         struct s3c64xx_spi_driver_data, tx_dma);
287                 config.direction = dma->direction;
288                 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
289                 config.dst_addr_width = sdd->cur_bpw / 8;
290                 config.dst_maxburst = 1;
291                 dmaengine_slave_config(dma->ch, &config);
292         }
293
294         desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
295                                        dma->direction, DMA_PREP_INTERRUPT);
296
297         desc->callback = s3c64xx_spi_dmacb;
298         desc->callback_param = dma;
299
300         dmaengine_submit(desc);
301         dma_async_issue_pending(dma->ch);
302 }
303
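/*
 * Manual chip select control via the SLAVE_SEL register: writing 0 asserts
 * CS, SIG_INACT deasserts it. Controllers with the CS_AUTO quirk let the
 * hardware toggle CS around each transfer instead.
 */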
304 static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
305 {
306         struct s3c64xx_spi_driver_data *sdd =
307                                         spi_master_get_devdata(spi->master);
308
309         if (sdd->cntrlr_info->no_cs)
310                 return;
311
312         if (enable) {
313                 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
314                         writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
315                 } else {
316                         u32 ssel = readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL);
317
318                         ssel |= (S3C64XX_SPI_SLAVE_AUTO |
319                                                 S3C64XX_SPI_SLAVE_NSC_CNT_2);
320                         writel(ssel, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
321                 }
322         } else {
323                 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
324                         writel(S3C64XX_SPI_SLAVE_SIG_INACT,
325                                sdd->regs + S3C64XX_SPI_SLAVE_SEL);
326         }
327 }
328
329 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
330 {
331         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
332
333         if (is_polling(sdd))
334                 return 0;
335
336         spi->dma_rx = sdd->rx_dma.ch;
337         spi->dma_tx = sdd->tx_dma.ch;
338
339         return 0;
340 }
341
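/*
 * DMA is only worth it when the transfer does not fit into the FIFO;
 * (FIFO_LVL_MASK >> 1) + 1 is the FIFO depth in bytes.
 */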
342 static bool s3c64xx_spi_can_dma(struct spi_master *master,
343                                 struct spi_device *spi,
344                                 struct spi_transfer *xfer)
345 {
346         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
347
348         return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
349 }
350
351 static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
352                                     struct spi_transfer *xfer, int dma_mode)
353 {
354         void __iomem *regs = sdd->regs;
355         u32 modecfg, chcfg;
356
357         modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
358         modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
359
360         chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
361         chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
362
363         if (dma_mode) {
364                 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
365         } else {
366                 /* Always shift data into the RX FIFO, even if the xfer is
367                  * Tx only; this lets PCKT_CNT generate exactly the number
368                  * of clocks needed for the transfer.
369                  */
370                 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
371                 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
372                                         | S3C64XX_SPI_PACKET_CNT_EN,
373                                         regs + S3C64XX_SPI_PACKET_CNT);
374         }
375
376         if (xfer->tx_buf != NULL) {
377                 sdd->state |= TXBUSY;
378                 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
379                 if (dma_mode) {
380                         modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
381                         prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
382                 } else {
383                         switch (sdd->cur_bpw) {
384                         case 32:
385                                 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
386                                         xfer->tx_buf, xfer->len / 4);
387                                 break;
388                         case 16:
389                                 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
390                                         xfer->tx_buf, xfer->len / 2);
391                                 break;
392                         default:
393                                 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
394                                         xfer->tx_buf, xfer->len);
395                                 break;
396                         }
397                 }
398         }
399
400         if (xfer->rx_buf != NULL) {
401                 sdd->state |= RXBUSY;
402
403                 if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
404                                         && !(sdd->cur_mode & SPI_CPHA))
405                         chcfg |= S3C64XX_SPI_CH_HS_EN;
406
407                 if (dma_mode) {
408                         modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
409                         chcfg |= S3C64XX_SPI_CH_RXCH_ON;
410                         writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
411                                         | S3C64XX_SPI_PACKET_CNT_EN,
412                                         regs + S3C64XX_SPI_PACKET_CNT);
413                         prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
414                 }
415         }
416
417         writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
418         writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
419 }
420
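/*
 * Poll until the RX FIFO fills up to its full depth or the timeout expires,
 * then return how many bytes are actually available to read.
 */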
421 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
422                                         int timeout_ms)
423 {
424         void __iomem *regs = sdd->regs;
425         unsigned long val = 1;
426         u32 status;
427
428         /* max fifo depth available */
429         u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
430
431         if (timeout_ms)
432                 val = msecs_to_loops(timeout_ms);
433
434         do {
435                 status = readl(regs + S3C64XX_SPI_STATUS);
436         } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
437
438         /* return the actual received data length */
439         return RX_FIFO_LVL(status, sdd);
440 }
441
442 static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
443                                 struct spi_transfer *xfer)
444 {
445         void __iomem *regs = sdd->regs;
446         unsigned long val;
447         u32 status;
448         int ms;
449
450         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
451         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
452         ms += 10; /* some tolerance */
453
454         val = msecs_to_jiffies(ms) + 10;
455         val = wait_for_completion_timeout(&sdd->xfer_completion, val);
456
457         /*
458          * If the previous xfer completed within the timeout, proceed;
459          * otherwise return -EIO.
460          * DMA Tx completes as soon as the data is written into the FIFO,
461          * without waiting for the actual bus transmission to finish.
462          * DMA Rx completes only after the DMA has drained the FIFO, which
463          * requires the bus transmission to finish, so no extra wait is
464          * needed when the xfer involves Rx (with or without Tx).
465          */
466         if (val && !xfer->rx_buf) {
467                 val = msecs_to_loops(10);
468                 status = readl(regs + S3C64XX_SPI_STATUS);
469                 while ((TX_FIFO_LVL(status, sdd)
470                         || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
471                        && --val) {
472                         cpu_relax();
473                         status = readl(regs + S3C64XX_SPI_STATUS);
474                 }
475
476         }
477
478         /* If timed out while checking rx/tx status return error */
479         if (!val)
480                 return -EIO;
481
482         return 0;
483 }
484
485 static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
486                                 struct spi_transfer *xfer)
487 {
488         void __iomem *regs = sdd->regs;
489         unsigned long val;
490         u32 status;
491         int loops;
492         u32 cpy_len;
493         u8 *buf;
494         int ms;
495
496         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
497         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
498         ms += 10; /* some tolerance */
499
500         val = msecs_to_loops(ms);
501         do {
502                 status = readl(regs + S3C64XX_SPI_STATUS);
503         } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
504
505         if (!val)
506                 return -EIO;
507
508         /* If it was only Tx */
509         if (!xfer->rx_buf) {
510                 sdd->state &= ~TXBUSY;
511                 return 0;
512         }
513
514         /*
515          * If the receive length is bigger than the controller FIFO
516          * size, calculate how many times the FIFO must be read:
517          * loops = length / max FIFO size (derived from the FIFO
518          * level mask).
519          * For any length not larger than the FIFO size, the code
520          * below is executed at least once.
521          */
522         loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
523         buf = xfer->rx_buf;
524         do {
525                 /* wait for data to be received in the fifo */
526                 cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
527                                                        (loops ? ms : 0));
528
529                 switch (sdd->cur_bpw) {
530                 case 32:
531                         ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
532                                      buf, cpy_len / 4);
533                         break;
534                 case 16:
535                         ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
536                                      buf, cpy_len / 2);
537                         break;
538                 default:
539                         ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
540                                     buf, cpy_len);
541                         break;
542                 }
543
544                 buf = buf + cpy_len;
545         } while (loops--);
546         sdd->state &= ~RXBUSY;
547
548         return 0;
549 }
550
551 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
552 {
553         void __iomem *regs = sdd->regs;
554         u32 val;
555
556         /* Disable Clock */
557         if (!sdd->port_conf->clk_from_cmu) {
558                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
559                 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
560                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
561         }
562
563         /* Set Polarity and Phase */
564         val = readl(regs + S3C64XX_SPI_CH_CFG);
565         val &= ~(S3C64XX_SPI_CH_SLAVE |
566                         S3C64XX_SPI_CPOL_L |
567                         S3C64XX_SPI_CPHA_B);
568
569         if (sdd->cur_mode & SPI_CPOL)
570                 val |= S3C64XX_SPI_CPOL_L;
571
572         if (sdd->cur_mode & SPI_CPHA)
573                 val |= S3C64XX_SPI_CPHA_B;
574
575         writel(val, regs + S3C64XX_SPI_CH_CFG);
576
577         /* Set Channel & DMA Mode */
578         val = readl(regs + S3C64XX_SPI_MODE_CFG);
579         val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
580                         | S3C64XX_SPI_MODE_CH_TSZ_MASK);
581
582         switch (sdd->cur_bpw) {
583         case 32:
584                 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
585                 val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
586                 break;
587         case 16:
588                 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
589                 val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
590                 break;
591         default:
592                 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
593                 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
594                 break;
595         }
596
597         writel(val, regs + S3C64XX_SPI_MODE_CFG);
598
599         if (sdd->port_conf->clk_from_cmu) {
600                 /* The src_clk clock is divided internally by 2 */
601                 clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
602         } else {
603                 /* Configure Clock */
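                /* Effective SPI clock = src_clk / 2 / (PSR + 1) */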
604                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
605                 val &= ~S3C64XX_SPI_PSR_MASK;
606                 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
607                                 & S3C64XX_SPI_PSR_MASK);
608                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
609
610                 /* Enable Clock */
611                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
612                 val |= S3C64XX_SPI_ENCLK_ENABLE;
613                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
614         }
615 }
616
617 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
618
619 static int s3c64xx_spi_prepare_message(struct spi_master *master,
620                                        struct spi_message *msg)
621 {
622         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
623         struct spi_device *spi = msg->spi;
624         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
625
626         /* Configure feedback delay */
627         writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
628
629         return 0;
630 }
631
632 static int s3c64xx_spi_transfer_one(struct spi_master *master,
633                                     struct spi_device *spi,
634                                     struct spi_transfer *xfer)
635 {
636         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
637         const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
638         const void *tx_buf = NULL;
639         void *rx_buf = NULL;
640         int target_len = 0, origin_len = 0;
641         int use_dma = 0;
642         int status;
643         u32 speed;
644         u8 bpw;
645         unsigned long flags;
646
647         reinit_completion(&sdd->xfer_completion);
648
649         /* Only BPW and Speed may change across transfers */
650         bpw = xfer->bits_per_word;
651         speed = xfer->speed_hz;
652
653         if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
654                 sdd->cur_bpw = bpw;
655                 sdd->cur_speed = speed;
656                 sdd->cur_mode = spi->mode;
657                 s3c64xx_spi_config(sdd);
658         }
659
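        /*
         * Transfers larger than the FIFO go through DMA when channels are
         * available; in polling mode they are instead split into FIFO-sized
         * chunks by the loop below.
         */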
660         if (!is_polling(sdd) && (xfer->len > fifo_len) &&
661             sdd->rx_dma.ch && sdd->tx_dma.ch) {
662                 use_dma = 1;
663
664         } else if (is_polling(sdd) && xfer->len > fifo_len) {
665                 tx_buf = xfer->tx_buf;
666                 rx_buf = xfer->rx_buf;
667                 origin_len = xfer->len;
668
669                 target_len = xfer->len;
670                 if (xfer->len > fifo_len)
671                         xfer->len = fifo_len;
672         }
673
674         do {
675                 spin_lock_irqsave(&sdd->lock, flags);
676
677                 /* Clear busy flags; only the directions in use are set below */
678                 sdd->state &= ~RXBUSY;
679                 sdd->state &= ~TXBUSY;
680
681                 s3c64xx_enable_datapath(sdd, xfer, use_dma);
682
683                 /* Start the signals */
684                 s3c64xx_spi_set_cs(spi, true);
685
686                 spin_unlock_irqrestore(&sdd->lock, flags);
687
688                 if (use_dma)
689                         status = s3c64xx_wait_for_dma(sdd, xfer);
690                 else
691                         status = s3c64xx_wait_for_pio(sdd, xfer);
692
693                 if (status) {
694                         dev_err(&spi->dev,
695                                 "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
696                                 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
697                                 (sdd->state & RXBUSY) ? 'f' : 'p',
698                                 (sdd->state & TXBUSY) ? 'f' : 'p',
699                                 xfer->len);
700
701                         if (use_dma) {
702                                 if (xfer->tx_buf && (sdd->state & TXBUSY))
703                                         dmaengine_terminate_all(sdd->tx_dma.ch);
704                                 if (xfer->rx_buf && (sdd->state & RXBUSY))
705                                         dmaengine_terminate_all(sdd->rx_dma.ch);
706                         }
707                 } else {
708                         s3c64xx_flush_fifo(sdd);
709                 }
710                 if (target_len > 0) {
711                         target_len -= xfer->len;
712
713                         if (xfer->tx_buf)
714                                 xfer->tx_buf += xfer->len;
715
716                         if (xfer->rx_buf)
717                                 xfer->rx_buf += xfer->len;
718
719                         if (target_len > fifo_len)
720                                 xfer->len = fifo_len;
721                         else
722                                 xfer->len = target_len;
723                 }
724         } while (target_len > 0);
725
726         if (origin_len) {
727                 /* Restore original xfer buffers and length */
728                 xfer->tx_buf = tx_buf;
729                 xfer->rx_buf = rx_buf;
730                 xfer->len = origin_len;
731         }
732
733         return status;
734 }
735
736 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
737                                 struct spi_device *spi)
738 {
739         struct s3c64xx_spi_csinfo *cs;
740         struct device_node *slave_np, *data_np = NULL;
741         u32 fb_delay = 0;
742
743         slave_np = spi->dev.of_node;
744         if (!slave_np) {
745                 dev_err(&spi->dev, "device node not found\n");
746                 return ERR_PTR(-EINVAL);
747         }
748
749         data_np = of_get_child_by_name(slave_np, "controller-data");
750         if (!data_np) {
751                 dev_err(&spi->dev, "child node 'controller-data' not found\n");
752                 return ERR_PTR(-EINVAL);
753         }
754
755         cs = kzalloc(sizeof(*cs), GFP_KERNEL);
756         if (!cs) {
757                 of_node_put(data_np);
758                 return ERR_PTR(-ENOMEM);
759         }
760
761         of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
762         cs->fb_delay = fb_delay;
763         of_node_put(data_np);
764         return cs;
765 }
766
767 /*
768  * Here we only check the validity of requested configuration
769  * and save the configuration in a local data-structure.
770  * The controller is actually configured only just before we
771  * get a message to transfer.
772  */
773 static int s3c64xx_spi_setup(struct spi_device *spi)
774 {
775         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
776         struct s3c64xx_spi_driver_data *sdd;
777         int err;
778
779         sdd = spi_master_get_devdata(spi->master);
780         if (spi->dev.of_node) {
781                 cs = s3c64xx_get_slave_ctrldata(spi);
782                 spi->controller_data = cs;
783         } else if (cs) {
784                 /* On non-DT platforms the SPI core will set spi->cs_gpio
785                  * to -ENOENT. The GPIO pin used to drive the chip select
786                  * is defined via platform data, so spi->cs_gpio has to be
787                  * overridden with the proper GPIO pin number.
788                  */
789                 spi->cs_gpio = cs->line;
790         }
791
792         if (IS_ERR_OR_NULL(cs)) {
793                 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
794                 return -ENODEV;
795         }
796
797         if (!spi_get_ctldata(spi)) {
798                 if (gpio_is_valid(spi->cs_gpio)) {
799                         err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
800                                                dev_name(&spi->dev));
801                         if (err) {
802                                 dev_err(&spi->dev,
803                                         "Failed to get /CS gpio [%d]: %d\n",
804                                         spi->cs_gpio, err);
805                                 goto err_gpio_req;
806                         }
807                 }
808
809                 spi_set_ctldata(spi, cs);
810         }
811
812         pm_runtime_get_sync(&sdd->pdev->dev);
813
814         /* Check if we can provide the requested rate */
815         if (!sdd->port_conf->clk_from_cmu) {
816                 u32 psr, speed;
817
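                /*
                 * The achievable rate is src_clk / 2 / (PSR + 1) with an
                 * 8-bit PSR; clamp max_speed_hz to what can be programmed.
                 */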
818                 /* Max possible */
819                 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
820
821                 if (spi->max_speed_hz > speed)
822                         spi->max_speed_hz = speed;
823
824                 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
825                 psr &= S3C64XX_SPI_PSR_MASK;
826                 if (psr == S3C64XX_SPI_PSR_MASK)
827                         psr--;
828
829                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
830                 if (spi->max_speed_hz < speed) {
831                         if (psr+1 < S3C64XX_SPI_PSR_MASK) {
832                                 psr++;
833                         } else {
834                                 err = -EINVAL;
835                                 goto setup_exit;
836                         }
837                 }
838
839                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
840                 if (spi->max_speed_hz >= speed) {
841                         spi->max_speed_hz = speed;
842                 } else {
843                         dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
844                                 spi->max_speed_hz);
845                         err = -EINVAL;
846                         goto setup_exit;
847                 }
848         }
849
850         pm_runtime_mark_last_busy(&sdd->pdev->dev);
851         pm_runtime_put_autosuspend(&sdd->pdev->dev);
852         s3c64xx_spi_set_cs(spi, false);
853
854         return 0;
855
856 setup_exit:
857         pm_runtime_mark_last_busy(&sdd->pdev->dev);
858         pm_runtime_put_autosuspend(&sdd->pdev->dev);
859         /* setup() returns with device de-selected */
860         s3c64xx_spi_set_cs(spi, false);
861
862         if (gpio_is_valid(spi->cs_gpio))
863                 gpio_free(spi->cs_gpio);
864         spi_set_ctldata(spi, NULL);
865
866 err_gpio_req:
867         if (spi->dev.of_node)
868                 kfree(cs);
869
870         return err;
871 }
872
873 static void s3c64xx_spi_cleanup(struct spi_device *spi)
874 {
875         struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
876
877         if (gpio_is_valid(spi->cs_gpio)) {
878                 gpio_free(spi->cs_gpio);
879                 if (spi->dev.of_node)
880                         kfree(cs);
881                 else {
882                         /* On non-DT platforms, the SPI core sets
883                          * spi->cs_gpio to -ENOENT and .setup()
884                          * overrides it with the GPIO pin value
885                          * passed using platform data.
886                          */
887                         spi->cs_gpio = -ENOENT;
888                 }
889         }
890
891         spi_set_ctldata(spi, NULL);
892 }
893
894 static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
895 {
896         struct s3c64xx_spi_driver_data *sdd = data;
897         struct spi_master *spi = sdd->master;
898         unsigned int val, clr = 0;
899
900         val = readl(sdd->regs + S3C64XX_SPI_STATUS);
901
902         if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
903                 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
904                 dev_err(&spi->dev, "RX overrun\n");
905         }
906         if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
907                 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
908                 dev_err(&spi->dev, "RX underrun\n");
909         }
910         if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
911                 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
912                 dev_err(&spi->dev, "TX overrun\n");
913         }
914         if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
915                 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
916                 dev_err(&spi->dev, "TX underrun\n");
917         }
918
919         /* Clear the pending irq by setting and then clearing it */
920         writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
921         writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
922
923         return IRQ_HANDLED;
924 }
925
926 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
927 {
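/*
 * Put the controller into a known default state: park the chip select, mask
 * all interrupts, select the source clock, clear pending status bits and
 * flush the FIFOs. Called from probe and on runtime resume.
 */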
928         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
929         void __iomem *regs = sdd->regs;
930         unsigned int val;
931
932         sdd->cur_speed = 0;
933
934         if (sci->no_cs)
935                 writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
936         else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
937                 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
938
939         /* Disable Interrupts - we use Polling if not DMA mode */
940         writel(0, regs + S3C64XX_SPI_INT_EN);
941
942         if (!sdd->port_conf->clk_from_cmu)
943                 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
944                                 regs + S3C64XX_SPI_CLK_CFG);
945         writel(0, regs + S3C64XX_SPI_MODE_CFG);
946         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
947
948         /* Clear any pending irq bits; they must be set and then cleared */
949         val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
950                 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
951                 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
952                 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
953         writel(val, regs + S3C64XX_SPI_PENDING_CLR);
954         writel(0, regs + S3C64XX_SPI_PENDING_CLR);
955
956         writel(0, regs + S3C64XX_SPI_SWAP_CFG);
957
958         val = readl(regs + S3C64XX_SPI_MODE_CFG);
959         val &= ~S3C64XX_SPI_MODE_4BURST;
960         val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
961         val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
962         writel(val, regs + S3C64XX_SPI_MODE_CFG);
963
964         s3c64xx_flush_fifo(sdd);
965 }
966
967 #ifdef CONFIG_OF
968 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
969 {
970         struct s3c64xx_spi_info *sci;
971         u32 temp;
972
973         sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
974         if (!sci)
975                 return ERR_PTR(-ENOMEM);
976
977         if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
978                 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
979                 sci->src_clk_nr = 0;
980         } else {
981                 sci->src_clk_nr = temp;
982         }
983
984         if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
985                 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
986                 sci->num_cs = 1;
987         } else {
988                 sci->num_cs = temp;
989         }
990
991         sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
992
993         return sci;
994 }
995 #else
996 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
997 {
998         return dev_get_platdata(dev);
999 }
1000 #endif
1001
1002 static const struct of_device_id s3c64xx_spi_dt_match[];
1003
1004 static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1005                                                 struct platform_device *pdev)
1006 {
1007 #ifdef CONFIG_OF
1008         if (pdev->dev.of_node) {
1009                 const struct of_device_id *match;
1010                 match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
1011                 return (struct s3c64xx_spi_port_config *)match->data;
1012         }
1013 #endif
1014         return (struct s3c64xx_spi_port_config *)
1015                          platform_get_device_id(pdev)->driver_data;
1016 }
1017
1018 static int s3c64xx_spi_probe(struct platform_device *pdev)
1019 {
1020         struct resource *mem_res;
1021         struct s3c64xx_spi_driver_data *sdd;
1022         struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1023         struct spi_master *master;
1024         int ret, irq;
1025         char clk_name[16];
1026
1027         if (!sci && pdev->dev.of_node) {
1028                 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1029                 if (IS_ERR(sci))
1030                         return PTR_ERR(sci);
1031         }
1032
1033         if (!sci) {
1034                 dev_err(&pdev->dev, "platform_data missing!\n");
1035                 return -ENODEV;
1036         }
1037
1038         mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1039         if (mem_res == NULL) {
1040                 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1041                 return -ENXIO;
1042         }
1043
1044         irq = platform_get_irq(pdev, 0);
1045         if (irq < 0) {
1046                 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1047                 return irq;
1048         }
1049
1050         master = spi_alloc_master(&pdev->dev,
1051                                 sizeof(struct s3c64xx_spi_driver_data));
1052         if (master == NULL) {
1053                 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1054                 return -ENOMEM;
1055         }
1056
1057         platform_set_drvdata(pdev, master);
1058
1059         sdd = spi_master_get_devdata(master);
1060         sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1061         sdd->master = master;
1062         sdd->cntrlr_info = sci;
1063         sdd->pdev = pdev;
1064         sdd->sfr_start = mem_res->start;
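        /* The bus number comes from the "spi" DT alias, or pdev->id on non-DT */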
1065         if (pdev->dev.of_node) {
1066                 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1067                 if (ret < 0) {
1068                         dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1069                                 ret);
1070                         goto err_deref_master;
1071                 }
1072                 sdd->port_id = ret;
1073         } else {
1074                 sdd->port_id = pdev->id;
1075         }
1076
1077         sdd->cur_bpw = 8;
1078
1079         sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1080         sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1081
1082         master->dev.of_node = pdev->dev.of_node;
1083         master->bus_num = sdd->port_id;
1084         master->setup = s3c64xx_spi_setup;
1085         master->cleanup = s3c64xx_spi_cleanup;
1086         master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1087         master->prepare_message = s3c64xx_spi_prepare_message;
1088         master->transfer_one = s3c64xx_spi_transfer_one;
1089         master->num_chipselect = sci->num_cs;
1090         master->dma_alignment = 8;
1091         master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1092                                         SPI_BPW_MASK(8);
1093         /* the spi->mode bits understood by this driver: */
1094         master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1095         master->auto_runtime_pm = true;
1096         if (!is_polling(sdd))
1097                 master->can_dma = s3c64xx_spi_can_dma;
1098
1099         sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1100         if (IS_ERR(sdd->regs)) {
1101                 ret = PTR_ERR(sdd->regs);
1102                 goto err_deref_master;
1103         }
1104
1105         if (sci->cfg_gpio && sci->cfg_gpio()) {
1106                 dev_err(&pdev->dev, "Unable to config gpio\n");
1107                 ret = -EBUSY;
1108                 goto err_deref_master;
1109         }
1110
1111         /* Setup clocks */
1112         sdd->clk = devm_clk_get(&pdev->dev, "spi");
1113         if (IS_ERR(sdd->clk)) {
1114                 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1115                 ret = PTR_ERR(sdd->clk);
1116                 goto err_deref_master;
1117         }
1118
1119         ret = clk_prepare_enable(sdd->clk);
1120         if (ret) {
1121                 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1122                 goto err_deref_master;
1123         }
1124
1125         sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1126         sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1127         if (IS_ERR(sdd->src_clk)) {
1128                 dev_err(&pdev->dev,
1129                         "Unable to acquire clock '%s'\n", clk_name);
1130                 ret = PTR_ERR(sdd->src_clk);
1131                 goto err_disable_clk;
1132         }
1133
1134         ret = clk_prepare_enable(sdd->src_clk);
1135         if (ret) {
1136                 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1137                 goto err_disable_clk;
1138         }
1139
1140         if (sdd->port_conf->clk_ioclk) {
1141                 sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
1142                 if (IS_ERR(sdd->ioclk)) {
1143                         dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
1144                         ret = PTR_ERR(sdd->ioclk);
1145                         goto err_disable_src_clk;
1146                 }
1147
1148                 ret = clk_prepare_enable(sdd->ioclk);
1149                 if (ret) {
1150                         dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
1151                         goto err_disable_src_clk;
1152                 }
1153         }
1154
1155         if (!is_polling(sdd)) {
1156                 /* Acquire DMA channels */
1157                 sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
1158                                                                   "rx");
1159                 if (IS_ERR(sdd->rx_dma.ch)) {
1160                         dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
1161                         ret = PTR_ERR(sdd->rx_dma.ch);
1162                         goto err_disable_io_clk;
1163                 }
1164                 sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
1165                                                                   "tx");
1166                 if (IS_ERR(sdd->tx_dma.ch)) {
1167                         dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
1168                         ret = PTR_ERR(sdd->tx_dma.ch);
1169                         goto err_release_rx_dma;
1170                 }
1171         }
1172
1173         pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1174         pm_runtime_use_autosuspend(&pdev->dev);
1175         pm_runtime_set_active(&pdev->dev);
1176         pm_runtime_enable(&pdev->dev);
1177         pm_runtime_get_sync(&pdev->dev);
1178
1179         /* Set up default mode */
1180         s3c64xx_spi_hwinit(sdd);
1181
1182         spin_lock_init(&sdd->lock);
1183         init_completion(&sdd->xfer_completion);
1184
1185         ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1186                                 "spi-s3c64xx", sdd);
1187         if (ret != 0) {
1188                 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1189                         irq, ret);
1190                 goto err_pm_put;
1191         }
1192
1193         writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1194                S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1195                sdd->regs + S3C64XX_SPI_INT_EN);
1196
1197         ret = devm_spi_register_master(&pdev->dev, master);
1198         if (ret != 0) {
1199                 dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1200                 goto err_pm_put;
1201         }
1202
1203         dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1204                                         sdd->port_id, master->num_chipselect);
1205         dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
1206                                         mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
1207
1208         pm_runtime_mark_last_busy(&pdev->dev);
1209         pm_runtime_put_autosuspend(&pdev->dev);
1210
1211         return 0;
1212
1213 err_pm_put:
1214         pm_runtime_put_noidle(&pdev->dev);
1215         pm_runtime_disable(&pdev->dev);
1216         pm_runtime_set_suspended(&pdev->dev);
1217
1218         if (!is_polling(sdd))
1219                 dma_release_channel(sdd->tx_dma.ch);
1220 err_release_rx_dma:
1221         if (!is_polling(sdd))
1222                 dma_release_channel(sdd->rx_dma.ch);
1223 err_disable_io_clk:
1224         clk_disable_unprepare(sdd->ioclk);
1225 err_disable_src_clk:
1226         clk_disable_unprepare(sdd->src_clk);
1227 err_disable_clk:
1228         clk_disable_unprepare(sdd->clk);
1229 err_deref_master:
1230         spi_master_put(master);
1231
1232         return ret;
1233 }
1234
1235 static int s3c64xx_spi_remove(struct platform_device *pdev)
1236 {
1237         struct spi_master *master = platform_get_drvdata(pdev);
1238         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1239
1240         pm_runtime_get_sync(&pdev->dev);
1241
1242         writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1243
1244         if (!is_polling(sdd)) {
1245                 dma_release_channel(sdd->rx_dma.ch);
1246                 dma_release_channel(sdd->tx_dma.ch);
1247         }
1248
1249         clk_disable_unprepare(sdd->ioclk);
1250
1251         clk_disable_unprepare(sdd->src_clk);
1252
1253         clk_disable_unprepare(sdd->clk);
1254
1255         pm_runtime_put_noidle(&pdev->dev);
1256         pm_runtime_disable(&pdev->dev);
1257         pm_runtime_set_suspended(&pdev->dev);
1258
1259         return 0;
1260 }
1261
1262 #ifdef CONFIG_PM_SLEEP
1263 static int s3c64xx_spi_suspend(struct device *dev)
1264 {
1265         struct spi_master *master = dev_get_drvdata(dev);
1266         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1267
1268         int ret = spi_master_suspend(master);
1269         if (ret)
1270                 return ret;
1271
1272         ret = pm_runtime_force_suspend(dev);
1273         if (ret < 0)
1274                 return ret;
1275
1276         sdd->cur_speed = 0; /* Output Clock is stopped */
1277
1278         return 0;
1279 }
1280
1281 static int s3c64xx_spi_resume(struct device *dev)
1282 {
1283         struct spi_master *master = dev_get_drvdata(dev);
1284         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1285         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1286         int ret;
1287
1288         if (sci->cfg_gpio)
1289                 sci->cfg_gpio();
1290
1291         ret = pm_runtime_force_resume(dev);
1292         if (ret < 0)
1293                 return ret;
1294
1295         return spi_master_resume(master);
1296 }
1297 #endif /* CONFIG_PM_SLEEP */
1298
1299 #ifdef CONFIG_PM
1300 static int s3c64xx_spi_runtime_suspend(struct device *dev)
1301 {
1302         struct spi_master *master = dev_get_drvdata(dev);
1303         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1304
1305         clk_disable_unprepare(sdd->clk);
1306         clk_disable_unprepare(sdd->src_clk);
1307         clk_disable_unprepare(sdd->ioclk);
1308
1309         return 0;
1310 }
1311
1312 static int s3c64xx_spi_runtime_resume(struct device *dev)
1313 {
1314         struct spi_master *master = dev_get_drvdata(dev);
1315         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1316         int ret;
1317
1318         if (sdd->port_conf->clk_ioclk) {
1319                 ret = clk_prepare_enable(sdd->ioclk);
1320                 if (ret != 0)
1321                         return ret;
1322         }
1323
1324         ret = clk_prepare_enable(sdd->src_clk);
1325         if (ret != 0)
1326                 goto err_disable_ioclk;
1327
1328         ret = clk_prepare_enable(sdd->clk);
1329         if (ret != 0)
1330                 goto err_disable_src_clk;
1331
1332         s3c64xx_spi_hwinit(sdd);
1333
1334         return 0;
1335
1336 err_disable_src_clk:
1337         clk_disable_unprepare(sdd->src_clk);
1338 err_disable_ioclk:
1339         clk_disable_unprepare(sdd->ioclk);
1340
1341         return ret;
1342 }
1343 #endif /* CONFIG_PM */
1344
1345 static const struct dev_pm_ops s3c64xx_spi_pm = {
1346         SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1347         SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1348                            s3c64xx_spi_runtime_resume, NULL)
1349 };
1350
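/*
 * Per-SoC port configurations; fifo_lvl_mask has one entry per port and is
 * indexed by the port id taken from the DT alias or the platform device id.
 */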
1351 static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
1352         .fifo_lvl_mask  = { 0x7f },
1353         .rx_lvl_offset  = 13,
1354         .tx_st_done     = 21,
1355         .high_speed     = true,
1356 };
1357
1358 static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
1359         .fifo_lvl_mask  = { 0x7f, 0x7F },
1360         .rx_lvl_offset  = 13,
1361         .tx_st_done     = 21,
1362 };
1363
1364 static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
1365         .fifo_lvl_mask  = { 0x1ff, 0x7F },
1366         .rx_lvl_offset  = 15,
1367         .tx_st_done     = 25,
1368         .high_speed     = true,
1369 };
1370
1371 static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
1372         .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F },
1373         .rx_lvl_offset  = 15,
1374         .tx_st_done     = 25,
1375         .high_speed     = true,
1376         .clk_from_cmu   = true,
1377 };
1378
1379 static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
1380         .fifo_lvl_mask  = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
1381         .rx_lvl_offset  = 15,
1382         .tx_st_done     = 25,
1383         .high_speed     = true,
1384         .clk_from_cmu   = true,
1385         .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1386 };
1387
1388 static struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
1389         .fifo_lvl_mask  = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
1390         .rx_lvl_offset  = 15,
1391         .tx_st_done     = 25,
1392         .high_speed     = true,
1393         .clk_from_cmu   = true,
1394         .clk_ioclk      = true,
1395         .quirks         = S3C64XX_SPI_QUIRK_CS_AUTO,
1396 };
1397
1398 static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
1399         {
1400                 .name           = "s3c2443-spi",
1401                 .driver_data    = (kernel_ulong_t)&s3c2443_spi_port_config,
1402         }, {
1403                 .name           = "s3c6410-spi",
1404                 .driver_data    = (kernel_ulong_t)&s3c6410_spi_port_config,
1405         },
1406         { },
1407 };
1408
1409 static const struct of_device_id s3c64xx_spi_dt_match[] = {
1410         { .compatible = "samsung,s3c2443-spi",
1411                         .data = (void *)&s3c2443_spi_port_config,
1412         },
1413         { .compatible = "samsung,s3c6410-spi",
1414                         .data = (void *)&s3c6410_spi_port_config,
1415         },
1416         { .compatible = "samsung,s5pv210-spi",
1417                         .data = (void *)&s5pv210_spi_port_config,
1418         },
1419         { .compatible = "samsung,exynos4210-spi",
1420                         .data = (void *)&exynos4_spi_port_config,
1421         },
1422         { .compatible = "samsung,exynos7-spi",
1423                         .data = (void *)&exynos7_spi_port_config,
1424         },
1425         { .compatible = "samsung,exynos5433-spi",
1426                         .data = (void *)&exynos5433_spi_port_config,
1427         },
1428         { },
1429 };
1430 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1431
1432 static struct platform_driver s3c64xx_spi_driver = {
1433         .driver = {
1434                 .name   = "s3c64xx-spi",
1435                 .pm = &s3c64xx_spi_pm,
1436                 .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
1437         },
1438         .probe = s3c64xx_spi_probe,
1439         .remove = s3c64xx_spi_remove,
1440         .id_table = s3c64xx_spi_driver_ids,
1441 };
1442 MODULE_ALIAS("platform:s3c64xx-spi");
1443
1444 module_platform_driver(s3c64xx_spi_driver);
1445
1446 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1447 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1448 MODULE_LICENSE("GPL");