1 /*
2  * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/device.h>
22 #include <linux/ioport.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/platform_device.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/spi/spi.h>
28 #include <linux/workqueue.h>
29 #include <linux/delay.h>
30
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <asm/hardware.h>
34 #include <asm/delay.h>
35 #include <asm/dma.h>
36
37 #include <asm/arch/hardware.h>
38 #include <asm/arch/pxa-regs.h>
39 #include <asm/arch/pxa2xx_spi.h>
40
41 MODULE_AUTHOR("Stephen Street");
42 MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
43 MODULE_LICENSE("GPL");
44
45 #define MAX_BUSES 3
46
47 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
48 #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
49 #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
50
51 /* mask for detecting SSCR1 changes that require an SSP restart, basically
52  * everything except the service and interrupt enables */
53 #define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \
54                                 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
55                                 | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \
56                                 | SSCR1_STRF | SSCR1_EFWR | SSCR1_RFT \
57                                 | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
58
59 #define DEFINE_SSP_REG(reg, off) \
60 static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
61 static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
62
63 DEFINE_SSP_REG(SSCR0, 0x00)
64 DEFINE_SSP_REG(SSCR1, 0x04)
65 DEFINE_SSP_REG(SSSR, 0x08)
66 DEFINE_SSP_REG(SSITR, 0x0c)
67 DEFINE_SSP_REG(SSDR, 0x10)
68 DEFINE_SSP_REG(SSTO, 0x28)
69 DEFINE_SSP_REG(SSPSP, 0x2c)
70
71 #define START_STATE ((void*)0)
72 #define RUNNING_STATE ((void*)1)
73 #define DONE_STATE ((void*)2)
74 #define ERROR_STATE ((void*)-1)
75
76 #define QUEUE_RUNNING 0
77 #define QUEUE_STOPPED 1
78
79 struct driver_data {
80         /* Driver model hookup */
81         struct platform_device *pdev;
82
83         /* SPI framework hookup */
84         enum pxa_ssp_type ssp_type;
85         struct spi_master *master;
86
87         /* PXA hookup */
88         struct pxa2xx_spi_master *master_info;
89
90         /* DMA setup stuff */
91         int rx_channel;
92         int tx_channel;
93         u32 *null_dma_buf;
94
95         /* SSP register addresses */
96         void *ioaddr;
97         u32 ssdr_physical;
98
99         /* SSP masks */
100         u32 dma_cr1;
101         u32 int_cr1;
102         u32 clear_sr;
103         u32 mask_sr;
104
105         /* Driver message queue */
106         struct workqueue_struct *workqueue;
107         struct work_struct pump_messages;
108         spinlock_t lock;
109         struct list_head queue;
110         int busy;
111         int run;
112
113         /* Message Transfer pump */
114         struct tasklet_struct pump_transfers;
115
116         /* Current message transfer state info */
117         struct spi_message* cur_msg;
118         struct spi_transfer* cur_transfer;
119         struct chip_data *cur_chip;
120         size_t len;
121         void *tx;
122         void *tx_end;
123         void *rx;
124         void *rx_end;
125         int dma_mapped;
126         dma_addr_t rx_dma;
127         dma_addr_t tx_dma;
128         size_t rx_map_len;
129         size_t tx_map_len;
130         u8 n_bytes;
131         u32 dma_width;
132         int cs_change;
133         int (*write)(struct driver_data *drv_data);
134         int (*read)(struct driver_data *drv_data);
135         irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
136         void (*cs_control)(u32 command);
137 };
138
139 struct chip_data {
140         u32 cr0;
141         u32 cr1;
142         u32 psp;
143         u32 timeout;
144         u8 n_bytes;
145         u32 dma_width;
146         u32 dma_burst_size;
147         u32 threshold;
148         u32 dma_threshold;
149         u8 enable_dma;
150         u8 bits_per_word;
151         u32 speed_hz;
152         int (*write)(struct driver_data *drv_data);
153         int (*read)(struct driver_data *drv_data);
154         void (*cs_control)(u32 command);
155 };
156
157 static void pump_messages(struct work_struct *work);
158
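/* Drain the receive FIFO and wait for the SSP to go idle, then clear any
 * receive overrun; returns the remaining loop budget (0 means timeout). */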
159 static int flush(struct driver_data *drv_data)
160 {
161         unsigned long limit = loops_per_jiffy << 1;
162
163         void *reg = drv_data->ioaddr;
164
165         do {
166                 while (read_SSSR(reg) & SSSR_RNE) {
167                         read_SSDR(reg);
168                 }
169         } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
170         write_SSSR(SSSR_ROR, reg);
171
172         return limit;
173 }
174
175 static void null_cs_control(u32 command)
176 {
177 }
178
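/* PIO routines used when a transfer supplies no tx or rx buffer: clock out
 * zeroes and discard whatever is received. */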
179 static int null_writer(struct driver_data *drv_data)
180 {
181         void *reg = drv_data->ioaddr;
182         u8 n_bytes = drv_data->n_bytes;
183
184         if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
185                 || (drv_data->tx == drv_data->tx_end))
186                 return 0;
187
188         write_SSDR(0, reg);
189         drv_data->tx += n_bytes;
190
191         return 1;
192 }
193
194 static int null_reader(struct driver_data *drv_data)
195 {
196         void *reg = drv_data->ioaddr;
197         u8 n_bytes = drv_data->n_bytes;
198
199         while ((read_SSSR(reg) & SSSR_RNE)
200                 && (drv_data->rx < drv_data->rx_end)) {
201                 read_SSDR(reg);
202                 drv_data->rx += n_bytes;
203         }
204
205         return drv_data->rx == drv_data->rx_end;
206 }
207
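/* PIO fill/drain routines, one pair per FIFO word size.  A writer returns 0
 * once the tx FIFO is full or the tx data is exhausted; a reader returns
 * nonzero once the rx buffer has been completely filled. */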
208 static int u8_writer(struct driver_data *drv_data)
209 {
210         void *reg = drv_data->ioaddr;
211
212         if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
213                 || (drv_data->tx == drv_data->tx_end))
214                 return 0;
215
216         write_SSDR(*(u8 *)(drv_data->tx), reg);
217         ++drv_data->tx;
218
219         return 1;
220 }
221
222 static int u8_reader(struct driver_data *drv_data)
223 {
224         void *reg = drv_data->ioaddr;
225
226         while ((read_SSSR(reg) & SSSR_RNE)
227                 && (drv_data->rx < drv_data->rx_end)) {
228                 *(u8 *)(drv_data->rx) = read_SSDR(reg);
229                 ++drv_data->rx;
230         }
231
232         return drv_data->rx == drv_data->rx_end;
233 }
234
235 static int u16_writer(struct driver_data *drv_data)
236 {
237         void *reg = drv_data->ioaddr;
238
239         if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
240                 || (drv_data->tx == drv_data->tx_end))
241                 return 0;
242
243         write_SSDR(*(u16 *)(drv_data->tx), reg);
244         drv_data->tx += 2;
245
246         return 1;
247 }
248
249 static int u16_reader(struct driver_data *drv_data)
250 {
251         void *reg = drv_data->ioaddr;
252
253         while ((read_SSSR(reg) & SSSR_RNE)
254                 && (drv_data->rx < drv_data->rx_end)) {
255                 *(u16 *)(drv_data->rx) = read_SSDR(reg);
256                 drv_data->rx += 2;
257         }
258
259         return drv_data->rx == drv_data->rx_end;
260 }
261
262 static int u32_writer(struct driver_data *drv_data)
263 {
264         void *reg = drv_data->ioaddr;
265
266         if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
267                 || (drv_data->tx == drv_data->tx_end))
268                 return 0;
269
270         write_SSDR(*(u32 *)(drv_data->tx), reg);
271         drv_data->tx += 4;
272
273         return 1;
274 }
275
276 static int u32_reader(struct driver_data *drv_data)
277 {
278         void *reg = drv_data->ioaddr;
279
280         while ((read_SSSR(reg) & SSSR_RNE)
281                 && (drv_data->rx < drv_data->rx_end)) {
282                 *(u32 *)(drv_data->rx) = read_SSDR(reg);
283                 drv_data->rx += 4;
284         }
285
286         return drv_data->rx == drv_data->rx_end;
287 }
288
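/* Advance cur_transfer to the next entry on the message's transfer list;
 * returns RUNNING_STATE if another transfer follows, DONE_STATE otherwise. */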
289 static void *next_transfer(struct driver_data *drv_data)
290 {
291         struct spi_message *msg = drv_data->cur_msg;
292         struct spi_transfer *trans = drv_data->cur_transfer;
293
294         /* Move to next transfer */
295         if (trans->transfer_list.next != &msg->transfers) {
296                 drv_data->cur_transfer =
297                         list_entry(trans->transfer_list.next,
298                                         struct spi_transfer,
299                                         transfer_list);
300                 return RUNNING_STATE;
301         } else
302                 return DONE_STATE;
303 }
304
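/* Decide whether the current transfer can use DMA: returns 1 if the buffers
 * are already mapped or can be stream-mapped here (substituting the driver's
 * null dma buffer for a missing tx or rx buffer), 0 to fall back to PIO. */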
305 static int map_dma_buffers(struct driver_data *drv_data)
306 {
307         struct spi_message *msg = drv_data->cur_msg;
308         struct device *dev = &msg->spi->dev;
309
310         if (!drv_data->cur_chip->enable_dma)
311                 return 0;
312
313         if (msg->is_dma_mapped)
314                 return  drv_data->rx_dma && drv_data->tx_dma;
315
316         if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
317                 return 0;
318
319         /* Modify setup if rx buffer is null */
320         if (drv_data->rx == NULL) {
321                 *drv_data->null_dma_buf = 0;
322                 drv_data->rx = drv_data->null_dma_buf;
323                 drv_data->rx_map_len = 4;
324         } else
325                 drv_data->rx_map_len = drv_data->len;
326
327
328         /* Modify setup if tx buffer is null */
329         if (drv_data->tx == NULL) {
330                 *drv_data->null_dma_buf = 0;
331                 drv_data->tx = drv_data->null_dma_buf;
332                 drv_data->tx_map_len = 4;
333         } else
334                 drv_data->tx_map_len = drv_data->len;
335
336         /* Stream map the rx buffer */
337         drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
338                                                 drv_data->rx_map_len,
339                                                 DMA_FROM_DEVICE);
340         if (dma_mapping_error(drv_data->rx_dma))
341                 return 0;
342
343         /* Stream map the tx buffer */
344         drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
345                                                 drv_data->tx_map_len,
346                                                 DMA_TO_DEVICE);
347
348         if (dma_mapping_error(drv_data->tx_dma)) {
349                 dma_unmap_single(dev, drv_data->rx_dma,
350                                         drv_data->rx_map_len, DMA_FROM_DEVICE);
351                 return 0;
352         }
353
354         return 1;
355 }
356
357 static void unmap_dma_buffers(struct driver_data *drv_data)
358 {
359         struct device *dev;
360
361         if (!drv_data->dma_mapped)
362                 return;
363
364         if (!drv_data->cur_msg->is_dma_mapped) {
365                 dev = &drv_data->cur_msg->spi->dev;
366                 dma_unmap_single(dev, drv_data->rx_dma,
367                                         drv_data->rx_map_len, DMA_FROM_DEVICE);
368                 dma_unmap_single(dev, drv_data->tx_dma,
369                                         drv_data->tx_map_len, DMA_TO_DEVICE);
370         }
371
372         drv_data->dma_mapped = 0;
373 }
374
375 /* caller already set message->status; dma and pio irqs are blocked */
376 static void giveback(struct driver_data *drv_data)
377 {
378         struct spi_transfer* last_transfer;
379         unsigned long flags;
380         struct spi_message *msg;
381
382         spin_lock_irqsave(&drv_data->lock, flags);
383         msg = drv_data->cur_msg;
384         drv_data->cur_msg = NULL;
385         drv_data->cur_transfer = NULL;
386         drv_data->cur_chip = NULL;
387         queue_work(drv_data->workqueue, &drv_data->pump_messages);
388         spin_unlock_irqrestore(&drv_data->lock, flags);
389
390         last_transfer = list_entry(msg->transfers.prev,
391                                         struct spi_transfer,
392                                         transfer_list);
393
394         if (!last_transfer->cs_change)
395                 drv_data->cs_control(PXA2XX_CS_DEASSERT);
396
397         msg->state = NULL;
398         if (msg->complete)
399                 msg->complete(msg->context);
400 }
401
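/* Spin until the SSP clears SSSR_BSY; returns 0 if the wait timed out. */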
402 static int wait_ssp_rx_stall(void *ioaddr)
403 {
404         unsigned long limit = loops_per_jiffy << 1;
405
406         while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
407                 cpu_relax();
408
409         return limit;
410 }
411
412 static int wait_dma_channel_stop(int channel)
413 {
414         unsigned long limit = loops_per_jiffy << 1;
415
416         while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
417                 cpu_relax();
418
419         return limit;
420 }
421
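/* Stop both DMA channels, reset and disable the SSP, unmap the DMA buffers,
 * mark the current message as errored and reschedule pump_transfers. */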
422 static void dma_error_stop(struct driver_data *drv_data, const char *msg)
423 {
424         void *reg = drv_data->ioaddr;
425
426         /* Stop and reset */
427         DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
428         DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
429         write_SSSR(drv_data->clear_sr, reg);
430         write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
431         if (drv_data->ssp_type != PXA25x_SSP)
432                 write_SSTO(0, reg);
433         flush(drv_data);
434         write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
435
436         unmap_dma_buffers(drv_data);
437
438         dev_err(&drv_data->pdev->dev, "%s\n", msg);
439
440         drv_data->cur_msg->state = ERROR_STATE;
441         tasklet_schedule(&drv_data->pump_transfers);
442 }
443
444 static void dma_transfer_complete(struct driver_data *drv_data)
445 {
446         void *reg = drv_data->ioaddr;
447         struct spi_message *msg = drv_data->cur_msg;
448
449         /* Clear and disable interrupts on SSP and DMA channels*/
450         write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
451         write_SSSR(drv_data->clear_sr, reg);
452         DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
453         DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
454
455         if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
456                 dev_err(&drv_data->pdev->dev,
457                         "dma_handler: dma rx channel stop failed\n");
458
459         if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
460                 dev_err(&drv_data->pdev->dev,
461                         "dma_transfer: ssp rx stall failed\n");
462
463         unmap_dma_buffers(drv_data);
464
465         /* update the buffer pointer for the amount completed in dma */
466         drv_data->rx += drv_data->len -
467                         (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
468
469         /* read trailing data from the fifo; it does not matter how many
470          * bytes are in the fifo, just read until the buffer is full
471          * or the fifo is empty, whichever occurs first */
472         drv_data->read(drv_data);
473
474         /* return count of what was actually read */
475         msg->actual_length += drv_data->len -
476                                 (drv_data->rx_end - drv_data->rx);
477
478         /* Release chip select if requested, transfer delays are
479          * handled in pump_transfers */
480         if (drv_data->cs_change)
481                 drv_data->cs_control(PXA2XX_CS_DEASSERT);
482
483         /* Move to next transfer */
484         msg->state = next_transfer(drv_data);
485
486         /* Schedule transfer tasklet */
487         tasklet_schedule(&drv_data->pump_transfers);
488 }
489
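/* DMA channel callback registered with pxa_request_dma: reports bus errors
 * and, on PXA25x (which has no SSP timeout interrupt), uses the tx end
 * interrupt to detect the end of the transfer. */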
490 static void dma_handler(int channel, void *data)
491 {
492         struct driver_data *drv_data = data;
493         u32 irq_status = DCSR(channel) & DMA_INT_MASK;
494
495         if (irq_status & DCSR_BUSERR) {
496
497                 if (channel == drv_data->tx_channel)
498                         dma_error_stop(drv_data,
499                                         "dma_handler: "
500                                         "bad bus address on tx channel");
501                 else
502                         dma_error_stop(drv_data,
503                                         "dma_handler: "
504                                         "bad bus address on rx channel");
505                 return;
506         }
507
508         /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
509         if ((channel == drv_data->tx_channel)
510                 && (irq_status & DCSR_ENDINTR)
511                 && (drv_data->ssp_type == PXA25x_SSP)) {
512
513                 /* Wait for rx to stall */
514                 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
515                         dev_err(&drv_data->pdev->dev,
516                                 "dma_handler: ssp rx stall failed\n");
517
518                 /* finish this transfer, start the next */
519                 dma_transfer_complete(drv_data);
520         }
521 }
522
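/* SSP interrupt service routine used while DMA is active: handles FIFO
 * overrun, ignores false timeouts while tx DMA is still running and
 * completes the transfer on a real timeout. */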
523 static irqreturn_t dma_transfer(struct driver_data *drv_data)
524 {
525         u32 irq_status;
526         void *reg = drv_data->ioaddr;
527
528         irq_status = read_SSSR(reg) & drv_data->mask_sr;
529         if (irq_status & SSSR_ROR) {
530                 dma_error_stop(drv_data, "dma_transfer: fifo overrun");
531                 return IRQ_HANDLED;
532         }
533
534         /* Check for false positive timeout */
535         if ((irq_status & SSSR_TINT)
536                 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
537                 write_SSSR(SSSR_TINT, reg);
538                 return IRQ_HANDLED;
539         }
540
541         if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
542
543                 /* Clear and disable timeout interrupt, do the rest in
544                  * dma_transfer_complete */
545                 if (drv_data->ssp_type != PXA25x_SSP)
546                         write_SSTO(0, reg);
547
548                 /* finish this transfer, start the next */
549                 dma_transfer_complete(drv_data);
550
551                 return IRQ_HANDLED;
552         }
553
554         /* Oops, problem detected */
555         return IRQ_NONE;
556 }
557
558 static void int_error_stop(struct driver_data *drv_data, const char* msg)
559 {
560         void *reg = drv_data->ioaddr;
561
562         /* Stop and reset SSP */
563         write_SSSR(drv_data->clear_sr, reg);
564         write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
565         if (drv_data->ssp_type != PXA25x_SSP)
566                 write_SSTO(0, reg);
567         flush(drv_data);
568         write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
569
570         dev_err(&drv_data->pdev->dev, "%s\n", msg);
571
572         drv_data->cur_msg->state = ERROR_STATE;
573         tasklet_schedule(&drv_data->pump_transfers);
574 }
575
576 static void int_transfer_complete(struct driver_data *drv_data)
577 {
578         void *reg = drv_data->ioaddr;
579
580         /* Stop SSP */
581         write_SSSR(drv_data->clear_sr, reg);
582         write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
583         if (drv_data->ssp_type != PXA25x_SSP)
584                 write_SSTO(0, reg);
585
586         /* Update total bytes transferred; return count of bytes actually read */
587         drv_data->cur_msg->actual_length += drv_data->len -
588                                 (drv_data->rx_end - drv_data->rx);
589
590         /* Release chip select if requested, transfer delays are
591          * handled in pump_transfers */
592         if (drv_data->cs_change)
593                 drv_data->cs_control(PXA2XX_CS_DEASSERT);
594
595         /* Move to next transfer */
596         drv_data->cur_msg->state = next_transfer(drv_data);
597
598         /* Schedule transfer tasklet */
599         tasklet_schedule(&drv_data->pump_transfers);
600 }
601
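/* SSP interrupt service routine for PIO transfers: drains the rx FIFO,
 * refills the tx FIFO and completes the transfer once the rx buffer is full. */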
602 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
603 {
604         void *reg = drv_data->ioaddr;
605
606         u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
607                         drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
608
609         u32 irq_status = read_SSSR(reg) & irq_mask;
610
611         if (irq_status & SSSR_ROR) {
612                 int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
613                 return IRQ_HANDLED;
614         }
615
616         if (irq_status & SSSR_TINT) {
617                 write_SSSR(SSSR_TINT, reg);
618                 if (drv_data->read(drv_data)) {
619                         int_transfer_complete(drv_data);
620                         return IRQ_HANDLED;
621                 }
622         }
623
624         /* Drain rx fifo, Fill tx fifo and prevent overruns */
625         do {
626                 if (drv_data->read(drv_data)) {
627                         int_transfer_complete(drv_data);
628                         return IRQ_HANDLED;
629                 }
630         } while (drv_data->write(drv_data));
631
632         if (drv_data->read(drv_data)) {
633                 int_transfer_complete(drv_data);
634                 return IRQ_HANDLED;
635         }
636
637         if (drv_data->tx == drv_data->tx_end) {
638                 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
639                 /* PXA25x_SSP has no timeout, read trailing bytes */
640                 if (drv_data->ssp_type == PXA25x_SSP) {
641                         if (!wait_ssp_rx_stall(reg))
642                         {
643                                 int_error_stop(drv_data, "interrupt_transfer: "
644                                                 "rx stall failed");
645                                 return IRQ_HANDLED;
646                         }
647                         if (!drv_data->read(drv_data))
648                         {
649                                 int_error_stop(drv_data,
650                                                 "interrupt_transfer: "
651                                                 "trailing byte read failed");
652                                 return IRQ_HANDLED;
653                         }
654                         int_transfer_complete(drv_data);
655                 }
656         }
657
658         /* We did something */
659         return IRQ_HANDLED;
660 }
661
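/* Top-level SSP IRQ handler: quiesces the port if no message is in progress,
 * otherwise dispatches to the handler installed by pump_transfers. */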
662 static irqreturn_t ssp_int(int irq, void *dev_id)
663 {
664         struct driver_data *drv_data = dev_id;
665         void *reg = drv_data->ioaddr;
666
667         if (!drv_data->cur_msg) {
668
669                 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
670                 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
671                 if (drv_data->ssp_type != PXA25x_SSP)
672                         write_SSTO(0, reg);
673                 write_SSSR(drv_data->clear_sr, reg);
674
675                 dev_err(&drv_data->pdev->dev, "bad message state "
676                         "in interrupt handler\n");
677
678                 /* Never fail */
679                 return IRQ_HANDLED;
680         }
681
682         return drv_data->transfer_handler(drv_data);
683 }
684
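/* Pick a DMA burst size and matching rx/tx FIFO thresholds for the given
 * bits_per_word; returns nonzero if the requested burst size had to be
 * reduced to fit. */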
685 static int set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi,
686                                 u8 bits_per_word, u32 *burst_code,
687                                 u32 *threshold)
688 {
689         struct pxa2xx_spi_chip *chip_info =
690                         (struct pxa2xx_spi_chip *)spi->controller_data;
691         int bytes_per_word;
692         int burst_bytes;
693         int thresh_words;
694         int req_burst_size;
695         int retval = 0;
696
697         /* Set the threshold (in registers) to the same amount of data as
698          * represented by the burst size (in bytes).  The computation below
699          * is (burst_size rounded up to the nearest 8 bytes, word or long word)
700          * divided by (bytes/register).  The tx threshold is the inverse of
701          * the rx threshold, so that there is always enough data in the rx
702          * fifo to satisfy a burst and always enough space in the tx fifo to
703          * accept a burst (a tx burst would overwrite the fifo if there were
704          * not enough space); there must always remain enough empty space in
705          * the rx fifo for any data loaded into the tx fifo.
706          * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
707          * will be 8, or half the fifo.
708          * The threshold can only be set to 2, 4 or 8, but not 16, because to
709          * burst 16 into the tx fifo the fifo would have to be empty; however,
710          * the minimum fifo trigger level is 1, and the tx will request
711          * service when the fifo is at that level, with only 15 spaces free.
712          */
713
714         /* find bytes/word */
715         if (bits_per_word <= 8)
716                 bytes_per_word = 1;
717         else if (bits_per_word <= 16)
718                 bytes_per_word = 2;
719         else
720                 bytes_per_word = 4;
721
722         /* use struct pxa2xx_spi_chip->dma_burst_size if available */
723         if (chip_info)
724                 req_burst_size = chip_info->dma_burst_size;
725         else {
726                 switch (chip->dma_burst_size) {
727                 default:
728                         /* if the default burst size is not set,
729                          * do it now */
730                         chip->dma_burst_size = DCMD_BURST8;
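                        /* fall through */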
731                 case DCMD_BURST8:
732                         req_burst_size = 8;
733                         break;
734                 case DCMD_BURST16:
735                         req_burst_size = 16;
736                         break;
737                 case DCMD_BURST32:
738                         req_burst_size = 32;
739                         break;
740                 }
741         }
742         if (req_burst_size <= 8) {
743                 *burst_code = DCMD_BURST8;
744                 burst_bytes = 8;
745         } else if (req_burst_size <= 16) {
746                 if (bytes_per_word == 1) {
747                         /* don't burst more than 1/2 the fifo */
748                         *burst_code = DCMD_BURST8;
749                         burst_bytes = 8;
750                         retval = 1;
751                 } else {
752                         *burst_code = DCMD_BURST16;
753                         burst_bytes = 16;
754                 }
755         } else {
756                 if (bytes_per_word == 1) {
757                         /* don't burst more than 1/2 the fifo */
758                         *burst_code = DCMD_BURST8;
759                         burst_bytes = 8;
760                         retval = 1;
761                 } else if (bytes_per_word == 2) {
762                         /* don't burst more than 1/2 the fifo */
763                         *burst_code = DCMD_BURST16;
764                         burst_bytes = 16;
765                         retval = 1;
766                 } else {
767                         *burst_code = DCMD_BURST32;
768                         burst_bytes = 32;
769                 }
770         }
771
772         thresh_words = burst_bytes / bytes_per_word;
773
774         /* thresh_words will be between 2 and 8 */
775         *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
776                         | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
777
778         return retval;
779 }
780
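/* Tasklet body: advances the message state machine, programs the SSP (and
 * the DMA channels when the buffers can be mapped) for the current transfer
 * and starts it. */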
781 static void pump_transfers(unsigned long data)
782 {
783         struct driver_data *drv_data = (struct driver_data *)data;
784         struct spi_message *message = NULL;
785         struct spi_transfer *transfer = NULL;
786         struct spi_transfer *previous = NULL;
787         struct chip_data *chip = NULL;
788         void *reg = drv_data->ioaddr;
789         u32 clk_div = 0;
790         u8 bits = 0;
791         u32 speed = 0;
792         u32 cr0;
793         u32 cr1;
794         u32 dma_thresh = drv_data->cur_chip->dma_threshold;
795         u32 dma_burst = drv_data->cur_chip->dma_burst_size;
796
797         /* Get current state information */
798         message = drv_data->cur_msg;
799         transfer = drv_data->cur_transfer;
800         chip = drv_data->cur_chip;
801
802         /* Handle abort */
803         if (message->state == ERROR_STATE) {
804                 message->status = -EIO;
805                 giveback(drv_data);
806                 return;
807         }
808
809         /* Handle end of message */
810         if (message->state == DONE_STATE) {
811                 message->status = 0;
812                 giveback(drv_data);
813                 return;
814         }
815
816         /* Delay if requested at end of transfer */
817         if (message->state == RUNNING_STATE) {
818                 previous = list_entry(transfer->transfer_list.prev,
819                                         struct spi_transfer,
820                                         transfer_list);
821                 if (previous->delay_usecs)
822                         udelay(previous->delay_usecs);
823         }
824
825         /* Check transfer length */
826         if (transfer->len > 8191)
827         {
828                 dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
829                                 "length greater than 8191\n");
830                 message->status = -EINVAL;
831                 giveback(drv_data);
832                 return;
833         }
834
835         /* Setup the transfer state based on the type of transfer */
836         if (flush(drv_data) == 0) {
837                 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
838                 message->status = -EIO;
839                 giveback(drv_data);
840                 return;
841         }
842         drv_data->n_bytes = chip->n_bytes;
843         drv_data->dma_width = chip->dma_width;
844         drv_data->cs_control = chip->cs_control;
845         drv_data->tx = (void *)transfer->tx_buf;
846         drv_data->tx_end = drv_data->tx + transfer->len;
847         drv_data->rx = transfer->rx_buf;
848         drv_data->rx_end = drv_data->rx + transfer->len;
849         drv_data->rx_dma = transfer->rx_dma;
850         drv_data->tx_dma = transfer->tx_dma;
851         drv_data->len = transfer->len & DCMD_LENGTH;
852         drv_data->write = drv_data->tx ? chip->write : null_writer;
853         drv_data->read = drv_data->rx ? chip->read : null_reader;
854         drv_data->cs_change = transfer->cs_change;
855
856         /* Change speed and bits per word on a per-transfer basis */
857         cr0 = chip->cr0;
858         if (transfer->speed_hz || transfer->bits_per_word) {
859
860                 bits = chip->bits_per_word;
861                 speed = chip->speed_hz;
862
863                 if (transfer->speed_hz)
864                         speed = transfer->speed_hz;
865
866                 if (transfer->bits_per_word)
867                         bits = transfer->bits_per_word;
868
869                 if (reg == SSP1_VIRT)
870                         clk_div = SSP1_SerClkDiv(speed);
871                 else if (reg == SSP2_VIRT)
872                         clk_div = SSP2_SerClkDiv(speed);
873                 else if (reg == SSP3_VIRT)
874                         clk_div = SSP3_SerClkDiv(speed);
875
876                 if (bits <= 8) {
877                         drv_data->n_bytes = 1;
878                         drv_data->dma_width = DCMD_WIDTH1;
879                         drv_data->read = drv_data->read != null_reader ?
880                                                 u8_reader : null_reader;
881                         drv_data->write = drv_data->write != null_writer ?
882                                                 u8_writer : null_writer;
883                 } else if (bits <= 16) {
884                         drv_data->n_bytes = 2;
885                         drv_data->dma_width = DCMD_WIDTH2;
886                         drv_data->read = drv_data->read != null_reader ?
887                                                 u16_reader : null_reader;
888                         drv_data->write = drv_data->write != null_writer ?
889                                                 u16_writer : null_writer;
890                 } else if (bits <= 32) {
891                         drv_data->n_bytes = 4;
892                         drv_data->dma_width = DCMD_WIDTH4;
893                         drv_data->read = drv_data->read != null_reader ?
894                                                 u32_reader : null_reader;
895                         drv_data->write = drv_data->write != null_writer ?
896                                                 u32_writer : null_writer;
897                 }
898                 /* if bits/word is changed in dma mode, then the thresholds
899                  * and burst size must also be checked */
900                 if (chip->enable_dma) {
901                         if (set_dma_burst_and_threshold(chip, message->spi,
902                                                         bits, &dma_burst,
903                                                         &dma_thresh))
904                                 if (printk_ratelimit())
905                                         dev_warn(&message->spi->dev,
906                                                 "pump_transfer: "
907                                                 "DMA burst size reduced to "
908                                                 "match bits_per_word\n");
909                 }
910
911                 cr0 = clk_div
912                         | SSCR0_Motorola
913                         | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
914                         | SSCR0_SSE
915                         | (bits > 16 ? SSCR0_EDSS : 0);
916         }
917
918         message->state = RUNNING_STATE;
919
920         /* Try to map dma buffer and do a dma transfer if successful */
921         if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
922
923                 /* Ensure we have the correct interrupt handler */
924                 drv_data->transfer_handler = dma_transfer;
925
926                 /* Setup rx DMA Channel */
927                 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
928                 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
929                 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
930                 if (drv_data->rx == drv_data->null_dma_buf)
931                         /* No target address increment */
932                         DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
933                                                         | drv_data->dma_width
934                                                         | dma_burst
935                                                         | drv_data->len;
936                 else
937                         DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
938                                                         | DCMD_FLOWSRC
939                                                         | drv_data->dma_width
940                                                         | dma_burst
941                                                         | drv_data->len;
942
943                 /* Setup tx DMA Channel */
944                 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
945                 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
946                 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
947                 if (drv_data->tx == drv_data->null_dma_buf)
948                         /* No source address increment */
949                         DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
950                                                         | drv_data->dma_width
951                                                         | dma_burst
952                                                         | drv_data->len;
953                 else
954                         DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
955                                                         | DCMD_FLOWTRG
956                                                         | drv_data->dma_width
957                                                         | dma_burst
958                                                         | drv_data->len;
959
960                 /* On PXA25x, enable the tx dma end irq to detect end of transfer */
961                 if (drv_data->ssp_type == PXA25x_SSP)
962                         DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
963
964                 /* Fix me, need to handle cs polarity */
965                 drv_data->cs_control(PXA2XX_CS_ASSERT);
966
967                 /* Clear status and start DMA engine */
968                 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
969                 write_SSSR(drv_data->clear_sr, reg);
970                 DCSR(drv_data->rx_channel) |= DCSR_RUN;
971                 DCSR(drv_data->tx_channel) |= DCSR_RUN;
972         } else {
973                 /* Ensure we have the correct interrupt handler */
974                 drv_data->transfer_handler = interrupt_transfer;
975
976                 /* Fix me, need to handle cs polarity */
977                 drv_data->cs_control(PXA2XX_CS_ASSERT);
978
979                 /* Clear status  */
980                 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
981                 write_SSSR(drv_data->clear_sr, reg);
982         }
983
984         /* see if we need to reload the config registers */
985         if ((read_SSCR0(reg) != cr0)
986                 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
987                         (cr1 & SSCR1_CHANGE_MASK)) {
988
989                 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
990                 if (drv_data->ssp_type != PXA25x_SSP)
991                         write_SSTO(chip->timeout, reg);
992                 write_SSCR1(cr1, reg);
993                 write_SSCR0(cr0, reg);
994         } else {
995                 if (drv_data->ssp_type != PXA25x_SSP)
996                         write_SSTO(chip->timeout, reg);
997                 write_SSCR1(cr1, reg);
998         }
999 }
1000
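/* Workqueue handler: pulls the next message off the queue, initializes its
 * state and schedules the pump_transfers tasklet. */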
1001 static void pump_messages(struct work_struct *work)
1002 {
1003         struct driver_data *drv_data =
1004                 container_of(work, struct driver_data, pump_messages);
1005         unsigned long flags;
1006
1007         /* Lock queue and check for queue work */
1008         spin_lock_irqsave(&drv_data->lock, flags);
1009         if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
1010                 drv_data->busy = 0;
1011                 spin_unlock_irqrestore(&drv_data->lock, flags);
1012                 return;
1013         }
1014
1015         /* Make sure we are not already running a message */
1016         if (drv_data->cur_msg) {
1017                 spin_unlock_irqrestore(&drv_data->lock, flags);
1018                 return;
1019         }
1020
1021         /* Extract head of queue */
1022         drv_data->cur_msg = list_entry(drv_data->queue.next,
1023                                         struct spi_message, queue);
1024         list_del_init(&drv_data->cur_msg->queue);
1025
1026         /* Initial message state */
1027         drv_data->cur_msg->state = START_STATE;
1028         drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
1029                                                 struct spi_transfer,
1030                                                 transfer_list);
1031
1032         /* prepare to set up the SSP, in pump_transfers, using the
1033          * per-chip configuration */
1034         drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
1035
1036         /* Mark as busy and launch transfers */
1037         tasklet_schedule(&drv_data->pump_transfers);
1038
1039         drv_data->busy = 1;
1040         spin_unlock_irqrestore(&drv_data->lock, flags);
1041 }
1042
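/* spi_master transfer() hook called from the SPI core: queue the message and
 * wake the message pump unless the queue has been stopped. */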
1043 static int transfer(struct spi_device *spi, struct spi_message *msg)
1044 {
1045         struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1046         unsigned long flags;
1047
1048         spin_lock_irqsave(&drv_data->lock, flags);
1049
1050         if (drv_data->run == QUEUE_STOPPED) {
1051                 spin_unlock_irqrestore(&drv_data->lock, flags);
1052                 return -ESHUTDOWN;
1053         }
1054
1055         msg->actual_length = 0;
1056         msg->status = -EINPROGRESS;
1057         msg->state = START_STATE;
1058
1059         list_add_tail(&msg->queue, &drv_data->queue);
1060
1061         if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
1062                 queue_work(drv_data->workqueue, &drv_data->pump_messages);
1063
1064         spin_unlock_irqrestore(&drv_data->lock, flags);
1065
1066         return 0;
1067 }
1068
1069 /* the spi->mode bits understood by this driver: */
1070 #define MODEBITS (SPI_CPOL | SPI_CPHA)
1071
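/* spi_master setup() hook: validates bits_per_word and mode, allocates
 * per-device chip_data on first use and precomputes SSCR0/SSCR1 values,
 * FIFO thresholds and PIO routines. */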
1072 static int setup(struct spi_device *spi)
1073 {
1074         struct pxa2xx_spi_chip *chip_info = NULL;
1075         struct chip_data *chip;
1076         struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1077         unsigned int clk_div;
1078
1079         if (!spi->bits_per_word)
1080                 spi->bits_per_word = 8;
1081
1082         if (drv_data->ssp_type != PXA25x_SSP
1083                 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1084                 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1085                                 "b/w not 4-32 for type non-PXA25x_SSP\n",
1086                                 drv_data->ssp_type, spi->bits_per_word);
1087                 return -EINVAL;
1088         }
1089         else if (drv_data->ssp_type == PXA25x_SSP
1090                         && (spi->bits_per_word < 4
1091                                 || spi->bits_per_word > 16)) {
1092                 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1093                                 "b/w not 4-16 for type PXA25x_SSP\n",
1094                                 drv_data->ssp_type, spi->bits_per_word);
1095                 return -EINVAL;
1096         }
1097
1098         if (spi->mode & ~MODEBITS) {
1099                 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1100                         spi->mode & ~MODEBITS);
1101                 return -EINVAL;
1102         }
1103
1104         /* Only alloc on first setup */
1105         chip = spi_get_ctldata(spi);
1106         if (!chip) {
1107                 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1108                 if (!chip) {
1109                         dev_err(&spi->dev,
1110                                 "failed setup: can't allocate chip data\n");
1111                         return -ENOMEM;
1112                 }
1113
1114                 chip->cs_control = null_cs_control;
1115                 chip->enable_dma = 0;
1116                 chip->timeout = 1000;
1117                 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
1118                 chip->dma_burst_size = drv_data->master_info->enable_dma ?
1119                                         DCMD_BURST8 : 0;
1120         }
1121
1122         /* protocol drivers may change the chip settings, so...
1123          * if chip_info exists, use it */
1124         chip_info = spi->controller_data;
1125
1126         /* chip_info isn't always needed */
1127         chip->cr1 = 0;
1128         if (chip_info) {
1129                 if (chip_info->cs_control)
1130                         chip->cs_control = chip_info->cs_control;
1131
1132                 chip->timeout = chip_info->timeout;
1133
1134                 chip->threshold = (SSCR1_RxTresh(chip_info->rx_threshold) &
1135                                                                 SSCR1_RFT) |
1136                                 (SSCR1_TxTresh(chip_info->tx_threshold) &
1137                                                                 SSCR1_TFT);
1138
1139                 chip->enable_dma = chip_info->dma_burst_size != 0
1140                                         && drv_data->master_info->enable_dma;
1141                 chip->dma_threshold = 0;
1142
1143                 if (chip_info->enable_loopback)
1144                         chip->cr1 = SSCR1_LBM;
1145         }
1146
1147         /* set dma burst and threshold outside of chip_info path so that if
1148          * chip_info goes away after setting chip->enable_dma, the
1149          * burst and threshold can still respond to changes in bits_per_word */
1150         if (chip->enable_dma) {
1151                 /* set up legal burst and threshold for dma */
1152                 if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
1153                                                 &chip->dma_burst_size,
1154                                                 &chip->dma_threshold)) {
1155                         dev_warn(&spi->dev, "in setup: DMA burst size reduced "
1156                                         "to match bits_per_word\n");
1157                 }
1158         }
1159
1160         if (drv_data->ioaddr == SSP1_VIRT)
1161                 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
1162         else if (drv_data->ioaddr == SSP2_VIRT)
1163                 clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
1164         else if (drv_data->ioaddr == SSP3_VIRT)
1165                 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
1166         else
1167         {
1168                 dev_err(&spi->dev, "failed setup: unknown IO address=0x%p\n",
1169                         drv_data->ioaddr);
1170                 return -ENODEV;
1171         }
1172         chip->speed_hz = spi->max_speed_hz;
1173
1174         chip->cr0 = clk_div
1175                         | SSCR0_Motorola
1176                         | SSCR0_DataSize(spi->bits_per_word > 16 ?
1177                                 spi->bits_per_word - 16 : spi->bits_per_word)
1178                         | SSCR0_SSE
1179                         | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
1180         chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
1181         chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
1182                         | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1183
1184         /* NOTE:  PXA25x_SSP _could_ use external clocking ... */
1185         if (drv_data->ssp_type != PXA25x_SSP)
1186                 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
1187                                 spi->bits_per_word,
1188                                 (CLOCK_SPEED_HZ)
1189                                         / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1190                                 spi->mode & 0x3);
1191         else
1192                 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
1193                                 spi->bits_per_word,
1194                                 (CLOCK_SPEED_HZ/2)
1195                                         / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1196                                 spi->mode & 0x3);
1197
1198         if (spi->bits_per_word <= 8) {
1199                 chip->n_bytes = 1;
1200                 chip->dma_width = DCMD_WIDTH1;
1201                 chip->read = u8_reader;
1202                 chip->write = u8_writer;
1203         } else if (spi->bits_per_word <= 16) {
1204                 chip->n_bytes = 2;
1205                 chip->dma_width = DCMD_WIDTH2;
1206                 chip->read = u16_reader;
1207                 chip->write = u16_writer;
1208         } else if (spi->bits_per_word <= 32) {
1209                 chip->cr0 |= SSCR0_EDSS;
1210                 chip->n_bytes = 4;
1211                 chip->dma_width = DCMD_WIDTH4;
1212                 chip->read = u32_reader;
1213                 chip->write = u32_writer;
1214         } else {
1215                 dev_err(&spi->dev, "invalid wordsize\n");
1216                 return -ENODEV;
1217         }
1218         chip->bits_per_word = spi->bits_per_word;
1219
1220         spi_set_ctldata(spi, chip);
1221
1222         return 0;
1223 }
1224
1225 static void cleanup(struct spi_device *spi)
1226 {
1227         struct chip_data *chip = spi_get_ctldata(spi);
1228
1229         kfree(chip);
1230 }
1231
1232 static int __init init_queue(struct driver_data *drv_data)
1233 {
1234         INIT_LIST_HEAD(&drv_data->queue);
1235         spin_lock_init(&drv_data->lock);
1236
1237         drv_data->run = QUEUE_STOPPED;
1238         drv_data->busy = 0;
1239
1240         tasklet_init(&drv_data->pump_transfers,
1241                         pump_transfers, (unsigned long)drv_data);
1242
1243         INIT_WORK(&drv_data->pump_messages, pump_messages);
1244         drv_data->workqueue = create_singlethread_workqueue(
1245                                         drv_data->master->dev.parent->bus_id);
1246         if (drv_data->workqueue == NULL)
1247                 return -EBUSY;
1248
1249         return 0;
1250 }
1251
1252 static int start_queue(struct driver_data *drv_data)
1253 {
1254         unsigned long flags;
1255
1256         spin_lock_irqsave(&drv_data->lock, flags);
1257
1258         if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1259                 spin_unlock_irqrestore(&drv_data->lock, flags);
1260                 return -EBUSY;
1261         }
1262
1263         drv_data->run = QUEUE_RUNNING;
1264         drv_data->cur_msg = NULL;
1265         drv_data->cur_transfer = NULL;
1266         drv_data->cur_chip = NULL;
1267         spin_unlock_irqrestore(&drv_data->lock, flags);
1268
1269         queue_work(drv_data->workqueue, &drv_data->pump_messages);
1270
1271         return 0;
1272 }
1273
1274 static int stop_queue(struct driver_data *drv_data)
1275 {
1276         unsigned long flags;
1277         unsigned limit = 500;
1278         int status = 0;
1279
1280         spin_lock_irqsave(&drv_data->lock, flags);
1281
1282         /* This is a bit lame, but is optimized for the common execution path.
1283          * A wait_queue on the drv_data->busy could be used, but then the common
1284          * execution path (pump_messages) would be required to call wake_up or
1285          * friends on every SPI message. Do this instead */
1286         drv_data->run = QUEUE_STOPPED;
1287         while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1288                 spin_unlock_irqrestore(&drv_data->lock, flags);
1289                 msleep(10);
1290                 spin_lock_irqsave(&drv_data->lock, flags);
1291         }
1292
1293         if (!list_empty(&drv_data->queue) || drv_data->busy)
1294                 status = -EBUSY;
1295
1296         spin_unlock_irqrestore(&drv_data->lock, flags);
1297
1298         return status;
1299 }
1300
1301 static int destroy_queue(struct driver_data *drv_data)
1302 {
1303         int status;
1304
1305         status = stop_queue(drv_data);
1306         /* we are unloading the module or failing to load (only two calls
1307          * to this routine), and neither call can handle a return value.
1308          * However, destroy_workqueue calls flush_workqueue, and that will
1309          * block until all work is done.  If the reason that stop_queue
1310          * timed out is that the work will never finish, then it does no
1311          * good to call destroy_workqueue, so return anyway. */
1312         if (status != 0)
1313                 return status;
1314
1315         destroy_workqueue(drv_data->workqueue);
1316
1317         return 0;
1318 }
1319
1320 static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1321 {
1322         struct device *dev = &pdev->dev;
1323         struct pxa2xx_spi_master *platform_info;
1324         struct spi_master *master;
1325         struct driver_data *drv_data = NULL;
1326         struct resource *memory_resource;
1327         int irq;
1328         int status = 0;
1329
1330         platform_info = dev->platform_data;
1331
1332         if (platform_info->ssp_type == SSP_UNDEFINED) {
1333                 dev_err(&pdev->dev, "undefined SSP\n");
1334                 return -ENODEV;
1335         }
1336
1337         /* Allocate master with space for drv_data and null dma buffer */
1338         master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1339         if (!master) {
1340                 dev_err(&pdev->dev, "can not alloc spi_master\n");
1341                 return -ENOMEM;
1342         }
1343         drv_data = spi_master_get_devdata(master);
1344         drv_data->master = master;
1345         drv_data->master_info = platform_info;
1346         drv_data->pdev = pdev;
1347
1348         master->bus_num = pdev->id;
1349         master->num_chipselect = platform_info->num_chipselect;
1350         master->cleanup = cleanup;
1351         master->setup = setup;
1352         master->transfer = transfer;
1353
1354         drv_data->ssp_type = platform_info->ssp_type;
1355         drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data +
1356                                                 sizeof(struct driver_data), 8);
1357
1358         /* Setup register addresses */
1359         memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1360         if (!memory_resource) {
1361                 dev_err(&pdev->dev, "memory resources not defined\n");
1362                 status = -ENODEV;
1363                 goto out_error_master_alloc;
1364         }
1365
1366         drv_data->ioaddr = (void *)io_p2v((unsigned long)(memory_resource->start));
1367         drv_data->ssdr_physical = memory_resource->start + 0x00000010;
1368         if (platform_info->ssp_type == PXA25x_SSP) {
1369                 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1370                 drv_data->dma_cr1 = 0;
1371                 drv_data->clear_sr = SSSR_ROR;
1372                 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1373         } else {
1374                 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1375                 drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
1376                 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1377                 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1378         }
1379
1380         /* Attach to IRQ */
1381         irq = platform_get_irq(pdev, 0);
1382         if (irq < 0) {
1383                 dev_err(&pdev->dev, "irq resource not defined\n");
1384                 status = -ENODEV;
1385                 goto out_error_master_alloc;
1386         }
1387
1388         status = request_irq(irq, ssp_int, 0, dev->bus_id, drv_data);
1389         if (status < 0) {
1390                 dev_err(&pdev->dev, "can not get IRQ\n");
1391                 goto out_error_master_alloc;
1392         }
1393
1394         /* Setup DMA if requested */
1395         drv_data->tx_channel = -1;
1396         drv_data->rx_channel = -1;
1397         if (platform_info->enable_dma) {
1398
1399                 /* Get two DMA channels (rx and tx) */
1400                 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
1401                                                         DMA_PRIO_HIGH,
1402                                                         dma_handler,
1403                                                         drv_data);
1404                 if (drv_data->rx_channel < 0) {
1405                         dev_err(dev, "problem (%d) requesting rx channel\n",
1406                                 drv_data->rx_channel);
1407                         status = -ENODEV;
1408                         goto out_error_irq_alloc;
1409                 }
1410                 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1411                                                         DMA_PRIO_MEDIUM,
1412                                                         dma_handler,
1413                                                         drv_data);
1414                 if (drv_data->tx_channel < 0) {
1415                         dev_err(dev, "problem (%d) requesting tx channel\n",
1416                                 drv_data->tx_channel);
1417                         status = -ENODEV;
1418                         goto out_error_dma_alloc;
1419                 }
1420
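                /* Point this SSP port's DRCMR rx/tx request-to-channel map
                 * registers at the channels just allocated. */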
1421                 if (drv_data->ioaddr == SSP1_VIRT) {
1422                                 DRCMRRXSSDR = DRCMR_MAPVLD
1423                                                 | drv_data->rx_channel;
1424                                 DRCMRTXSSDR = DRCMR_MAPVLD
1425                                                 | drv_data->tx_channel;
1426                 } else if (drv_data->ioaddr == SSP2_VIRT) {
1427                                 DRCMRRXSS2DR = DRCMR_MAPVLD
1428                                                 | drv_data->rx_channel;
1429                                 DRCMRTXSS2DR = DRCMR_MAPVLD
1430                                                 | drv_data->tx_channel;
1431                 } else if (drv_data->ioaddr == SSP3_VIRT) {
1432                                 DRCMRRXSS3DR = DRCMR_MAPVLD
1433                                                 | drv_data->rx_channel;
1434                                 DRCMRTXSS3DR = DRCMR_MAPVLD
1435                                                 | drv_data->tx_channel;
1436                 } else {
1437                         dev_err(dev, "bad SSP type\n");
                              status = -ENODEV;
1438                         goto out_error_dma_alloc;
1439                 }
1440         }
1441
1442         /* Enable SOC clock */
1443         pxa_set_cken(platform_info->clock_enable, 1);
1444
1445         /* Load default SSP configuration */
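        /* (port disabled, rx/tx FIFO thresholds of 4/12, 8-bit Motorola
         * SPI format, serial clock divider of 2) */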
1446         write_SSCR0(0, drv_data->ioaddr);
1447         write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
1448         write_SSCR0(SSCR0_SerClkDiv(2)
1449                         | SSCR0_Motorola
1450                         | SSCR0_DataSize(8),
1451                         drv_data->ioaddr);
1452         if (drv_data->ssp_type != PXA25x_SSP)
1453                 write_SSTO(0, drv_data->ioaddr);
1454         write_SSPSP(0, drv_data->ioaddr);
1455
1456         /* Initialize and start the queue */
1457         status = init_queue(drv_data);
1458         if (status != 0) {
1459                 dev_err(&pdev->dev, "problem initializing queue\n");
1460                 goto out_error_clock_enabled;
1461         }
1462         status = start_queue(drv_data);
1463         if (status != 0) {
1464                 dev_err(&pdev->dev, "problem starting queue\n");
1465                 goto out_error_clock_enabled;
1466         }
1467
1468         /* Register with the SPI framework */
1469         platform_set_drvdata(pdev, drv_data);
1470         status = spi_register_master(master);
1471         if (status != 0) {
1472                 dev_err(&pdev->dev, "problem registering spi master\n");
1473                 goto out_error_queue_alloc;
1474         }
1475
1476         return status;
1477
1478 out_error_queue_alloc:
1479         destroy_queue(drv_data);
1480
1481 out_error_clock_enabled:
1482         pxa_set_cken(platform_info->clock_enable, 0);
1483
1484 out_error_dma_alloc:
1485         if (drv_data->tx_channel != -1)
1486                 pxa_free_dma(drv_data->tx_channel);
1487         if (drv_data->rx_channel != -1)
1488                 pxa_free_dma(drv_data->rx_channel);
1489
1490 out_error_irq_alloc:
1491         free_irq(irq, drv_data);
1492
1493 out_error_master_alloc:
1494         spi_master_put(master);
1495         return status;
1496 }
1497
1498 static int pxa2xx_spi_remove(struct platform_device *pdev)
1499 {
1500         struct driver_data *drv_data = platform_get_drvdata(pdev);
1501         int irq;
1502         int status = 0;
1503
1504         if (!drv_data)
1505                 return 0;
1506
1507         /* Remove the queue */
1508         status = destroy_queue(drv_data);
1509         if (status != 0)
1510                 /* The kernel does not check the return status of this
1511                  * routine (mod->exit, within the kernel), so nothing is
1512                  * gained by returning an error from here; the module is
1513                  * going away regardless, and we should not leave any more
1514                  * resources allocated than necessary.  We cannot free the
1515                  * message memory in drv_data->queue, but we can release the
1516                  * resources below.  I think the kernel should honor -EBUSY
1517                  * returns but... */
1518                 dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
1519                         "complete, message memory not freed\n");
1520
1521         /* Disable the SSP at the peripheral and SOC level */
1522         write_SSCR0(0, drv_data->ioaddr);
1523         pxa_set_cken(drv_data->master_info->clock_enable, 0);
1524
1525         /* Release DMA */
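        /* Clear this port's DRCMR mappings before freeing the channels so
         * the SSP request lines no longer target them. */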
1526         if (drv_data->master_info->enable_dma) {
1527                 if (drv_data->ioaddr == SSP1_VIRT) {
1528                         DRCMRRXSSDR = 0;
1529                         DRCMRTXSSDR = 0;
1530                 } else if (drv_data->ioaddr == SSP2_VIRT) {
1531                         DRCMRRXSS2DR = 0;
1532                         DRCMRTXSS2DR = 0;
1533                 } else if (drv_data->ioaddr == SSP3_VIRT) {
1534                         DRCMRRXSS3DR = 0;
1535                         DRCMRTXSS3DR = 0;
1536                 }
1537                 pxa_free_dma(drv_data->tx_channel);
1538                 pxa_free_dma(drv_data->rx_channel);
1539         }
1540
1541         /* Release IRQ */
1542         irq = platform_get_irq(pdev, 0);
1543         if (irq >= 0)
1544                 free_irq(irq, drv_data);
1545
1546         /* Disconnect from the SPI framework */
1547         spi_unregister_master(drv_data->master);
1548
1549         /* Prevent double remove */
1550         platform_set_drvdata(pdev, NULL);
1551
1552         return 0;
1553 }
1554
1555 static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1556 {
1557         int status = 0;
1558
1559         status = pxa2xx_spi_remove(pdev);
             if (status != 0)
1560                 dev_err(&pdev->dev, "shutdown failed with %d\n", status);
1561 }
1562
1563 #ifdef CONFIG_PM
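/* device_for_each_child() callback: refuse the suspend if a child device is
 * not already in the power state being requested for the controller. */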
1564 static int suspend_devices(struct device *dev, void *pm_message)
1565 {
1566         pm_message_t *state = pm_message;
1567
1568         if (dev->power.power_state.event != state->event) {
1569                 dev_warn(dev, "pm state does not match request\n");
1570                 return -1;
1571         }
1572
1573         return 0;
1574 }
1575
1576 static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1577 {
1578         struct driver_data *drv_data = platform_get_drvdata(pdev);
1579         int status = 0;
1580
1581         /* Check that all children are already in the requested power state */
1582         if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
1583                 dev_warn(&pdev->dev, "suspend aborted\n");
1584                 return -1;
1585         }
1586
1587         status = stop_queue(drv_data);
1588         if (status != 0)
1589                 return status;
1590         write_SSCR0(0, drv_data->ioaddr);
1591         pxa_set_cken(drv_data->master_info->clock_enable, 0);
1592
1593         return 0;
1594 }
1595
1596 static int pxa2xx_spi_resume(struct platform_device *pdev)
1597 {
1598         struct driver_data *drv_data = platform_get_drvdata(pdev);
1599         int status = 0;
1600
1601         /* Enable the SSP clock */
1602         pxa_set_cken(drv_data->master_info->clock_enable, 1);
1603
1604         /* Start the queue running */
1605         status = start_queue(drv_data);
1606         if (status != 0) {
1607                 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1608                 return status;
1609         }
1610
1611         return 0;
1612 }
1613 #else
1614 #define pxa2xx_spi_suspend NULL
1615 #define pxa2xx_spi_resume NULL
1616 #endif /* CONFIG_PM */
1617
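/* No .probe member here: the driver is registered via platform_driver_probe()
 * below, which binds the probe routine at init time only (the device is not
 * hotpluggable). */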
1618 static struct platform_driver driver = {
1619         .driver = {
1620                 .name = "pxa2xx-spi",
1621                 .bus = &platform_bus_type,
1622                 .owner = THIS_MODULE,
1623         },
1624         .remove = pxa2xx_spi_remove,
1625         .shutdown = pxa2xx_spi_shutdown,
1626         .suspend = pxa2xx_spi_suspend,
1627         .resume = pxa2xx_spi_resume,
1628 };
1629
1630 static int __init pxa2xx_spi_init(void)
1631 {
1632         return platform_driver_probe(&driver, pxa2xx_spi_probe);
1633 }
1634 module_init(pxa2xx_spi_init);
1635
1636 static void __exit pxa2xx_spi_exit(void)
1637 {
1638         platform_driver_unregister(&driver);
1639 }
1640 module_exit(pxa2xx_spi_exit);