/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#define APB_DMA_GEN                             0x000
#define GEN_ENABLE                              (1<<31)

#define APB_DMA_CNTRL                           0x010

#define APB_DMA_IRQ_MASK                        0x01c

#define APB_DMA_IRQ_MASK_SET                    0x020

#define APB_DMA_CHAN_CSR                        0x000
#define CSR_ENB                                 (1<<31)
#define CSR_IE_EOC                              (1<<30)
#define CSR_HOLD                                (1<<29)
#define CSR_DIR                                 (1<<28)
#define CSR_ONCE                                (1<<27)
#define CSR_FLOW                                (1<<21)
#define CSR_REQ_SEL_SHIFT                       16
#define CSR_REQ_SEL_MASK                        (0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID                     (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT                        2
#define CSR_WCOUNT_MASK                         0xFFFC

#define APB_DMA_CHAN_STA                        0x004
#define STA_BUSY                                (1<<31)
#define STA_ISE_EOC                             (1<<30)
#define STA_HALT                                (1<<29)
#define STA_PING_PONG                           (1<<28)
#define STA_COUNT_SHIFT                         2
#define STA_COUNT_MASK                          0xFFFC

#define APB_DMA_CHAN_AHB_PTR                    0x010

#define APB_DMA_CHAN_AHB_SEQ                    0x014
#define AHB_SEQ_INTR_ENB                        (1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT                 28
#define AHB_SEQ_BUS_WIDTH_MASK                  (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8                     (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16                    (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32                    (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64                    (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128                   (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP                       (1<<27)
#define AHB_SEQ_BURST_MASK                      (0x7<<24)
#define AHB_SEQ_BURST_1                         (4<<24)
#define AHB_SEQ_BURST_4                         (5<<24)
#define AHB_SEQ_BURST_8                         (6<<24)
#define AHB_SEQ_DBL_BUF                         (1<<19)
#define AHB_SEQ_WRAP_SHIFT                      16
#define AHB_SEQ_WRAP_MASK                       (0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR                    0x018

#define APB_DMA_CHAN_APB_SEQ                    0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT                 28
#define APB_SEQ_BUS_WIDTH_MASK                  (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8                     (0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16                    (1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32                    (2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64                    (3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128                   (4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP                       (1<<27)
#define APB_SEQ_WRAP_SHIFT                      16
#define APB_SEQ_WRAP_MASK                       (0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR                  16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM             4
#define TEGRA_SYSTEM_DMA_CH_MIN                 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
        (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRANSFER_SIZE 0x10000

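/*
 * Transfers are counted in 32-bit words: the CSR word-count field holds
 * (number of words - 1), which is how tegra_dma_update_hw() computes
 * req_transfer_count below.  Illustrative arithmetic for a 4096-byte
 * one-shot request (an example, not a value from the TRM):
 *
 *	words  = req->size >> 2;	->  1024
 *	wcount = words - 1;		->  1023, shifted by CSR_WCOUNT_SHIFT
 */
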
static const unsigned int ahb_addr_wrap_table[8] = {
        0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
        struct list_head        list;
        int                     id;
        spinlock_t              lock;
        char                    name[TEGRA_DMA_NAME_SIZE];
        void __iomem            *addr;
        int                     mode;
        int                     irq;
        int                     req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        if (tegra_dma_is_empty(ch))
                return;

        req = list_entry(ch->list.next, typeof(*req), node);

        tegra_dma_dequeue_req(ch, req);
}

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
        u32 csr;
        u32 status;

        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_IE_EOC;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        csr &= ~CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
        u32 csr;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        while (!list_empty(&ch->list))
                list_del(ch->list.next);

        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        tegra_dma_stop(ch);

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned int csr;
        unsigned int status;
        struct tegra_dma_req *req = NULL;
        int found = 0;
        unsigned long irq_flags;
        int to_transfer;
        int req_transfer_count;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        if (!found) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return 0;
        }

        /* Stop the DMA and get the transfer count.
         * Getting the transfer count is tricky.
         *  - Change the source selector to invalid to stop the DMA from
         *    FIFO to memory.
         *  - Read the status register to know the number of pending
         *    bytes to be transferred.
         *  - Finally stop or program the DMA to the next buffer in the
         *    list.
         */
        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        /* Get the transfer count */
        status = readl(ch->addr + APB_DMA_CHAN_STA);
        to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
        req_transfer_count = ch->req_transfer_count;
        req_transfer_count += 1;
        to_transfer += 1;

        req->bytes_transferred = req_transfer_count;

        if (status & STA_BUSY)
                req->bytes_transferred -= to_transfer;

        /* In continuous transfer mode, DMA only tracks the count of the
         * half DMA buffer. So, if the DMA already finished half the DMA
         * then add the half buffer to the completed count.
         *
         *      FIXME: There can be a race here. What if the request to
         *      dequeue happens at the same time as the DMA just moved to
         *      the new buffer and the software has not yet received the
         *      interrupt?
         */
        if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
                        req->bytes_transferred += req_transfer_count;

        /* Convert the word count to bytes */
        req->bytes_transferred *= 4;

        tegra_dma_stop(ch);
        if (!list_empty(&ch->list)) {
                /* if the list is not empty, queue the next request */
                struct tegra_dma_req *next_req;
                next_req = list_entry(ch->list.next,
                        typeof(*next_req), node);
                tegra_dma_update_hw(ch, next_req);
        }
        req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        /* Callback should be called without any lock */
        req->complete(req);
        return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
        unsigned long irq_flags;
        bool is_empty;

        spin_lock_irqsave(&ch->lock, irq_flags);
        is_empty = list_empty(&ch->list);
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *req;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *_req;
        int start_dma = 0;

        if (req->size > NV_DMA_MAX_TRANSFER_SIZE ||
                req->source_addr & 0x3 || req->dest_addr & 0x3) {
                pr_err("Invalid DMA request for channel %d\n", ch->id);
                return -EINVAL;
        }

        spin_lock_irqsave(&ch->lock, irq_flags);

        list_for_each_entry(_req, &ch->list, node) {
                if (req == _req) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return -EEXIST;
                }
        }

        req->bytes_transferred = 0;
        req->status = 0;
        req->buffer_status = 0;
        if (list_empty(&ch->list))
                start_dma = 1;

        list_add_tail(&req->node, &ch->list);

        if (start_dma)
                tegra_dma_update_hw(ch, req);

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
        int channel;
        struct tegra_dma_channel *ch = NULL;

        if (WARN_ON(!tegra_dma_initialized))
                return NULL;

        mutex_lock(&tegra_dma_lock);

        /* first channel is the shared channel */
        if (mode & TEGRA_DMA_SHARED) {
                channel = TEGRA_SYSTEM_DMA_CH_MIN;
        } else {
                channel = find_first_zero_bit(channel_usage,
                        ARRAY_SIZE(dma_channels));
                if (channel >= ARRAY_SIZE(dma_channels))
                        goto out;
        }
        __set_bit(channel, channel_usage);
        ch = &dma_channels[channel];
        ch->mode = mode;

out:
        mutex_unlock(&tegra_dma_lock);
        return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
        if (ch->mode & TEGRA_DMA_SHARED)
                return;
        tegra_dma_cancel(ch);
        mutex_lock(&tegra_dma_lock);
        __clear_bit(ch->id, channel_usage);
        mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

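/*
 * A minimal sketch of how a client might drive this API for a one-shot
 * peripheral-to-memory transfer.  This is illustrative only and not part
 * of the driver; the request-selector value and callback body are
 * assumptions, but every field set here is one this file programs into
 * the hardware:
 *
 *	static void xfer_done(struct tegra_dma_req *req)
 *	{
 *		pr_info("done, %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.source_addr = fifo_phys;		(APB side)
 *	req.dest_addr   = buf_phys;		(AHB/memory side)
 *	req.to_memory   = 1;
 *	req.size        = 4096;			(bytes, word aligned)
 *	req.source_wrap = 4;			(FIFO address wraps)
 *	req.dest_wrap   = 0;			(memory address increments)
 *	req.source_bus_width = 32;
 *	req.dest_bus_width   = 32;
 *	req.req_sel     = <peripheral request selector>;
 *	req.complete    = xfer_done;
 *	tegra_dma_enqueue_req(ch, &req);
 */
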
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        u32 apb_ptr;
        u32 ahb_ptr;

        if (req->to_memory) {
                apb_ptr = req->source_addr;
                ahb_ptr = req->dest_addr;
        } else {
                apb_ptr = req->dest_addr;
                ahb_ptr = req->source_addr;
        }
        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        int ahb_addr_wrap;
        int apb_addr_wrap;
        int ahb_bus_width;
        int apb_bus_width;
        int index;

        u32 ahb_seq;
        u32 apb_seq;
        u32 ahb_ptr;
        u32 apb_ptr;
        u32 csr;

        csr = CSR_IE_EOC | CSR_FLOW;
        ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
        apb_seq = 0;

        csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

        /* One shot mode is always single buffered; continuous mode is
         * always double buffered. */
        if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
                csr |= CSR_ONCE;
                ch->req_transfer_count = (req->size >> 2) - 1;
        } else {
                ahb_seq |= AHB_SEQ_DBL_BUF;

                /* In double buffered mode, we set the size to half the
                 * requested size and interrupt when half the buffer
                 * is full (e.g. a 2048-byte request programs a word
                 * count of (2048 >> 3) - 1 = 255 per half). */
                ch->req_transfer_count = (req->size >> 3) - 1;
        }

        csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

        if (req->to_memory) {
                apb_ptr = req->source_addr;
                ahb_ptr = req->dest_addr;

                apb_addr_wrap = req->source_wrap;
                ahb_addr_wrap = req->dest_wrap;
                apb_bus_width = req->source_bus_width;
                ahb_bus_width = req->dest_bus_width;
        } else {
                csr |= CSR_DIR;
                apb_ptr = req->dest_addr;
                ahb_ptr = req->source_addr;

                apb_addr_wrap = req->dest_wrap;
                ahb_addr_wrap = req->source_wrap;
                apb_bus_width = req->dest_bus_width;
                ahb_bus_width = req->source_bus_width;
        }

        apb_addr_wrap >>= 2;
        ahb_addr_wrap >>= 2;

        /* set address wrap for APB size */
        index = 0;
        do {
                if (apb_addr_wrap_table[index] == apb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(apb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
        apb_seq |= index << APB_SEQ_WRAP_SHIFT;

        /* set address wrap for AHB size */
        index = 0;
        do {
                if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
        ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == ahb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == apb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

        writel(csr, ch->addr + APB_DMA_CHAN_CSR);
        writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        /* Set the enable bit only after the channel is fully programmed */
        csr |= CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);
        if (req) {
                int bytes_transferred;

                bytes_transferred = ch->req_transfer_count;
                bytes_transferred += 1;
                bytes_transferred <<= 2;

                list_del(&req->node);
                req->bytes_transferred = bytes_transferred;
                req->status = TEGRA_DMA_REQ_SUCCESS;

                spin_unlock_irqrestore(&ch->lock, irq_flags);
                /* Callback should be called without any lock */
                pr_debug("%s: transferred %d bytes\n", __func__,
                        req->bytes_transferred);
                req->complete(req);
                spin_lock_irqsave(&ch->lock, irq_flags);
        }

        if (!list_empty(&ch->list)) {
                req = list_entry(ch->list.next, typeof(*req), node);
                /* the complete function we just called may have enqueued
                   another req, in which case dma has already started */
                if (req->status != TEGRA_DMA_REQ_INFLIGHT)
                        tegra_dma_update_hw(ch, req);
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);
        if (req) {
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
                        bool is_dma_ping_complete;
                        is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
                                                & STA_PING_PONG) ? true : false;
                        if (req->to_memory)
                                is_dma_ping_complete = !is_dma_ping_complete;
                        /* Out of sync - Release current buffer */
                        if (!is_dma_ping_complete) {
                                int bytes_transferred;

                                bytes_transferred = ch->req_transfer_count;
                                bytes_transferred += 1;
                                bytes_transferred <<= 3;
                                req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                                req->bytes_transferred = bytes_transferred;
                                req->status = TEGRA_DMA_REQ_SUCCESS;
                                tegra_dma_stop(ch);

                                if (!list_is_last(&req->node, &ch->list)) {
                                        struct tegra_dma_req *next_req;

                                        next_req = list_entry(req->node.next,
                                                typeof(*next_req), node);
                                        tegra_dma_update_hw(ch, next_req);
                                }

                                list_del(&req->node);

                                /* DMA lock is NOT held when the callback
                                 * is called */
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                                req->complete(req);
                                return;
                        }
                        /* Load the next request into the hardware, if
                         * available. */
                        if (!list_is_last(&req->node, &ch->list)) {
                                struct tegra_dma_req *next_req;

                                next_req = list_entry(req->node.next,
                                        typeof(*next_req), node);
                                tegra_dma_update_hw_partial(ch, next_req);
                        }
                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
                        /* DMA lock is NOT held when the callback is called */
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        if (likely(req->threshold))
                                req->threshold(req);
                        return;

                } else if (req->buffer_status ==
                        TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
                        /* Callback when the buffer is completely full
                         * (i.e. on the second interrupt) */
                        int bytes_transferred;

                        bytes_transferred = ch->req_transfer_count;
                        bytes_transferred += 1;
                        bytes_transferred <<= 3;

                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                        req->bytes_transferred = bytes_transferred;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
                        list_del(&req->node);

                        /* DMA lock is NOT held when the callback is called */
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        req->complete(req);
                        return;

                } else {
                        BUG();
                }
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
}

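/*
 * Interrupt sequence for one continuous-mode (double-buffered) request,
 * as handled above (a summary of this file's logic, not new behavior):
 *
 *	first EOC interrupt:  half the buffer is done; the next request's
 *	    pointers, if any, are pre-loaded via tegra_dma_update_hw_partial(),
 *	    buffer_status becomes HALF_FULL and req->threshold() is invoked.
 *	second EOC interrupt: buffer_status becomes FULL, the request is
 *	    taken off the list and req->complete() is invoked.
 */
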
static irqreturn_t dma_isr(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;
        unsigned long status;

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC) {
                writel(status, ch->addr + APB_DMA_CHAN_STA);
        } else {
                pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
                return IRQ_HANDLED;
        }
        return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;

        if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
                handle_oneshot_dma(ch);
        else
                handle_continuous_dma(ch);

        return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
        int ret = 0;
        int i;
        unsigned int irq;
        void __iomem *addr;
        struct clk *c;

        bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);

        c = clk_get_sys("tegra-dma", NULL);
        if (IS_ERR(c)) {
                pr_err("Unable to get clock for APB DMA\n");
                /* Nothing to unwind yet; returning directly also avoids
                 * the register write in the cleanup path below, which
                 * would dereference an uninitialized addr here. */
                return PTR_ERR(c);
        }
        ret = clk_enable(c);
        if (ret != 0) {
                pr_err("Unable to enable clock for APB DMA\n");
                return ret;
        }

        addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        writel(GEN_ENABLE, addr + APB_DMA_GEN);
        writel(0, addr + APB_DMA_CNTRL);
        writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
               addr + APB_DMA_IRQ_MASK_SET);

        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];

                ch->id = i;
                snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

                ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->list);

                irq = INT_APB_DMA_CH0 + i;
                ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
                        dma_channels[i].name, ch);
                if (ret) {
                        pr_err("Failed to register IRQ %d for DMA %d\n",
                                irq, i);
                        goto fail;
                }
                ch->irq = irq;

                __clear_bit(i, channel_usage);
        }
        /* mark the shared channel allocated */
        __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

        tegra_dma_initialized = true;

        return 0;
fail:
        writel(0, addr + APB_DMA_GEN);
        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];
                if (ch->irq)
                        free_irq(ch->irq, ch);
        }
        return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM
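/*
 * Context save area: three controller-wide registers (GEN, CNTRL,
 * IRQ_MASK) followed by five registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR,
 * APB_SEQ) for each of the TEGRA_SYSTEM_DMA_CH_NR channels, in exactly
 * the order tegra_dma_suspend() saves them and tegra_dma_resume()
 * restores them.
 */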
static u32 apb_dma[5 * TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        *ctx++ = readl(addr + APB_DMA_GEN);
        *ctx++ = readl(addr + APB_DMA_CNTRL);
        *ctx++ = readl(addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                                  TEGRA_APB_DMA_CH0_SIZE * i);

                *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
        }
}

void tegra_dma_resume(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        writel(*ctx++, addr + APB_DMA_GEN);
        writel(*ctx++, addr + APB_DMA_CNTRL);
        writel(*ctx++, addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                                  TEGRA_APB_DMA_CH0_SIZE * i);

                writel(*ctx++, addr + APB_DMA_CHAN_CSR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
        }
}

#endif