drivers/dma/pch_dma.c
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

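/*
 * Per-channel write masks for the shared CTL0/CTL3 mode registers. Each
 * channel owns a 4-bit field (2 mode bits plus a direction bit); these
 * constants cover the mode bits of every channel (eight fields in CTL0,
 * four in CTL3). pdc_set_dir()/pdc_set_mode() below OR the mask of the
 * *other* channels into the value they write back, which on this IOH
 * appears to be the "leave mode unchanged" encoding, so reprogramming one
 * channel does not disturb its neighbours. (Behaviour inferred from this
 * driver's read-modify-write pattern, not from the datasheet.)
 */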
#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

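/*
 * A software descriptor wrapping the 4-word hardware descriptor. Note that
 * "regs" must stay the first member: txd.phys holds the DMA address of this
 * allocation, and that same address is written into a previous descriptor's
 * "next" field for scatter-gather chaining, so the hardware expects the
 * pch_dma_desc_regs layout right at the start of the allocation.
 */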
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

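/*
 * Token-pasting accessors for the per-channel descriptor registers, e.g.
 * channel_readl(pd_chan, SIZE) expands to
 * readl(pd_chan->membase + PDC_SIZE).
 */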
#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

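/*
 * Enable/disable the completion interrupt for one channel in CTL2.
 * Channels 0-7 use bits 0-7; for channels 8-11 the enable bits appear to
 * live at bits 16-19 (hence chan_id + 8), above the start/abort bits that
 * DMA_CTL2_START_SHIFT_BITS places at bit 8. Inferred from this driver,
 * not from the EG20T datasheet.
 */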
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

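/*
 * Program the transfer direction bit of one channel. Channels 0-7 live in
 * CTL0, channels 8-11 in CTL3, four bits per channel. The value written
 * back keeps this channel's current mode, sets or clears its direction
 * bit, and fills the other channels' mode fields with the "keep mode"
 * mask described above.
 */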
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

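/*
 * Kick off a transfer on an idle channel. A single-descriptor transaction
 * is programmed directly into the channel registers and run in one-shot
 * mode; a chained transaction instead points NEXT at the first hardware
 * descriptor and lets the controller walk the list in scatter-gather mode.
 */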
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

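/*
 * dmaengine ->tx_submit() hook: assign the transaction its cookie under the
 * channel lock, then either start it immediately if the channel is idle or
 * park it on the pending queue. Returns the assigned cookie, as the
 * dmaengine API expects.
 */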
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

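/*
 * Prepare a scatter/gather slave transaction. The client is expected to
 * pass a struct pch_dma_slave through chan->private when it requests the
 * channel. A minimal, hypothetical client sketch (made-up FIFO addresses,
 * a filter_fn supplied by the client, and an already-mapped sg table sgt)
 * might look like:
 *
 *	struct pch_dma_slave slave = {
 *		.tx_reg = 0xdeadbee0,	// device TX FIFO bus address
 *		.rx_reg = 0xdeadbee4,	// device RX FIFO bus address
 *		.width  = PCH_DMA_WIDTH_4_BYTES,
 *	};
 *	chan = dma_request_channel(mask, filter_fn, &slave);
 *	chan->private = &slave;
 *	desc = dmaengine_prep_slave_sg(chan, sgt.sgl, sgt.nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Each sg entry becomes one hardware descriptor; its byte count must fit
 * the per-width limit (0x3FF for 1- and 2-byte widths, 0x7FF for 4-byte).
 */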
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

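/*
 * Shared interrupt handler. STS0 carries the IRQ/error flags for channels
 * 0-7 and STS2 those for channels 8-11; the raw status words are written
 * back at the end to acknowledge (clear) only the interrupts that were
 * actually observed. Per-channel completion work is deferred to tasklets.
 */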
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

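/*
 * Power management helpers: snapshot the control registers and each
 * channel's descriptor registers into pd->regs/pd->ch_regs before the
 * device is powered down, and replay them on resume, so channel state
 * survives suspend.
 */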
#ifdef	CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

static int pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI device IDs of the DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

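/*
 * module_pci_driver() expands to the usual module_init()/module_exit()
 * boilerplate that registers and unregisters pch_dma_driver. The
 * driver_data value in each id-table entry above is the channel count
 * picked up by pch_dma_probe().
 */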
module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);