/*
 * bfin_dma_5xx.c - Blackfin DMA implementation
 *
 * Copyright 2004-2008 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/early_printk.h>

/*
 * To work around anomaly 05000119, we always check the DMA_DONE bit,
 * never the DMA_RUN bit.
 */

struct dma_channel dma_ch[MAX_DMA_CHANNELS];
EXPORT_SYMBOL(dma_ch);

static int __init blackfin_dma_init(void)
{
	int i;

	printk(KERN_INFO "Blackfin DMA Controller\n");

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		dma_ch[i].chan_status = DMA_CHANNEL_FREE;
		dma_ch[i].regs = dma_io_base_addr[i];
		mutex_init(&(dma_ch[i].dmalock));
	}
	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");

#if defined(CONFIG_DEB_DMA_URGENT)
	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
#endif

	return 0;
}
arch_initcall(blackfin_dma_init);

#ifdef CONFIG_PROC_FS
static int proc_dma_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
		if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);

	return 0;
}

static int proc_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_dma_show, NULL);
}

static const struct file_operations proc_dma_operations = {
	.open		= proc_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_dma_init(void)
{
	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
}
late_initcall(proc_dma_init);
#endif

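/* With the seq_printf() format above, reading /proc/dma lists each claimed
 * channel number and its owner.  A hypothetical sample (channel numbers and
 * names depend entirely on the board and the drivers in use):
 *
 *	$ cat /proc/dma
 *	 6: BFIN_UART
 *	12: Blackfin dma_memcpy
 *	13: Blackfin dma_memcpy
 */
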
/**
 *	request_dma - request a DMA channel
 *
 * Request the specific DMA channel from the system if it's available.
 */
int request_dma(unsigned int channel, const char *device_id)
{
	pr_debug("request_dma() : BEGIN\n");

	if (device_id == NULL)
		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);

#if defined(CONFIG_BF561) && ANOMALY_05000182
	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
		if (get_cclk() > 500000000) {
			printk(KERN_WARNING
			       "Request IMDMA failed due to ANOMALY 05000182\n");
			return -EFAULT;
		}
	}
#endif

	mutex_lock(&(dma_ch[channel].dmalock));

	if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED)
	    || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) {
		mutex_unlock(&(dma_ch[channel].dmalock));
		pr_debug("DMA CHANNEL IN USE\n");
		return -EBUSY;
	} else {
		dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
		pr_debug("DMA CHANNEL IS ALLOCATED\n");
	}

	mutex_unlock(&(dma_ch[channel].dmalock));

#ifdef CONFIG_BF54x
	if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
		unsigned int per_map;
		per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
		if (strncmp(device_id, "BFIN_UART", 9) == 0)
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0xC) << 12);
		else
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0x6) << 12);
	}
#endif

	dma_ch[channel].device_id = device_id;
	dma_ch[channel].irq = 0;

	/* You have to request a DMA channel before doing any operations on
	 * its descriptors or registers.
	 */
	pr_debug("request_dma() : END\n");
	return 0;
}
EXPORT_SYMBOL(request_dma);

int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
{
	BUG_ON(channel >= MAX_DMA_CHANNELS ||
			dma_ch[channel].chan_status == DMA_CHANNEL_FREE);

	if (callback != NULL) {
		int ret;
		unsigned int irq = channel2irq(channel);

		ret = request_irq(irq, callback, IRQF_DISABLED,
			dma_ch[channel].device_id, data);
		if (ret)
			return ret;

		dma_ch[channel].irq = irq;
		dma_ch[channel].data = data;
	}
	return 0;
}
EXPORT_SYMBOL(set_dma_callback);

/**
 *	clear_dma_buffer - clear DMA fifos for specified channel
 *
 * Set the Buffer Clear bit in the Configuration register of specific DMA
 * channel. This will stop the descriptor based DMA operation.
 */
static void clear_dma_buffer(unsigned int channel)
{
	dma_ch[channel].regs->cfg |= RESTART;
	SSYNC();
	dma_ch[channel].regs->cfg &= ~RESTART;
}

void free_dma(unsigned int channel)
{
	pr_debug("free_dma() : BEGIN\n");
	BUG_ON(channel >= MAX_DMA_CHANNELS ||
			dma_ch[channel].chan_status == DMA_CHANNEL_FREE);

	/* Halt the DMA */
	disable_dma(channel);
	clear_dma_buffer(channel);

	if (dma_ch[channel].irq)
		free_irq(dma_ch[channel].irq, dma_ch[channel].data);

	/* Clear the DMA Variable in the Channel */
	mutex_lock(&(dma_ch[channel].dmalock));
	dma_ch[channel].chan_status = DMA_CHANNEL_FREE;
	mutex_unlock(&(dma_ch[channel].dmalock));

	pr_debug("free_dma() : END\n");
}
EXPORT_SYMBOL(free_dma);

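/* A minimal usage sketch for the channel API above, assuming a hypothetical
 * driver with a channel "CH_FOO", a device pointer "dev", and a handler
 * "foo_dma_irq" (none of these names exist in this file):
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		clear_dma_irqstat(CH_FOO);	// ack the channel (asm/dma.h)
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_dma(CH_FOO, "foo") < 0)
 *		return -EBUSY;
 *	set_dma_callback(CH_FOO, foo_dma_irq, dev);
 *	...
 *	free_dma(CH_FOO);	// releases the IRQ, marks the channel free
 */
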
#ifdef CONFIG_PM
# ifndef MAX_DMA_SUSPEND_CHANNELS
#  define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
# endif
int blackfin_dma_suspend(void)
{
	int i;

	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
		if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
			return -EBUSY;
		}

		dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
	}

	return 0;
}

void blackfin_dma_resume(void)
{
	int i;
	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
		dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
}
#endif

/**
 *	blackfin_dma_early_init - minimal DMA init
 *
 * Setup a few DMA registers so we can safely do DMA transfers early on in
 * the kernel booting process.  Really this just means using dma_memcpy().
 */
void __init blackfin_dma_early_init(void)
{
	early_shadow_stamp();
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
}

void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();
}

void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
	/*
	 * Now that DMA is done, we would normally flush cache, but
	 * i/d cache isn't running this early, so we don't bother,
	 * and just clear out the DMA channel for next time
	 */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}

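/* A minimal sketch of how the two early-boot helpers pair up.  Since
 * early_dma_memcpy() alternates between the two MDMA streams, up to two
 * copies can be in flight before a single early_dma_memcpy_done() call
 * drains them both.  Buffer names here are illustrative only:
 *
 *	early_dma_memcpy(dst_a, src_a, len_a);
 *	early_dma_memcpy(dst_b, src_b, len_b);
 *	early_dma_memcpy_done();	// waits on both MDMA0 and MDMA1
 */
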
/**
 *	__dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
 * while programming registers so that everything is fully configured.  Wait
 * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.  Do it under irq lock and
	 * without the anomaly version (because we are atomic already).
	 */
	__builtin_bfin_ssync();

	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* For larger bit sizes, we've already divided down cnt so it
		 * is no longer a multiple of 64k.  So we have to break down
		 * the limit here so it is a multiple of the incoming size.
		 * There is no limitation here in terms of total size other
		 * than the hardware though as the bits lost in the shift are
		 * made up by MODIFY (== we can hit the whole address space).
		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
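		/* Worked example (values assumed for illustration): a 32-bit
		 * copy has dmod == 4, so shift == 2 and each row holds
		 * 1 << 14 == 0x4000 words.  An incoming cnt of 0x14000 words
		 * thus becomes x_count == 0x4000 and y_count == 5, and
		 * 0x4000 * 5 == 0x14000 words move in total.
		 */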
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}

	bfin_write_MDMA_D0_START_ADDR(daddr);
	bfin_write_MDMA_D0_X_COUNT(cnt);
	bfin_write_MDMA_D0_X_MODIFY(dmod);
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_START_ADDR(saddr);
	bfin_write_MDMA_S0_X_COUNT(cnt);
	bfin_write_MDMA_S0_X_MODIFY(smod);
	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);

	spin_unlock_irqrestore(&mdma_lock, flags);

	SSYNC();

	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		if (bfin_read_MDMA_S0_CONFIG())
			continue;
		else
			return;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
}

/**
 *	_dma_memcpy - translate C memcpy settings into MDMA settings
 *
 * Handle all the high level steps before we touch the MDMA registers.  So
 * handle direction, tweaking of sizes, and formatting of addresses.
 */
static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	u32 conf, shift;
	s16 mod;
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;

	if (size == 0)
		return NULL;

	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
		conf = WDSIZE_32;
		shift = 2;
	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
		conf = WDSIZE_16;
		shift = 1;
	} else {
		conf = WDSIZE_8;
		shift = 0;
	}

	/* If the two memory regions have a chance of overlapping, make
	 * sure the memcpy still works as expected.  Do this by having the
	 * copy run backwards instead.
	 */
	mod = 1 << shift;
	if (src < dst) {
		mod *= -1;
		dst += size + mod;
		src += size + mod;
	}
	size >>= shift;

	if (size > 0x10000)
		conf |= DMA2D;

	__dma_memcpy(dst, mod, src, mod, size, conf);

	return pdst;
}

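/* Worked example of the backwards-copy path above (addresses assumed for
 * illustration): copying 8 bytes from 0x1000 to 0x1002 overlaps and has
 * src < dst, so the 16-bit path picks mod == -2, dst becomes
 * 0x1002 + 8 - 2 == 0x1008, and src becomes 0x1000 + 8 - 2 == 0x1006.
 * The MDMA engine then copies 4 half-words while walking down through
 * memory, so each half-word is read before it can be overwritten.
 */
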
/**
 *	dma_memcpy - DMA version of memcpy()
 *
 * Do not check arguments before starting the DMA memcpy.  Break the transfer
 * up into two pieces.  The first transfer is in multiples of 64k and the
 * second transfer is the piece smaller than 64k.
 */
void *dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	size_t bulk, rest;

	if (bfin_addr_dcacheable(src))
		blackfin_dcache_flush_range(src, src + size);

	if (bfin_addr_dcacheable(dst))
		blackfin_dcache_invalidate_range(dst, dst + size);

	bulk = size & ~0xffff;
	rest = size - bulk;
	if (bulk)
		_dma_memcpy(pdst, psrc, bulk);
	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
	return pdst;
}
EXPORT_SYMBOL(dma_memcpy);

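/* Worked example of the bulk/rest split above (size assumed for
 * illustration): for size == 0x12345, bulk == 0x10000 and rest == 0x2345,
 * so the first _dma_memcpy() call moves the 64k-multiple piece and the
 * second moves the remaining 0x2345 bytes.
 */
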
/**
 *	safe_dma_memcpy - DMA memcpy w/argument checking
 *
 * Verify arguments are safe before heading to dma_memcpy().
 */
void *safe_dma_memcpy(void *dst, const void *src, size_t size)
{
	if (!access_ok(VERIFY_WRITE, dst, size))
		return NULL;
	if (!access_ok(VERIFY_READ, src, size))
		return NULL;
	return dma_memcpy(dst, src, size);
}
EXPORT_SYMBOL(safe_dma_memcpy);

static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
		     u16 size, u16 dma_size)
{
	blackfin_dcache_flush_range(buf, buf + len * size);
	__dma_memcpy(addr, 0, buf, size, len, dma_size);
}

static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
		    u16 size, u16 dma_size)
{
	blackfin_dcache_invalidate_range(buf, buf + len * size);
	__dma_memcpy(buf, size, addr, 0, len, dma_size);
}

#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
{ \
	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
} \
EXPORT_SYMBOL(dma_##io##s##bwl)

MAKE_DMA_IO(out, b, 1,  8, const);
MAKE_DMA_IO(in,  b, 1,  8, );
MAKE_DMA_IO(out, w, 2, 16, const);
MAKE_DMA_IO(in,  w, 2, 16, );
MAKE_DMA_IO(out, l, 4, 32, const);
MAKE_DMA_IO(in,  l, 4, 32, );
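
/* The MAKE_DMA_IO() expansions above generate the string-I/O entry points
 * dma_outsb/dma_insb (8-bit), dma_outsw/dma_insw (16-bit), and
 * dma_outsl/dma_insl (32-bit).  Each copies "len" elements between a fixed
 * peripheral address and an incrementing memory buffer, e.g.:
 *
 *	dma_outsw(uart_tx_fifo_addr, tx_buf, n);	// n 16-bit writes
 *
 * where "uart_tx_fifo_addr", "tx_buf", and "n" are illustrative names, not
 * symbols defined in this file.
 */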