/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**      (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
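
/*
** Overview of the coherent-allocation path implemented below
** (a summary of the PCXL case):
**
**   pa11_dma_alloc_consistent()
**      -> pcxl_alloc_range()           reserve pages in the resource bitmap
**      -> __get_free_pages()           allocate the backing memory
**      -> flush_kernel_dcache_range()  write dirty cache lines back
**      -> map_uncached_pages()         remap the memory uncached inside
**                                      the pcxl_dma_start window
*/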

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>   /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>       /* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t   pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
        u_long *res_ptr = (unsigned long *)pcxl_res_map;
        u_long i = 0;

        printk("res_map: ");
        for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
                printk("%08lx ", *res_ptr);

        printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

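/*
** map_uncached_pages() and its helpers below walk the kernel page
** tables (pgd -> pmd -> pte), install PAGE_KERNEL_UNC ptes covering
** the physical range that starts at *paddr_ptr, and purge the kernel
** TLB entry for each virtual page as they go.
*/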
static inline int map_pte_uncached(pte_t * pte,
                unsigned long vaddr,
                unsigned long size, unsigned long *paddr_ptr)
{
        unsigned long end;
        unsigned long orig_vaddr = vaddr;

        vaddr &= ~PMD_MASK;
        end = vaddr + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (!pte_none(*pte))
                        printk(KERN_ERR "map_pte_uncached: page already exists\n");
                set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
                purge_tlb_start();
                pdtlb_kernel(orig_vaddr);
                purge_tlb_end();
                vaddr += PAGE_SIZE;
                orig_vaddr += PAGE_SIZE;
                (*paddr_ptr) += PAGE_SIZE;
                pte++;
        } while (vaddr < end);
        return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
                unsigned long size, unsigned long *paddr_ptr)
{
        unsigned long end;
        unsigned long orig_vaddr = vaddr;

        vaddr &= ~PGDIR_MASK;
        end = vaddr + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, vaddr);
                if (!pte)
                        return -ENOMEM;
                if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
                        return -ENOMEM;
                vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
                orig_vaddr += PMD_SIZE;
                pmd++;
        } while (vaddr < end);
        return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
                unsigned long paddr)
{
        pgd_t * dir;
        unsigned long end = vaddr + size;

        dir = pgd_offset_k(vaddr);
        do {
                pmd_t *pmd;

                pmd = pmd_alloc(NULL, dir, vaddr);
                if (!pmd)
                        return -ENOMEM;
                if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
                        return -ENOMEM;
                vaddr = vaddr + PGDIR_SIZE;
                dir++;
        } while (vaddr && (vaddr < end));
        return 0;
}

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
                unsigned long size)
{
        pte_t * pte;
        unsigned long end;
        unsigned long orig_vaddr = vaddr;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset_map(pmd, vaddr);
        vaddr &= ~PMD_MASK;
        end = vaddr + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page = *pte;
                pte_clear(&init_mm, vaddr, pte);
                purge_tlb_start();
                pdtlb_kernel(orig_vaddr);
                purge_tlb_end();
                vaddr += PAGE_SIZE;
                orig_vaddr += PAGE_SIZE;
                pte++;
                if (pte_none(page) || pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
                unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;
        unsigned long orig_vaddr = vaddr;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, vaddr);
        vaddr &= ~PGDIR_MASK;
        end = vaddr + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
                vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
                orig_vaddr += PMD_SIZE;
                pmd++;
        } while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = vaddr + size;

        dir = pgd_offset_k(vaddr);
        do {
                unmap_uncached_pmd(dir, vaddr, end - vaddr);
                vaddr = vaddr + PGDIR_SIZE;
                dir++;
        } while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
                       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
                       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}
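
/*
** Worked example of the search above: a request for three pages gives
** mask = 0x7 (pages_needed low bits set).  The map is then scanned in
** u8 chunks starting at the rounded-down allocation hint; a chunk
** whose masked bits are all clear (e.g. 0xf0 & 0x07 == 0) is claimed
** by OR-ing the mask in (0xf0 -> 0xf7).  idx becomes that chunk's byte
** offset in pcxl_res_map, and the hint advances by the chunk width in
** bytes so the next search starts past the freshly used region.
*/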

unsigned long
pcxl_alloc_range(size_t size)
{
        int res_idx;
        u_long mask, flags;
        unsigned int pages_needed = size >> PAGE_SHIFT;

        mask = (u_long) -1L;
        mask >>= BITS_PER_LONG - pages_needed;

        DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
                size, pages_needed, mask);

        spin_lock_irqsave(&pcxl_res_lock, flags);

        if(pages_needed <= 8) {
                PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
        } else if(pages_needed <= 16) {
                PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
        } else if(pages_needed <= 32) {
                PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
        } else {
                panic("%s: pcxl_alloc_range() Too many pages to map.\n",
                      __FILE__);
        }

        dump_resmap();
        panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
              __FILE__);

resource_found:

        DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
                res_idx, mask, pcxl_res_hint);

        pcxl_used_pages += pages_needed;
        pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

        spin_unlock_irqrestore(&pcxl_res_lock, flags);

        dump_resmap();

        /*
        ** return the corresponding vaddr in the pcxl dma map
        */
        return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
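
/*
** vaddr math: each byte of pcxl_res_map tracks 8 pages, so byte index
** res_idx corresponds to res_idx * 8 * PAGE_SIZE bytes of DMA space,
** hence the shift by (PAGE_SHIFT + 3) above.  With 4k pages,
** res_idx == 2 maps to pcxl_dma_start + 0x10000.
*/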

#define PCXL_FREE_MAPPINGS(idx, m, size) \
                u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
                /* BUG_ON((*res_ptr & m) != m); */ \
                *res_ptr &= ~m;
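/*
** Note that the offset term (((size >> 3) - 1) & ~((size >> 3) - 1))
** above is always zero (x & ~x == 0), so the pointer reduces to
** &pcxl_res_map[idx].
*/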

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
        u_long mask, flags;
        unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
        unsigned int pages_mapped = size >> PAGE_SHIFT;

        mask = (u_long) -1L;
        mask >>= BITS_PER_LONG - pages_mapped;

        DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
                res_idx, size, pages_mapped, mask);

        spin_lock_irqsave(&pcxl_res_lock, flags);

        if(pages_mapped <= 8) {
                PCXL_FREE_MAPPINGS(res_idx, mask, 8);
        } else if(pages_mapped <= 16) {
                PCXL_FREE_MAPPINGS(res_idx, mask, 16);
        } else if(pages_mapped <= 32) {
                PCXL_FREE_MAPPINGS(res_idx, mask, 32);
        } else {
                panic("%s: pcxl_free_range() Too many pages to unmap.\n",
                      __FILE__);
        }

        pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
        pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

        spin_unlock_irqrestore(&pcxl_res_lock, flags);

        dump_resmap();
}

static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
        u_long i = 0;
        unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
        unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

        seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
                PCXL_DMA_MAP_SIZE, total_pages);

        seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

        seq_puts(m, "            total:    free:    used:   % used:\n");
        seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
                pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
                (pcxl_used_bytes * 100) / pcxl_res_size);

        seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
                total_pages - pcxl_used_pages, pcxl_used_pages,
                (pcxl_used_pages * 100 / total_pages));

#if 0
        seq_puts(m, "\nResource bitmap:");

        for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
                if ((i & 7) == 0)
                    seq_puts(m,"\n   ");
                seq_printf(m, " %08lx", *res_ptr);
        }
#endif
        seq_putc(m, '\n');
        return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
        .owner          = THIS_MODULE,
        .open           = proc_pcxl_dma_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init
pcxl_dma_init(void)
{
        if (pcxl_dma_start == 0)
                return 0;

        spin_lock_init(&pcxl_res_lock);
        pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
        pcxl_res_hint = 0;
        pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
                                            get_order(pcxl_res_size));
        memset(pcxl_res_map, 0, pcxl_res_size);
        proc_gsc_root = proc_mkdir("gsc", NULL);
        if (!proc_gsc_root)
                printk(KERN_WARNING
                        "pcxl_dma_init: Unable to create gsc /proc dir entry\n");
        else {
                struct proc_dir_entry* ent;
                ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root);
                if (ent)
                        ent->proc_fops = &proc_pcxl_dma_ops;
                else
                        printk(KERN_WARNING
                                "pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
        }
        return 0;
}

__initcall(pcxl_dma_init);

static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
        unsigned long vaddr;
        unsigned long paddr;
        int order;

        order = get_order(size);
        size = 1 << (order + PAGE_SHIFT);
        vaddr = pcxl_alloc_range(size);
        paddr = __get_free_pages(flag, order);
        flush_kernel_dcache_range(paddr, size);
        paddr = __pa(paddr);
        map_uncached_pages(vaddr, size, paddr);
        *dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
        if (!dev || dev->coherent_dma_mask < 0xffffffff)
                flag |= GFP_DMA;
#endif
        return (void *)vaddr;
}

static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
        int order;

        order = get_order(size);
        size = 1 << (order + PAGE_SHIFT);
        unmap_uncached_pages((unsigned long)vaddr, size);
        pcxl_free_range((unsigned long)vaddr, size);
        free_pages((unsigned long)__va(dma_handle), order);
}
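
/*
** On PA1.1 the ops tables below back the generic DMA API, so a driver
** would normally reach the pair above through the usual wrappers
** (hypothetical usage sketch, not code from this file):
**
**      dma_addr_t handle;
**      void *cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
**      if (!cpu_addr)
**              return -ENOMEM;
**      ...
**      dma_free_coherent(dev, size, cpu_addr, handle);
*/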

static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE) {
                printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
                BUG();
        }

        flush_kernel_dcache_range((unsigned long) addr, size);
        return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE) {
                printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
                BUG();
        }

        if (direction == DMA_TO_DEVICE)
            return;

        /*
         * For PCI_DMA_FROMDEVICE this flush is not necessary for the
         * simple map/unmap case. However, it IS necessary if
         * pci_dma_sync_single_* has been called and the buffer reused.
         */

        flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
        return;
}
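
/*
** Streaming-mapping lifecycle that map_single/unmap_single implement
** (hypothetical driver sketch via the generic wrappers):
**
**      dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
**      ... device DMAs into buf ...
**      dma_sync_single_for_cpu(dev, h, len, DMA_FROM_DEVICE);
**      ... CPU reads buf; device may DMA again ...
**      dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
**
** The sync-then-reuse step is why the flush above is kept on the
** DMA_FROM_DEVICE unmap path.
*/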

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
        int i;

        if (direction == DMA_NONE)
            BUG();

        for (i = 0; i < nents; i++, sglist++ ) {
                unsigned long vaddr = sg_virt_addr(sglist);
                sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
                sg_dma_len(sglist) = sglist->length;
                flush_kernel_dcache_range(vaddr, sglist->length);
        }
        return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
        int i;

        if (direction == DMA_NONE)
            BUG();

        if (direction == DMA_TO_DEVICE)
            return;

        /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

        for (i = 0; i < nents; i++, sglist++ )
                flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
        return;
}

static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
            BUG();

        flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
            BUG();

        flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
        int i;

        /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

        for (i = 0; i < nents; i++, sglist++ )
                flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
        int i;

        /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

        for (i = 0; i < nents; i++, sglist++ )
                flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

struct hppa_dma_ops pcxl_dma_ops = {
        .dma_supported =        pa11_dma_supported,
        .alloc_consistent =     pa11_dma_alloc_consistent,
        .alloc_noncoherent =    pa11_dma_alloc_consistent,
        .free_consistent =      pa11_dma_free_consistent,
        .map_single =           pa11_dma_map_single,
        .unmap_single =         pa11_dma_unmap_single,
        .map_sg =               pa11_dma_map_sg,
        .unmap_sg =             pa11_dma_unmap_sg,
        .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
        .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
        .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
        .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};

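/*
** PCX (non-PCXL) machines cannot remap memory uncached, so coherent
** allocations are impossible there: alloc_consistent fails and
** drivers must fall back on alloc_noncoherent plus explicit
** dma_sync_* calls around each device access.
*/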
static void *fail_alloc_consistent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flag)
{
        return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag)
{
        void *addr;

        addr = (void *)__get_free_pages(flag, get_order(size));
        if (addr)
                *dma_handle = (dma_addr_t)virt_to_phys(addr);

        return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t iova)
{
        free_pages((unsigned long)vaddr, get_order(size));
        return;
}

struct hppa_dma_ops pcx_dma_ops = {
        .dma_supported =        pa11_dma_supported,
        .alloc_consistent =     fail_alloc_consistent,
        .alloc_noncoherent =    pa11_dma_alloc_noncoherent,
        .free_consistent =      pa11_dma_free_noncoherent,
        .map_single =           pa11_dma_map_single,
        .unmap_single =         pa11_dma_unmap_single,
        .map_sg =               pa11_dma_map_sg,
        .unmap_sg =             pa11_dma_unmap_sg,
        .dma_sync_single_for_cpu =      pa11_dma_sync_single_for_cpu,
        .dma_sync_single_for_device =   pa11_dma_sync_single_for_device,
        .dma_sync_sg_for_cpu =          pa11_dma_sync_sg_for_cpu,
        .dma_sync_sg_for_device =       pa11_dma_sync_sg_for_device,
};