Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart

diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 82f4d5e27c5492f98dac798f807194eb6800fc03..8a1bfcd500871dde0eb02b72962ef06060ae6b88 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
 #include <linux/dma-mapping.h>
 
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
 
-#define CONSISTENT_BASE        (0xffc00000)
 #define CONSISTENT_END (0xffe00000)
+#define CONSISTENT_BASE        (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 
 /*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
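
The hunk above replaces the single fixed-base pte table with an array indexed by CONSISTENT_PTE_INDEX(). Below is a minimal user-space sketch of that arithmetic (not part of the patch), assuming the classic ARM values of the day (PAGE_SHIFT 12, PGDIR_SHIFT 21, PTRS_PER_PTE 512) and a hypothetical 8MiB CONSISTENT_DMA_SIZE:

/*
 * Stand-alone model of the CONSISTENT_PTE_INDEX/CONSISTENT_OFFSET split:
 * which 2MiB pte table an address falls in, and which slot within it.
 * All sizes here are illustrative assumptions, not values from the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT          12
#define PGDIR_SHIFT         21
#define PTRS_PER_PTE        512

#define SZ_2M               0x00200000UL
#define CONSISTENT_DMA_SIZE (4 * SZ_2M)		/* hypothetical: 8MiB */
#define CONSISTENT_END      0xffe00000UL
#define CONSISTENT_BASE     (CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES     (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

int main(void)
{
	unsigned long addr = CONSISTENT_BASE + 5 * SZ_2M / 2;	/* 5MiB in */

	/* Which of the NUM_CONSISTENT_PTES tables, and which slot in it. */
	unsigned int idx = CONSISTENT_PTE_INDEX(addr);
	unsigned int off = CONSISTENT_OFFSET(addr) & (PTRS_PER_PTE - 1);

	printf("tables=%lu idx=%u off=%u\n",
	       (unsigned long)NUM_CONSISTENT_PTES, idx, off);
	return 0;
}
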
@@ -66,6 +75,7 @@ struct vm_region {
        unsigned long           vm_start;
        unsigned long           vm_end;
        struct page             *vm_pages;
+       int                     vm_active;
 };
 
 static struct vm_region consistent_head = {
@@ -104,6 +114,7 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
+       new->vm_active = 1;
 
        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;
@@ -120,7 +131,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
        struct vm_region *c;
        
        list_for_each_entry(c, &head->vm_list, vm_list) {
-               if (c->vm_start == addr)
+               if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
@@ -141,7 +152,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;
 
-       if (!consistent_pte) {
+       if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
@@ -204,11 +215,16 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
-               pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+               pte_t *pte;
                struct page *end = page + (1 << order);
+               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
+               pte = consistent_pte[idx] + off;
                c->vm_pages = page;
 
+               split_page(page, order);
+
                /*
                 * Set the "dma handle"
                 */
@@ -217,7 +233,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                do {
                        BUG_ON(!pte_none(*pte));
 
-                       set_page_count(page, 1);
                        /*
                         * x86 does not mark the pages reserved...
                         */
@@ -225,13 +240,17 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                        set_pte(pte, mk_pte(page, prot));
                        page++;
                        pte++;
+                       off++;
+                       if (off >= PTRS_PER_PTE) {
+                               off = 0;
+                               pte = consistent_pte[++idx];
+                       }
                } while (size -= PAGE_SIZE);
 
                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
-                       set_page_count(page, 1);
                        __free_page(page);
                        page++;
                }
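
For reference, here is a small stand-alone model of the walk the allocation loop now performs: advance one pte slot per page and hop to the next 2MiB pte table when the current one overflows. The table count, starting slot and pfn values are invented for the demonstration; the real loop calls set_pte(pte, mk_pte(page, prot)).

/*
 * User-space sketch of the pte-table-crossing walk in __dma_alloc().
 * Writing a counter into the array stands in for set_pte().
 */
#include <stdio.h>

#define PTRS_PER_PTE        512
#define NUM_CONSISTENT_PTES 4		/* hypothetical 8MiB region */

static unsigned long consistent_pte[NUM_CONSISTENT_PTES][PTRS_PER_PTE];

static void map_pages(unsigned int idx, unsigned int off, unsigned int npages)
{
	unsigned long pfn = 0x1000;	/* hypothetical first page frame */

	while (npages--) {
		consistent_pte[idx][off] = pfn++;	/* stands in for set_pte() */
		if (++off >= PTRS_PER_PTE) {
			off = 0;
			idx++;		/* crossed into the next pte table */
		}
	}
}

int main(void)
{
	/* Map 600 pages starting 200 slots into table 1: spills into table 2. */
	map_pages(1, 200, 600);
	printf("last entry of table 1 = %#lx, entry 287 of table 2 = %#lx\n",
	       consistent_pte[1][PTRS_PER_PTE - 1], consistent_pte[2][287]);
	return 0;
}
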
@@ -319,21 +338,28 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 
 /*
  * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;
+       int idx;
+       u32 off;
+
+       WARN_ON(irqs_disabled());
 
        size = PAGE_ALIGN(size);
 
        spin_lock_irqsave(&consistent_lock, flags);
-
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;
 
+       c->vm_active = 0;
+       spin_unlock_irqrestore(&consistent_lock, flags);
+
        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
@@ -341,7 +367,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
                size = c->vm_end - c->vm_start;
        }
 
-       ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+       idx = CONSISTENT_PTE_INDEX(c->vm_start);
+       off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+       ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
@@ -349,6 +377,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
                ptep++;
                addr += PAGE_SIZE;
+               off++;
+               if (off >= PTRS_PER_PTE) {
+                       off = 0;
+                       ptep = consistent_pte[++idx];
+               }
 
                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);
@@ -372,8 +405,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
        flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
+       spin_lock_irqsave(&consistent_lock, flags);
        list_del(&c->vm_list);
-
        spin_unlock_irqrestore(&consistent_lock, flags);
 
        kfree(c);
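
The free path above now marks the region inactive under consistent_lock, drops the lock for the slow teardown (hence the WARN_ON(irqs_disabled()) and the late list_del). A rough user-space sketch of that pattern follows, using a pthread mutex in place of the spinlock; struct region and region_free() are invented names for illustration only, not kernel interfaces.

/*
 * Mark-inactive, unlock, tear down, relock, unlink: a concurrent free of
 * the same address cannot find the region once vm_active is cleared.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	struct region *next;
	unsigned long start, end;
	int active;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct region *head;

static struct region *find_active(unsigned long start)
{
	struct region *c;

	for (c = head; c; c = c->next)
		if (c->active && c->start == start)
			return c;
	return NULL;
}

static int region_free(unsigned long start)
{
	struct region *c, **p;

	pthread_mutex_lock(&lock);
	c = find_active(start);
	if (!c) {
		pthread_mutex_unlock(&lock);
		return -1;
	}
	c->active = 0;			/* hide it from concurrent frees */
	pthread_mutex_unlock(&lock);

	/* ... slow teardown (clearing ptes, flushing the TLB) goes here ... */

	pthread_mutex_lock(&lock);
	for (p = &head; *p; p = &(*p)->next)
		if (*p == c) {
			*p = c->next;	/* unlink */
			break;
		}
	pthread_mutex_unlock(&lock);

	free(c);
	return 0;
}

int main(void)
{
	struct region *r = calloc(1, sizeof(*r));

	r->start = 0xffc00000UL;
	r->end = r->start + 4096;
	r->active = 1;
	head = r;

	int first = region_free(0xffc00000UL);
	int again = region_free(0xffc00000UL);	/* must fail cleanly */
	printf("first free: %d, repeat free: %d\n", first, again);
	return 0;
}
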
@@ -395,13 +428,12 @@ static int __init consistent_init(void)
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
-       int ret = 0;
-
-       spin_lock(&init_mm.page_table_lock);
+       int ret = 0, i = 0;
+       u32 base = CONSISTENT_BASE;
 
        do {
-               pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-               pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+               pgd = pgd_offset(&init_mm, base);
+               pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
@@ -409,17 +441,16 @@ static int __init consistent_init(void)
                }
                WARN_ON(!pmd_none(*pmd));
 
-               pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+               pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
 
-               consistent_pte = pte;
-       } while (0);
-
-       spin_unlock(&init_mm.page_table_lock);
+               consistent_pte[i++] = pte;
+               base += (1 << PGDIR_SHIFT);
+       } while (base < CONSISTENT_END);
 
        return ret;
 }
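
Finally, a tiny sketch of how many pte tables the rewritten consistent_init() loop allocates: one per 2MiB step from CONSISTENT_BASE up to CONSISTENT_END. The 8MiB CONSISTENT_DMA_SIZE is again a hypothetical value, not something fixed by the patch.

#include <stdio.h>

#define PGDIR_SHIFT         21
#define SZ_2M               0x00200000UL
#define CONSISTENT_DMA_SIZE (4 * SZ_2M)		/* hypothetical */
#define CONSISTENT_END      0xffe00000UL
#define CONSISTENT_BASE     (CONSISTENT_END - CONSISTENT_DMA_SIZE)

int main(void)
{
	unsigned long base = CONSISTENT_BASE;
	int tables = 0;

	do {
		tables++;			/* one pte_alloc_kernel() per pass */
		base += 1UL << PGDIR_SHIFT;
	} while (base < CONSISTENT_END);

	printf("%d pte tables for a %luMiB region\n",
	       tables, CONSISTENT_DMA_SIZE >> 20);
	return 0;
}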