Merge branches 'at91', 'cache', 'cup', 'ep93xx', 'ixp4xx', 'nuc', 'pending-dma-stream...
author Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 25 Feb 2010 22:06:43 +0000 (22:06 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 25 Feb 2010 22:06:43 +0000 (22:06 +0000)
1  2  3  4  5  6  7  8  9 
arch/arm/Kconfig
arch/arm/common/dmabounce.c
arch/arm/include/asm/cacheflush.h
arch/arm/mm/copypage-v6.c
arch/arm/mm/proc-xsc3.S

Simple merge
index cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0,bc90364a96c7bf364fdd10aa198a0d93ab2af2b3,734ac9135998d9efffb0a1b84c3a82416b31d325,cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0,cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0,cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0,51499d68b1613dcde03da01d22a42ba918ff2d88,cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0,cc32c1e54a59825dc93fcbba098fcdbd51d9c8f0..cc0a932bbea90780f659becc4ee7d93087ab61cf
@@@@@@@@@@ -308,13 -308,13 -308,17 -308,13 -308,13 -308,13 -308,15 -308,13 -308,13 +308,15 @@@@@@@@@@ static inline void unmap_single(struct 
                                memcpy(ptr, buf->safe, size);
         
                                /*
  -                              * DMA buffers must have the same cache properties
  -                              * as if they were really used for DMA - which means
  -                              * data must be written back to RAM.  Note that
  -                              * we don't use dmac_flush_range() here for the
  -                              * bidirectional case because we know the cache
  -                              * lines will be coherent with the data written.
  +                              * Since we may have written to a page cache page,
  +                              * we need to ensure that the data will be coherent
  +                              * with user mappings.
                                 */
 -    -                         __cpuc_flush_kernel_dcache_area(ptr, size);
  -                             dmac_clean_range(ptr, ptr + size);
  -                             outer_clean_range(__pa(ptr), __pa(ptr) + size);
 ++   +                         __cpuc_flush_dcache_area(ptr, size);
                        }
                        free_safe_buffer(dev->archdata.dmabounce, buf);
++++++ ++       } else {
++++++ ++               __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
                }
         }
         
index 730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f,3d2ef54c7cb99a44105212ba31ab9a8fbd0b732b,fd03fb63a33222ca6ff67a25469414371d68431d,730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f,730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f,730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f,e290885874127e2f097636e1323bc1078c1bd423,730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f,730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f..be8b4d79cf414690504a6e4d8669a8ed2b0a8e67
@@@@@@@@@@ -211,10 -211,10 -211,10 -211,10 -211,10 -211,10 -196,11 -211,10 -211,10 +196,11 @@@@@@@@@@ struct cpu_cache_fns 
         
                void (*coherent_kern_range)(unsigned long, unsigned long);
                void (*coherent_user_range)(unsigned long, unsigned long);
  -             void (*flush_kern_dcache_page)(void *);
  +             void (*flush_kern_dcache_area)(void *, size_t);
  +      
-- --- --       void (*dma_inv_range)(const void *, const void *);
-- --- --       void (*dma_clean_range)(const void *, const void *);
++++++ ++       void (*dma_map_area)(const void *, size_t, int);
++++++ ++       void (*dma_unmap_area)(const void *, size_t, int);
++ +++ ++
  -             void (*dma_inv_range)(const void *, const void *);
  -             void (*dma_clean_range)(const void *, const void *);
                void (*dma_flush_range)(const void *, const void *);
         };
         
@@@@@@@@@@ -355,26 -351,13 -355,16 -355,26 -355,26 -355,26 -341,26 -355,26 -355,26 +337,13 @@@@@@@@@@ vivt_flush_cache_page(struct vm_area_st
                }
         }
         
- -------static inline void
-  ------vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  -      flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- -------                        unsigned long uaddr, void *kaddr,
- -------                        unsigned long len, int write)
- -------{
- -------       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- -------               unsigned long addr = (unsigned long)kaddr;
- -------               __cpuc_coherent_kern_range(addr, addr + len);
- -------       }
- -------}
-  ------
  +      #ifndef CONFIG_CPU_CACHE_VIPT
  +      #define flush_cache_mm(mm) \
  +                     vivt_flush_cache_mm(mm)
  +      #define flush_cache_range(vma,start,end) \
  +                     vivt_flush_cache_range(vma,start,end)
  +      #define flush_cache_page(vma,addr,pfn) \
  +                     vivt_flush_cache_page(vma,addr,pfn)
-  ------#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-  ------               vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
         #else
         extern void flush_cache_mm(struct mm_struct *mm);
         extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
index 0fa1319273dead26070b178751828f025d661561,841f355319bfee86725ef6348d07d96b9b4f446e,334d5602770eb40d5349b0eadb6cafa7348fb283,0fa1319273dead26070b178751828f025d661561,0fa1319273dead26070b178751828f025d661561,0fa1319273dead26070b178751828f025d661561,841f355319bfee86725ef6348d07d96b9b4f446e,0fa1319273dead26070b178751828f025d661561,0fa1319273dead26070b178751828f025d661561..8bca4dea6dfa234bbcf0c343a70f87f751de3b66
@@@@@@@@@@ -41,14 -41,14 -41,6 -41,14 -41,14 -41,14 -41,14 -41,14 -41,14 +41,14 @@@@@@@@@@ static void v6_copy_user_highpage_nonal
                kfrom = kmap_atomic(from, KM_USER0);
                kto = kmap_atomic(to, KM_USER1);
                copy_page(kto, kfrom);
 -    -                 __cpuc_flush_dcache_page(kto);
  +      #ifdef CONFIG_HIGHMEM
  +             /*
  +              * kmap_atomic() doesn't set the page virtual address, and
  +              * kunmap_atomic() takes care of cache flushing already.
  +              */
  +             if (page_address(to) != NULL)
  +      #endif
 ++   +                 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
                kunmap_atomic(kto, KM_USER1);
                kunmap_atomic(kfrom, KM_USER0);
         }
index 96456f5487986f349513b154be2ed22de1bd9eab,96456f5487986f349513b154be2ed22de1bd9eab,2028f370288113507d2d5dcfa6e5599629270df5,96456f5487986f349513b154be2ed22de1bd9eab,96456f5487986f349513b154be2ed22de1bd9eab,96456f5487986f349513b154be2ed22de1bd9eab,046b3d88955e55a1578f463dde20968dae4ec3f0,96456f5487986f349513b154be2ed22de1bd9eab,8e4f6dca89976c8809933a96fe76600720d133bf..e5797f1c1db7d0dd4ccf06ff308655be1d2aa2a9
@@@@@@@@@@ -310,9 -310,9 -309,9 -310,9 -310,9 -310,9 -334,9 -310,9 -310,9 +334,9 @@@@@@@@@@ ENTRY(xsc3_cache_fns
                .long   xsc3_flush_user_cache_range
                .long   xsc3_coherent_kern_range
                .long   xsc3_coherent_user_range
  -             .long   xsc3_flush_kern_dcache_page
  -             .long   xsc3_dma_inv_range
  -             .long   xsc3_dma_clean_range
  +             .long   xsc3_flush_kern_dcache_area
-- --- --       .long   xsc3_dma_inv_range
-- --- --       .long   xsc3_dma_clean_range
++++++ ++       .long   xsc3_dma_map_area
++++++ ++       .long   xsc3_dma_unmap_area
                .long   xsc3_dma_flush_range
         
         ENTRY(cpu_xsc3_dcache_clean_area)