/*
 * arch/mips/mm/highmem.c
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}

void __kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
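
/*
 * Usage sketch (illustrative, not part of this file): a sleepable
 * caller copies out of a possibly-highmem page via kmap()/kunmap(),
 * which resolve to __kmap()/__kunmap() here. copy_from_page() is a
 * hypothetical helper.
 *
 *      static void copy_from_page(struct page *page, void *dst, size_t len)
 *      {
 *              void *src = kmap(page);
 *
 *              memcpy(dst, src, len);
 *              kunmap(page);
 *      }
 */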

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap:
 * no global lock is needed, and unlike kmap_atomic the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
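
/*
 * Usage sketch (illustrative, not part of this file): zeroing a
 * possibly-highmem page from a non-sleeping context through the
 * atomic interface. zero_highpage_atomic() is a hypothetical helper;
 * KM_USER0 is one of the per-CPU kmap slots from enum km_type.
 *
 *      static void zero_highpage_atomic(struct page *page)
 *      {
 *              void *vaddr = kmap_atomic(page, KM_USER0);
 *
 *              memset(vaddr, 0, PAGE_SIZE);
 *              kunmap_atomic(vaddr, KM_USER0);
 *      }
 */
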
void *__kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic() in do_page_fault() */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
        local_flush_tlb_one(vaddr);

        return (void *)vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

        if (vaddr < FIXADDR_START) { /* FIXME */
                pagefault_enable();
                return;
        }

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte - idx);
        local_flush_tlb_one(vaddr);
#endif

        pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
        flush_tlb_one(vaddr);

        return (void *)vaddr;
}
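
/*
 * Usage sketch (illustrative, not part of this file): reading a word
 * from a physical address that has no struct page behind it, e.g. a
 * reserved firmware region. read_phys_word() and 'phys' are
 * hypothetical; the address is assumed safe to read as normal memory.
 *
 *      static u32 read_phys_word(unsigned long phys)
 *      {
 *              void *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT, KM_USER0);
 *              u32 val = *(u32 *)(vaddr + (phys & ~PAGE_MASK));
 *
 *              kunmap_atomic(vaddr, KM_USER0);
 *              return val;
 *      }
 */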

struct page *__kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
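
/*
 * Usage sketch (illustrative, not part of this file): recovering the
 * struct page behind a live atomic mapping, e.g. to set per-page
 * state without carrying the page pointer around. 'vaddr' is assumed
 * to come from an earlier kmap_atomic(), and kmap_atomic_to_page() is
 * assumed to resolve to __kmap_atomic_to_page() on this architecture.
 *
 *      struct page *page = kmap_atomic_to_page(vaddr);
 *      SetPageDirty(page);
 */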

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);