/* arch/i386/mm/highmem.c */
#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
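
/*
 * Usage sketch (illustrative, not part of the original file): copying a
 * highmem page in process context. kmap() may sleep waiting for a free
 * slot in the kmap pool, so it must not be called from atomic context.
 *
 *      char *vaddr = kmap(page);
 *      memcpy(buffer, vaddr, PAGE_SIZE);
 *      kunmap(page);
 *
 * "buffer" and "page" are hypothetical locals; any struct page works,
 * including lowmem pages, for which kmap() simply returns
 * page_address(page).
 */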

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because kmap_atomic avoids the
 * global TLB invalidation that the kmap code must perform when the kmap
 * pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte-idx)))
                BUG();
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
        __flush_tlb_one(vaddr);

        return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
                dec_preempt_count();
                preempt_check_resched();
                return;
        }

        if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
                BUG();

        /*
         * force other mappings to Oops if they try to access this pte
         * without first remapping it
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
        __flush_tlb_one(vaddr);
#endif

        dec_preempt_count();
        preempt_check_resched();
}
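
/*
 * Usage sketch (illustrative, not part of the original file): an atomic
 * copy out of a highmem page. The km_type must match between map and
 * unmap, and the code between the two calls must not sleep.
 *
 *      char *vaddr = kmap_atomic(page, KM_USER0);
 *      memcpy(buffer, vaddr, PAGE_SIZE);
 *      kunmap_atomic(vaddr, KM_USER0);
 *
 * "buffer" and "page" are hypothetical locals; KM_USER0 is one of the
 * per-CPU kmap slots from enum km_type.
 */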

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        inc_preempt_count();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        __flush_tlb_one(vaddr);

        return (void*) vaddr;
}
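
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * raw page frame number, e.g. a frame described by firmware rather than
 * by the page allocator, where no struct page exists.
 *
 *      void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *      ... read or write through vaddr ...
 *      kunmap_atomic(vaddr, KM_USER0);
 *
 * "pfn" is a hypothetical frame number; the same type-pairing and
 * no-sleep rules as kmap_atomic() apply.
 */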

/*
 * Convert the virtual address of an atomic kmap back to its struct page.
 * Lowmem addresses are handled with an ordinary virt_to_page().
 */
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
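
/*
 * Usage sketch (illustrative, not part of the original file): recovering
 * the struct page behind an atomic mapping, e.g. in generic code that is
 * only handed the virtual address.
 *
 *      void *vaddr = kmap_atomic(page, KM_USER0);
 *      BUG_ON(kmap_atomic_to_page(vaddr) != page);
 *      kunmap_atomic(vaddr, KM_USER0);
 */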

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);