arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640K-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640K->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
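
/*
 * Illustrative driver-side use (a sketch, not part of this file;
 * pdev is a struct pci_dev and CTRL_REG is a made-up register offset):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);	// uncached MMIO write
 *	iounmap(regs);
 */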

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
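
/*
 * Illustrative use (a sketch): write-combined mappings suit large,
 * write-mostly apertures such as framebuffers. fb_phys and fb_len are
 * assumed to come from the device's PCI BAR:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Note the fallback above: without PAT support the mapping silently
 * degrades to uncached rather than failing.
 */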

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "Please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
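
/*
 * Illustrative boot-time use (a sketch, not part of this file):
 * peek at a firmware table before the normal ioremap machinery is
 * available. peek_firmware_table and tbl_phys are made up here.
 *
 *	void __init peek_firmware_table(unsigned long tbl_phys)
 *	{
 *		u32 *tbl = early_ioremap(tbl_phys, sizeof(u32));
 *
 *		if (tbl) {
 *			printk(KERN_INFO "signature: %08x\n", *tbl);
 *			early_iounmap(tbl, sizeof(u32));
 *		}
 *	}
 */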

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */