arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
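
/*
 * Note: a 64-bit kernel virtual address lives either in the direct
 * mapping (PAGE_OFFSET based) or in the kernel text mapping that starts
 * at __START_KERNEL_map; the latter must also account for phys_base when
 * the kernel was loaded at a non-default physical address.  Illustrative
 * (hypothetical) example: with phys_base == 0x200000,
 * __phys_addr(__START_KERNEL_map + 0x1000) returns 0x201000.
 */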

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory: it is a BIOS-owned
         * area, not kernel RAM, but it is generally not listed as such
         * in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
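
/*
 * Illustrative example: the pfn of the legacy VGA/BIOS hole at 0xa0000
 * falls inside the BIOS_BEGIN..BIOS_END check above, so
 * page_is_ram(0xa0000 >> PAGE_SHIFT) returns 0 even if a buggy E820
 * table reports that range as RAM.
 */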

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
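
/*
 * Minimal usage sketch (values hypothetical): after __ioremap() below
 * establishes an uncached mapping of nrpages pages at vaddr, it
 * effectively calls
 *
 *      ioremap_change_attr(vaddr, size, _PAGE_CACHE_UC);
 *
 * which lands in _set_memory_uc(); cpa then takes care of keeping the
 * kernel's direct-mapping aliases consistent.
 */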

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped.
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {
                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types for certain
                 * requested types:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * OK, go for it.
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}
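
/*
 * Worked example of the alignment handling above (hypothetical values):
 * a request for phys_addr == 0xfed00004 with size == 8 yields
 * offset == 4, a page-aligned phys_addr of 0xfed00000 and a mapped size
 * of one page; the caller gets back vaddr + 4, so the unaligned request
 * stays invisible to it, as promised in the NOTE above.
 */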

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
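
/*
 * Typical usage sketch from a driver; MY_MMIO_BASE, MY_MMIO_SIZE and the
 * register offset below are hypothetical:
 *
 *      void __iomem *regs = ioremap_nocache(MY_MMIO_BASE, MY_MMIO_SIZE);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);       (poke a hypothetical control register)
 *      ...
 *      iounmap(regs);
 */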

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
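
/*
 * Note the fallback above: without PAT, a write-combining request
 * silently degrades to an uncached mapping, which is always safe (if
 * slower).  A framebuffer driver, for example, can map its video memory
 * with ioremap_wc() and still work correctly on non-PAT hardware.
 */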

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
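
/*
 * Sketch of the intended calling pattern (this mirrors the /dev/mem read
 * path; error handling elided):
 *
 *      void *ptr = xlate_dev_mem_ptr(phys);
 *
 *      copy_to_user(buf, ptr, count);
 *      unxlate_dev_mem_ptr(phys, ptr);
 *
 * For non-RAM pages this transparently goes through a temporary one-page
 * ioremap(), which unxlate_dev_mem_ptr() tears down again.
 */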

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * OK, go for it.
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
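
/*
 * Boot-time usage sketch (the physical address is hypothetical): probing
 * a BIOS table before the normal ioremap machinery is available might
 * look like
 *
 *      void *p = early_ioremap(0xe0000, 0x100);
 *
 *      (inspect the table through p)
 *      early_iounmap(p, 0x100);
 *
 * Mappings come from the small FIX_BTMAP fixmap window, so at most
 * FIX_BTMAPS_NESTING mappings of up to NR_FIX_BTMAPS pages each can be
 * live at once.
 */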

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        /* nesting is unsigned; check the signed counter for underflow */
        WARN_ON(early_ioremap_nested < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */