/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		/*
		 * FIXME: we will use UC MINUS for now, as video fb drivers
		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
		 */
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
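	/*
	 * Worked example of the alignment above (the physical address is
	 * made up for illustration): for phys_addr 0xfebc1004 and size 8,
	 * last_addr is 0xfebc100b, so offset becomes 0x4, phys_addr is
	 * rounded down to 0xfebc1000 and size is rounded up to 0x1000;
	 * the caller eventually gets back vaddr + 0x4.
	 */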

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
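
/*
 * Usage sketch (illustrative only; the PCI device, BAR number and
 * register offset below are assumptions, not taken from this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		poke a device register
 *	iounmap(regs);
 */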

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
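
/*
 * Note on the PCI/ISA special case (illustrative example, not from the
 * original source): ioremap_nocache(0xb8000, 0x1000) falls entirely
 * inside the always-mapped ISA window, so __ioremap() just returns
 * phys_to_virt(0xb8000) without creating a vm_area, and a later
 * iounmap() of that pointer returns early without touching the vmlist.
 */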

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
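
/*
 * Boot-time usage sketch for early_ioremap()/early_iounmap() above
 * (illustrative only; the physical address and length of the firmware
 * table are made up):
 *
 *	void *map;
 *
 *	map = early_ioremap(0x000f5a10, 64);
 *	if (map) {
 *		parse the firmware table here
 *		early_iounmap(map, 64);
 *	}
 */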

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */