// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

/*
 * Callback for walk_mem_res(); returning non-zero stops the resource walk
 * early, once both flags have been collected.
 */
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using a smaller page toward 4KB
 * when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set decrypt
	 * attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

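/*
 * Illustrative sketch (not part of this file): how a typical PCI driver
 * is expected to pair ioremap()/iounmap(). The BAR index, REG_CTRL and
 * CTRL_ENABLE below are hypothetical placeholder names.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(CTRL_ENABLE, regs + REG_CTRL);
 *	...
 *	iounmap(regs);
 */
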
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

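/*
 * Illustrative sketch (hypothetical framebuffer driver): write combining
 * suits large, write-mostly apertures such as framebuffers, where batched
 * writes are much faster than uncached ones.
 *
 *	info->screen_base = ioremap_wc(pci_resource_start(pdev, 1),
 *				       pci_resource_len(pdev, 1));
 *	if (!info->screen_base)
 *		return -ENOMEM;
 */
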
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

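/*
 * Illustrative sketch: ioremap_prot() takes a raw pte protection value, so
 * a caller would typically derive prot_val from a pgprot_t. This example
 * (an uncached mapping built from PAGE_KERNEL) is hypothetical, not taken
 * from an in-tree user.
 *
 *	void __iomem *p = ioremap_prot(phys, size,
 *			pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 */
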
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
		(unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

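/*
 * Illustrative sketch of how the /dev/mem read path (drivers/char/mem.c)
 * pairs these helpers, simplified here:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
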
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;

	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

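/*
 * Illustrative sketch, assuming SME is active: early boot code that must
 * read data the firmware left unencrypted (e.g. setup_data) maps it with
 * the decrypted helpers, mirroring early_memremap_is_setup_data() above:
 *
 *	struct setup_data *data;
 *
 *	data = early_memremap_decrypted(paddr, sizeof(*data));
 *	if (!data)
 *		return false;
 *	...
 *	early_memunmap(data, sizeof(*data));
 */
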
/* PTE page backing the boot-time (early) fixmap slots */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}
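
/*
 * Illustrative sketch: the fixmap plumbing above backs the generic
 * early_memremap()/early_memunmap() machinery, which boot code uses
 * before the normal ioremap() path is available, e.g.:
 *
 *	void __init *tbl = early_memremap(phys, len);
 *	if (!tbl)
 *		return;
 *	...parse the firmware table at tbl...
 *	early_memunmap(tbl, len);
 */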