// SPDX-License-Identifier: GPL-2.0-only
/*
 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 *
 * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and
 * the kernel to set one of a handful of 'caching type' attributes for physical
 * memory ranges: uncached, write-combining, write-through, write-protected,
 * and the most commonly used and default attribute: write-back caching.
 *
 * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
 * a hardware interface to enumerate a limited number of physical memory ranges
 * and set their caching attributes explicitly, programmed into the CPU via MSRs.
 * Even modern CPUs have MTRRs enabled - but these are typically not touched
 * by the kernel or by user-space (such as the X server); we rely on PAT for any
 * additional cache attribute logic.
 *
 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
 * cache attribute information to the mapped memory range: there are 3 bits used
 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the
 * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
 *
 * ( There's a metric ton of finer details, such as compatibility with CPU quirks
 *   that only support 4 types of PAT entries, and interaction with MTRRs, see
 *   below for details. )
 */
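/*
 * Worked example, for illustration only (the Intel SDM is the authoritative
 * reference): the PAT entry index used for a 4K mapping is built from the
 * three page table bits as
 *
 *	index = (PAT << 2) | (PCD << 1) | PWT;
 *
 * where PAT, PCD and PWT are the 0/1 values of the respective bits, and
 * byte 'index' of MSR_IA32_CR_PAT then supplies the effective caching type.
 * With all three bits clear the CPU uses entry 0, which pat_init() below
 * programs to write-back.
 */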
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"
#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;
/*
 * PAT support is enabled by default, but can be disabled for
 * various user-requested or hardware-forced reasons:
 */
void pat_disable(const char *msg_reason)
	WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
	pr_info("x86/PAT: %s\n", msg_reason);

static int __init nopat(char *str)
	pat_disable("PAT support disabled via boot option.");
early_param("nopat", nopat);

bool pat_enabled(void)
	return pat_initialized;
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
	pat_debug_enable = 1;
__setup("debugpat", pat_debug_setup);
#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track of
 * the memory type of pages that have a backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
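/*
 * For reference, the two-bit encoding implied by the macros above (a
 * summary only, no additional state is introduced here):
 *
 *	PG_uncached  PG_arch_1	memtype
 *	     0           0	_PAGE_CACHE_MODE_WB       (_PGMT_WB)
 *	     0           1	_PAGE_CACHE_MODE_WC       (_PGMT_WC)
 *	     1           0	_PAGE_CACHE_MODE_UC_MINUS (_PGMT_UC_MINUS)
 *	     1           1	_PAGE_CACHE_MODE_WT       (_PGMT_WT)
 */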
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)
static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);
	return cache;
}

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration. Using lower indices is preferred, so we start with the
 * highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
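/*
 * Example expansion, for illustration: PAT(1, WC) evaluates to
 * (u64)PAT_WC << 8 == 0x0000000000000100, i.e. it places the WC encoding
 * (1) into byte 1 of the value later written to MSR_IA32_CR_PAT.
 */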
static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by the CPU.");
		return;
	}
	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT support disabled by the firmware.");
		return;
	}
	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;
	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}
	wrmsrl(MSR_IA32_CR_PAT, pat);
}
void init_cache_modes(void)
{
	u64 pat = 0;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set the PAT table to be consistent with
		 * the PAT MSR. This case supports the "nopat" boot option, and
		 * virtual machine environments which support PAT without
		 * MTRRs. In particular, Xen has a unique setup of the PAT MSR.
		 *
		 * If the PAT MSR returns 0, it is considered invalid and we
		 * fall back to the no-PAT emulation below.
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}
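	/*
	 * For reference: with the PAT() macro above, the no-PAT table just
	 * built expands to pat == 0x0007040600070406, which matches the
	 * power-on / BIOS default value of the PAT MSR documented by Intel.
	 */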
	__init_cache_modes(pat);
}
/**
 * pat_init - Initialize the PAT MSR and PAT table on the current CPU
 *
 * This function initializes the PAT MSR and PAT table with an OS-defined value
 * to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides this
 * sequence of operations.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

#ifndef CONFIG_X86_PAT
	pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
#endif
	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we
		 * don't use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
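		/*
		 * For reference, this expands to pat == 0x0007010600070106:
		 * the four usable entries (WB, WC, UC-, UC) mirrored into
		 * the upper half.
		 */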
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
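		/*
		 * For reference, this expands to pat == 0x0407050600070106.
		 */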
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
/*
 * Does the intersection of the PAT memory type and the MTRR memory type and
 * returns the resulting memory type as PAT understands it.
 * (The type encodings in pat and mtrr do not have the same values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}
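/*
 * Example of the intersection above (illustrative): a WB request over a
 * range covered by a UC or WC MTRR comes back as UC-, while a WB request
 * over a WB MTRR stays WB. Non-WB requests are passed through unchanged.
 */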
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of the legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/..).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * The page flags are limited to four types, WB (default), WC, WT and UC-.
 * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting
 * a new memory type is only allowed for a page mapped with the default WB
 * type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
	if (req_type == _PAGE_CACHE_MODE_WP) {
		*new_type = _PAGE_CACHE_MODE_UC_MINUS;

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		req_type = _PAGE_CACHE_MODE_UC_MINUS;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);

		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
static int free_ram_pages_type(u64 start, u64 end)
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);

static u64 sanitize_phys(u64 address)
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function will return
 * the available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
	enum page_cache_mode actual_type;

	start = sanitize_phys(start);
	end = sanitize_phys(end);
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
		     start, end - 1, cattr_name(req_type));
	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		*new_type = req_type;

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		*new_type = _PAGE_CACHE_MODE_WB;

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
	} else if (is_range_ram < 0) {

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);

	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		spin_unlock(&memtype_lock);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
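/*
 * Typical usage, as a sketch (error handling trimmed; 'paddr' and 'size'
 * stand for whatever physical range the caller wants to map):
 *
 *	enum page_cache_mode new_type;
 *
 *	if (!reserve_memtype(paddr, paddr + size, _PAGE_CACHE_MODE_WC, &new_type)) {
 *		// map the range using new_type - it may have been downgraded,
 *		// e.g. to UC- - and drop the reservation again when done:
 *		free_memtype(paddr, paddr + size);
 *	}
 */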
int free_memtype(u64 start, u64 end)
	struct memtype *entry;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
	} else if (is_range_ram < 0) {

	spin_lock(&memtype_lock);
	entry = memtype_erase(start, end);
	spin_unlock(&memtype_lock);

		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);

	spin_lock(&memtype_lock);

	entry = memtype_lookup(paddr);
	if (entry)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);

	if (!is_new_memtype_allowed(start, size, req_type, new_type))

	if (kernel_map_sync_memtype(start, size, new_type) < 0)

	free_memtype(start, end);
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
	free_memtype(start, end);

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
	io_free_memtype(start, start + size);
EXPORT_SYMBOL(arch_io_free_memtype_wc);
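/*
 * Sketch of how a driver would typically use the two helpers above to get a
 * write-combining mapping of e.g. a framebuffer BAR (bar_start, bar_len and
 * fb_base are illustrative names, not taken from this file):
 *
 *	if (!arch_io_reserve_memtype_wc(bar_start, bar_len))
 *		fb_base = ioremap_wc(bar_start, bar_len);
 *	...
 *	iounmap(fb_base);
 *	arch_io_free_memtype_wc(bar_start, bar_len);
 *
 * This keeps the memtype tracking here consistent with the WC ioremap.
 */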
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, for example the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base : size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this func also keeps the identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);

/*
 * track_pfn_copy is called when the vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range()
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}
/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			return;
		}
		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
	vma->vm_flags &= ~VM_PAT;
pgprot_t pgprot_writecombine(pgprot_t prot)
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WT));
EXPORT_SYMBOL_GPL(pgprot_writethrough);
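/*
 * Example (illustrative): a driver mmap() handler that wants a WC user
 * mapping of device memory would typically do:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * remap_pfn_range() then goes through track_pfn_remap() above, so the WC
 * request is reconciled with the PAT/MTRR state tracked here.
 */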
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/*
 * We are allocating a temporary printout-entry to be passed
 * between seq_start()/next() and seq_show():
 */
static struct memtype *memtype_get_idx(loff_t pos)
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	/* Free it on error: */
	if (ret) {
		kfree(print_entry);
		return NULL;
	}
	return print_entry;

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
	if (*pos == 0)
		seq_puts(seq, "PAT memtype list:\n");

	return memtype_get_idx(*pos);

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	++*pos;
	return memtype_get_idx(*pos);

static void memtype_seq_stop(struct seq_file *seq, void *v)

static int memtype_seq_show(struct seq_file *seq, void *v)
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n",
		   cattr_name(print_entry->type),
		   print_entry->start, print_entry->end);

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
	return seq_open(file, &memtype_seq_ops);

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */