/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

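/*
 * cmma_flag records the level of CMMA support detected at boot:
 *   0 - ESSA is not available, all page state hooks are no-ops
 *   1 - basic ESSA page states (stable / unused)
 *   2 - ESSA plus the no-dat page states (facility 147)
 */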
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

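/*
 * Probe for ESSA by issuing ESSA_GET_STATE. On machines without the
 * facility the instruction raises an operation exception; the EX_TABLE
 * entry catches it and the initial -EOPNOTSUPP is left in rc.
 */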
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

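/*
 * All helpers below encode the ESSA instruction by hand
 * (.insn rrf,0xb9ab0000 is the ESSA opcode). ESSA operates on a single
 * 4K frame, hence the set_page_* helpers loop over all 1 << order
 * frames of a block.
 */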
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

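/*
 * cmma_init_nodat() runs once at boot when the no-dat facility is
 * available. Pages sitting on a free list are skipped here; they get
 * their state set by arch_alloc_page()/arch_free_page() as they cycle
 * through the page allocator.
 */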
void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

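/*
 * arch_free_page() and arch_alloc_page() are the hooks called by the
 * page allocator: freed pages are reported to the hypervisor as unused,
 * allocated pages are put back into the appropriate stable state.
 */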
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);	/* 0x20: page is in a no-dat state */
}

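/*
 * Switch all free pages between the unused and the stable state in one
 * sweep, walking the free lists of every populated zone under the zone
 * lock. This is used by the s390 hibernation code, which needs all free
 * pages stable while the memory image is written.
 */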
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}