/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

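/*
 * show_mem() dumps a summary of memory usage to the kernel log.  It is
 * called from diagnostic paths such as the SysRq-m handler and the
 * out-of-memory killer.
 */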
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/* Walking every page takes a while; keep the NMI
			   watchdog happy during the scan. */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);

	/* A zero pgprot means "remove the mapping" */
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

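/*
 * Callers normally do not use __set_fixmap() directly but go through the
 * set_fixmap()/set_fixmap_nocache() wrappers in <asm/fixmap.h>, which pass
 * PAGE_KERNEL or PAGE_KERNEL_NOCACHE as the protection.  The local APIC
 * setup code, for instance, maps its register window with something like:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 */
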
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

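/*
 * A paravirtualized guest that needs the very top of the virtual address
 * space for its hypervisor calls reserve_top_address() from its early boot
 * code, before the first fixmap entry is installed.  Purely illustrative
 * (the size is hypervisor-specific):
 *
 *	reserve_top_address(4 * 1024 * 1024);	// leave 4MB below the fixmap
 */
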
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

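/*
 * With CONFIG_HIGHPTE, user page-table pages may come from highmem and so
 * have no permanent kernel mapping; they are accessed through the
 * pte_offset_map()/pte_unmap() (kmap_atomic) helpers.  Kernel page tables
 * from pte_alloc_one_kernel() must always be reachable through the direct
 * mapping, so they are never allocated from highmem.
 */
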
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

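/*
 * pgd_list_add()/pgd_list_del() chain the pgd page through its struct page
 * lru field; both must be called with pgd_lock held, which pgd_ctor() and
 * pgd_dtor() below take care of.
 */
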
#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
#endif	/* PTRS_PER_PMD */

static void pgd_dtor(void *pgd)
{
	unsigned long flags;	/* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

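/*
 * UNSHARED_PTRS_PER_PGD is the number of top-level entries each pgd must
 * manage on its own: only the user entries when the kernel pmd is shared,
 * all PTRS_PER_PGD entries otherwise.
 */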
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);
			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

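/*
 * pgd pages are cached on a per-CPU quicklist so that recently freed pgds,
 * whose kernel mappings are still set up, can be reused cheaply.  The
 * expectation is that pgd_ctor() only runs when a genuinely new page has to
 * come from the page allocator, and pgd_dtor() when check_pgt_cache()
 * eventually trims pages back to it.
 */
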
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	mm->pgd = pgd;			/* so that alloc_pd can use it */
	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		quicklist_free(0, pgd_dtor, pgd);
		pgd = NULL;
	}
	return pgd;
}

void pgd_free(pgd_t *pgd)
{
	pgd_mop_up_pmds(pgd);
	quicklist_free(0, pgd_dtor, pgd);
}

void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	/* This is called just after the pmd has been detached from
	   the pgd, which requires a full tlb flush to be recognized
	   by the CPU.  Rather than incurring multiple tlb flushes
	   while the address space is being pulled down, make the tlb
	   gathering machinery do a full flush when we're done. */
	tlb->fullmm = 1;

	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif