#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
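
/*
 * Added note (not part of the original file): pte_write() is written so it
 * works both on platforms that mark writable pages with _PAGE_RW (where
 * _PAGE_RO is 0) and on platforms such as the 8xx that mark read-only
 * pages with _PAGE_RO (where _PAGE_RW is 0):
 *
 *	pte_write(__pte(_PAGE_RW))	-> 1	(RW-style platform)
 *	pte_write(__pte(_PAGE_RO))	-> 0	(RO-style platform)
 */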

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h . On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
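
/*
 * Illustrative example (not part of the original file): a PROT_NONE user
 * mapping keeps _PAGE_PRESENT but drops _PAGE_USER, so:
 *
 *	pte_protnone(__pte(_PAGE_PRESENT))			-> 1
 *	pte_protnone(__pte(_PAGE_PRESENT | _PAGE_USER))		-> 0
 */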

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }
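
/*
 * Illustrative example (not part of the original file): pfn_pte() and
 * pte_pfn() are inverses over the RPN field, assuming all protection bits
 * live below PTE_RPN_SHIFT:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	WARN_ON(pte_pfn(pte) != pfn);
 */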

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
	ptev |= _PAGE_RO;
	return __pte(ptev);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_basic_t ptev;

	ptev = pte_val(pte) & ~_PAGE_RO;
	ptev |= _PAGE_RW;
	return __pte(ptev);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_HUGE);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
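
/*
 * Illustrative example (not part of the original file): pte_modify() is
 * what generic code such as mprotect() uses to change protections while
 * keeping the bits in _PAGE_CHG_MASK (e.g. dirty/accessed) intact:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(new_vm_flags));
 */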

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level helper in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e. the same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we can just
	 * store as long as we do the two halves in the right order with a
	 * barrier in between. This is possible because we take care, in the
	 * hash code, to pre-invalidate if the PTE was already hashed, which
	 * synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");
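
	/*
	 * Added note (not part of the original file): %2 names the most
	 * significant word of the 64-bit PTE value and %L2 the least
	 * significant one, which carries the permission/present bits here.
	 * The eieio orders the two stores so the half with _PAGE_PRESENT
	 * becomes visible last, and a concurrent walker can never see a
	 * valid PTE paired with a stale other half.
	 */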

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (is_kernel_addr(addr))
		mb();
#endif
#endif
}
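
/*
 * Illustrative usage sketch (not part of the original file): generic code
 * reaches __set_pte_at() through set_pte_at(), e.g. when establishing a
 * kernel mapping:
 *
 *	set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL));
 */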

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or to set
 * other cache attributes.
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
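
/*
 * Illustrative example (not part of the original file): drivers typically
 * reach these helpers when mapping MMIO into userspace, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */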

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & 0x4) != 0);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
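
/*
 * Illustrative sketch (not part of the original file): page-table walkers
 * use this test, via is_hugepd() below, to decide whether an entry points
 * to a normal lower-level table or to a huge-page directory, e.g.
 *
 *	if (is_hugepd(__hugepd(pgd_val(pgd))))
 *		...	follow the hugepd instead of descending further
 */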

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

#endif /* __ASSEMBLY__ */
#endif