/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, pmdp) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

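/*
 * Editorial note (an assumption based on the mask arithmetic above): on
 * machines where a single zero page would cause cache synonyms,
 * empty_zero_page is the start of a block of zero pages and
 * zero_page_mask picks the page whose colour matches the faulting user
 * address, so ZERO_PAGE(vaddr) can differ per address.
 */
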
/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin      |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Table type
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin     |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Table type
 * TF, TL Table offset and length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin    |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

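/*
 * Worked example (editorial addition, derived from the bit values above):
 * a read-write, dirty, young pte carries _PAGE_PRESENT | _PAGE_YOUNG |
 * _PAGE_DIRTY | _PAGE_READ | _PAGE_WRITE = 0x03d in its low 12 bits and
 * has both HW bits (_PAGE_PROTECT, _PAGE_INVALID) clear, matching the
 * ".00.xx1111.1" row of the table above.
 */
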
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT		0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC		0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID		0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK	0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

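/*
 * Worked example (editorial addition): a read-write, dirty, young segment
 * entry has _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 * _SEGMENT_ENTRY_WRITE | _SEGMENT_ENTRY_READ set and both
 * _SEGMENT_ENTRY_INVALID and _SEGMENT_ENTRY_PROTECT clear, i.e. the
 * "11..0...0...11" row above.
 */
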
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

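/*
 * Editorial note: the write-only combinations (__P010, __S010, ...) have
 * no exact hardware equivalent. Private mappings fall back to PAGE_RO
 * because stores are resolved by copy-on-write anyway, while shared
 * writable mappings use PAGE_RW, since write permission implies read
 * permission on s390.
 */
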
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

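/*
 * Editorial summary of the three helpers below: csp(), cspg() and crdte()
 * wrap the COMPARE AND SWAP AND PURGE (32/64 bit) and COMPARE AND REPLACE
 * DAT TABLE ENTRY instructions. Each replaces the designated table entry
 * with the new value only if it still contains the old value, and purges
 * TLB entries that were formed from it.
 */
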
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

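/*
 * Editorial example: pte_modify(pte, PAGE_RW) on a clean pte leaves
 * _PAGE_PROTECT set, so the first store to the page faults; the fault
 * handler can then mark the pte dirty via pte_mkdirty(), which clears
 * _PAGE_PROTECT again. This is how dirty state is tracked in software,
 * since the pte has no hardware dirty bit.
 */
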
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

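/*
 * Editorial note (an interpretation of the loop above): IPTE with the
 * range option may invalidate only part of the requested range in one
 * execution; the CPU updates the address and remaining-count registers
 * in place, so the loop repeats until the count wraps past zero and
 * reads 255.
 */
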
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage | pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

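/*
 * Worked shift example (editorial addition): for a three-level task, the
 * top table has type _REGION_ENTRY_TYPE_R3 = 0x04, so
 * shift = (0x04 >> 2) * 11 + 20 = 31 = _REGION3_SHIFT; for a five-level
 * task with type _REGION_ENTRY_TYPE_R1 = 0x0c,
 * shift = 3 * 11 + 20 = 53 = _REGION1_SHIFT.
 */
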
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)		pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)		pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)		pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}

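/*
 * Editorial note: MACHINE_HAS_EDAT1 means the enhanced-DAT facility 1 is
 * installed, which provides hardware 1 MB large pages at the segment
 * level; these are the pages transparent hugepages are built from here.
 */
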
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

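/*
 * Worked example (editorial addition): mk_swap_pte(1, 0) yields
 * _PAGE_INVALID | _PAGE_PROTECT | (1 << __SWP_TYPE_SHIFT) = 0x604, which
 * satisfies the swap pattern (pte & 0x201) == 0x200 and keeps bits 52
 * and 55 zero, as required above.
 */
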
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_S390_PGTABLE_H */