/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT  ~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
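
/*
 * The two headers above declare the hash__* and radix__* implementations.
 * Each helper below dispatches to one of them depending on whether the
 * radix MMU is active (radix_enabled()/early_radix_enabled()).
 */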

/* TLB flush actions. Used as argument to tlbiel_all() and tlbiel_all_lpid() */
enum {
        TLB_INVAL_SCOPE_GLOBAL = 0,     /* invalidate all TLBs */
        TLB_INVAL_SCOPE_LPID = 1,       /* invalidate TLBs for current LPID */
};

#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
        /*
         * This is used for host machine check and bootup.
         *
         * It uses early_radix_enabled() (and the implementations use
         * early_cpu_has_feature() etc.) because those work early in
         * boot, and the machine check path is not performance critical.
         */
        if (early_radix_enabled())
                radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
#else
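/* No native MMU support configured; this must never be reached. */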
static inline void tlbiel_all(void) { BUG(); }
#endif

static inline void tlbiel_all_lpid(bool radix)
{
        /* This is used for guest machine check. */
        if (radix)
                radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
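
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a KVM guest machine check handler that knows the guest MMU mode might
 * invalidate the current LPID's translations with:
 *
 *         tlbiel_all_lpid(kvm_is_radix(kvm));
 *
 * kvm_is_radix() and kvm are assumptions here, purely for illustration.
 */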

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_pmd_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        if (radix_enabled())
                return radix__flush_hugetlb_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

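/*
 * Called when kernel page tables change, e.g. after vmalloc()/vfree()
 * updates a range of kernel mappings.
 */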
static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_kernel_range(start, end);
        return hash__flush_tlb_kernel_range(start, end);
}

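/*
 * The local_* variants invalidate translations on the current CPU only;
 * their flush_* counterparts in the CONFIG_SMP section cover all CPUs.
 */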
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_tlb_mm(mm);
        return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_tlb_page(vma, vmaddr);
        return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_all_mm(mm);
        return hash__local_flush_all_mm(mm);
}

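/*
 * Invoked by the mmu_gather machinery once a batch of unmaps completes,
 * to flush whatever the batch gathered.
 */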
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (radix_enabled())
                return radix__tlb_flush(tlb);
        return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_tlb_mm(mm);
        return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_all_mm(mm);
        return hash__flush_all_mm(mm);
}
#else
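/* UP build: the flushes reduce to their local, single-CPU variants. */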
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)       local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)                local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
        /*
         * Flush the page table walk cache on freeing a page table. The
         * upper-level page table entry has already been marked "none"
         * by this point, so it is safe to flush the PWC here.
         */
        if (!radix_enabled())
                return;

        radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */