1 #ifndef _SPARC64_TLBFLUSH_H
2 #define _SPARC64_TLBFLUSH_H
4 #include <asm/mmu_context.h>
6 /* TSB flush operations. */
/* Upper bound on virtual addresses batched before a forced flush. */
8 #define TLB_BATCH_NR 192
/* NOTE(review): fields of struct tlb_batch (struct header not visible in
 * this chunk). hugepage_shift presumably records the page-size shift of
 * the batched mappings -- confirm against arch/sparc/mm/tlb.c.
 */
11 unsigned int hugepage_shift;
/* Deferred virtual addresses awaiting a TLB/TSB flush. */
15 unsigned long vaddrs[TLB_BATCH_NR];
/* Flush TSB entries for the kernel virtual range [start, end). */
18 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
/* Flush the TSB entries recorded in batch @tb (implementation not in
 * this header).
 */
19 void flush_tsb_user(struct tlb_batch *tb);
/* Flush the TSB entry for a single user virtual address.
 * NOTE(review): hugepage_shift presumably selects the base-page vs.
 * huge-page TSB -- confirm against the TSB implementation.
 */
20 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
21 unsigned int hugepage_shift);
23 /* TLB flush operations. */
/* NOTE(review): the bodies of the three inlines below are elided in this
 * chunk. On sparc64 these are traditionally no-ops because user TLB
 * flushes are batched and performed when leaving lazy MMU mode (see
 * flush_tlb_pending() below) -- confirm against the full file.
 */
25 static inline void flush_tlb_mm(struct mm_struct *mm)
29 static inline void flush_tlb_page(struct vm_area_struct *vma,
34 static inline void flush_tlb_range(struct vm_area_struct *vma,
35 unsigned long start, unsigned long end)
/* Flush kernel TLB entries for the virtual range [start, end). */
39 void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/* Advertise lazy MMU mode to generic code: TLB flushes may be deferred
 * between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode().
 */
41 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
/* Drain any pending (batched) TLB flushes immediately. */
43 void flush_tlb_pending(void);
44 void arch_enter_lazy_mmu_mode(void);
45 void arch_leave_lazy_mmu_mode(void);
/* No-op mid-mode flush hook. NOTE(review): presumably pending work is
 * drained by arch_leave_lazy_mmu_mode() instead -- confirm in mm/tlb.c.
 */
46 #define arch_flush_lazy_mmu_mode() do {} while (0)
/* Low-level flush primitives. */
49 void __flush_tlb_all(void);
50 void __flush_tlb_page(unsigned long context, unsigned long vaddr);
51 void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
/* Non-SMP side of the CONFIG_SMP conditional (see #else below): a
 * "global" page flush reduces to flushing the local TLB entry in the
 * mm's hardware context. NOTE(review): enclosing braces are elided in
 * this chunk view; code left byte-identical.
 */
55 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
57 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
60 #else /* CONFIG_SMP */
/* SMP variants: act across CPUs (presumably via cross-calls -- confirm
 * against arch/sparc/kernel/smp_64.c).
 */
62 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
63 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
/* On SMP the "global" page flush is the cross-CPU variant. */
65 #define global_flush_tlb_page(mm, vaddr) \
66 smp_flush_tlb_page(mm, vaddr)
68 #endif /* ! CONFIG_SMP */
70 #endif /* _SPARC64_TLBFLUSH_H */