/* TBD: optimize this */
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
dsb(ishst);
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
*/
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) cache_wbinv_all()
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
void flush_icache_deferred(struct mm_struct *mm);
#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
#define flush_cache_all() __flush_cache_all()
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
static inline void flush_cache_mm(struct mm_struct *mm)
__flush_cache_vmap();
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
extern void (*__flush_cache_vunmap)(void);
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#define flush_icache_pages flush_icache_pages
#define flush_cache_vmap(start, end) flush_dcache_range(start, end)
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
void flush_dcache_folio(struct folio *folio);
flush_icache_mm(vma->vm_mm, 0)
#ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
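+/* Early-boot variant: IPIs are not available yet, so only flush the local CPU's TLB */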
+#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
#ifndef CONFIG_SMP
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
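+/* Flush a range of kernel pages without broadcasting */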
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+}
+
static void __ipi_flush_tlb_all(void *info)
{
local_flush_tlb_all();
unsigned long len);
#define flush_cache_vmap(start, end) local_flush_cache_all(NULL)
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) local_flush_cache_all(NULL)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
/* When a context switch happens we must flush all user windows so that
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#endif /* !__ASSEMBLY__ */
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-#define flush_cache_vmap(start,end) flush_cache_all()
-#define flush_cache_vunmap(start,end) flush_cache_all()
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vmap_early(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) flush_cache_all()
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vmap_early(start,end) do { } while (0)
#define flush_cache_vunmap(start,end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
}
#endif
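+/*
+ * Fallback for mappings created before SMP is up (e.g. the first per-cpu
+ * chunk): a no-op by default, overridden by architectures that need a
+ * local cache/TLB flush to make such early mappings visible.
+ */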
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (rc < 0)
panic("failed to map percpu area, err=%d\n", rc);
- /*
- * FIXME: Archs with virtual cache should flush local
- * cache for the linear mapping here - something
- * equivalent to flush_cache_vmap() on the local cpu.
- * flush_cache_vmap() can't be used as most supporting
- * data structures are not set up yet.
- */
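+	/* make the new mapping visible on this CPU before copying static data into it */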
+ flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
/* copy static data */
memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);