arm64: Rename arm64-internal cache maintenance functions
author: Fuad Tabba <tabba@google.com>
Mon, 24 May 2021 08:30:01 +0000 (09:30 +0100)
committer: Will Deacon <will@kernel.org>
Tue, 25 May 2021 18:27:49 +0000 (19:27 +0100)
Although naming across the codebase isn't that consistent, it
tends to follow certain patterns. Moreover, the term "flush"
isn't defined in the Arm Architecture reference manual, and might
be interpreted to mean clean, invalidate, or both for a cache.

Rename arm64-internal functions to make the naming internally
consistent, as well as making it consistent with the Arm ARM, by
specifying whether it applies to the instruction, data, or both
caches, and whether the operation is a clean, invalidate, or both.
Also specify which point the operation applies to, i.e., to the
point of unification (PoU), coherency (PoC), or persistence
(PoP).

This commit applies the following sed transformation to all files
under arch/arm64:

"s/\b__flush_cache_range\b/caches_clean_inval_pou_macro/g;"\
"s/\b__flush_icache_range\b/caches_clean_inval_pou/g;"\
"s/\binvalidate_icache_range\b/icache_inval_pou/g;"\
"s/\b__flush_dcache_area\b/dcache_clean_inval_poc/g;"\
"s/\b__inval_dcache_area\b/dcache_inval_poc/g;"\
"s/__clean_dcache_area_poc\b/dcache_clean_poc/g;"\
"s/\b__clean_dcache_area_pop\b/dcache_clean_pop/g;"\
"s/\b__clean_dcache_area_pou\b/dcache_clean_pou/g;"\
"s/\b__flush_cache_user_range\b/caches_clean_inval_user_pou/g;"\
"s/\b__flush_icache_all\b/icache_inval_all_pou/g;"

Note that __clean_dcache_area_poc is deliberately missing a word
boundary check at the beginning in order to match the efistub
symbols in image-vars.h.

Also note that, despite its name, __flush_icache_range operates
on both instruction and data caches. The name change here
reflects that.

No functional change intended.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-19-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>
25 files changed:
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/insn.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/cache.S
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/lib/uaccess_flushcache.c
arch/arm64/mm/cache.S
arch/arm64/mm/flush.c

index ed1cc9d8e6df70e69f12269fa1cdfd961769bd22..4ad22c3135dbb916b6e427f517f1ae2038c7da44 100644 (file)
@@ -125,7 +125,7 @@ static inline u32 gic_read_rpr(void)
 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
 #define gic_flush_dcache_to_poc(a,l)   \
-       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
index 26617df1fa4592b504c4b20a189b91ee1b537fe5..543c997eb3b7d7508b44800581e8b82288b80e97 100644 (file)
  *             - start  - virtual start address (inclusive)
  *             - end    - virtual end address (exclusive)
  *
- *     __flush_icache_range(start, end)
+ *     caches_clean_inval_pou(start, end)
  *
  *             Ensure coherency between the I-cache and the D-cache region to
  *             the Point of Unification.
  *
- *     __flush_cache_user_range(start, end)
+ *     caches_clean_inval_user_pou(start, end)
  *
  *             Ensure coherency between the I-cache and the D-cache region to
  *             the Point of Unification.
  *             Use only if the region might access user memory.
  *
- *     invalidate_icache_range(start, end)
+ *     icache_inval_pou(start, end)
  *
  *             Invalidate I-cache region to the Point of Unification.
  *
- *     __flush_dcache_area(start, end)
+ *     dcache_clean_inval_poc(start, end)
  *
  *             Clean and invalidate D-cache region to the Point of Coherency.
  *
- *     __inval_dcache_area(start, end)
+ *     dcache_inval_poc(start, end)
  *
  *             Invalidate D-cache region to the Point of Coherency.
  *
- *     __clean_dcache_area_poc(start, end)
+ *     dcache_clean_poc(start, end)
  *
  *             Clean D-cache region to the Point of Coherency.
  *
- *     __clean_dcache_area_pop(start, end)
+ *     dcache_clean_pop(start, end)
  *
  *             Clean D-cache region to the Point of Persistence.
  *
- *     __clean_dcache_area_pou(start, end)
+ *     dcache_clean_pou(start, end)
  *
  *             Clean D-cache region to the Point of Unification.
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern void invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(unsigned long start, unsigned long end);
-extern void __inval_dcache_area(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_poc(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_pop(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_pou(unsigned long start, unsigned long end);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(unsigned long start, unsigned long end);
 
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
-       __flush_icache_range(start, end);
+       caches_clean_inval_pou(start, end);
 
        /*
         * IPI all online CPUs so that they undergo a context synchronization
@@ -135,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;
index 0ae2397076fd7724c6031a16713beaccbdf87bda..1bed37eb013a1624046eea8a855f368204045ed4 100644 (file)
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-       __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
+       dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
index 33293d5855af42072d3c8be3c6d2ba780f150908..f4cbfa9025a8323e6af8c51a4233239304df474f 100644 (file)
@@ -181,7 +181,7 @@ static inline void *__kvm_vector_slot2addr(void *base,
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)   \
-       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
@@ -209,12 +209,12 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 {
        if (icache_is_aliasing()) {
                /* any kind of VIPT cache */
-               __flush_icache_all();
+               icache_inval_all_pou();
        } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
                void *va = page_address(pfn_to_page(pfn));
 
-               invalidate_icache_range((unsigned long)va,
+               icache_inval_pou((unsigned long)va,
                                        (unsigned long)va + size);
        }
 }
index c906d20c7b529ca80af17d94a245ae3d0df9eaa9..3fb79b76e9d96adae5de2f08f3974a4a40ff898e 100644 (file)
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
         */
        if (!is_module) {
                dsb(ish);
-               __flush_icache_all();
+               icache_inval_all_pou();
                isb();
 
                /* Ignore ARM64_CB bit from feature mask */
index b0f728fb61f0175eee8a2660a5273b769eba567c..61a87fa1c3055e26252c2b5e029359aceb4d0574 100644 (file)
@@ -29,7 +29,7 @@ SYM_CODE_START(efi_enter_kernel)
         */
        ldr     w1, =kernel_size
        add     x1, x0, x1
-       bl      __clean_dcache_area_poc
+       bl      dcache_clean_poc
        ic      ialluis
 
        /*
@@ -38,7 +38,7 @@ SYM_CODE_START(efi_enter_kernel)
         */
        adr     x0, 0f
        adr     x1, 3f
-       bl      __clean_dcache_area_poc
+       bl      dcache_clean_poc
 0:
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
index 8df0ac8d9123ebdeb97288ef539109d9a2e737a1..6928cb67d3a033f5000495f2cc484530e67733de 100644 (file)
@@ -118,7 +118,7 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
                                                // MMU off
 
        add     x1, x0, #0x20                   // 4 x 8 bytes
-       b       __inval_dcache_area             // tail call
+       b       dcache_inval_poc                // tail call
 SYM_CODE_END(preserve_boot_args)
 
 /*
@@ -268,7 +268,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
         */
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        /*
         * Clear the init page tables.
@@ -381,11 +381,11 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 
        adrp    x0, idmap_pg_dir
        adrp    x1, idmap_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        ret     x28
 SYM_FUNC_END(__create_page_tables)
index ef2ab7caf8155a0e62c6aa2651aafe755015b5d7..81c0186a5e322a8d2b2cf4b6be94b75d752bc90b 100644 (file)
@@ -45,7 +45,7 @@
  * Because this code has to be copied to a 'safe' page, it can't call out to
  * other functions by PC-relative address. Also remember that it may be
  * mid-way through over-writing other functions. For this reason it contains
- * code from __flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
  *
  * This 'safe' page is mapped via ttbr0, and executed from there. This function
  * switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,7 +87,7 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
        copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 
        add     x1, x10, #PAGE_SIZE
-       /* Clean the copied page to PoU - based on __flush_icache_range() */
+       /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
        raw_dcache_line_size x2, x3
        sub     x3, x2, #1
        bic     x4, x10, x3
index b40ddce7150733dbb05d0443f44018303e1c423e..46a0b4d6e2519768eaa86bc141f75f2efa698550 100644 (file)
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                return -ENOMEM;
 
        memcpy(page, src_start, length);
-       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+       caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;
@@ -381,17 +381,17 @@ int swsusp_arch_suspend(void)
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC*/
-               __flush_dcache_area((unsigned long)__mmuoff_data_start,
+               dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
                                    (unsigned long)__mmuoff_data_end);
-               __flush_dcache_area((unsigned long)__idmap_text_start,
+               dcache_clean_inval_poc((unsigned long)__idmap_text_start,
                                    (unsigned long)__idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
-                       __flush_dcache_area(
+                       dcache_clean_inval_poc(
                                (unsigned long)__hyp_idmap_text_start,
                                (unsigned long)__hyp_idmap_text_end);
-                       __flush_dcache_area((unsigned long)__hyp_text_start,
+                       dcache_clean_inval_poc((unsigned long)__hyp_text_start,
                                            (unsigned long)__hyp_text_end);
                }
 
@@ -477,7 +477,7 @@ int swsusp_arch_resume(void)
         * The hibernate exit text contains a set of el2 vectors, that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
-       __flush_dcache_area((unsigned long)hibernate_exit,
+       dcache_clean_inval_poc((unsigned long)hibernate_exit,
                            (unsigned long)hibernate_exit + exit_size);
 
        /*
index 3dd515baf526883b82be5ff268f50dea6be01e0e..53a381a7f65dd48b23ddd82246be7a59453d635b 100644 (file)
@@ -237,7 +237,7 @@ asmlinkage void __init init_feature_override(void)
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                if (regs[i]->override)
-                       __flush_dcache_area((unsigned long)regs[i]->override,
+                       dcache_clean_inval_poc((unsigned long)regs[i]->override,
                                            (unsigned long)regs[i]->override +
                                            sizeof(*regs[i]->override));
        }
index bcf3c27553708ddc97a96b00f8312581b78d3738..c96a9a0043bf4a6d73d90f3d435f2c80b5bbce85 100644 (file)
@@ -35,7 +35,7 @@ __efistub_strnlen             = __pi_strnlen;
 __efistub_strcmp               = __pi_strcmp;
 __efistub_strncmp              = __pi_strncmp;
 __efistub_strrchr              = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy             = __pi_memcpy;
index 6c0de2f60ea96a19fc15aa098604b833cae0a2a6..51cb8dc98d008268e151bfd6bd85925a541184de 100644 (file)
@@ -198,7 +198,7 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 
        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
-               __flush_icache_range((uintptr_t)tp,
+               caches_clean_inval_pou((uintptr_t)tp,
                                     (uintptr_t)tp + AARCH64_INSN_SIZE);
 
        return ret;
index 49cccd03cb3704c78c66feec1b9ce00c9e839dba..cfa2cfde3019d678e32241acd21416891dd74c0e 100644 (file)
@@ -72,7 +72,7 @@ u64 __init kaslr_early_init(void)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       __flush_dcache_area((unsigned long)&module_alloc_base,
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
                            (unsigned long)&module_alloc_base +
                                    sizeof(module_alloc_base));
 
@@ -172,10 +172,10 @@ u64 __init kaslr_early_init(void)
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       __flush_dcache_area((unsigned long)&module_alloc_base,
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
                            (unsigned long)&module_alloc_base +
                                    sizeof(module_alloc_base));
-       __flush_dcache_area((unsigned long)&memstart_offset_seed,
+       dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
                            (unsigned long)&memstart_offset_seed +
                                    sizeof(memstart_offset_seed));
 
index 3e79110c8f3a8dbef8574606177b4fe5cbfb8299..03ceabe4d912c0ac79985e20bdb313b41cf2096a 100644 (file)
@@ -72,10 +72,10 @@ int machine_kexec_post_load(struct kimage *kimage)
         * For execution with the MMU off, reloc_code needs to be cleaned to the
         * PoC and invalidated from the I-cache.
         */
-       __flush_dcache_area((unsigned long)reloc_code,
+       dcache_clean_inval_poc((unsigned long)reloc_code,
                            (unsigned long)reloc_code +
                                    arm64_relocate_new_kernel_size);
-       invalidate_icache_range((uintptr_t)reloc_code,
+       icache_inval_pou((uintptr_t)reloc_code,
                                (uintptr_t)reloc_code +
                                        arm64_relocate_new_kernel_size);
 
@@ -111,7 +111,7 @@ static void kexec_list_flush(struct kimage *kimage)
                unsigned long addr;
 
                /* flush the list entries. */
-               __flush_dcache_area((unsigned long)entry,
+               dcache_clean_inval_poc((unsigned long)entry,
                                    (unsigned long)entry +
                                            sizeof(kimage_entry_t));
 
@@ -128,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
-                       __flush_dcache_area(addr, addr + PAGE_SIZE);
+                       dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
@@ -155,7 +155,7 @@ static void kexec_segment_flush(const struct kimage *kimage)
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz /  PAGE_SIZE);
 
-               __flush_dcache_area(
+               dcache_clean_inval_poc(
                        (unsigned long)phys_to_virt(kimage->segment[i].mem),
                        (unsigned long)phys_to_virt(kimage->segment[i].mem) +
                                kimage->segment[i].memsz);
index 5fcdee331087465acb27bd7206b0c67ef1c62ebc..9b4c1118194dac2d966e6f454098c3b7bd98b48c 100644 (file)
@@ -122,7 +122,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       __flush_dcache_area((unsigned long)&secondary_data,
+       dcache_clean_inval_poc((unsigned long)&secondary_data,
                            (unsigned long)&secondary_data +
                                    sizeof(secondary_data));
 
@@ -145,7 +145,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
-       __flush_dcache_area((unsigned long)&secondary_data,
+       dcache_clean_inval_poc((unsigned long)&secondary_data,
                            (unsigned long)&secondary_data +
                                    sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
index 58d804582a35f756d3f54604599637b48fc7034b..7e1624ecab3c80ea6b34437d91672630e434a52a 100644 (file)
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
        unsigned long size = sizeof(secondary_holding_pen_release);
 
        secondary_holding_pen_release = val;
-       __flush_dcache_area((unsigned long)start, (unsigned long)start + size);
+       dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
 }
 
 
@@ -90,7 +90,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
         * the boot protocol.
         */
        writeq_relaxed(pa_holding_pen, release_addr);
-       __flush_dcache_area((__force unsigned long)release_addr,
+       dcache_clean_inval_poc((__force unsigned long)release_addr,
                            (__force unsigned long)release_addr +
                                    sizeof(*release_addr));
 
index 265fe3eb10699b71033d350b004b0549af6dfd58..db5159a3055fc3d9863210db32a75e8b95e845ca 100644 (file)
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
                        dsb(ish);
                }
 
-               ret = __flush_cache_user_range(start, start + chunk);
+               ret = caches_clean_inval_user_pou(start, start + chunk);
                if (ret)
                        return ret;
 
index 1cb39c0803a447872559c30f60437b67f21bbd71..c1953f65ca0ecc53fad9d769310c8eab807ff58e 100644 (file)
@@ -1064,7 +1064,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                        stage2_unmap_vm(vcpu->kvm);
                else
-                       __flush_icache_all();
+                       icache_inval_all_pou();
        }
 
        vcpu_reset_hcr(vcpu);
index 36cef69154281cfd71d1c17fbe0538fbaa8c1a12..958734f4d6b0ed820ee7fc006d2fd38d2cf0a105 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/assembler.h>
 #include <asm/alternative.h>
 
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
index 5dffe928f256314fa8646c4d63be037b5854175b..8143ebd4fb721297404781f86261800ebb0dc4f0 100644 (file)
@@ -134,7 +134,7 @@ static void update_nvhe_init_params(void)
        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-               __flush_dcache_area((unsigned long)params,
+               dcache_clean_inval_poc((unsigned long)params,
                                    (unsigned long)params + sizeof(*params));
        }
 }
index 83dc3b271bc5b82bd77b0abe786b5eefefbb857e..38ed0f6f27032d297dfbf5466d45cbd65441f469 100644 (file)
@@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
         * you should be running with VHE enabled.
         */
        if (icache_is_vpipt())
-               __flush_icache_all();
+               icache_inval_all_pou();
 
        __tlb_switch_to_host(&cxt);
 }
index 10d2f04013d44ef8da4413f3ff2e6c1db3d86503..e9ad7fb28ee32d3d4f78b6a2428c0eef4f61c694 100644 (file)
@@ -841,7 +841,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        if (need_flush) {
                kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
 
-               __flush_dcache_area((unsigned long)pte_follow,
+               dcache_clean_inval_poc((unsigned long)pte_follow,
                                    (unsigned long)pte_follow +
                                            kvm_granule_size(level));
        }
@@ -997,7 +997,7 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                return 0;
 
        pte_follow = kvm_pte_follow(pte, mm_ops);
-       __flush_dcache_area((unsigned long)pte_follow,
+       dcache_clean_inval_poc((unsigned long)pte_follow,
                            (unsigned long)pte_follow +
                                    kvm_granule_size(level));
        return 0;
index 62ea989effe8040699d96e3ccb7847420e6d7190..baee22961bdba2eb63dc395e1c79ca069f70f8ff 100644 (file)
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
         * barrier to order the cache maintenance against the memcpy.
         */
        memcpy(dst, src, cnt);
-       __clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt);
+       dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);
 
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
        rc = raw_copy_from_user(to, from, n);
 
        /* See above */
-       __clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc);
+       dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
        return rc;
 }
index ea605d94182fd4d6d2d38b4464d4b4314601322c..5051b3c1a4f1263fbd8b7619c88d5f76bed87867 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/asm-uaccess.h>
 
 /*
- *     __flush_cache_range(start,end) [fixup]
+ *     caches_clean_inval_pou_macro(start,end) [fixup]
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
@@ -25,7 +25,7 @@
  *     - end     - virtual end address of region
  *     - fixup   - optional label to branch to on user fault
  */
-.macro __flush_cache_range, fixup
+.macro caches_clean_inval_pou_macro, fixup
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        b       .Ldc_skip_\@
@@ -43,7 +43,7 @@ alternative_else_nop_endif
 .endm
 
 /*
- *     __flush_icache_range(start,end)
+ *     caches_clean_inval_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
@@ -52,13 +52,13 @@ alternative_else_nop_endif
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_icache_range)
-       __flush_cache_range
+SYM_FUNC_START(caches_clean_inval_pou)
+       caches_clean_inval_pou_macro
        ret
-SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(caches_clean_inval_pou)
 
 /*
- *     __flush_cache_user_range(start,end)
+ *     caches_clean_inval_user_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
@@ -67,10 +67,10 @@ SYM_FUNC_END(__flush_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_user_pou)
        uaccess_ttbr0_enable x2, x3, x4
 
-       __flush_cache_range 2f
+       caches_clean_inval_pou_macro 2f
        mov     x0, xzr
 1:
        uaccess_ttbr0_disable x1, x2
@@ -78,17 +78,17 @@ SYM_FUNC_START(__flush_cache_user_range)
 2:
        mov     x0, #-EFAULT
        b       1b
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
 
 /*
- *     invalidate_icache_range(start,end)
+ *     icache_inval_pou(start,end)
  *
  *     Ensure that the I cache is invalid within specified region.
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
 alternative_if ARM64_HAS_CACHE_DIC
        isb
        ret
@@ -96,10 +96,10 @@ alternative_else_nop_endif
 
        invalidate_icache_by_line x0, x1, x2, x3
        ret
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
 
 /*
- *     __flush_dcache_area(start, end)
+ *     dcache_clean_inval_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned and invalidated to the PoC.
@@ -107,13 +107,13 @@ SYM_FUNC_END(invalidate_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
 
 /*
- *     __clean_dcache_area_pou(start, end)
+ *     dcache_clean_pou(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoU.
@@ -121,17 +121,17 @@ SYM_FUNC_END_PI(__flush_dcache_area)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
 
 /*
- *     __inval_dcache_area(start, end)
+ *     dcache_inval_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are invalidated. Any partial lines at the ends of the interval are
@@ -141,7 +141,7 @@ SYM_FUNC_END(__clean_dcache_area_pou)
  *     - end     - kernel end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
        /* FALLTHROUGH */
 
 /*
@@ -166,11 +166,11 @@ SYM_FUNC_START_PI(__inval_dcache_area)
        b.lo    2b
        dsb     sy
        ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
 SYM_FUNC_END(__dma_inv_area)
 
 /*
- *     __clean_dcache_area_poc(start, end)
+ *     dcache_clean_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoC.
@@ -179,7 +179,7 @@ SYM_FUNC_END(__dma_inv_area)
  *     - end     - virtual end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
        /* FALLTHROUGH */
 
 /*
@@ -189,11 +189,11 @@ SYM_FUNC_START_PI(__clean_dcache_area_poc)
  */
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
 SYM_FUNC_END(__dma_clean_area)
 
 /*
- *     __clean_dcache_area_pop(start, end)
+ *     dcache_clean_pop(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoP.
@@ -201,13 +201,13 @@ SYM_FUNC_END(__dma_clean_area)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
        alternative_if_not ARM64_HAS_DCPOP
-       b       __clean_dcache_area_poc
+       b       dcache_clean_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
 
 /*
  *     __dma_flush_area(start, size)
index c4ca7e05fdb80f6321e62d2f6572a990b54fcc7c..2aaf950b906cbabac1f145555dcc1b37b0271631 100644 (file)
 void sync_icache_aliases(unsigned long start, unsigned long end)
 {
        if (icache_is_aliasing()) {
-               __clean_dcache_area_pou(start, end);
-               __flush_icache_all();
+               dcache_clean_pou(start, end);
+               icache_inval_all_pou();
        } else {
                /*
                 * Don't issue kick_all_cpus_sync() after I-cache invalidation
                 * for user mappings.
                 */
-               __flush_icache_range(start, end);
+               caches_clean_inval_pou(start, end);
        }
 }
 
@@ -76,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
        /* Ensure order against any prior non-cacheable writes */
        dmb(osh);
-       __clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size);
+       dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-       __inval_dcache_area((unsigned long)addr, (unsigned long)addr + size);
+       dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif