arm64: assembler: add set_this_cpu_offset
author: Mark Rutland <mark.rutland@arm.com>
Thu, 20 May 2021 11:50:27 +0000 (12:50 +0100)
committer: Will Deacon <will@kernel.org>
Wed, 26 May 2021 21:45:45 +0000 (22:45 +0100)
There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210520115031.18509-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/assembler.h
arch/arm64/mm/proc.S

index 8418c1bd8f044e0bb27bd21f3ae6fd01bd51bff4..f0188903557f04de10c08ae291036efca8e49853 100644 (file)
@@ -232,15 +232,23 @@ lr        .req    x30             // link register
         * @dst: destination register
         */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
        mrs     \dst, tpidr_el2
        .endm
 #else
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \dst, tpidr_el1
 alternative_else
        mrs     \dst, tpidr_el2
+alternative_endif
+       .endm
+
+       .macro  set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       msr     tpidr_el1, \src
+alternative_else
+       msr     tpidr_el2, \src
 alternative_endif
        .endm
 #endif
@@ -253,7 +261,7 @@ alternative_endif
        .macro adr_this_cpu, dst, sym, tmp
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        add     \dst, \dst, \tmp
        .endm
 
@@ -264,7 +272,7 @@ alternative_endif
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        ldr     \dst, [\dst, \tmp]
        .endm
 
@@ -745,7 +753,7 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
        cbz             \tmp, \lbl
 #endif
        adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       this_cpu_offset \tmp2
+       get_this_cpu_offset     \tmp2
        ldr             w\tmp, [\tmp, \tmp2]
        cbnz            w\tmp, \lbl     // yield on pending softirq in task context
 .Lnoyield_\@:
index 97d7bcd8d4f26c5001c14bc346c5ce88059f2a0a..bc555cd5e6b1e9172d3423193d872d41803b313f 100644 (file)
@@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend)
        mrs     x9, mdscr_el1
        mrs     x10, oslsr_el1
        mrs     x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     x12, tpidr_el1
-alternative_else
-       mrs     x12, tpidr_el2
-alternative_endif
+       get_this_cpu_offset x12
        mrs     x13, sp_el0
        stp     x2, x3, [x0]
        stp     x4, x5, [x0, #16]
@@ -145,11 +141,7 @@ SYM_FUNC_START(cpu_do_resume)
        msr     mdscr_el1, x10
 
        msr     sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       msr     tpidr_el1, x13
-alternative_else
-       msr     tpidr_el2, x13
-alternative_endif
+       set_this_cpu_offset x13
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 by writing oslar_el1