arm64: Add initial support for E0PD
authorMark Brown <broonie@kernel.org>
Mon, 9 Dec 2019 18:12:14 +0000 (18:12 +0000)
committerWill Deacon <will@kernel.org>
Wed, 15 Jan 2020 14:11:02 +0000 (14:11 +0000)
Kernel Page Table Isolation (KPTI) is used to mitigate some speculation
based security issues by ensuring that the kernel is not mapped when
userspace is running, but this approach is expensive and is incompatible
with SPE.  E0PD, introduced in the ARMv8.5 extensions, provides an
alternative to this which ensures that accesses from userspace to the
kernel's half of the memory map always fault in constant time, preventing
timing attacks without requiring constant unmapping and remapping and
without blocking legitimate accesses.
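
At the hardware level the new control is a pair of TCR_EL1 bits (added
below in pgtable-hwdef.h as TCR_E0PD0/TCR_E0PD1).  A minimal sketch of
the mechanism, assuming only the standard arm64 accessors from
<asm/sysreg.h> and <asm/barrier.h>; the helper name is invented for
illustration, and the real enable path in this patch uses
sysreg_clear_set() as shown in the cpufeature.c hunk:

  /* Illustrative sketch, not part of the patch.  Setting
   * TCR_EL1.E0PD1 (bit 56) makes EL0 accesses through the TTBR1
   * (kernel) half of the address space fault in constant time.
   */
  #define TCR_E0PD1	(UL(1) << 56)

  static inline void e0pd_block_el0_ttbr1(void)
  {
  	u64 tcr = read_sysreg(tcr_el1);

  	write_sysreg(tcr | TCR_E0PD1, tcr_el1);
  	isb();	/* make the TCR_EL1 update visible locally */
  }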

Currently this feature will only be enabled if all CPUs in the system
support E0PD.  If some CPUs do not support the feature at boot time then
the feature will not be enabled, and in the unlikely event that a late
CPU is the first CPU to lack the feature then we will reject that CPU.

This initial patch does not yet integrate with KPTI; this will be dealt
with in follow-up patches.  Ideally we could ensure that by default we
don't use KPTI on CPUs where E0PD is present.
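
As a rough illustration of that direction, the eventual integration
could key KPTI off the new capability with something like the
hypothetical helper below (kpti_needed() is invented for illustration;
cpus_have_const_cap() is the existing arm64 capability test):

  /* Hypothetical sketch of the later KPTI integration, not part of
   * this patch.  ARM64_HAS_E0PD is a system-wide capability, so it is
   * only set when every boot CPU supports E0PD.
   */
  static bool kpti_needed(void)
  {
  	/* E0PD already makes EL0 accesses to kernel VAs fault in
  	 * constant time, so unmapping the kernel buys little extra.
  	 */
  	if (IS_ENABLED(CONFIG_ARM64_E0PD) &&
  	    cpus_have_const_cap(ARM64_HAS_E0PD))
  		return false;

  	return true;
  }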

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
[will: Fixed typo in Kconfig text]
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/cpufeature.c

index b1b4476ddb834ba64a8ba93e55e88d3a7282fed1..9cee2008ea9ec32fc0068d66c4bb8219bc35fba8 100644 (file)
@@ -1484,6 +1484,22 @@ config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+       bool "Enable support for E0PD"
+       default y
+       help
+          E0PD (part of the ARMv8.5 extensions) allows us to ensure
+          that EL0 accesses made via TTBR1 always fault in constant time,
+          providing similar benefits to KASLR as those provided by KPTI, but
+          with lower overhead and without disrupting legitimate access to
+          kernel memory such as SPE.
+
+          This option enables E0PD for TTBR1 where available.
+
+endmenu
+
 config ARM64_SVE
        bool "ARM Scalable Vector Extension support"
        default y
index b926838711194b005060815503955b0199078b34..33ff25c1ab1bdbde13cf7d0d72730e951b682303 100644 (file)
@@ -56,7 +56,8 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 #define ARM64_WORKAROUND_1542419               47
 #define ARM64_WORKAROUND_1319367               48
+#define ARM64_HAS_E0PD                         49
 
-#define ARM64_NCAPS                            49
+#define ARM64_NCAPS                            50
 
 #endif /* __ASM_CPUCAPS_H */
index d9fbd433cc1753258c28176592e5383565fd3c7f..378566f4882e794a6ebb23d7472712dd23ca94d1 100644 (file)
 #define TCR_HD                 (UL(1) << 40)
 #define TCR_NFD0               (UL(1) << 53)
 #define TCR_NFD1               (UL(1) << 54)
+#define TCR_E0PD0              (UL(1) << 55)
+#define TCR_E0PD1              (UL(1) << 56)
 
 /*
  * TTBR.
index 6e919fafb43dd65b407a3ec537408b240319ce7d..b085258cfe4ea593b9e557b27b6b08fe3159566a 100644 (file)
 #define ID_AA64MMFR1_VMIDBITS_16       2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT                60
 #define ID_AA64MMFR2_FWB_SHIFT         40
 #define ID_AA64MMFR2_AT_SHIFT          32
 #define ID_AA64MMFR2_LVA_SHIFT         16
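
The new ID_AA64MMFR2_E0PD_SHIFT places the E0PD field at bits [63:60]
of ID_AA64MMFR2_EL1, which is what has_cpuid_feature() consults below.
A sketch of that check using the existing
cpuid_feature_extract_unsigned_field() helper (the wrapper name is
invented for illustration):

  /* Sketch: a nonzero value in ID_AA64MMFR2_EL1[63:60] advertises
   * E0PD support, matching .min_field_value = 1 in the capability
   * entry below.
   */
  static bool this_cpu_advertises_e0pd(void)
  {
  	u64 mmfr2 = read_sysreg(id_aa64mmfr2_el1);

  	return cpuid_feature_extract_unsigned_field(mmfr2,
  					ID_AA64MMFR2_E0PD_SHIFT) >= 1;
  }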
index 04cf64e9f0c978a0e9578c6f8adca3570bbd5dd4..9d578e7201683893aefe5e073f8cd615a05d8279 100644 (file)
@@ -225,6 +225,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -1251,6 +1252,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+       if (this_cpu_has_cap(ARM64_HAS_E0PD))
+               sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;
 
@@ -1566,6 +1575,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
+#endif
+#ifdef CONFIG_ARM64_E0PD
+       {
+               .desc = "E0PD",
+               .capability = ARM64_HAS_E0PD,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+               .matches = has_cpuid_feature,
+               .min_field_value = 1,
+               .cpu_enable = cpu_enable_e0pd,
+       },
 #endif
        {},
 };
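
For reference, the sysreg_clear_set() call in cpu_enable_e0pd() above
reads TCR_EL1, applies the clear and set masks, and writes the register
back only if the value actually changed; it expands to roughly the
following (paraphrased from asm/sysreg.h of this era):

  #define sysreg_clear_set(sysreg, clear, set) do {			\
  	u64 __scs_val = read_sysreg(sysreg);				\
  	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
  	if (__scs_new != __scs_val)					\
  		write_sysreg(__scs_new, sysreg);			\
  } while (0)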