Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 1 | From 8e44fac113d935affed1550480631f3fe7f30584 Mon Sep 17 00:00:00 2001 |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 2 | From: Jaxson Han <jaxson.han@arm.com> |
| 3 | Date: Tue, 25 May 2021 07:25:00 +0100 |
| 4 | Subject: [PATCH] aarch64: Introduce EL2 boot code for Armv8-R AArch64 |
| 5 | |
| 6 | The Armv8-R AArch64 profile does not support the EL3 exception level. |
| 7 | The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU |
| 8 | at EL1, which allows to run off-the-shelf Linux. However EL2 only |
| 9 | supports a PMSA, which is not supported by Linux, so we need to drop |
| 10 | into EL1 before entering the kernel. |
| 11 | |
| 12 | We add a new err_invalid_arch symbol as a dead loop. If we detect the |
  |  13 | current Armv8-R AArch64 only supports PMSA, meaning we cannot boot |
| 14 | Linux anymore, then we jump to err_invalid_arch. |
| 15 | |
  |  16 | During Armv8-R AArch64 init, to make sure nothing unexpected traps into |
  |  17 | EL2, we auto-detect and configure FIEN and EnSCXT in HCR_EL2. |
| 18 | |
| 19 | The boot sequence is: |
| 20 | If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL |
| 21 | before entering the kernel. |
| 22 | If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64), |
| 23 | if id_aa64mmfr0_el1.MSA_frac == 0x2, |
| 24 | then goto Armv8-R AArch64 initialisation and drop to EL1 before |
| 25 | entering the kernel. |
| 26 | else, which means VMSA unsupported and cannot boot Linux, |
| 27 | goto err_invalid_arch (dead loop). |
| 28 | Else, no initialisation and keep the current EL before entering the |
| 29 | kernel. |
| 30 | |
| 31 | Upstream-Status: Pending |
| 32 | Signed-off-by: Jaxson Han <jaxson.han@arm.com> |
| 33 | --- |
| 34 | arch/aarch64/boot.S | 92 +++++++++++++++++++++++++++++++++- |
| 35 | arch/aarch64/include/asm/cpu.h | 2 + |
| 36 | 2 files changed, 92 insertions(+), 2 deletions(-) |
| 37 | |
| 38 | diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 39 | index 3593ca5..a219ea7 100644 |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 40 | --- a/arch/aarch64/boot.S |
| 41 | +++ b/arch/aarch64/boot.S |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 42 | @@ -37,16 +37,24 @@ ASM_FUNC(_start) |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 43 | * Boot sequence |
| 44 | * If CurrentEL == EL3, then goto EL3 initialisation and drop to |
| 45 | * lower EL before entering the kernel. |
| 46 | + * If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf, then |
| 47 | + * If id_aa64mmfr0_el1.MSA_frac == 0x2, then goto |
| 48 | + * Armv8-R AArch64 initialisation and drop to EL1 before |
| 49 | + * entering the kernel. |
| 50 | + * Else, which means VMSA unsupported and cannot boot Linux, |
| 51 | + * goto err_invalid_arch (dead loop). |
| 52 | * Else, no initialisation and keep the current EL before |
| 53 | * entering the kernel. |
| 54 | */ |
| 55 | mrs x0, CurrentEL |
| 56 | - cmp x0, #CURRENTEL_EL3 |
| 57 | - b.eq el3_init |
| 58 | + cmp x0, #CURRENTEL_EL2 |
| 59 | + bgt el3_init |
| 60 | + beq el2_init |
| 61 | |
| 62 | /* |
| 63 | * We stay in the current EL for entering the kernel |
| 64 | */ |
| 65 | +keep_el: |
| 66 | mov w0, #1 |
| 67 | ldr x1, =flag_keep_el |
| 68 | str w0, [x1] |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 69 | @@ -160,6 +168,85 @@ el3_init: |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 70 | str w0, [x1] |
| 71 | b el_max_init |
| 72 | |
| 73 | + /* |
| 74 | + * EL2 Armv8-R AArch64 initialisation |
| 75 | + */ |
| 76 | +el2_init: |
| 77 | + /* Detect Armv8-R AArch64 */ |
| 78 | + mrs x1, id_aa64mmfr0_el1 |
| 79 | + /* |
| 80 | + * Check MSA, bits [51:48]: |
| 81 | + * 0xf means Armv8-R AArch64. |
| 82 | + * If not 0xf, proceed in Armv8-A EL2. |
| 83 | + */ |
| 84 | + ubfx x0, x1, #48, #4 // MSA |
| 85 | + cmp x0, 0xf |
| 86 | + bne keep_el |
| 87 | + /* |
| 88 | + * Check MSA_frac, bits [55:52]: |
| 89 | + * 0x2 means EL1&0 translation regime also supports VMSAv8-64. |
| 90 | + */ |
| 91 | + ubfx x0, x1, #52, #4 // MSA_frac |
| 92 | + cmp x0, 0x2 |
| 93 | + /* |
| 94 | + * If not 0x2, no VMSA, so cannot boot Linux and dead loop. |
| 95 | + * Also, since the architecture guarantees that those CPUID |
| 96 | + * fields never lose features when the value in a field |
| 97 | + * increases, we use blt to cover it. |
| 98 | + */ |
| 99 | + blt err_invalid_arch |
| 100 | + |
| 101 | + mrs x0, midr_el1 |
| 102 | + msr vpidr_el2, x0 |
| 103 | + |
| 104 | + mrs x0, mpidr_el1 |
| 105 | + msr vmpidr_el2, x0 |
| 106 | + |
| 107 | + mov x0, #(1 << 31) // VTCR_MSA: VMSAv8-64 support |
| 108 | + msr vtcr_el2, x0 |
| 109 | + |
| 110 | + /* Init HCR_EL2 */ |
| 111 | + mov x0, #(1 << 31) // RES1: Armv8-R aarch64 only |
| 112 | + |
| 113 | + mrs x1, id_aa64pfr0_el1 |
| 114 | + ubfx x2, x1, #56, 4 // ID_AA64PFR0_EL1.CSV2 |
| 115 | + cmp x2, 0x2 |
| 116 | + b.lt 1f |
| 117 | + /* |
| 118 | + * Disable trap when accessing SCTXNUM_EL0 or SCTXNUM_EL1 |
| 119 | + * if FEAT_CSV2. |
| 120 | + */ |
| 121 | + orr x0, x0, #(1 << 53) // HCR_EL2.EnSCXT |
| 122 | + |
| 123 | +1: ubfx x2, x1, #28, 4 // ID_AA64PFR0_EL1.RAS |
| 124 | + cmp x2, 0x2 |
| 125 | + b.lt 1f |
| 126 | + /* Disable trap when accessing ERXPFGCDN_EL1 if FEAT_RASv1p1. */ |
| 127 | + orr x0, x0, #(1 << 47) // HCR_EL2.FIEN |
| 128 | + |
| 129 | + /* Enable pointer authentication if present */ |
| 130 | +1: mrs x1, id_aa64isar1_el1 |
| 131 | + /* |
| 132 | + * If ID_AA64ISAR1_EL1.{GPI, GPA, API, APA} == {0000, 0000, 0000, 0000} |
| 133 | + * then HCR_EL2.APK and HCR_EL2.API are RES 0. |
| 134 | + * Else |
| 135 | + * set HCR_EL2.APK and HCR_EL2.API. |
| 136 | + */ |
| 137 | + ldr x2, =(((0xff) << 24) | (0xff << 4)) |
| 138 | + and x1, x1, x2 |
| 139 | + cbz x1, 1f |
| 140 | + |
| 141 | + orr x0, x0, #(1 << 40) // HCR_EL2.APK |
| 142 | + orr x0, x0, #(1 << 41) // HCR_EL2.API |
| 143 | + |
| 144 | +1: msr hcr_el2, x0 |
| 145 | + isb |
| 146 | + |
| 147 | + mov w0, #SPSR_KERNEL_EL1 |
| 148 | + ldr x1, =spsr_to_elx |
| 149 | + str w0, [x1] |
| 150 | + // fall through |
| 151 | + |
| 152 | el_max_init: |
| 153 | ldr x0, =COUNTER_FREQ |
| 154 | msr cntfrq_el0, x0 |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 155 | @@ -169,6 +256,7 @@ el_max_init: |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 156 | b start_el_max |
| 157 | |
| 158 | err_invalid_id: |
| 159 | +err_invalid_arch: |
| 160 | b . |
| 161 | |
| 162 | /* |
| 163 | diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 164 | index 3767da3..3c0e00d 100644 |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 165 | --- a/arch/aarch64/include/asm/cpu.h |
| 166 | +++ b/arch/aarch64/include/asm/cpu.h |
| 167 | @@ -25,6 +25,7 @@ |
| 168 | #define SPSR_I (1 << 7) /* IRQ masked */ |
| 169 | #define SPSR_F (1 << 6) /* FIQ masked */ |
| 170 | #define SPSR_T (1 << 5) /* Thumb */ |
| 171 | +#define SPSR_EL1H (5 << 0) /* EL1 Handler mode */ |
| 172 | #define SPSR_EL2H (9 << 0) /* EL2 Handler mode */ |
| 173 | #define SPSR_HYP (0x1a << 0) /* M[3:0] = hyp, M[4] = AArch32 */ |
| 174 | |
Andrew Geissler | 9347dd4 | 2023-03-03 12:38:41 -0600 | [diff] [blame^] | 175 | @@ -50,6 +51,7 @@ |
Brad Bishop | bec4ebc | 2022-08-03 09:55:16 -0400 | [diff] [blame] | 176 | #else |
| 177 | #define SCTLR_EL1_KERNEL SCTLR_EL1_RES1 |
| 178 | #define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H) |
| 179 | +#define SPSR_KERNEL_EL1 (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H) |
| 180 | #endif |
| 181 | |
| 182 | #ifndef __ASSEMBLY__ |