From 1e24b45a8ff34af45dda45c57f8403452d384f99 Mon Sep 17 00:00:00 2001
From: Olivier Deprez <olivier.deprez@arm.com>
Date: Mon, 8 Aug 2022 19:14:23 +0200
Subject: [PATCH] feat: disable alignment check for EL0 partitions

Relax the HW alignment check specifically for (S-)EL0 partitions when
Hafnium runs with VHE enabled. EL1 partitions have a dedicated control
for EL1 and EL0 with respect to alignment checking.
Create a hyp_state structure (grouping already defined, previously
free-standing registers) within the vCPU context to hold the Hypervisor
EL2 static configuration applied when a vCPU runs. This state is
switched back and forth when running the Hypervisor or the VM.
Add SCTLR_EL2 to this context. An EL0 partition context is initialized
with SCTLR_EL2.A=0 such that the alignment check is disabled when EL0
runs in the EL2&0 translation regime. SCTLR_EL2.A is set back when
returning to the Hypervisor such that Hypervisor execution runs with
the alignment check enabled at EL2.
Remove HCR_EL2 saving from the vCPU exit path, given that this register
state is static and doesn't change while a vCPU runs.
The rationale for such a change is to permit running upstream SW stacks
such as EDKII/StandaloneMm [1], whose default build assumes unaligned
accesses are permitted. A similar request exists for running Trusted
Services on top of Hafnium [2].

[1] https://github.com/tianocore/edk2/tree/master/StandaloneMmPkg
[2] https://trusted-services.readthedocs.io/en/integration/
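
As a reading aid, here is a condensed C sketch of what the hunks below
introduce: the hyp_state grouping added to struct arch_regs and the
EL0-dependent SCTLR_EL2 value computed at vCPU reset. Field and helper
names are taken from the diff; the surrounding code is elided and the
comments are illustrative only:

  struct arch_regs {
          /* ... */
          /* Hypervisor EL2 configuration applied while this vCPU runs. */
          struct {
                  uintreg_t hcr_el2;   /* Static; no longer saved on vCPU exit. */
                  uintreg_t ttbr0_el2;
                  uintreg_t sctlr_el2; /* A bit cleared for VHE EL0 partitions. */
          } hyp_state;
          /* ... */
  };

  /* At vCPU reset: alignment check stays on at EL2, off for EL0 partitions. */
  r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);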

Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: I2906f4c712425fcfb31adbf89e2e3b9ca293f181
Upstream-Status: Submitted [https://review.trustedfirmware.org/c/hafnium/hafnium/+/16195]
---
 src/arch/aarch64/hypervisor/cpu.c        |  9 ++++---
 src/arch/aarch64/hypervisor/exceptions.S | 32 ++++++++++++++++--------
 src/arch/aarch64/hypervisor/feature_id.c |  6 ++---
 src/arch/aarch64/hypervisor/handler.c    | 18 +++++++------
 src/arch/aarch64/inc/hf/arch/types.h     |  9 +++++--
 src/arch/aarch64/mm.c                    |  2 +-
 src/arch/aarch64/sysregs.c               | 11 ++++++--
 src/arch/aarch64/sysregs.h               |  2 +-
 8 files changed, 59 insertions(+), 30 deletions(-)

diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index d2df77d..a000159 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -115,7 +115,9 @@ void arch_regs_reset(struct vcpu *vcpu)
 }
 }

- r->hcr_el2 = get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
+ r->hyp_state.hcr_el2 =
+ get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
+ r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);
 r->lazy.cnthctl_el2 = cnthctl;
 if (vcpu->vm->el0_partition) {
 CHECK(has_vhe_support());
@@ -125,10 +127,11 @@ void arch_regs_reset(struct vcpu *vcpu)
 * are ignored and treated as 0. There is no need to mask the
 * VMID (used as asid) to only 8 bits.
 */
- r->ttbr0_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
+ r->hyp_state.ttbr0_el2 =
+ pa_addr(table) | ((uint64_t)vm_id << 48);
 r->spsr = PSR_PE_MODE_EL0T;
 } else {
- r->ttbr0_el2 = read_msr(ttbr0_el2);
+ r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
 r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
 r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
 #if SECURE_WORLD == 1
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 539e196..d3732f8 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -20,6 +20,9 @@
 #define ID_AA64PFR0_SVE_SHIFT (32)
 #define ID_AA64PFR0_SVE_LENGTH (4)

+#define SCTLR_EL2_A_SHIFT (1)
+#define HCR_EL2_TGE_SHIFT (27)
+
 /**
 * Saves the volatile registers into the register buffer of the current vCPU.
 */
@@ -51,8 +54,6 @@
 mrs x1, elr_el2
 mrs x2, spsr_el2
 stp x1, x2, [x18, #VCPU_REGS + 8 * 31]
- mrs x1, hcr_el2
- str x1, [x18, #VCPU_REGS + 8 * 33]
 .endm

 /**
@@ -871,12 +872,13 @@ vcpu_restore_volatile_and_run:
 msr elr_el2, x1
 msr spsr_el2, x2

- ldr x1, [x0, #VCPU_REGS + 8 * 33]
+ ldp x1, x2, [x0, #VCPU_REGS + 8 * 33]
 msr hcr_el2, x1
+ msr ttbr0_el2, x2
 isb

- ldr x1, [x0, #VCPU_REGS + 8 * 34]
- msr ttbr0_el2, x1
+ ldr x1, [x0, #VCPU_REGS + 8 * 35]
+ msr sctlr_el2, x1
 isb

 /* Restore x0..x3, which we have used as scratch before. */
@@ -886,15 +888,17 @@ vcpu_restore_volatile_and_run:

 #if ENABLE_VHE
 enable_vhe_tge:
+ mrs x0, id_aa64mmfr1_el1
+ tst x0, #0xf00
+ b.eq 1f
+
 /**
 * Switch to host mode ({E2H, TGE} = {1,1}) when VHE is enabled.
 * Note that E2H is always set when VHE is enabled.
 */
- mrs x0, id_aa64mmfr1_el1
- tst x0, #0xf00
- b.eq 1f
- orr x1, x1, #(1 << 27)
- msr hcr_el2, x1
+ mrs x0, hcr_el2
+ orr x0, x0, #(1 << HCR_EL2_TGE_SHIFT)
+ msr hcr_el2, x0
 isb

 /**
@@ -905,6 +909,14 @@ enable_vhe_tge:
 ldr x0, [x0]
 msr ttbr0_el2, x0
 isb
+
+ /**
+ * Enable alignment check while Hypervisor runs.
+ */
+ mrs x0, sctlr_el2
+ orr x0, x0, #(1 << SCTLR_EL2_A_SHIFT)
+ msr sctlr_el2, x0
+ isb
 1:
 ret
 #endif
diff --git a/src/arch/aarch64/hypervisor/feature_id.c b/src/arch/aarch64/hypervisor/feature_id.c
index ed3bf8f..57f3262 100644
--- a/src/arch/aarch64/hypervisor/feature_id.c
+++ b/src/arch/aarch64/hypervisor/feature_id.c
@@ -175,7 +175,7 @@ void feature_set_traps(struct vm *vm, struct arch_regs *regs)
 ~(ID_AA64MMFR1_EL1_VH_MASK << ID_AA64MMFR1_EL1_VH_SHIFT);

 if (features & HF_FEATURE_RAS) {
- regs->hcr_el2 |= HCR_EL2_TERR;
+ regs->hyp_state.hcr_el2 |= HCR_EL2_TERR;
 vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
 ~ID_AA64MMFR1_EL1_SPEC_SEI;
 vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
@@ -221,14 +221,14 @@ void feature_set_traps(struct vm *vm, struct arch_regs *regs)
 }

 if (features & HF_FEATURE_LOR) {
- regs->hcr_el2 |= HCR_EL2_TLOR;
+ regs->hyp_state.hcr_el2 |= HCR_EL2_TLOR;

 vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
 }

 if (features & HF_FEATURE_PAUTH) {
 /* APK and API bits *enable* trapping when cleared. */
- regs->hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
+ regs->hyp_state.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);

 vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
 vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index cd5146b..8a3d628 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -272,9 +272,9 @@ noreturn void sync_current_exception_noreturn(uintreg_t elr, uintreg_t spsr)
 static void set_virtual_irq(struct arch_regs *r, bool enable)
 {
 if (enable) {
- r->hcr_el2 |= HCR_EL2_VI;
+ r->hyp_state.hcr_el2 |= HCR_EL2_VI;
 } else {
- r->hcr_el2 &= ~HCR_EL2_VI;
+ r->hyp_state.hcr_el2 &= ~HCR_EL2_VI;
 }
 }

@@ -283,14 +283,15 @@ static void set_virtual_irq(struct arch_regs *r, bool enable)
 */
 static void set_virtual_irq_current(bool enable)
 {
- uintreg_t hcr_el2 = current()->regs.hcr_el2;
+ struct vcpu *vcpu = current();
+ uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

 if (enable) {
 hcr_el2 |= HCR_EL2_VI;
 } else {
 hcr_el2 &= ~HCR_EL2_VI;
 }
- current()->regs.hcr_el2 = hcr_el2;
+ vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
 }

 /**
@@ -300,9 +301,9 @@ static void set_virtual_irq_current(bool enable)
 static void set_virtual_fiq(struct arch_regs *r, bool enable)
 {
 if (enable) {
- r->hcr_el2 |= HCR_EL2_VF;
+ r->hyp_state.hcr_el2 |= HCR_EL2_VF;
 } else {
- r->hcr_el2 &= ~HCR_EL2_VF;
+ r->hyp_state.hcr_el2 &= ~HCR_EL2_VF;
 }
 }

@@ -311,14 +312,15 @@ static void set_virtual_fiq(struct arch_regs *r, bool enable)
 */
 static void set_virtual_fiq_current(bool enable)
 {
- uintreg_t hcr_el2 = current()->regs.hcr_el2;
+ struct vcpu *vcpu = current();
+ uintreg_t hcr_el2 = vcpu->regs.hyp_state.hcr_el2;

 if (enable) {
 hcr_el2 |= HCR_EL2_VF;
 } else {
 hcr_el2 &= ~HCR_EL2_VF;
 }
- current()->regs.hcr_el2 = hcr_el2;
+ vcpu->regs.hyp_state.hcr_el2 = hcr_el2;
 }

 #if SECURE_WORLD == 1
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
index 6379d73..6b8b24f 100644
--- a/src/arch/aarch64/inc/hf/arch/types.h
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -79,8 +79,13 @@ struct arch_regs {
 uintreg_t r[NUM_GP_REGS];
 uintreg_t pc;
 uintreg_t spsr;
- uintreg_t hcr_el2;
- uintreg_t ttbr0_el2;
+
+ /* Hypervisor configuration while a vCPU runs. */
+ struct {
+ uintreg_t hcr_el2;
+ uintreg_t ttbr0_el2;
+ uintreg_t sctlr_el2;
+ } hyp_state;

 /*
 * System registers.
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 8ee65ca..487ae35 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -886,7 +886,7 @@ bool arch_mm_init(paddr_t table)
 #endif
 (0xff << (8 * STAGE1_NORMALINDX)),

- .sctlr_el2 = get_sctlr_el2_value(),
+ .sctlr_el2 = get_sctlr_el2_value(false),
 .vstcr_el2 = (1U << 31) | /* RES1. */
 (0 << 30) | /* SA. */
 (0 << 29) | /* SW. */
diff --git a/src/arch/aarch64/sysregs.c b/src/arch/aarch64/sysregs.c
index e8c154b..087ba4e 100644
--- a/src/arch/aarch64/sysregs.c
+++ b/src/arch/aarch64/sysregs.c
@@ -159,7 +159,7 @@ uintreg_t get_cptr_el2_value(void)
 /**
 * Returns the value for SCTLR_EL2 for the CPU.
 */
-uintreg_t get_sctlr_el2_value(void)
+uintreg_t get_sctlr_el2_value(bool is_el0_partition)
 {
 uintreg_t sctlr_el2_value = 0;

@@ -173,7 +173,14 @@ uintreg_t get_sctlr_el2_value(void)

 /* MMU-related bits. */
 sctlr_el2_value |= SCTLR_EL2_M;
+
+ /*
+ * Alignment check enabled, except in the case of an EL0 partition
+ * with VHE enabled.
+ */
+ if (!(has_vhe_support() && is_el0_partition)) {
+ sctlr_el2_value |= SCTLR_EL2_A;
+ }
 sctlr_el2_value |= SCTLR_EL2_C;
 sctlr_el2_value |= SCTLR_EL2_SA;
 sctlr_el2_value |= SCTLR_EL2_I;
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index babd237..6fdab58 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -668,7 +668,7 @@ uintreg_t get_mdcr_el2_value(void);

 uintreg_t get_cptr_el2_value(void);

-uintreg_t get_sctlr_el2_value(void);
+uintreg_t get_sctlr_el2_value(bool is_el0_partition);

 /**
 * Branch Target Identification mechanism support in AArch64 state.
--
2.34.1
