From e90aa7853ae32cb03c86249a6c572ec88cdebaa2 Mon Sep 17 00:00:00 2001
From: Peter Hoyes <Peter.Hoyes@arm.com>
Date: Wed, 26 May 2021 17:41:10 +0100
Subject: [PATCH 1/9] armv8: Add ARMv8 MPU configuration logic

Detect whether an MMU is present at the current exception level. If
not, initialize the MPU instead of the MMU during init, and clear the
MPU regions before transition to Linux.

The MSA in use at EL1&0 may be configurable, but it can only be
determined by inspecting VTCR_EL2 at EL2, so assume that there is an
MMU for backwards compatibility.
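
For reference only, such an inspection at EL2 could look like the
hypothetical helper below (not part of this patch; it assumes
VTCR_EL2.MSA is bit [31], as described in the Armv8-R AArch64
supplement):

    /* Hypothetical: return true if EL1&0 uses VMSA, false for PMSA */
    static bool el10_has_vmsa(void)
    {
    	uint64_t vtcr;

    	asm volatile("mrs %0, vtcr_el2" : "=r" (vtcr));
    	return vtcr & (1ULL << 31);	/* assumed VTCR_EL2.MSA bit */
    }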

Provide a default (blank) MPU memory map, which can be overridden by
board configurations.

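For illustration, a board could override the weak mpu_mem_map pointer
with its own table, terminated by an all-zero entry. A minimal sketch
(the addresses and attribute index below are made up, not part of this
patch):

    #include <asm/armv8/mpu.h>

    /* Hypothetical board map: one normal-memory region plus terminator */
    static struct mpu_region board_mpu_mem_map[] = {
    	{ 0x80000000ULL, 0xbfffffffULL, PRLAR_ATTRIDX(MT_NORMAL) },
    	{ 0, 0, 0 },
    };

    struct mpu_region *mpu_mem_map = board_mpu_mem_map;
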
Issue-Id: SCM-2443
Upstream-Status: Inappropriate [other]
  Implementation pending further discussion
Signed-off-by: Peter Hoyes <Peter.Hoyes@arm.com>
Change-Id: I0ee3879f9d7f03fe940664b3551c68eeaa458d17
---
 arch/arm/cpu/armv8/cache_v8.c    | 101 ++++++++++++++++++++++++++++++-
 arch/arm/include/asm/armv8/mpu.h |  59 ++++++++++++++++++
 arch/arm/include/asm/system.h    |  19 ++++++
 3 files changed, 176 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm/include/asm/armv8/mpu.h

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index e4736e5643..798aed8058 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -15,6 +15,7 @@
 #include <asm/global_data.h>
 #include <asm/system.h>
 #include <asm/armv8/mmu.h>
+#include <asm/armv8/mpu.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -385,6 +386,91 @@ __weak u64 get_page_table_size(void)
 	return size;
 }
 
+static void mpu_clear_regions(void)
+{
+	int i;
+
+	for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+		setup_el2_mpu_region(i, 0, 0);
+	}
+}
+
+static struct mpu_region default_mpu_mem_map[] = {{0,}};
+__weak struct mpu_region *mpu_mem_map = default_mpu_mem_map;
+
+static void mpu_setup(void)
+{
+	int i;
+
+	if (current_el() != 2) {
+		panic("MPU configuration is only supported at EL2");
+	}
+
+	set_sctlr(get_sctlr() & ~(CR_M | CR_WXN));
+
+	asm volatile("msr MAIR_EL2, %0" : : "r" MEMORY_ATTRIBUTES);
+
+	for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+		setup_el2_mpu_region(i,
+			PRBAR_ADDRESS(mpu_mem_map[i].start)
+			| PRBAR_OUTER_SH | PRBAR_AP_RW_ANY,
+			PRLAR_ADDRESS(mpu_mem_map[i].end)
+			| mpu_mem_map[i].attrs | PRLAR_EN_BIT
+		);
+	}
+
+	set_sctlr(get_sctlr() | CR_M);
+}
+
+static bool el_has_mmu(void)
+{
+	if (current_el() < 2) {
+		/* We have no way of knowing, so assume we have an MMU */
+		return true;
+	}
+
+	uint64_t id_aa64mmfr0;
+	asm volatile("mrs %0, id_aa64mmfr0_el1"
+		     : "=r" (id_aa64mmfr0) : : "cc");
+	uint64_t msa = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_MASK;
+	uint64_t msa_frac = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_FRAC_MASK;
+
+	switch (msa) {
+	case ID_AA64MMFR0_EL1_MSA_VMSA:
+		/*
+		 * VMSA supported in all translation regimes.
+		 * No support for PMSA.
+		 */
+		return true;
+	case ID_AA64MMFR0_EL1_MSA_USE_FRAC:
+		/* See MSA_frac for the supported MSAs. */
+		switch (msa_frac) {
+		case ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA:
+			/*
+			 * PMSA not supported in any translation
+			 * regime.
+			 */
+			return true;
+		case ID_AA64MMFR0_EL1_MSA_FRAC_VMSA:
+			/*
+			 * PMSA supported in all translation
+			 * regimes; EL1&0 also supports VMSA.
+			 */
+		case ID_AA64MMFR0_EL1_MSA_FRAC_PMSA:
+			/*
+			 * PMSA supported in all translation
+			 * regimes. No support for VMSA.
+			 */
+			return false;
+		default:
+			panic("Unsupported id_aa64mmfr0_el1 "
+			      "MSA_frac value");
+		}
+	default:
+		panic("Unsupported id_aa64mmfr0_el1 MSA value");
+	}
+}
+
 void setup_pgtables(void)
 {
 	int i;
@@ -499,8 +585,13 @@ void dcache_enable(void)
 	/* The data cache is not active unless the mmu is enabled */
 	if (!(get_sctlr() & CR_M)) {
 		invalidate_dcache_all();
-		__asm_invalidate_tlb_all();
-		mmu_setup();
+
+		if (el_has_mmu()) {
+			__asm_invalidate_tlb_all();
+			mmu_setup();
+		} else {
+			mpu_setup();
+		}
 	}
 
 	set_sctlr(get_sctlr() | CR_C);
@@ -519,7 +610,11 @@ void dcache_disable(void)
 	set_sctlr(sctlr & ~(CR_C|CR_M));
 
 	flush_dcache_all();
-	__asm_invalidate_tlb_all();
+
+	if (el_has_mmu())
+		__asm_invalidate_tlb_all();
+	else
+		mpu_clear_regions();
 }
 
 int dcache_status(void)
diff --git a/arch/arm/include/asm/armv8/mpu.h b/arch/arm/include/asm/armv8/mpu.h
new file mode 100644
index 0000000000..8de627cafd
--- /dev/null
+++ b/arch/arm/include/asm/armv8/mpu.h
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ * (C) Copyright 2021 Arm Limited
+ */
+
+#ifndef _ASM_ARMV8_MPU_H_
+#define _ASM_ARMV8_MPU_H_
+
+#include <asm/armv8/mmu.h>
+#include <linux/stringify.h>
+
+#define PRSELR_EL2	S3_4_c6_c2_1
+#define PRBAR_EL2	S3_4_c6_c8_0
+#define PRLAR_EL2	S3_4_c6_c8_1
+#define MPUIR_EL2	S3_4_c0_c0_4
+
+#define PRBAR_ADDRESS(addr)	((addr) & ~(0x3fULL))
+
+/* Access permissions */
+#define PRBAR_AP(val)		(((val) & 0x3) << 2)
+#define PRBAR_AP_RW_HYP		PRBAR_AP(0x0)
+#define PRBAR_AP_RW_ANY		PRBAR_AP(0x1)
+#define PRBAR_AP_RO_HYP		PRBAR_AP(0x2)
+#define PRBAR_AP_RO_ANY		PRBAR_AP(0x3)
+
+/* Shareability */
+#define PRBAR_SH(val)		(((val) & 0x3) << 4)
+#define PRBAR_NON_SH		PRBAR_SH(0x0)
+#define PRBAR_OUTER_SH		PRBAR_SH(0x2)
+#define PRBAR_INNER_SH		PRBAR_SH(0x3)
+
+/* Memory attribute (MAIR idx) */
+#define PRLAR_ATTRIDX(val)	(((val) & 0x7) << 1)
+#define PRLAR_EN_BIT		(0x1)
+#define PRLAR_ADDRESS(addr)	((addr) & ~(0x3fULL))
+
+#ifndef __ASSEMBLY__
+
+static inline void setup_el2_mpu_region(uint8_t region, uint64_t base, uint64_t limit)
+{
+	asm volatile("msr " __stringify(PRSELR_EL2) ", %0" : : "r" (region));
+	asm volatile("msr " __stringify(PRBAR_EL2) ", %0" : : "r" (base));
+	asm volatile("msr " __stringify(PRLAR_EL2) ", %0" : : "r" (limit));
+
+	asm volatile("isb");
+}
+
+struct mpu_region {
+	u64 start;
+	u64 end;
+	u64 attrs;
+};
+
+extern struct mpu_region *mpu_mem_map;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_ARMV8_MPU_H_ */
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 87d1c77e8b..4510db98a2 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -95,6 +95,25 @@
 						   auth algorithm */
 #define ID_AA64ISAR1_EL1_APA	(0xF << 4)	/* QARMA address auth algorithm */
 
+/*
+ * ID_AA64MMFR0_EL1 bits definitions
+ */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_MASK		(0xFUL << 52)	/* Memory system
+							   architecture
+							   frac */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_VMSA		(0x2UL << 52)	/* EL1&0 supports
+							   VMSA */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_PMSA		(0x1UL << 52)	/* EL1&0 only
+							   supports PMSA */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA	(0x0UL << 52)	/* No PMSA
+							   support */
+#define ID_AA64MMFR0_EL1_MSA_MASK		(0xFUL << 48)	/* Memory system
+							   architecture */
+#define ID_AA64MMFR0_EL1_MSA_USE_FRAC		(0xFUL << 48)	/* Use MSA_FRAC */
+#define ID_AA64MMFR0_EL1_MSA_VMSA		(0x0UL << 48)	/* Memory system
+							   architecture
+							   is VMSA */
+
 /*
  * ID_AA64PFR0_EL1 bits definitions
  */
-- 
2.25.1
