From a10c446ba1f7516c16dd6400c9a7f5e203779a5d Mon Sep 17 00:00:00 2001
2From: Robin Murphy <robin.murphy@arm.com>
3Date: Fri, 3 Dec 2021 11:45:00 +0000
4Subject: [PATCH 13/14] perf/arm-cmn: Support new IP features
5
6The second generation of CMN IPs add new node types and significantly
7expand the configuration space with options for extra device ports on
8edge XPs, either plumbed into the regular DTM or with extra dedicated
9DTMs to monitor them, plus larger (and smaller) mesh sizes. Add basic
10support for pulling this new information out of the hardware, piping
11it around as necessary, and handling (most of) the new choices.
12
13Signed-off-by: Robin Murphy <robin.murphy@arm.com>
14Link: https://lore.kernel.org/r/e58b495bcc7deec3882be4bac910ed0bf6979674.1638530442.git.robin.murphy@arm.com
15Signed-off-by: Will Deacon <will@kernel.org>
16Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com>
17---
18 drivers/perf/arm-cmn.c | 222 ++++++++++++++++++++++++++++++++---------
19 1 file changed, 173 insertions(+), 49 deletions(-)
20
21diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
22index 92ff273fbe58..871c86687379 100644
23--- a/drivers/perf/arm-cmn.c
24+++ b/drivers/perf/arm-cmn.c
25@@ -24,7 +24,10 @@
26 #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
27
28 #define CMN_NODEID_DEVID(reg) ((reg) & 3)
29+#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
30 #define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
31+#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
32+#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
33 #define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
34 #define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
35
36@@ -37,13 +40,26 @@
37
38 #define CMN_MAX_DIMENSION 8
39 #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
40-#define CMN_MAX_DTMS CMN_MAX_XPS
41+#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
42
43-/* The CFG node has one other useful purpose */
44+/* The CFG node has various info besides the discovery tree */
45 #define CMN_CFGM_PERIPH_ID_2 0x0010
46 #define CMN_CFGM_PID2_REVISION GENMASK(7, 4)
47
48-/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */
49+#define CMN_CFGM_INFO_GLOBAL 0x900
50+#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
51+#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
52+#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
53+
54+/* XPs also have some local topology info which has uses too */
55+#define CMN_MXP__CONNECT_INFO_P0 0x0008
56+#define CMN_MXP__CONNECT_INFO_P1 0x0010
57+#define CMN_MXP__CONNECT_INFO_P2 0x0028
58+#define CMN_MXP__CONNECT_INFO_P3 0x0030
59+#define CMN_MXP__CONNECT_INFO_P4 0x0038
60+#define CMN_MXP__CONNECT_INFO_P5 0x0040
61+
62+/* PMU registers occupy the 3rd 4KB page of each node's region */
63 #define CMN_PMU_OFFSET 0x2000
64
65 /* For most nodes, this is all there is */
66@@ -53,6 +69,7 @@
67 /* DTMs live in the PMU space of XP registers */
68 #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
69 #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
70+#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
71 #define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
72 #define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
73 #define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
74@@ -77,7 +94,11 @@
75
76 #define CMN_DTM_PMEVCNTSR 0x240
77
78+#define CMN_DTM_UNIT_INFO 0x0910
79+
80 #define CMN_DTM_NUM_COUNTERS 4
81+/* Want more local counters? Why not replicate the whole DTM! Ugh... */
82+#define CMN_DTM_OFFSET(n) ((n) * 0x200)
83
84 /* The DTC node is where the magic happens */
85 #define CMN_DT_DTC_CTL 0x0a00
86@@ -131,10 +152,10 @@
87 #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
88
89 #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
90-#define CMN_CONFIG_WP_DEV_SEL BIT_ULL(48)
91-#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(50, 49)
92-#define CMN_CONFIG_WP_GRP BIT_ULL(52)
93-#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(53)
94+#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
95+#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
96+#define CMN_CONFIG_WP_GRP BIT_ULL(56)
97+#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
98 #define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
99 #define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
100
101@@ -176,9 +197,12 @@ enum cmn_node_type {
102 CMN_TYPE_HNF,
103 CMN_TYPE_XP,
104 CMN_TYPE_SBSX,
105- CMN_TYPE_RNI = 0xa,
106+ CMN_TYPE_MPAM_S,
107+ CMN_TYPE_MPAM_NS,
108+ CMN_TYPE_RNI,
109 CMN_TYPE_RND = 0xd,
110 CMN_TYPE_RNSAM = 0xf,
111+ CMN_TYPE_MTSX,
112 CMN_TYPE_CXRA = 0x100,
113 CMN_TYPE_CXHA = 0x101,
114 CMN_TYPE_CXLA = 0x102,
115@@ -233,6 +257,7 @@ struct arm_cmn_dtc {
116 struct arm_cmn {
117 struct device *dev;
118 void __iomem *base;
119+ unsigned int state;
120
121 enum cmn_revision rev;
122 enum cmn_model model;
123@@ -240,6 +265,13 @@ struct arm_cmn {
124 u8 mesh_y;
125 u16 num_xps;
126 u16 num_dns;
127+ bool multi_dtm;
128+ u8 ports_used;
129+ struct {
130+ unsigned int rsp_vc_num : 2;
131+ unsigned int dat_vc_num : 2;
132+ };
133+
134 struct arm_cmn_node *xps;
135 struct arm_cmn_node *dns;
136
137@@ -250,7 +282,6 @@ struct arm_cmn {
138 int cpu;
139 struct hlist_node cpuhp_node;
140
141- unsigned int state;
142 struct pmu pmu;
143 };
144
145@@ -275,13 +306,25 @@ static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
146 static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
147 {
148 struct arm_cmn_nodeid nid;
149- int bits = arm_cmn_xyidbits(cmn);
150
151- nid.x = CMN_NODEID_X(id, bits);
152- nid.y = CMN_NODEID_Y(id, bits);
153- nid.port = CMN_NODEID_PID(id);
154- nid.dev = CMN_NODEID_DEVID(id);
155+ if (cmn->num_xps == 1) {
156+ nid.x = 0;
157+ nid.y = 0;
158+ nid.port = CMN_NODEID_1x1_PID(id);
159+ nid.dev = CMN_NODEID_DEVID(id);
160+ } else {
161+ int bits = arm_cmn_xyidbits(cmn);
162
163+ nid.x = CMN_NODEID_X(id, bits);
164+ nid.y = CMN_NODEID_Y(id, bits);
165+ if (cmn->ports_used & 0xc) {
166+ nid.port = CMN_NODEID_EXT_PID(id);
167+ nid.dev = CMN_NODEID_EXT_DEVID(id);
168+ } else {
169+ nid.port = CMN_NODEID_PID(id);
170+ nid.dev = CMN_NODEID_DEVID(id);
171+ }
172+ }
173 return nid;
174 }
175
176@@ -310,6 +353,7 @@ struct arm_cmn_hw_event {
177 unsigned int dtc_idx;
178 u8 dtcs_used;
179 u8 num_dns;
180+ u8 dtm_offset;
181 };
182
183 #define for_each_hw_dn(hw, dn, i) \
184@@ -354,7 +398,8 @@ struct arm_cmn_format_attr {
185 .occupid = _occupid, \
186 }})[0].attr.attr)
187
188-static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id)
189+static bool arm_cmn_is_occup_event(enum cmn_model model,
190+ enum cmn_node_type type, unsigned int id)
191 {
192 return (type == CMN_TYPE_DVM && id == 0x05) ||
193 (type == CMN_TYPE_HNF && id == 0x0f);
194@@ -375,9 +420,9 @@ static ssize_t arm_cmn_event_show(struct device *dev,
195 "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
196 eattr->type, eattr->eventid);
197
198- if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
199- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
200- eattr->type, eattr->eventid, eattr->occupid);
201+ if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid))
202+ return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
203+ eattr->type, eattr->eventid, eattr->occupid);
204
205 return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n",
206 eattr->type, eattr->eventid);
207@@ -390,25 +435,36 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
208 struct device *dev = kobj_to_dev(kobj);
209 struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
210 struct arm_cmn_event_attr *eattr;
211- enum cmn_node_type type;
212
213 eattr = container_of(attr, typeof(*eattr), attr.attr);
214- type = eattr->type;
215
216 if (!(eattr->model & cmn->model))
217 return 0;
218
219- /* Watchpoints aren't nodes */
220- if (type == CMN_TYPE_WP)
221- type = CMN_TYPE_XP;
222+ /* Watchpoints aren't nodes, so avoid confusion */
223+ if (eattr->type == CMN_TYPE_WP)
224+ return attr->mode;
225+
226+ /* Hide XP events for unused interfaces/channels */
227+ if (eattr->type == CMN_TYPE_XP) {
228+ unsigned int intf = (eattr->eventid >> 2) & 7;
229+ unsigned int chan = eattr->eventid >> 5;
230+
231+ if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
232+ return 0;
233+
234+ if ((chan == 5 && cmn->rsp_vc_num < 2) ||
235+ (chan == 6 && cmn->dat_vc_num < 2))
236+ return 0;
237+ }
238
239 /* Revision-specific differences */
240 if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) {
241- if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
242+ if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
243 return 0;
244 }
245
246- if (!arm_cmn_node(cmn, type))
247+ if (!arm_cmn_node(cmn, eattr->type))
248 return 0;
249
250 return attr->mode;
251@@ -669,7 +725,8 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
252 config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
253 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
254 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
255- FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc);
256+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) |
257+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
258 if (combine && !grp)
259 config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
260
261@@ -712,7 +769,7 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
262 offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
263 for_each_hw_dn(hw, dn, i) {
264 if (dtm != &cmn->dtms[dn->dtm]) {
265- dtm = &cmn->dtms[dn->dtm];
266+ dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
267 reg = readq_relaxed(dtm->base + offset);
268 }
269 dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
270@@ -800,8 +857,10 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
271 u64 mask = CMN_EVENT_WP_MASK(event);
272
273 for_each_hw_dn(hw, dn, i) {
274- writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
275- writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
276+ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
277+
278+ writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
279+ writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
280 }
281 } else for_each_hw_dn(hw, dn, i) {
282 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
283@@ -826,8 +885,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
284 int wp_idx = arm_cmn_wp_idx(event);
285
286 for_each_hw_dn(hw, dn, i) {
287- writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
288- writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
289+ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
290+
291+ writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
292+ writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
293 }
294 } else for_each_hw_dn(hw, dn, i) {
295 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
296@@ -847,7 +908,8 @@ struct arm_cmn_val {
297 bool cycles;
298 };
299
300-static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event)
301+static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
302+ struct perf_event *event)
303 {
304 struct arm_cmn_hw_event *hw = to_cmn_hw(event);
305 struct arm_cmn_node *dn;
306@@ -865,7 +927,7 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev
307 }
308
309 val->dtc_count++;
310- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
311+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
312 occupid = CMN_EVENT_OCCUPID(event) + 1;
313 else
314 occupid = 0;
315@@ -884,7 +946,7 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev
316 }
317 }
318
319-static int arm_cmn_validate_group(struct perf_event *event)
320+static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
321 {
322 struct arm_cmn_hw_event *hw = to_cmn_hw(event);
323 struct arm_cmn_node *dn;
324@@ -904,9 +966,9 @@ static int arm_cmn_validate_group(struct perf_event *event)
325 if (!val)
326 return -ENOMEM;
327
328- arm_cmn_val_add_event(val, leader);
329+ arm_cmn_val_add_event(cmn, val, leader);
330 for_each_sibling_event(sibling, leader)
331- arm_cmn_val_add_event(val, sibling);
332+ arm_cmn_val_add_event(cmn, val, sibling);
333
334 type = CMN_EVENT_TYPE(event);
335 if (type == CMN_TYPE_DTC) {
336@@ -917,7 +979,7 @@ static int arm_cmn_validate_group(struct perf_event *event)
337 if (val->dtc_count == CMN_DT_NUM_COUNTERS)
338 goto done;
339
340- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
341+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
342 occupid = CMN_EVENT_OCCUPID(event) + 1;
343 else
344 occupid = 0;
345@@ -980,6 +1042,9 @@ static int arm_cmn_event_init(struct perf_event *event)
346 eventid = CMN_EVENT_EVENTID(event);
347 if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
348 return -EINVAL;
349+ /* ...but the DTM may depend on which port we're watching */
350+ if (cmn->multi_dtm)
351+ hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
352 }
353
354 bynodeid = CMN_EVENT_BYNODEID(event);
355@@ -1007,7 +1072,7 @@ static int arm_cmn_event_init(struct perf_event *event)
356 return -EINVAL;
357 }
358
359- return arm_cmn_validate_group(event);
360+ return arm_cmn_validate_group(cmn, event);
361 }
362
363 static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
364@@ -1017,13 +1082,13 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
365 enum cmn_node_type type = CMN_EVENT_TYPE(event);
366
367 while (i--) {
368- struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm];
369+ struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
370 unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
371
372 if (type == CMN_TYPE_WP)
373 dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
374
375- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
376+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
377 hw->dn[i].occupid_count--;
378
379 dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
380@@ -1069,7 +1134,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
381
382 /* ...then the local counters to feed it. */
383 for_each_hw_dn(hw, dn, i) {
384- struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm];
385+ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
386 unsigned int dtm_idx, shift;
387 u64 reg;
388
389@@ -1098,10 +1163,13 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
390 } else {
391 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
392
393+ if (cmn->multi_dtm)
394+ nid.port %= 2;
395+
396 input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
397 (nid.port << 4) + (nid.dev << 2);
398
399- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) {
400+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) {
401 u8 occupid = CMN_EVENT_OCCUPID(event);
402
403 if (dn->occupid_count == 0) {
404@@ -1283,11 +1351,11 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
405 return 0;
406 }
407
408-static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp)
409+static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
410 {
411 int i;
412
413- dtm->base = xp->pmu_base;
414+ dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
415 dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
416 for (i = 0; i < 4; i++) {
417 dtm->wp_event[i] = -1;
418@@ -1345,6 +1413,8 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
419
420 xp = arm_cmn_node_to_xp(cmn, dn);
421 dn->dtm = xp->dtm;
422+ if (cmn->multi_dtm)
423+ dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
424
425 if (dn->type == CMN_TYPE_DTC) {
426 int err;
427@@ -1408,6 +1478,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
428 reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
429 cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
430
431+ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
432+ cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
433+ cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
434+ cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
435+
436 reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
437 child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
438 child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
439@@ -1429,7 +1504,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
440 if (!dn)
441 return -ENOMEM;
442
443- dtm = devm_kcalloc(cmn->dev, cmn->num_xps, sizeof(*dtm), GFP_KERNEL);
444+ /* Initial safe upper bound on DTMs for any possible mesh layout */
445+ i = cmn->num_xps;
446+ if (cmn->multi_dtm)
447+ i += cmn->num_xps + 1;
448+ dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
449 if (!dtm)
450 return -ENOMEM;
451
452@@ -1439,6 +1518,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
453 for (i = 0; i < cmn->num_xps; i++) {
454 void __iomem *xp_region = cmn->base + xp_offset[i];
455 struct arm_cmn_node *xp = dn++;
456+ unsigned int xp_ports = 0;
457
458 arm_cmn_init_node_info(cmn, xp_offset[i], xp);
459 /*
460@@ -1450,9 +1530,39 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
461 if (xp->id == (1 << 3))
462 cmn->mesh_x = xp->logid;
463
464- xp->dtc = 0xf;
465+ if (cmn->model == CMN600)
466+ xp->dtc = 0xf;
467+ else
468+ xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
469+
470 xp->dtm = dtm - cmn->dtms;
471- arm_cmn_init_dtm(dtm++, xp);
472+ arm_cmn_init_dtm(dtm++, xp, 0);
473+ /*
474+ * Keeping track of connected ports will let us filter out
475+ * unnecessary XP events easily. We can also reliably infer the
476+ * "extra device ports" configuration for the node ID format
477+ * from this, since in that case we will see at least one XP
478+ * with port 2 connected, for the HN-D.
479+ */
480+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
481+ xp_ports |= BIT(0);
482+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
483+ xp_ports |= BIT(1);
484+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
485+ xp_ports |= BIT(2);
486+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
487+ xp_ports |= BIT(3);
488+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
489+ xp_ports |= BIT(4);
490+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
491+ xp_ports |= BIT(5);
492+
493+ if (cmn->multi_dtm && (xp_ports & 0xc))
494+ arm_cmn_init_dtm(dtm++, xp, 1);
495+ if (cmn->multi_dtm && (xp_ports & 0x30))
496+ arm_cmn_init_dtm(dtm++, xp, 2);
497+
498+ cmn->ports_used |= xp_ports;
499
500 reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
501 child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
502@@ -1488,11 +1598,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
503 case CMN_TYPE_SBSX:
504 case CMN_TYPE_RNI:
505 case CMN_TYPE_RND:
506+ case CMN_TYPE_MTSX:
507 case CMN_TYPE_CXRA:
508 case CMN_TYPE_CXHA:
509 dn++;
510 break;
511 /* Nothing to see here */
512+ case CMN_TYPE_MPAM_S:
513+ case CMN_TYPE_MPAM_NS:
514 case CMN_TYPE_RNSAM:
515 case CMN_TYPE_CXLA:
516 break;
517@@ -1512,6 +1625,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
518 if (dn)
519 cmn->dns = dn;
520
521+ sz = (void *)dtm - (void *)cmn->dtms;
522+ dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
523+ if (dtm)
524+ cmn->dtms = dtm;
525+
526 /*
527 * If mesh_x wasn't set during discovery then we never saw
528 * an XP at (0,1), thus we must have an Nx1 configuration.
529@@ -1520,9 +1638,15 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
530 cmn->mesh_x = cmn->num_xps;
531 cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
532
533+ /* 1x1 config plays havoc with XP event encodings */
534+ if (cmn->num_xps == 1)
535+ dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
536+
537 dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
538- dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n",
539- cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn));
540+ reg = cmn->ports_used;
541+ dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
542+ cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
543+ cmn->multi_dtm ? ", multi-DTM" : "");
544
545 return 0;
546 }
547--
5482.25.1
549