/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014 ARM Limited
 */

#include <linux/ctype.h>
#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CCN_NUM_XP_PORTS 2
#define CCN_NUM_VCS 8
#define CCN_NUM_REGIONS	256
#define CCN_REGION_SIZE	0x10000

#define CCN_ALL_OLY_ID			0xff00
#define CCN_ALL_OLY_ID__OLY_ID__SHIFT		0
#define CCN_ALL_OLY_ID__OLY_ID__MASK		0x1f
#define CCN_ALL_OLY_ID__NODE_ID__SHIFT		8
#define CCN_ALL_OLY_ID__NODE_ID__MASK		0x3f

#define CCN_MN_ERRINT_STATUS		0x0008
#define CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT		0x11
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE	0x02
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED	0x20
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE	0x22
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE	0x04
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED	0x40
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE	0x44
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE	0x08
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED	0x80
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE	0x88
#define CCN_MN_OLY_COMP_LIST_63_0	0x01e0
#define CCN_MN_ERR_SIG_VAL_63_0		0x0300
#define CCN_MN_ERR_SIG_VAL_63_0__DT	(1 << 1)

#define CCN_DT_ACTIVE_DSM		0x0000
#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n)	((n) * 8)
#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK		0xff
#define CCN_DT_CTL			0x0028
#define CCN_DT_CTL__DT_EN		(1 << 0)
#define CCN_DT_PMEVCNT(n)		(0x0100 + (n) * 0x8)
#define CCN_DT_PMCCNTR			0x0140
#define CCN_DT_PMCCNTRSR		0x0190
#define CCN_DT_PMOVSR			0x0198
#define CCN_DT_PMOVSR_CLR		0x01a0
#define CCN_DT_PMOVSR_CLR__MASK		0x1f
#define CCN_DT_PMCR			0x01a8
#define CCN_DT_PMCR__OVFL_INTR_EN	(1 << 6)
#define CCN_DT_PMCR__PMU_EN		(1 << 0)
#define CCN_DT_PMSR			0x01b0
#define CCN_DT_PMSR_REQ			0x01b8
#define CCN_DT_PMSR_CLR			0x01c0

#define CCN_HNF_PMU_EVENT_SEL		0x0600
#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_HNF_PMU_EVENT_SEL__ID__MASK		0xf

#define CCN_XP_DT_CONFIG		0x0300
#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n)	((n) * 4)
#define CCN_XP_DT_CONFIG__DT_CFG__MASK		0xf
#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH	0x0
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1	0x1
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n)	(0x2 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n)	(0x4 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
#define CCN_XP_DT_INTERFACE_SEL		0x0308
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n)	(0 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK	0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n)	(1 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK	0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n)	(2 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK	0x3
#define CCN_XP_DT_CMP_VAL_L(n)		(0x0310 + (n) * 0x40)
#define CCN_XP_DT_CMP_VAL_H(n)		(0x0318 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_L(n)		(0x0320 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_H(n)		(0x0328 + (n) * 0x40)
#define CCN_XP_DT_CONTROL		0x0370
#define CCN_XP_DT_CONTROL__DT_ENABLE	(1 << 0)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n)	(12 + (n) * 4)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK	0xf
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS	0xf
#define CCN_XP_PMU_EVENT_SEL		0x0600
#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 7)
#define CCN_XP_PMU_EVENT_SEL__ID__MASK		0x3f

#define CCN_SBAS_PMU_EVENT_SEL		0x0600
#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK	0xf

#define CCN_RNI_PMU_EVENT_SEL		0x0600
#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_RNI_PMU_EVENT_SEL__ID__MASK		0xf

#define CCN_TYPE_MN	0x01
#define CCN_TYPE_DT	0x02
#define CCN_TYPE_HNF	0x04
#define CCN_TYPE_HNI	0x05
#define CCN_TYPE_XP	0x08
#define CCN_TYPE_SBSX	0x0c
#define CCN_TYPE_SBAS	0x10
#define CCN_TYPE_RNI_1P	0x14
#define CCN_TYPE_RNI_2P	0x15
#define CCN_TYPE_RNI_3P	0x16
#define CCN_TYPE_RND_1P	0x18 /* RN-D = RN-I + DVM */
#define CCN_TYPE_RND_2P	0x19
#define CCN_TYPE_RND_3P	0x1a
#define CCN_TYPE_CYCLES	0xff /* Pseudotype */

#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */

#define CCN_NUM_PMU_EVENTS		4
#define CCN_NUM_XP_WATCHPOINTS		2 /* See DT.dbg_id.num_watchpoints */
#define CCN_NUM_PMU_EVENT_COUNTERS	8 /* See DT.dbg_id.num_pmucntr */
#define CCN_IDX_PMU_CYCLE_COUNTER	CCN_NUM_PMU_EVENT_COUNTERS

#define CCN_NUM_PREDEFINED_MASKS	4
#define CCN_IDX_MASK_ANY		(CCN_NUM_PMU_EVENT_COUNTERS + 0)
#define CCN_IDX_MASK_EXACT		(CCN_NUM_PMU_EVENT_COUNTERS + 1)
#define CCN_IDX_MASK_ORDER		(CCN_NUM_PMU_EVENT_COUNTERS + 2)
#define CCN_IDX_MASK_OPCODE		(CCN_NUM_PMU_EVENT_COUNTERS + 3)

struct arm_ccn_component {
	void __iomem *base;
	u32 type;

	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
	union {
		struct {
			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
		} xp;
	};
};

#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
		struct arm_ccn_dt, pmu), struct arm_ccn, dt)

struct arm_ccn_dt {
	int id;
	void __iomem *base;

	spinlock_t config_lock;

	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
	struct {
		struct arm_ccn_component *source;
		struct perf_event *event;
	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];

	struct {
		u64 l, h;
	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];

	struct hrtimer hrtimer;

	cpumask_t cpu;
	struct list_head entry;

	struct pmu pmu;
};

struct arm_ccn {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;

	unsigned sbas_present:1;
	unsigned sbsx_present:1;

	int num_nodes;
	struct arm_ccn_component *node;

	int num_xps;
	struct arm_ccn_component *xp;

	struct arm_ccn_dt dt;
	int mn_id;
};

static DEFINE_MUTEX(arm_ccn_mutex);
static LIST_HEAD(arm_ccn_list);

static int arm_ccn_node_to_xp(int node)
{
	return node / CCN_NUM_XP_PORTS;
}

static int arm_ccn_node_to_xp_port(int node)
{
	return node % CCN_NUM_XP_PORTS;
}

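/*
 * Worked example (node IDs are system specific, chosen here for
 * illustration only): with CCN_NUM_XP_PORTS == 2, node 5 sits on
 * port 5 % 2 == 1 of crosspoint 5 / 2 == 2, so arm_ccn_node_to_xp(5)
 * returns 2 and arm_ccn_node_to_xp_port(5) returns 1.
 */
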
/*
 * Bit shifts and masks in these defines must be kept in sync with
 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 */
#define CCN_CONFIG_NODE(_config)	(((_config) >> 0) & 0xff)
#define CCN_CONFIG_XP(_config)		(((_config) >> 0) & 0xff)
#define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config)		(((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config)		(((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)

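/*
 * Worked example (values chosen for illustration only): an event
 * requested as ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ packs into
 *	config = (1 << 0) | (0x08 << 8) | (0x04 << 16) | (1 << 26) | (1 << 29)
 *	       = 0x24040801,
 * which the macros above decode back to xp = 1, type = CCN_TYPE_XP,
 * event = 0x04, port = 0, vc = 1 and dir = 1.
 */
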
static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
{
	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
	*config |= (node_xp << 0) | (type << 8) | (port << 24);
}

static ssize_t arm_ccn_pmu_format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea = container_of(attr,
			struct dev_ext_attribute, attr);

	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
}

#define CCN_FORMAT_ATTR(_name, _config) \
		struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
				{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
				NULL), _config }

static CCN_FORMAT_ATTR(node, "config:0-7");
static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");

static struct attribute *arm_ccn_pmu_format_attrs[] = {
	&arm_ccn_pmu_format_attr_node.attr.attr,
	&arm_ccn_pmu_format_attr_xp.attr.attr,
	&arm_ccn_pmu_format_attr_type.attr.attr,
	&arm_ccn_pmu_format_attr_event.attr.attr,
	&arm_ccn_pmu_format_attr_port.attr.attr,
	&arm_ccn_pmu_format_attr_vc.attr.attr,
	&arm_ccn_pmu_format_attr_dir.attr.attr,
	&arm_ccn_pmu_format_attr_mask.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_format_attr_group = {
	.name = "format",
	.attrs = arm_ccn_pmu_format_attrs,
};

struct arm_ccn_pmu_event {
	struct device_attribute attr;
	u32 type;
	u32 event;
	int num_ports;
	int num_vcs;
	const char *def;
	int mask;
};

#define CCN_EVENT_ATTR(_name) \
		__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)

/*
 * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
 * the ports of the XPs they are connected to. For the sake of usability they
 * are explicitly defined here (and translated into a relevant watchpoint in
 * arm_ccn_pmu_event_init()) so the user can easily request them without deep
 * knowledge of the flit format.
 */

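/*
 * For example (the node ID is system specific and used here for
 * illustration only), request flits received by an HN-I node with ID 8
 * could be counted with:
 *	perf stat -a -e ccn/hni_rxreqflits,node=8/ sleep 1
 * and the driver would silently translate this into a watchpoint on the
 * XP port that the node is connected to.
 */
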
#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
		.def = _def, .mask = _mask, }

#define CCN_EVENT_HNI(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_SBSX(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
		.type = CCN_TYPE_HNF, .event = _event, }

#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
		.type = CCN_TYPE_XP, .event = _event, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }

/*
 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
 * on configuration. One of them is picked to represent the whole group,
 * as they all share the same event types.
 */
#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
		.type = CCN_TYPE_RNI_3P, .event = _event, }

#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
		.type = CCN_TYPE_SBAS, .event = _event, }

#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
		.type = CCN_TYPE_CYCLES }

static ssize_t arm_ccn_pmu_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	struct arm_ccn_pmu_event *event = container_of(attr,
			struct arm_ccn_pmu_event, attr);
	ssize_t res;

	res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
	if (event->event)
		res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
				event->event);
	if (event->def)
		res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
				event->def);
	if (event->mask)
		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
				event->mask);

	/* Arguments required by an event */
	switch (event->type) {
	case CCN_TYPE_CYCLES:
		break;
	case CCN_TYPE_XP:
		res += snprintf(buf + res, PAGE_SIZE - res,
				",xp=?,port=?,vc=?,dir=?");
		if (event->event == CCN_EVENT_WATCHPOINT)
			res += snprintf(buf + res, PAGE_SIZE - res,
					",cmp_l=?,cmp_h=?,mask=?");
		break;
	case CCN_TYPE_MN:
		res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
		break;
	default:
		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
		break;
	}

	res += snprintf(buf + res, PAGE_SIZE - res, "\n");

	return res;
}

static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
		struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	struct device_attribute *dev_attr = container_of(attr,
			struct device_attribute, attr);
	struct arm_ccn_pmu_event *event = container_of(dev_attr,
			struct arm_ccn_pmu_event, attr);

	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
		return 0;
	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
		return 0;

	return attr->mode;
}

static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_HNF(cache_miss, 0x1),
	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
	CCN_EVENT_HNF(cache_fill, 0x3),
	CCN_EVENT_HNF(pocq_retry, 0x4),
	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
	CCN_EVENT_HNF(sf_hit, 0x6),
	CCN_EVENT_HNF(sf_evictions, 0x7),
	CCN_EVENT_HNF(snoops_sent, 0x8),
	CCN_EVENT_HNF(snoops_broadcast, 0x9),
	CCN_EVENT_HNF(l3_eviction, 0xa),
	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
	CCN_EVENT_HNF(mc_retries, 0xc),
	CCN_EVENT_HNF(mc_reqs, 0xd),
	CCN_EVENT_HNF(qos_hh_retry, 0xe),
	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
	CCN_EVENT_RNI(rxdat_flits, 0x4),
	CCN_EVENT_RNI(txdat_flits, 0x5),
	CCN_EVENT_RNI(txreq_flits, 0x6),
	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
	CCN_EVENT_RNI(rrt_full, 0x8),
	CCN_EVENT_RNI(wrt_full, 0x9),
	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
	CCN_EVENT_XP(upload_starvation, 0x1),
	CCN_EVENT_XP(download_starvation, 0x2),
	CCN_EVENT_XP(respin, 0x3),
	CCN_EVENT_XP(valid_flit, 0x4),
	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
	CCN_EVENT_SBAS(rxdat_flits, 0x4),
	CCN_EVENT_SBAS(txdat_flits, 0x5),
	CCN_EVENT_SBAS(txreq_flits, 0x6),
	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
	CCN_EVENT_SBAS(rrt_full, 0x8),
	CCN_EVENT_SBAS(wrt_full, 0x9),
	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
	CCN_EVENT_CYCLES(cycles),
};

/* Populated in arm_ccn_init() */
static struct attribute
		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];

static struct attribute_group arm_ccn_pmu_events_attr_group = {
	.name = "events",
	.is_visible = arm_ccn_pmu_events_is_visible,
	.attrs = arm_ccn_pmu_events_attrs,
};

static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
{
	int i;

	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
		return NULL;

	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';

	switch (name[1]) {
	case 'l':
		return &ccn->dt.cmp_mask[i].l;
	case 'h':
		return &ccn->dt.cmp_mask[i].h;
	default:
		return NULL;
	}
}

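/*
 * For example, the attribute named "al" parses as i = 0xa == 10 and
 * returns &ccn->dt.cmp_mask[10].l, which (with eight event counters)
 * is the low word of the predefined CCN_IDX_MASK_ORDER mask.
 */
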
static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);

	return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
}

static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
	int err = -EINVAL;

	if (mask)
		err = kstrtoull(buf, 0, mask);

	return err ? err : count;
}

#define CCN_CMP_MASK_ATTR(_name) \
		struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
				__ATTR(_name, S_IRUGO | S_IWUSR, \
				arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)

#define CCN_CMP_MASK_ATTR_RO(_name) \
		struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
				__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)

static CCN_CMP_MASK_ATTR(0l);
static CCN_CMP_MASK_ATTR(0h);
static CCN_CMP_MASK_ATTR(1l);
static CCN_CMP_MASK_ATTR(1h);
static CCN_CMP_MASK_ATTR(2l);
static CCN_CMP_MASK_ATTR(2h);
static CCN_CMP_MASK_ATTR(3l);
static CCN_CMP_MASK_ATTR(3h);
static CCN_CMP_MASK_ATTR(4l);
static CCN_CMP_MASK_ATTR(4h);
static CCN_CMP_MASK_ATTR(5l);
static CCN_CMP_MASK_ATTR(5h);
static CCN_CMP_MASK_ATTR(6l);
static CCN_CMP_MASK_ATTR(6h);
static CCN_CMP_MASK_ATTR(7l);
static CCN_CMP_MASK_ATTR(7h);
static CCN_CMP_MASK_ATTR_RO(8l);
static CCN_CMP_MASK_ATTR_RO(8h);
static CCN_CMP_MASK_ATTR_RO(9l);
static CCN_CMP_MASK_ATTR_RO(9h);
static CCN_CMP_MASK_ATTR_RO(al);
static CCN_CMP_MASK_ATTR_RO(ah);
static CCN_CMP_MASK_ATTR_RO(bl);
static CCN_CMP_MASK_ATTR_RO(bh);

static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
	.name = "cmp_mask",
	.attrs = arm_ccn_pmu_cmp_mask_attrs,
};

static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
}

static struct device_attribute arm_ccn_pmu_cpumask_attr =
		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);

static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
	&arm_ccn_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
	.attrs = arm_ccn_pmu_cpumask_attrs,
};

/*
 * Default poll period is 10ms, which is way over the top anyway,
 * as in the worst case scenario (an event every cycle), with 1GHz
 * clocked bus, the smallest, 32 bit counter will overflow in
 * more than 4s.
 */
static unsigned int arm_ccn_pmu_poll_period_us = 10000;
module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

static ktime_t arm_ccn_pmu_timer_period(void)
{
	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
}

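/*
 * Sanity check of the estimate above: 2^32 cycles at 1 GHz take
 * 2^32 / 10^9 ~= 4.29 s, so the default 10 ms (10000 us) poll period
 * leaves more than two orders of magnitude of headroom.
 */
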
static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
	&arm_ccn_pmu_events_attr_group,
	&arm_ccn_pmu_format_attr_group,
	&arm_ccn_pmu_cmp_mask_attr_group,
	&arm_ccn_pmu_cpumask_attr_group,
	NULL
};

static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
{
	int bit;

	do {
		bit = find_first_zero_bit(bitmap, size);
		if (bit >= size)
			return -EAGAIN;
	} while (test_and_set_bit(bit, bitmap));

	return bit;
}

/* All RN-I and RN-D nodes have identical PMUs */
static int arm_ccn_pmu_type_eq(u32 a, u32 b)
{
	if (a == b)
		return 1;

	switch (a) {
	case CCN_TYPE_RNI_1P:
	case CCN_TYPE_RNI_2P:
	case CCN_TYPE_RNI_3P:
	case CCN_TYPE_RND_1P:
	case CCN_TYPE_RND_2P:
	case CCN_TYPE_RND_3P:
		switch (b) {
		case CCN_TYPE_RNI_1P:
		case CCN_TYPE_RNI_2P:
		case CCN_TYPE_RNI_3P:
		case CCN_TYPE_RND_1P:
		case CCN_TYPE_RND_2P:
		case CCN_TYPE_RND_3P:
			return 1;
		}
		break;
	}

	return 0;
}

static int arm_ccn_pmu_event_alloc(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	struct arm_ccn_component *source;
	int bit;

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Allocate the cycle counter */
	if (type == CCN_TYPE_CYCLES) {
		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
				ccn->dt.pmu_counters_mask))
			return -EAGAIN;

		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

		return 0;
	}

	/* Allocate an event counter */
	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
			CCN_NUM_PMU_EVENT_COUNTERS);
	if (hw->idx < 0) {
		dev_dbg(ccn->dev, "No more counters available!\n");
		return -EAGAIN;
	}

	if (type == CCN_TYPE_XP)
		source = &ccn->xp[node_xp];
	else
		source = &ccn->node[node_xp];
	ccn->dt.pmu_counters[hw->idx].source = source;

	/* Allocate an event source or a watchpoint */
	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
				CCN_NUM_XP_WATCHPOINTS);
	else
		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
				CCN_NUM_PMU_EVENTS);
	if (bit < 0) {
		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
				node_xp);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
		return -EAGAIN;
	}

	hw->config_base = bit;

	ccn->dt.pmu_counters[hw->idx].event = event;

	return 0;
}

static void arm_ccn_pmu_event_release(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
	} else {
		struct arm_ccn_component *source =
				ccn->dt.pmu_counters[hw->idx].source;

		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
				CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
		else
			clear_bit(hw->config_base, source->pmu_events_mask);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
	}

	ccn->dt.pmu_counters[hw->idx].source = NULL;
	ccn->dt.pmu_counters[hw->idx].event = NULL;
}

static int arm_ccn_pmu_event_init(struct perf_event *event)
{
	struct arm_ccn *ccn;
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	int valid;
	int i;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	ccn = pmu_to_arm_ccn(event->pmu);

	if (hw->sample_period) {
		dev_warn(ccn->dev, "Sampling not supported!\n");
		return -EOPNOTSUPP;
	}

	if (has_branch_stack(event) || event->attr.exclude_user ||
			event->attr.exclude_kernel || event->attr.exclude_hv ||
			event->attr.exclude_idle) {
		dev_warn(ccn->dev, "Can't exclude execution levels!\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_warn(ccn->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}
	/*
	 * Many perf core operations (e.g. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, like CCN, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&ccn->dt.cpu);

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Validate node/xp vs topology */
	switch (type) {
	case CCN_TYPE_MN:
		if (node_xp != ccn->mn_id) {
			dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_XP:
		if (node_xp >= ccn->num_xps) {
			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_CYCLES:
		break;
	default:
		if (node_xp >= ccn->num_nodes) {
			dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
			return -EINVAL;
		}
		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
			dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
					type, node_xp);
			return -EINVAL;
		}
		break;
	}

	/* Validate event ID vs available for the type */
	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
			i++) {
		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
		u32 port = CCN_CONFIG_PORT(event->attr.config);
		u32 vc = CCN_CONFIG_VC(event->attr.config);

		if (!arm_ccn_pmu_type_eq(type, e->type))
			continue;
		if (event_id != e->event)
			continue;
		if (e->num_ports && port >= e->num_ports) {
			dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
					port, node_xp);
			return -EINVAL;
		}
		if (e->num_vcs && vc >= e->num_vcs) {
			dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
					vc, node_xp);
			return -EINVAL;
		}
		valid = 1;
	}
	if (!valid) {
		dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
				event_id, node_xp);
		return -EINVAL;
	}

	/* Watchpoint-based event for a node is actually set on XP */
	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
		u32 port;

		type = CCN_TYPE_XP;
		port = arm_ccn_node_to_xp_port(node_xp);
		node_xp = arm_ccn_node_to_xp(node_xp);

		arm_ccn_pmu_config_set(&event->attr.config,
				node_xp, type, port);
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	list_for_each_entry(sibling, &event->group_leader->sibling_list,
			group_entry)
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;

	return 0;
}

static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
	u64 res;

	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
#ifdef readq
		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
#else
		/* 40 bit counter, can do snapshot and read in two parts */
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
			;
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
		res <<= 32;
		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
#endif
	} else {
		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
	}

	return res;
}

static void arm_ccn_pmu_event_update(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 prev_count, new_count, mask;

	do {
		prev_count = local64_read(&hw->prev_count);
		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);

	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;

	local64_add((new_count - prev_count) & mask, &event->count);
}

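/*
 * The masked subtraction above handles counter wrap-around. As an
 * illustration, for a 32 bit counter with prev_count == 0xfffffff0 and
 * new_count == 0x10, (new_count - prev_count) & 0xffffffff == 0x20,
 * the number of events that actually occurred.
 */
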
static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *xp;
	u32 val, dt_cfg;

	/* Nothing to do for cycle counter */
	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
		return;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
	else
		xp = &ccn->xp[arm_ccn_node_to_xp(
				CCN_CONFIG_NODE(event->attr.config))];

	if (enable)
		dt_cfg = hw->event_base;
	else
		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;

	spin_lock(&ccn->dt.config_lock);

	val = readl(xp->base + CCN_XP_DT_CONFIG);
	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
	writel(val, xp->base + CCN_XP_DT_CONFIG);

	spin_unlock(&ccn->dt.config_lock);
}

static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	local64_set(&event->hw.prev_count,
			arm_ccn_pmu_read_counter(ccn, hw->idx));
	hw->state = 0;

	/*
	 * Pin the timer, so that the overflows are handled by the chosen
	 * event->cpu (this is the same one as presented in "cpumask"
	 * attribute).
	 */
	if (!ccn->irq)
		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
				HRTIMER_MODE_REL_PINNED);

	/* Set the DT bus input, engaging the counter */
	arm_ccn_pmu_xp_dt_config(event, 1);
}

static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 timeout;

	/* Disable counting, setting the DT bus to pass-through mode */
	arm_ccn_pmu_xp_dt_config(event, 0);

	if (!ccn->irq)
		hrtimer_cancel(&ccn->dt.hrtimer);

	/* Let the DT bus drain */
	timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
			ccn->num_xps;
	while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
			timeout)
		cpu_relax();

	if (flags & PERF_EF_UPDATE)
		arm_ccn_pmu_event_update(event);

	hw->state |= PERF_HES_STOPPED;
}

static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	unsigned long wp = hw->config_base;
	u32 val;
	u64 cmp_l = event->attr.config1;
	u64 cmp_h = event->attr.config2;
	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);

	/* Direction (RX/TX), device (port) & virtual channel */
	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
	val |= CCN_CONFIG_DIR(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
	val |= CCN_CONFIG_PORT(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
	val |= CCN_CONFIG_VC(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);

	/*
	 * Comparison values; cmp_l carries 63 valid bits (config1:0-62)
	 * and cmp_h 60 valid bits (config2:0-59), hence the 0x7fffffff
	 * and 0x0fffffff masks on the respective high words.
	 */
	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
	writel((cmp_l >> 32) & 0x7fffffff,
			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
	writel((cmp_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);

	/* Mask */
	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
	writel((mask_l >> 32) & 0x7fffffff,
			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
	writel((mask_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
}

static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 val, id;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);

	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
			(CCN_CONFIG_PORT(event->attr.config) << 3) |
			(CCN_CONFIG_EVENT(event->attr.config) << 0);

	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_node_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 type = CCN_CONFIG_TYPE(event->attr.config);
	u32 port, val;

	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
			hw->config_base);

	/* These *_event_sel regs should be identical, but let's make sure... */
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
		return;

	/* Set the event id for the pre-allocated counter */
	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= CCN_CONFIG_EVENT(event->attr.config) <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 xp, offset, val;

	/* Cycle counter requires no setup */
	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
		return;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = CCN_CONFIG_XP(event->attr.config);
	else
		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));

	spin_lock(&ccn->dt.config_lock);

	/* Set the DT bus "distance" register */
	offset = (hw->idx / 4) * 4;
	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);

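	/*
	 * For illustration: counter idx 5 gives offset == 4 and lands in
	 * byte 5 % 4 == 1 of the second ACTIVE_DSM register, i.e. its XP
	 * number is written to bits [15:8] at CCN_DT_ACTIVE_DSM + 4.
	 */
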
	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
		if (CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			arm_ccn_pmu_xp_watchpoint_config(event);
		else
			arm_ccn_pmu_xp_event_config(event);
	} else {
		arm_ccn_pmu_node_event_config(event);
	}

	spin_unlock(&ccn->dt.config_lock);
}

static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
	int err;
	struct hw_perf_event *hw = &event->hw;

	err = arm_ccn_pmu_event_alloc(event);
	if (err)
		return err;

	arm_ccn_pmu_event_config(event);

	hw->state = PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);

	return 0;
}

static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);

	arm_ccn_pmu_event_release(event);
}

static void arm_ccn_pmu_event_read(struct perf_event *event)
{
	arm_ccn_pmu_event_update(event);
}

static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
	int idx;

	if (!pmovsr)
		return IRQ_NONE;

	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);

	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);

	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
		struct perf_event *event = dt->pmu_counters[idx].event;
		int overflowed = pmovsr & BIT(idx);

		WARN_ON_ONCE(overflowed && !event &&
				idx != CCN_IDX_PMU_CYCLE_COUNTER);

		if (!event || !overflowed)
			continue;

		arm_ccn_pmu_event_update(event);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
			hrtimer);
	unsigned long flags;

	local_irq_save(flags);
	arm_ccn_pmu_overflow_handler(dt);
	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
	return HRTIMER_RESTART;
}

static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
{
	struct arm_ccn_dt *dt;
	unsigned int target;

	mutex_lock(&arm_ccn_mutex);
	list_for_each_entry(dt, &arm_ccn_list, entry) {
		struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);

		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
			continue;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			continue;
		perf_pmu_migrate_context(&dt->pmu, cpu, target);
		cpumask_set_cpu(target, &dt->cpu);
		if (ccn->irq)
			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
	}
	mutex_unlock(&arm_ccn_mutex);

	return 0;
}

static DEFINE_IDA(arm_ccn_pmu_ida);

static int arm_ccn_pmu_init(struct arm_ccn *ccn)
{
	int err;
	unsigned int i;
	char *name;

	/* Initialize DT subsystem */
	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
	spin_lock_init(&ccn->dt.config_lock);
	writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
			ccn->dt.base + CCN_DT_PMCR);
	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
	for (i = 0; i < ccn->num_xps; i++) {
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
				CCN_XP_DT_CONTROL__DT_ENABLE,
				ccn->xp[i].base + CCN_XP_DT_CONTROL);
	}
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);

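	/*
	 * In these masks a set bit means "ignore this flit bit", hence
	 * ANY == all ones and EXACT == all zeroes. ORDER leaves only
	 * cmp_h bit 15 compared (the order bit used by the *_order
	 * events) and OPCODE leaves only cmp_h bits [13:9] compared
	 * (the 5-bit opcode field used by the MN events).
	 */
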
	/* Get a convenient /sys/event_source/devices/ name */
	ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
	if (ccn->dt.id == 0) {
		name = "ccn";
	} else {
		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);

		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
	}

	/* Perf driver registration */
	ccn->dt.pmu = (struct pmu) {
		.attr_groups = arm_ccn_pmu_attr_groups,
		.task_ctx_nr = perf_invalid_context,
		.event_init = arm_ccn_pmu_event_init,
		.add = arm_ccn_pmu_event_add,
		.del = arm_ccn_pmu_event_del,
		.start = arm_ccn_pmu_event_start,
		.stop = arm_ccn_pmu_event_stop,
		.read = arm_ccn_pmu_event_read,
	};

	/* No overflow interrupt? Have to use a timer instead. */
	if (!ccn->irq) {
		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
	}

	/* Pick one CPU which we will use to collect data from CCN... */
	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);

	/* Also make sure that the overflow interrupt is handled by this CPU */
	if (ccn->irq) {
		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
		if (err) {
			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
			goto error_set_affinity;
		}
	}

	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
	if (err)
		goto error_pmu_register;

	mutex_lock(&arm_ccn_mutex);
	list_add(&ccn->dt.entry, &arm_ccn_list);
	mutex_unlock(&arm_ccn_mutex);
	return 0;

error_pmu_register:
error_set_affinity:
	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
	for (i = 0; i < ccn->num_xps; i++)
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
	writel(0, ccn->dt.base + CCN_DT_PMCR);
	return err;
}

static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
	int i;

	mutex_lock(&arm_ccn_mutex);
	list_del(&ccn->dt.entry);
	mutex_unlock(&arm_ccn_mutex);

	if (ccn->irq)
		irq_set_affinity_hint(ccn->irq, NULL);
	for (i = 0; i < ccn->num_xps; i++)
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
	writel(0, ccn->dt.base + CCN_DT_PMCR);
	perf_pmu_unregister(&ccn->dt.pmu);
	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}

static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
		int (*callback)(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id))
{
	int region;

	for (region = 0; region < CCN_NUM_REGIONS; region++) {
		u32 val, type, id;
		void __iomem *base;
		int err;

		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
				4 * (region / 32));
		if (!(val & (1 << (region % 32))))
			continue;

		base = ccn->base + region * CCN_REGION_SIZE;
		val = readl(base + CCN_ALL_OLY_ID);
		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
				CCN_ALL_OLY_ID__OLY_ID__MASK;
		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
				CCN_ALL_OLY_ID__NODE_ID__MASK;

		err = callback(ccn, region, base, type, id);
		if (err)
			return err;
	}

	return 0;
}

static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{
	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
		ccn->num_xps = id + 1;
	else if (id >= ccn->num_nodes)
		ccn->num_nodes = id + 1;

	return 0;
}

static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{
	struct arm_ccn_component *component;

	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);

	switch (type) {
	case CCN_TYPE_MN:
		ccn->mn_id = id;
		return 0;
	case CCN_TYPE_DT:
		return 0;
	case CCN_TYPE_XP:
		component = &ccn->xp[id];
		break;
	case CCN_TYPE_SBSX:
		ccn->sbsx_present = 1;
		component = &ccn->node[id];
		break;
	case CCN_TYPE_SBAS:
		ccn->sbas_present = 1;
		/* Fall-through */
	default:
		component = &ccn->node[id];
		break;
	}

	component->base = base;
	component->type = type;

	return 0;
}

static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
		const u32 *err_sig_val)
{
	/* This should be really handled by firmware... */
	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
			err_sig_val[5], err_sig_val[4], err_sig_val[3],
			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);

	return IRQ_HANDLED;
}

static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
{
	irqreturn_t res = IRQ_NONE;
	struct arm_ccn *ccn = dev_id;
	u32 err_sig_val[6];
	u32 err_or;
	int i;

	/* PMU overflow is a special case */
	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
	}

	/* Have to read all err_sig_vals to clear them */
	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
		err_sig_val[i] = readl(ccn->base +
				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
		err_or |= err_sig_val[i];
	}
	if (err_or)
		res |= arm_ccn_error_handler(ccn, err_sig_val);

	if (res != IRQ_NONE)
		writel(CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT,
				ccn->base + CCN_MN_ERRINT_STATUS);

	return res;
}

static int arm_ccn_probe(struct platform_device *pdev)
{
	struct arm_ccn *ccn;
	struct resource *res;
	unsigned int irq;
	int err;

	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
	if (!ccn)
		return -ENOMEM;
	ccn->dev = &pdev->dev;
	platform_set_drvdata(pdev, ccn);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(ccn->dev, res->start,
			resource_size(res), pdev->name))
		return -EBUSY;

	ccn->base = devm_ioremap(ccn->dev, res->start,
			resource_size(res));
	if (!ccn->base)
		return -EFAULT;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res)
		return -EINVAL;
	irq = res->start;

	/* Check if we can use the interrupt */
	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);
	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
		/* Can set 'disable' bits, so can acknowledge interrupts */
		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
				ccn->base + CCN_MN_ERRINT_STATUS);
		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
				dev_name(ccn->dev), ccn);
		if (err)
			return err;

		ccn->irq = irq;
	}

	/* Build topology */

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
	if (err)
		return err;

	ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
		GFP_KERNEL);
	ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
		GFP_KERNEL);
	if (!ccn->node || !ccn->xp)
		return -ENOMEM;

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
	if (err)
		return err;

	return arm_ccn_pmu_init(ccn);
}

static int arm_ccn_remove(struct platform_device *pdev)
{
	struct arm_ccn *ccn = platform_get_drvdata(pdev);

	arm_ccn_pmu_cleanup(ccn);

	return 0;
}

static const struct of_device_id arm_ccn_match[] = {
	{ .compatible = "arm,ccn-504", },
	{},
};

static struct platform_driver arm_ccn_driver = {
	.driver = {
		.name = "arm-ccn",
		.of_match_table = arm_ccn_match,
	},
	.probe = arm_ccn_probe,
	.remove = arm_ccn_remove,
};

static int __init arm_ccn_init(void)
{
	int i, ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
					"AP_PERF_ARM_CCN_ONLINE", NULL,
					arm_ccn_pmu_offline_cpu);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;

	return platform_driver_register(&arm_ccn_driver);
}

static void __exit arm_ccn_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
	platform_driver_unregister(&arm_ccn_driver);
}

module_init(arm_ccn_init);
module_exit(arm_ccn_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_LICENSE("GPL");