4 * Copyright (C) 2012 ARM Limited
5 * Author: Will Deacon <will.deacon@arm.com>
7 * This code is based heavily on the ARMv7 perf event code.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #define pr_fmt(fmt) "hw perfevents: " fmt
23 #include <linux/bitmap.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/export.h>
27 #include <linux/perf_event.h>
28 #include <linux/platform_device.h>
29 #include <linux/spinlock.h>
30 #include <linux/uaccess.h>
32 #include <asm/cputype.h>
34 #include <asm/irq_regs.h>
36 #include <asm/stacktrace.h>
39 * ARMv8 supports a maximum of 32 events.
40 * The cycle counter is included in this total.
42 #define ARMPMU_MAX_HWEVENTS 32
44 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
45 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
46 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
48 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
50 /* Set at runtime when we know what CPU type we are. */
51 static struct arm_pmu *cpu_pmu;
54 armpmu_get_max_events(void)
59 max_events = cpu_pmu->num_events;
63 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
65 int perf_num_counters(void)
67 return armpmu_get_max_events();
69 EXPORT_SYMBOL_GPL(perf_num_counters);
71 #define HW_OP_UNSUPPORTED 0xFFFF
74 PERF_COUNT_HW_CACHE_##_x
76 #define CACHE_OP_UNSUPPORTED 0xFFFF
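/*
 * Worked example of the PERF_TYPE_HW_CACHE config encoding decoded below:
 * config = (type) | (op << 8) | (result << 16), so for instance
 * L1D | (OP_READ << 8) | (RESULT_MISS << 16) selects the L1 data-cache
 * read-miss entry of the cache map.
 */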
79 armpmu_map_cache_event(const unsigned (*cache_map)
80 [PERF_COUNT_HW_CACHE_MAX]
81 [PERF_COUNT_HW_CACHE_OP_MAX]
82 [PERF_COUNT_HW_CACHE_RESULT_MAX],
85 unsigned int cache_type, cache_op, cache_result, ret;
87 cache_type = (config >> 0) & 0xff;
88 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
91 cache_op = (config >> 8) & 0xff;
92 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
95 cache_result = (config >> 16) & 0xff;
96 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
99 ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
101 if (ret == CACHE_OP_UNSUPPORTED)
108 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
112 if (config >= PERF_COUNT_HW_MAX)
115 mapping = (*event_map)[config];
116 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
120 armpmu_map_raw_event(u32 raw_event_mask, u64 config)
122 return (int)(config & raw_event_mask);
125 static int map_cpu_event(struct perf_event *event,
126 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
127 const unsigned (*cache_map)
128 [PERF_COUNT_HW_CACHE_MAX]
129 [PERF_COUNT_HW_CACHE_OP_MAX]
130 [PERF_COUNT_HW_CACHE_RESULT_MAX],
133 u64 config = event->attr.config;
135 switch (event->attr.type) {
136 case PERF_TYPE_HARDWARE:
137 return armpmu_map_event(event_map, config);
138 case PERF_TYPE_HW_CACHE:
139 return armpmu_map_cache_event(cache_map, config);
141 return armpmu_map_raw_event(raw_event_mask, config);
148 armpmu_event_set_period(struct perf_event *event,
149 struct hw_perf_event *hwc,
152 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
153 s64 left = local64_read(&hwc->period_left);
154 s64 period = hwc->sample_period;
157 if (unlikely(left <= -period)) {
159 local64_set(&hwc->period_left, left);
160 hwc->last_period = period;
164 if (unlikely(left <= 0)) {
166 local64_set(&hwc->period_left, left);
167 hwc->last_period = period;
171 if (left > (s64)armpmu->max_period)
172 left = armpmu->max_period;
174 local64_set(&hwc->prev_count, (u64)-left);
176 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
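	/*
	 * Sketch of the arithmetic above, assuming a 32-bit counter: for
	 * left == 1000 we program 0xfffffc18, so the counter overflows (and
	 * raises its interrupt) after exactly 1000 further events.
	 */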
178 perf_event_update_userpage(event);
184 armpmu_event_update(struct perf_event *event,
185 struct hw_perf_event *hwc,
188 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
189 u64 delta, prev_raw_count, new_raw_count;
192 prev_raw_count = local64_read(&hwc->prev_count);
193 new_raw_count = armpmu->read_counter(idx);
195 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
196 new_raw_count) != prev_raw_count)
199 delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
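	/*
	 * Masking with max_period makes the subtraction modulo the counter
	 * width: e.g. prev == 0xfffffff0 and new == 0x00000010 give
	 * delta == 0x20 for a 32-bit counter that wrapped once.
	 */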
201 local64_add(delta, &event->count);
202 local64_sub(delta, &hwc->period_left);
204 return new_raw_count;
208 armpmu_read(struct perf_event *event)
210 struct hw_perf_event *hwc = &event->hw;
212 /* Don't read disabled counters! */
216 armpmu_event_update(event, hwc, hwc->idx);
220 armpmu_stop(struct perf_event *event, int flags)
222 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
223 struct hw_perf_event *hwc = &event->hw;
226 * ARM pmu always has to update the counter, so ignore
227 * PERF_EF_UPDATE, see comments in armpmu_start().
229 if (!(hwc->state & PERF_HES_STOPPED)) {
230 armpmu->disable(hwc, hwc->idx);
231 barrier(); /* why? */
232 armpmu_event_update(event, hwc, hwc->idx);
233 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
238 armpmu_start(struct perf_event *event, int flags)
240 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
241 struct hw_perf_event *hwc = &event->hw;
244 * ARM pmu always has to reprogram the period, so ignore
245 * PERF_EF_RELOAD, see the comment below.
247 if (flags & PERF_EF_RELOAD)
248 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
252 * Set the period again. Some counters can't be stopped, so when we
253 * were stopped we simply disabled the IRQ source and the counter
254 * may have been left counting. If we don't do this step then we may
255 * get an interrupt too soon or *way* too late if the overflow has
256 * happened since disabling.
258 armpmu_event_set_period(event, hwc, hwc->idx);
259 armpmu->enable(hwc, hwc->idx);
263 armpmu_del(struct perf_event *event, int flags)
265 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
266 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
267 struct hw_perf_event *hwc = &event->hw;
272 armpmu_stop(event, PERF_EF_UPDATE);
273 hw_events->events[idx] = NULL;
274 clear_bit(idx, hw_events->used_mask);
276 perf_event_update_userpage(event);
280 armpmu_add(struct perf_event *event, int flags)
282 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
283 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
284 struct hw_perf_event *hwc = &event->hw;
288 perf_pmu_disable(event->pmu);
/* If we don't have space for the counter then finish early. */
291 idx = armpmu->get_event_idx(hw_events, hwc);
298 * If there is an event in the counter we are going to use then make
299 * sure it is disabled.
302 armpmu->disable(hwc, idx);
303 hw_events->events[idx] = event;
305 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
306 if (flags & PERF_EF_START)
307 armpmu_start(event, PERF_EF_RELOAD);
309 /* Propagate our changes to the userspace mapping. */
310 perf_event_update_userpage(event);
313 perf_pmu_enable(event->pmu);
318 validate_event(struct pmu_hw_events *hw_events,
319 struct perf_event *event)
321 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
322 struct hw_perf_event fake_event = event->hw;
323 struct pmu *leader_pmu = event->group_leader->pmu;
325 if (is_software_event(event))
328 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
331 return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
335 validate_group(struct perf_event *event)
337 struct perf_event *sibling, *leader = event->group_leader;
338 struct pmu_hw_events fake_pmu;
339 DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
342 * Initialise the fake PMU. We only need to populate the
343 * used_mask for the purposes of validation.
345 memset(fake_used_mask, 0, sizeof(fake_used_mask));
346 fake_pmu.used_mask = fake_used_mask;
348 if (!validate_event(&fake_pmu, leader))
351 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
352 if (!validate_event(&fake_pmu, sibling))
356 if (!validate_event(&fake_pmu, event))
363 armpmu_release_hardware(struct arm_pmu *armpmu)
366 struct platform_device *pmu_device = armpmu->plat_device;
368 irqs = min(pmu_device->num_resources, num_possible_cpus());
370 for (i = 0; i < irqs; ++i) {
371 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
373 irq = platform_get_irq(pmu_device, i);
375 free_irq(irq, armpmu);
380 armpmu_reserve_hardware(struct arm_pmu *armpmu)
382 int i, err, irq, irqs;
383 struct platform_device *pmu_device = armpmu->plat_device;
386 pr_err("no PMU device registered\n");
390 irqs = min(pmu_device->num_resources, num_possible_cpus());
392 pr_err("no irqs for PMUs defined\n");
396 for (i = 0; i < irqs; ++i) {
398 irq = platform_get_irq(pmu_device, i);
403 * If we have a single PMU interrupt that we can't shift,
404 * assume that we're running on a uniprocessor machine and
* use it anyway. Otherwise, skip this interrupt.
407 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
	   irq, i);
413 err = request_irq(irq, armpmu->handle_irq,
pr_err("unable to request IRQ%d for ARM PMU counters\n",
       irq);
419 armpmu_release_hardware(armpmu);
423 cpumask_set_cpu(i, &armpmu->active_irqs);
430 hw_perf_event_destroy(struct perf_event *event)
432 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
433 atomic_t *active_events = &armpmu->active_events;
434 struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
436 if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
437 armpmu_release_hardware(armpmu);
438 mutex_unlock(pmu_reserve_mutex);
443 event_requires_mode_exclusion(struct perf_event_attr *attr)
445 return attr->exclude_idle || attr->exclude_user ||
446 attr->exclude_kernel || attr->exclude_hv;
450 __hw_perf_event_init(struct perf_event *event)
452 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
453 struct hw_perf_event *hwc = &event->hw;
456 mapping = armpmu->map_event(event);
pr_debug("event %x:%llx not supported\n", event->attr.type,
	 event->attr.config);
465 * We don't assign an index until we actually place the event onto
466 * hardware. Use -1 to signify that we haven't decided where to put it
* yet. For SMP systems, each core has its own PMU so we can't do any
468 * clever allocation or constraints checking at this point.
471 hwc->config_base = 0;
476 * Check whether we need to exclude the counter from certain modes.
478 if ((!armpmu->set_event_filter ||
479 armpmu->set_event_filter(hwc, &event->attr)) &&
480 event_requires_mode_exclusion(&event->attr)) {
481 pr_debug("ARM performance counters do not support mode exclusion\n");
486 * Store the event encoding into the config_base field.
488 hwc->config_base |= (unsigned long)mapping;
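	/*
	 * e.g. a PERF_COUNT_HW_CPU_CYCLES event ends up with the ARMv8 event
	 * number 0x11 (CLOCK_CYCLES) ORed in here, alongside any EL filter
	 * bits installed by set_event_filter() above.
	 */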
490 if (!hwc->sample_period) {
492 * For non-sampling runs, limit the sample_period to half
493 * of the counter width. That way, the new counter value
494 * is far less likely to overtake the previous one unless
495 * you have some serious IRQ latency issues.
497 hwc->sample_period = armpmu->max_period >> 1;
498 hwc->last_period = hwc->sample_period;
499 local64_set(&hwc->period_left, hwc->sample_period);
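	/*
	 * With a 32-bit counter (max_period == 0xffffffff) this default is
	 * roughly 2^31 events, so the counter is read well before it could
	 * wrap twice between updates.
	 */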
503 if (event->group_leader != event) {
504 err = validate_group(event);
512 static int armpmu_event_init(struct perf_event *event)
514 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
516 atomic_t *active_events = &armpmu->active_events;
518 if (armpmu->map_event(event) == -ENOENT)
521 event->destroy = hw_perf_event_destroy;
523 if (!atomic_inc_not_zero(active_events)) {
524 mutex_lock(&armpmu->reserve_mutex);
525 if (atomic_read(active_events) == 0)
526 err = armpmu_reserve_hardware(armpmu);
if (!err)
	atomic_inc(active_events);
530 mutex_unlock(&armpmu->reserve_mutex);
536 err = __hw_perf_event_init(event);
538 hw_perf_event_destroy(event);
543 static void armpmu_enable(struct pmu *pmu)
545 struct arm_pmu *armpmu = to_arm_pmu(pmu);
546 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
547 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
553 static void armpmu_disable(struct pmu *pmu)
555 struct arm_pmu *armpmu = to_arm_pmu(pmu);
559 static void __init armpmu_init(struct arm_pmu *armpmu)
561 atomic_set(&armpmu->active_events, 0);
562 mutex_init(&armpmu->reserve_mutex);
564 armpmu->pmu = (struct pmu) {
565 .pmu_enable = armpmu_enable,
566 .pmu_disable = armpmu_disable,
567 .event_init = armpmu_event_init,
570 .start = armpmu_start,
576 int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
579 return perf_pmu_register(&armpmu->pmu, name, type);
583 * ARMv8 PMUv3 Performance Events handling code.
584 * Common event types.
586 enum armv8_pmuv3_perf_types {
587 /* Required events. */
588 ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
589 ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
590 ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
591 ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
592 ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
593 ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
595 /* At least one of the following is required. */
596 ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
597 ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
599 /* Common architectural events. */
600 ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
601 ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
602 ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
603 ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
604 ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
605 ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
606 ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
607 ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
608 ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
609 ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
611 /* Common microarchitectural events. */
612 ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
613 ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
614 ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
615 ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
616 ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
617 ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
618 ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
619 ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
620 ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
621 ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
622 ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
623 ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
626 /* PMUv3 HW events mapping. */
627 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
628 [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
629 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
630 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
631 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
632 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
633 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
634 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
635 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
636 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
639 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
640 [PERF_COUNT_HW_CACHE_OP_MAX]
641 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
644 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
645 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
648 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
649 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
652 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
653 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
659 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
662 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
663 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
666 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
667 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
672 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
673 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
676 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
677 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
680 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
681 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
686 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
687 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
690 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
691 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
694 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
695 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
700 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
701 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
704 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
705 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
708 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
709 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
714 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
715 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
718 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
719 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
722 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
723 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
728 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
729 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
732 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
733 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
736 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
737 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
743 * Perf Events' indices
745 #define ARMV8_IDX_CYCLE_COUNTER 0
746 #define ARMV8_IDX_COUNTER0 1
747 #define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
749 #define ARMV8_MAX_COUNTERS 32
750 #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
753 * ARMv8 low level PMU access
757 * Perf Event to low level counters mapping
759 #define ARMV8_IDX_TO_COUNTER(x) \
760 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
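/*
 * Example: perf index ARMV8_IDX_COUNTER0 (1) maps to hardware event counter
 * 0 as written to PMSELR_EL0; the cycle counter (index 0) bypasses this
 * mapping and is accessed directly via PMCCNTR_EL0.
 */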
763 * Per-CPU PMCR: config reg
765 #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
766 #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
767 #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
768 #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
769 #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
771 #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
772 #define ARMV8_PMCR_N_MASK 0x1f
773 #define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
776 * PMOVSR: counters overflow flag status reg
778 #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
779 #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
782 * PMXEVTYPER: Event selection reg
784 #define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
785 #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
788 * Event filters for PMUv3
790 #define ARMV8_EXCLUDE_EL1 (1 << 31)
791 #define ARMV8_EXCLUDE_EL0 (1 << 30)
792 #define ARMV8_INCLUDE_EL2 (1 << 27)
794 static inline u32 armv8pmu_pmcr_read(void)
797 asm volatile("mrs %0, pmcr_el0" : "=r" (val));
801 static inline void armv8pmu_pmcr_write(u32 val)
803 val &= ARMV8_PMCR_MASK;
805 asm volatile("msr pmcr_el0, %0" :: "r" (val));
808 static inline int armv8pmu_has_overflowed(u32 pmovsr)
810 return pmovsr & ARMV8_OVERFLOWED_MASK;
813 static inline int armv8pmu_counter_valid(int idx)
815 return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
818 static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
823 if (!armv8pmu_counter_valid(idx)) {
824 pr_err("CPU%u checking wrong counter %d overflow status\n",
825 smp_processor_id(), idx);
827 counter = ARMV8_IDX_TO_COUNTER(idx);
828 ret = pmnc & BIT(counter);
834 static inline int armv8pmu_select_counter(int idx)
838 if (!armv8pmu_counter_valid(idx)) {
839 pr_err("CPU%u selecting wrong PMNC counter %d\n",
840 smp_processor_id(), idx);
844 counter = ARMV8_IDX_TO_COUNTER(idx);
845 asm volatile("msr pmselr_el0, %0" :: "r" (counter));
851 static inline u32 armv8pmu_read_counter(int idx)
855 if (!armv8pmu_counter_valid(idx))
856 pr_err("CPU%u reading wrong counter %d\n",
857 smp_processor_id(), idx);
858 else if (idx == ARMV8_IDX_CYCLE_COUNTER)
859 asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
860 else if (armv8pmu_select_counter(idx) == idx)
861 asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
866 static inline void armv8pmu_write_counter(int idx, u32 value)
868 if (!armv8pmu_counter_valid(idx))
869 pr_err("CPU%u writing wrong counter %d\n",
870 smp_processor_id(), idx);
871 else if (idx == ARMV8_IDX_CYCLE_COUNTER)
872 asm volatile("msr pmccntr_el0, %0" :: "r" (value));
873 else if (armv8pmu_select_counter(idx) == idx)
874 asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
877 static inline void armv8pmu_write_evtype(int idx, u32 val)
879 if (armv8pmu_select_counter(idx) == idx) {
880 val &= ARMV8_EVTYPE_MASK;
881 asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
885 static inline int armv8pmu_enable_counter(int idx)
889 if (!armv8pmu_counter_valid(idx)) {
890 pr_err("CPU%u enabling wrong PMNC counter %d\n",
891 smp_processor_id(), idx);
895 counter = ARMV8_IDX_TO_COUNTER(idx);
896 asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
900 static inline int armv8pmu_disable_counter(int idx)
904 if (!armv8pmu_counter_valid(idx)) {
905 pr_err("CPU%u disabling wrong PMNC counter %d\n",
906 smp_processor_id(), idx);
910 counter = ARMV8_IDX_TO_COUNTER(idx);
911 asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
915 static inline int armv8pmu_enable_intens(int idx)
919 if (!armv8pmu_counter_valid(idx)) {
920 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
921 smp_processor_id(), idx);
925 counter = ARMV8_IDX_TO_COUNTER(idx);
926 asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
930 static inline int armv8pmu_disable_intens(int idx)
934 if (!armv8pmu_counter_valid(idx)) {
935 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
936 smp_processor_id(), idx);
940 counter = ARMV8_IDX_TO_COUNTER(idx);
941 asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
943 /* Clear the overflow flag in case an interrupt is pending. */
944 asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
949 static inline u32 armv8pmu_getreset_flags(void)
954 asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
956 /* Write to clear flags */
957 value &= ARMV8_OVSR_MASK;
958 asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
963 static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
966 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
969 * Enable counter and interrupt, and set the counter to count
970 * the event that we're interested in.
972 raw_spin_lock_irqsave(&events->pmu_lock, flags);
977 armv8pmu_disable_counter(idx);
980 * Set event (if destined for PMNx counters).
982 armv8pmu_write_evtype(idx, hwc->config_base);
985 * Enable interrupt for this counter
987 armv8pmu_enable_intens(idx);
992 armv8pmu_enable_counter(idx);
994 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
997 static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
1000 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1003 * Disable counter and interrupt
1005 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1010 armv8pmu_disable_counter(idx);
1013 * Disable interrupt for this counter
1015 armv8pmu_disable_intens(idx);
1017 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1020 static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
1023 struct perf_sample_data data;
1024 struct pmu_hw_events *cpuc;
1025 struct pt_regs *regs;
1029 * Get and reset the IRQ flags
1031 pmovsr = armv8pmu_getreset_flags();
1034 * Did an overflow occur?
1036 if (!armv8pmu_has_overflowed(pmovsr))
1040 * Handle the counter(s) overflow(s)
1042 regs = get_irq_regs();
1044 cpuc = &__get_cpu_var(cpu_hw_events);
1045 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1046 struct perf_event *event = cpuc->events[idx];
1047 struct hw_perf_event *hwc;
1049 /* Ignore if we don't have an event. */
1054 * We have a single interrupt for all counters. Check that
1055 * each counter has overflowed before we process it.
1057 if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
1061 armpmu_event_update(event, hwc, idx);
1062 perf_sample_data_init(&data, 0, hwc->last_period);
1063 if (!armpmu_event_set_period(event, hwc, idx))
1066 if (perf_event_overflow(event, &data, regs))
1067 cpu_pmu->disable(hwc, idx);
1071 * Handle the pending perf events.
1073 * Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
1082 static void armv8pmu_start(void)
1084 unsigned long flags;
1085 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1087 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1088 /* Enable all counters */
1089 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
1090 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1093 static void armv8pmu_stop(void)
1095 unsigned long flags;
1096 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1098 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1099 /* Disable all counters */
1100 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
1101 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1104 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
1105 struct hw_perf_event *event)
1108 unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
/* Always place a cycle-counting event on the dedicated cycle counter. */
1111 if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
1112 if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
1115 return ARMV8_IDX_CYCLE_COUNTER;
1119 * For anything other than a cycle counter, try and use
* the event counters.
1122 for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1123 if (!test_and_set_bit(idx, cpuc->used_mask))
/* The counters are all in use. */
return -EAGAIN;
* Add an event filter to a given event. This will only work for PMUv3 PMUs.
1134 static int armv8pmu_set_event_filter(struct hw_perf_event *event,
1135 struct perf_event_attr *attr)
1137 unsigned long config_base = 0;
1139 if (attr->exclude_idle)
1141 if (attr->exclude_user)
1142 config_base |= ARMV8_EXCLUDE_EL0;
1143 if (attr->exclude_kernel)
1144 config_base |= ARMV8_EXCLUDE_EL1;
1145 if (!attr->exclude_hv)
1146 config_base |= ARMV8_INCLUDE_EL2;
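	/*
	 * e.g. attr.exclude_user = 1 with exclude_kernel = exclude_hv = 0
	 * yields ARMV8_EXCLUDE_EL0 | ARMV8_INCLUDE_EL2, i.e. count at EL1
	 * and EL2 only.
	 */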
1149 * Install the filter into config_base as this is used to
1150 * construct the event type.
1152 event->config_base = config_base;
1157 static void armv8pmu_reset(void *info)
1159 u32 idx, nb_cnt = cpu_pmu->num_events;
1161 /* The counter and interrupt enable registers are unknown at reset. */
1162 for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1163 armv8pmu_disable_event(NULL, idx);
1165 /* Initialize & Reset PMNC: C and P bits. */
1166 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
1168 /* Disable access from userspace. */
1169 asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
1172 static int armv8_pmuv3_map_event(struct perf_event *event)
1174 return map_cpu_event(event, &armv8_pmuv3_perf_map,
1175 &armv8_pmuv3_perf_cache_map, 0xFF);
1178 static struct arm_pmu armv8pmu = {
1179 .handle_irq = armv8pmu_handle_irq,
1180 .enable = armv8pmu_enable_event,
1181 .disable = armv8pmu_disable_event,
1182 .read_counter = armv8pmu_read_counter,
1183 .write_counter = armv8pmu_write_counter,
1184 .get_event_idx = armv8pmu_get_event_idx,
1185 .start = armv8pmu_start,
1186 .stop = armv8pmu_stop,
1187 .reset = armv8pmu_reset,
1188 .max_period = (1LLU << 32) - 1,
1191 static u32 __init armv8pmu_read_num_pmnc_events(void)
/* Read the number of CNTx counters supported from PMCR_EL0 */
1196 nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
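	/* e.g. a PMCR_EL0.N field of 6 means six event counters are implemented. */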
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
1202 static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
1204 armv8pmu.name = "arm/armv8-pmuv3";
1205 armv8pmu.map_event = armv8_pmuv3_map_event;
1206 armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
1207 armv8pmu.set_event_filter = armv8pmu_set_event_filter;
1212 * Ensure the PMU has sane values out of reset.
1213 * This requires SMP to be available, so exists as a separate initcall.
1218 if (cpu_pmu && cpu_pmu->reset)
1219 return on_each_cpu(cpu_pmu->reset, NULL, 1);
1222 arch_initcall(cpu_pmu_reset);
1225 * PMU platform driver and devicetree bindings.
1227 static struct of_device_id armpmu_of_device_ids[] = {
1228 {.compatible = "arm,armv8-pmuv3"},
1232 static int armpmu_device_probe(struct platform_device *pdev)
1237 cpu_pmu->plat_device = pdev;
1241 static struct platform_driver armpmu_driver = {
1244 .of_match_table = armpmu_of_device_ids,
1246 .probe = armpmu_device_probe,
1249 static int __init register_pmu_driver(void)
1251 return platform_driver_register(&armpmu_driver);
1253 device_initcall(register_pmu_driver);
1255 static struct pmu_hw_events *armpmu_get_cpu_events(void)
1257 return &__get_cpu_var(cpu_hw_events);
1260 static void __init cpu_pmu_init(struct arm_pmu *armpmu)
1263 for_each_possible_cpu(cpu) {
1264 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
1265 events->events = per_cpu(hw_events, cpu);
1266 events->used_mask = per_cpu(used_mask, cpu);
1267 raw_spin_lock_init(&events->pmu_lock);
1269 armpmu->get_hw_events = armpmu_get_cpu_events;
1272 static int __init init_hw_perf_events(void)
1274 u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
1276 switch ((dfr >> 8) & 0xf) {
1277 case 0x1: /* PMUv3 */
1278 cpu_pmu = armv8_pmuv3_pmu_init();
1283 pr_info("enabled with %s PMU driver, %d counters available\n",
1284 cpu_pmu->name, cpu_pmu->num_events);
1285 cpu_pmu_init(cpu_pmu);
1286 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
1288 pr_info("no hardware support available\n");
1293 early_initcall(init_hw_perf_events);
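/*
 * Usage sketch (not part of this file): once registered, the PMU is visible
 * to userspace perf, e.g. `perf stat -e cycles,instructions <cmd>`; raw
 * ARMv8 event numbers can also be requested as `perf stat -e rNN` thanks to
 * the 0xFF raw event mask passed to map_cpu_event().
 */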
1296 * Callchain handling code.
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
1307 static struct frame_tail __user *
1308 user_backtrace(struct frame_tail __user *tail,
1309 struct perf_callchain_entry *entry)
1311 struct frame_tail buftail;
1314 /* Also check accessibility of one struct frame_tail beyond */
1315 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
1318 pagefault_disable();
1319 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
1325 perf_callchain_store(entry, buftail.lr);
1328 * Frame pointers should strictly progress back up the stack
1329 * (towards higher addresses).
1331 if (tail >= buftail.fp)
1337 void perf_callchain_user(struct perf_callchain_entry *entry,
1338 struct pt_regs *regs)
1340 struct frame_tail __user *tail;
1342 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1343 /* We don't support guest os callchain now */
1347 perf_callchain_store(entry, regs->pc);
1348 tail = (struct frame_tail __user *)regs->regs[29];
1350 while (entry->nr < PERF_MAX_STACK_DEPTH &&
1351 tail && !((unsigned long)tail & 0xf))
1352 tail = user_backtrace(tail, entry);
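	/*
	 * The "& 0xf" test above rejects frame pointers that are not 16-byte
	 * aligned, as the AArch64 procedure call standard requires for the
	 * stack; a misaligned fp is taken to mean a corrupt frame.
	 */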
1356 * Gets called by walk_stackframe() for every stackframe. This will be called
* whilst unwinding the stackframe and is like a subroutine return, so we use
* the PC.
1360 static int callchain_trace(struct stackframe *frame, void *data)
1362 struct perf_callchain_entry *entry = data;
1363 perf_callchain_store(entry, frame->pc);
1367 void perf_callchain_kernel(struct perf_callchain_entry *entry,
1368 struct pt_regs *regs)
1370 struct stackframe frame;
1372 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1373 /* We don't support guest os callchain now */
1377 frame.fp = regs->regs[29];
1378 frame.sp = regs->sp;
1379 frame.pc = regs->pc;
1380 walk_stackframe(&frame, callchain_trace, entry);
1383 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1385 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1386 return perf_guest_cbs->get_guest_ip();
1388 return instruction_pointer(regs);
1391 unsigned long perf_misc_flags(struct pt_regs *regs)
1395 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1396 if (perf_guest_cbs->is_user_mode())
1397 misc |= PERF_RECORD_MISC_GUEST_USER;
1399 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1401 if (user_mode(regs))
1402 misc |= PERF_RECORD_MISC_USER;
1404 misc |= PERF_RECORD_MISC_KERNEL;