/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>

#include <asm/apic.h>

#include "perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
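/*
 * Illustrative note (not in the original source): the max count
 * fields occupy bits 15:0 of IbsFetchCtl and IbsOpCtl and the
 * hardware appends four implied zero bits, so a max count of 0x1000
 * corresponds to a sample period of 0x1000 << 4 = 0x10000.
 */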
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *count)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < min))
		left = min;

	if (left > max)
		left = max;

	*count = (u64)left;

	return overflow;
}
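/*
 * Worked example for the clamping above (illustrative): with
 * min = 1 << 4 and max = IBS_OP_MAX_CNT << 4 = 0xffff0, a
 * period_left of 0x123456 is clamped to 0xffff0; the hardware counts
 * as far as it can and the remainder stays in period_left for the
 * next interrupt.
 */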
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
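/*
 * Illustrative example of the shift trick above: for width = 20,
 * shift = 44. With prev_raw_count = 0xfffff and new_raw_count = 0x10
 * after a wrap, ((0x10 << 44) - (0xfffff << 44)) >> 44 = 0x11, the
 * correct delta modulo 2^20, independent of any stale bits the
 * hardware left above bit 19.
 */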
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (!perf_ibs)
		return -ENOENT;

	config = event->attr.config;
	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the ibs
			 * max cnt, but we allow it in case we later
			 * adjust the sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
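		/*
		 * Example (illustrative): a fixed sample_period of
		 * 0x1234f is rejected above, while in frequency mode
		 * the same value is rounded down to 0x12340; anything
		 * below 0x10 is raised to the hardware minimum.
		 */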
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
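/*
 * Userspace usage sketch (illustrative, not part of this driver): the
 * dynamic PMU type is exported via sysfs, e.g.
 * /sys/bus/event_source/devices/ibs_op/type, and can be passed to
 * perf_event_open():
 *
 *	struct perf_event_attr attr = {
 *		.type          = ibs_op_type,    read from sysfs
 *		.size          = sizeof(attr),
 *		.config        = 0,
 *		.sample_period = 0x10000,        multiple of 0x10
 *		.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_RAW,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 */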
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int ret;

	/* ignore lower 4 bits in min count: */
	ret = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return ret;
}

static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	return (config & IBS_OP_CUR_CNT) >> 32;
}
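/*
 * Illustrative note: IbsFetchCnt lives in bits 31:16 of IbsFetchCtl;
 * shifting right by 12 rather than 16 scales the raw count by 16 so
 * it stays in the same units as the period, which carries four
 * implied zero bits. The op getter simply moves the IBS_OP_CUR_CNT
 * field down to bit 0.
 */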
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 config)
{
	u64 count = perf_ibs->get_count(config);

	while (!perf_event_try_update(event, count, 20)) {
		rdmsrl(event->hw.config_base, config);
		count = perf_ibs->get_count(config);
	}
}

/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do both.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &config);
	config = (config >> 4) | perf_ibs->enable_mask;
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);
}
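/*
 * Example for perf_ibs_start() above (illustrative): with a period of
 * 0x10000, perf_ibs_set_period() returns config = 0x10000; the shift
 * by 4 packs it into the 16-bit max count field as 0x1000, and OR-ing
 * the enable mask arms the hardware on the MSR write in
 * perf_ibs_enable_event().
 */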
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 val;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, val);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		val &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, val);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_ibs_event_update(perf_ibs, event, val);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, config;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/* Catch spurious interrupts after stopping IBS: */
		if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 0;
		rdmsrl(perf_ibs->msr, *ibs_data.regs);
		return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
	}
	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	/*
	 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), which is not
	 * supported by all cpus. Since this sample triggered an
	 * interrupt, set the current count to the max count.
	 */
	config = ibs_data.regs[0];
	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
		config &= ~IBS_OP_CUR_CNT;
		config |= (config & IBS_OP_MAX_CNT) << 36;
	}
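	/*
	 * Illustrative note: maxcnt << 36 equals (maxcnt << 4) << 32,
	 * i.e. the full sample period shifted into the IbsOpCurCnt bit
	 * position, so the emulated current count reads back as the
	 * programmed period.
	 */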
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &config))
		goto out;	/* no sw counter overflow */
	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	ibs_data.size = sizeof(u64) * size;
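	/*
	 * Example (illustrative): for ibs_fetch, offset_mask covers the
	 * IBSFETCHCTL, IBSFETCHLINAD and IBSFETCHPHYSAD MSRs, so with
	 * PERF_SAMPLE_RAW the loop above reads msr + 1 and msr + 2
	 * after the control register, and find_next_bit() skips any
	 * unimplemented offsets.
	 */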
	regs = *iregs;
	if (!check_rip || !(ibs_data.regs[2] & IBS_RIP_INVALID))
		instruction_pointer_set(&regs, ibs_data.regs[1]);

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	config = (config >> 4) | (throttle ? 0 : perf_ibs->enable_mask);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);

	return 1;
}
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
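/*
 * Illustrative note: IBS_CPUID_FEATURES is CPUID function 0x8000001b;
 * on hardware that predates this leaf, the IBS_CAPS_DEFAULT fallback
 * advertises the baseline fetch and op sampling capabilities.
 */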
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
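/*
 * Descriptive note: setup_APIC_eilvt() returns 0 on success, so the
 * negations make get_eilvt() answer "could the offset be reserved for
 * NMI delivery?" and put_eilvt() "could it be released again?".
 */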
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
			       value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which then
 * updates the offset in the per-node IBS_CTL msr. The per-core APIC
 * setup of the IBS interrupt vector is handled by
 * perf_ibs_cpu_notifier, which uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();

	return ret;
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set those up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);