commit 26ced536005a2cec4c035aa518e32251a2fef00d
arch/x86/events/core.c
/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
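
/*
 * Illustrative sketch, not part of the original file: the shift pair
 * above sign-extends deltas from counters that are only cntval_bits
 * wide. Shifting both raw values into the top bits and shifting the
 * difference back down (arithmetically) makes a wrapped counter yield
 * a small positive delta instead of a huge unsigned one. A minimal
 * standalone version, assuming 48-bit counters:
 */
static inline s64 delta_48bit(u64 prev_raw, u64 new_raw)
{
        int shift = 64 - 48;    /* 16 unimplemented high bits */

        /* s64 >> is an arithmetic shift on the kernel's supported ABIs */
        return ((s64)(new_raw << shift) - (s64)(prev_raw << shift)) >> shift;
}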

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;

        reg = &event->hw.extra_reg;

        if (!x86_pmu.extra_regs)
                return 0;

        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
                /* Check if the extra msrs can be safely accessed */
                if (!er->extra_msr_access)
                        return -ENXIO;

                reg->idx = er->idx;
                reg->config = event->attr.config1;
                reg->reg = er->msr;
                break;
        }
        return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
}
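
/*
 * Note (illustrative, not from the source): the labels above implement
 * the usual unwind-in-reverse error path. eventsel_fail releases only
 * the eventsel MSRs claimed before the failure, then resets i so that
 * falling through to perfctr_fail releases every perfctr MSR, all of
 * which were claimed by the first loop.
 */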

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));
        }
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
        u64 val, val_fail, val_new = ~0;
        int i, reg, reg_fail, ret = 0;
        int bios_fail = 0;
        int reg_safe = -1;

        /*
         * Check to see if the BIOS enabled any of the counters; if so,
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
                        bios_fail = 1;
                        val_fail = val;
                        reg_fail = reg;
                } else {
                        reg_safe = i;
                }
        }

        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4)) {
                                bios_fail = 1;
                                val_fail = val;
                                reg_fail = reg;
                        }
                }
        }

        /*
         * If all the counters are enabled, the below test will always
         * fail.  The tools will also become useless in this scenario.
         * Just fail and disable the hardware counters.
         */

        if (reg_safe == -1) {
                reg = reg_safe;
                goto msr_fail;
        }

        /*
         * Read the current value, change it and read it back to see if it
         * matches; this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
        reg = x86_pmu_event_addr(reg_safe);
        if (rdmsrl_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
        ret = wrmsrl_safe(reg, val);
        ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
                goto msr_fail;

        /*
         * We still allow the PMU driver to operate:
         */
        if (bios_fail) {
                pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
                pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
                              reg_fail, val_fail);
        }

        return true;

msr_fail:
        pr_cont("Broken PMU hardware detected, using software events only.\n");
        pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);

        return false;
}
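
/*
 * Minimal sketch of the write/read-back probe used above (illustrative;
 * probe_counter_rw() is a hypothetical helper, not kernel API): flipping
 * the low 16 bits and comparing catches emulators that silently ignore
 * the write and keep returning 0.
 */
static inline bool probe_counter_rw(unsigned int msr)
{
        u64 val, val_new;

        if (rdmsrl_safe(msr, &val))
                return false;           /* MSR not even readable */
        val ^= 0xffffUL;
        if (wrmsrl_safe(msr, val) || rdmsrl_safe(msr, &val_new))
                return false;
        return val == val_new;          /* mismatch => write was swallowed */
}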

static void hw_perf_event_destroy(struct perf_event *event)
{
        x86_release_hardware();
        atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
        hw_perf_event_destroy(event);

        /* undo the lbr/bts event accounting */
        x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
}
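
/*
 * Worked example (illustrative): for "L1-dcache-load-misses" the perf
 * core builds attr->config as type | (op << 8) | (result << 16), i.e.
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16). set_ext_hw_attr() then looks
 * up hw_cache_event_ids[0][0][1] for the model-specific encoding, with
 * 0 meaning "not supported" and -1 meaning "invalid combination".
 */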

int x86_reserve_hardware(void)
{
        int err = 0;

        if (!atomic_inc_not_zero(&pmc_refcount)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&pmc_refcount) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&pmc_refcount);
                mutex_unlock(&pmc_reserve_mutex);
        }

        return err;
}

void x86_release_hardware(void)
{
        if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
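
/*
 * Sketch of the refcount idiom used by the two functions above
 * (illustrative; res_count, res_mutex and do_reserve are hypothetical
 * names): atomic_inc_not_zero() is a lock-free fast path for the
 * common "already reserved" case, so only the 0 -> 1 and 1 -> 0
 * transitions take the mutex that serializes the actual hardware
 * reserve/release.
 */
static atomic_t res_count;              /* hypothetical refcount */
static DEFINE_MUTEX(res_mutex);         /* guards the 0 <-> 1 transitions */

static inline int res_get(bool (*do_reserve)(void))
{
        int err = 0;

        if (atomic_inc_not_zero(&res_count))    /* fast path */
                return 0;

        mutex_lock(&res_mutex);
        if (atomic_read(&res_count) == 0 && !do_reserve())
                err = -EBUSY;
        if (!err)
                atomic_inc(&res_count);
        mutex_unlock(&res_mutex);

        return err;
}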

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
        int i;

        if (x86_pmu.lbr_pt_coexist)
                return 0;

        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
                mutex_lock(&pmc_reserve_mutex);
                for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
                        if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
                                goto fail_unlock;
                }
                atomic_inc(&x86_pmu.lbr_exclusive[what]);
                mutex_unlock(&pmc_reserve_mutex);
        }

        atomic_inc(&active_events);
        return 0;

fail_unlock:
        mutex_unlock(&pmc_reserve_mutex);
        return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
        if (x86_pmu.lbr_pt_coexist)
                return;

        atomic_dec(&x86_pmu.lbr_exclusive[what]);
        atomic_dec(&active_events);
}

int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (attr->type == PERF_TYPE_RAW)
                return x86_pmu_extra_regs(event->attr.config, event);

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !attr->freq && hwc->sample_period == 1) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;

                /* disallow bts if conflicting events are present */
                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                        return -EBUSY;

                event->destroy = hw_perf_lbr_event_destroy;
        }

        hwc->config |= config;

        return 0;
}
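
/*
 * Example (illustrative): "perf record -e branches:u -c 1" reaches the
 * branch-tracing block above with attr->config ==
 * PERF_COUNT_HW_BRANCH_INSTRUCTIONS, !attr->freq, sample_period == 1
 * and exclude_kernel set - the only combination that selects BTS.
 */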

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
        u64 m = event->attr.branch_sample_type;
        u64 b = 0;

        /* must capture all branches */
        if (!(m & PERF_SAMPLE_BRANCH_ANY))
                return 0;

        m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

        if (!event->attr.exclude_user)
                b |= PERF_SAMPLE_BRANCH_USER;

        if (!event->attr.exclude_kernel)
                b |= PERF_SAMPLE_BRANCH_KERNEL;

        /*
         * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
         */

        return m == b;
}
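
/*
 * Worked example (illustrative): an event with exclude_kernel=1 and
 * exclude_user=0 computes b = PERF_SAMPLE_BRANCH_USER, so only
 * branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER
 * is compatible; asking for KERNEL branches as well would capture
 * beyond the event's priv level and is rejected.
 */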

int x86_pmu_hw_config(struct perf_event *event)
{
        if (event->attr.precise_ip) {
                int precise = 0;

                /* Support for constant skid */
                if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        precise++;

                        /* Support for IP fixup */
                        if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
                                precise++;

                        if (x86_pmu.pebs_prec_dist)
                                precise++;
                }

                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
        }
        /*
         * check that PEBS LBR correction does not conflict with
         * whatever the user is asking with attr->branch_sample_type
         */
        if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
                u64 *br_type = &event->attr.branch_sample_type;

                if (has_branch_stack(event)) {
                        if (!precise_br_compat(event))
                                return -EOPNOTSUPP;

                        /* branch_sample_type is compatible */

                } else {
                        /*
                         * user did not specify branch_sample_type
                         *
                         * For PEBS fixups, we capture all
                         * the branches at the priv level of the
                         * event.
                         */
                        *br_type = PERF_SAMPLE_BRANCH_ANY;

                        if (!event->attr.exclude_user)
                                *br_type |= PERF_SAMPLE_BRANCH_USER;

                        if (!event->attr.exclude_kernel)
                                *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
                }
        }

        if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
                event->attach_state |= PERF_ATTACH_TASK_DATA;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to
         */
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

        if (event->attr.sample_period && x86_pmu.limit_period) {
                if (x86_pmu.limit_period(event, event->attr.sample_period) >
                                event->attr.sample_period)
                        return -EINVAL;
        }

        return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = x86_reserve_hardware();
        if (err)
                return err;

        atomic_inc(&active_events);
        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
        }
}

/*
 * A PMI may land after enabled=0. It can hit either before or after
 * disable_all.
 *
 * If the PMI hits before disable_all, the PMU will be disabled in the
 * NMI handler. It will not be re-enabled in the NMI handler again,
 * because enabled=0. After handling the NMI, disable_all will be
 * called, which will not change the state either. If the PMI hits
 * after disable_all, the PMU is already disabled before entering the
 * NMI handler, and the NMI handler will not change the state either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
        int     weight;
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
        int     nr_gp;          /* number of GP counters used */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX        2

struct perf_sched {
        int                     max_weight;
        int                     max_events;
        int                     max_gp;
        int                     saved_states;
        struct event_constraint **constraints;
        struct sched_state      state;
        struct sched_state      saved[SCHED_STATES_MAX];
};

/*
 * Initialize the iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
                            int num, int wmin, int wmax, int gpmax)
{
        int idx;

        memset(sched, 0, sizeof(*sched));
        sched->max_events       = num;
        sched->max_weight       = wmax;
        sched->max_gp           = gpmax;
        sched->constraints      = constraints;

        for (idx = 0; idx < num; idx++) {
                if (constraints[idx]->weight == wmin)
                        break;
        }

        sched->state.event      = idx;          /* start with min weight */
        sched->state.weight     = wmin;
        sched->state.unassigned = num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
                return;

        sched->saved[sched->saved_states] = sched->state;
        sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
        if (!sched->saved_states)
                return false;

        sched->saved_states--;
        sched->state = sched->saved[sched->saved_states];

        /* continue with next counter: */
        clear_bit(sched->state.counter++, sched->state.used);

        return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
        struct event_constraint *c;
        int idx;

        if (!sched->state.unassigned)
                return false;

        if (sched->state.event >= sched->max_events)
                return false;

        c = sched->constraints[sched->state.event];
        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
                                goto done;
                }
        }

        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                if (!__test_and_set_bit(idx, sched->state.used)) {
                        if (sched->state.nr_gp++ >= sched->max_gp)
                                return false;

                        goto done;
                }
        }

        return false;

done:
        sched->state.counter = idx;

        if (c->overlap)
                perf_sched_save_state(sched);

        return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
        while (!__perf_sched_find_counter(sched)) {
                if (!perf_sched_restore_state(sched))
                        return false;
        }

        return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
        struct event_constraint *c;

        if (!sched->state.unassigned || !--sched->state.unassigned)
                return false;

        do {
                /* next event */
                sched->state.event++;
                if (sched->state.event >= sched->max_events) {
                        /* next weight */
                        sched->state.event = 0;
                        sched->state.weight++;
                        if (sched->state.weight > sched->max_weight)
                                return false;
                }
                c = sched->constraints[sched->state.event];
        } while (c->weight != sched->state.weight);

        sched->state.counter = 0;       /* start with first counter */

        return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int gpmax, int *assign)
{
        struct perf_sched sched;

        perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

        do {
                if (!perf_sched_find_counter(&sched))
                        break;  /* failed */
                if (assign)
                        assign[sched.state.event] = sched.state.counter;
        } while (perf_sched_next_event(&sched));

        return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);
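
/*
 * Illustrative walk-through (not from the source): with three events
 * whose constraint weights are {1, 3, 3}, the scheduler places the
 * weight-1 event first (it has the fewest usable counters), then the
 * weight-3 ones, backtracking through the states saved for "overlap"
 * constraints when a choice turns out to block a later event.
 * Starting with the least constrained events instead could occupy the
 * only counter a heavily constrained event is allowed to use.
 */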

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c;
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        struct perf_event *e;
        int i, wmin, wmax, unsched = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        if (x86_pmu.start_scheduling)
                x86_pmu.start_scheduling(cpuc);

        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                cpuc->event_constraint[i] = NULL;
                c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
                cpuc->event_constraint[i] = c;

                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = cpuc->event_constraint[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }

        /* slow path */
        if (i != n) {
                int gpmax = x86_pmu.num_counters;

                /*
                 * Do not allow scheduling of more than half the available
                 * generic counters.
                 *
                 * This helps avoid counter starvation of sibling thread by
                 * ensuring at most half the counters cannot be in exclusive
                 * mode. There are no designated counters for the limit; any
                 * N/2 counters can be used. This helps with events with
                 * specific counter constraints.
                 */
                if (is_ht_workaround_enabled() && !cpuc->is_fake &&
                    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
                        gpmax /= 2;

                unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
                                             wmax, gpmax, assign);
        }

        /*
         * In case of success (unsched = 0), mark events as committed,
         * so we do not put_constraint() in case new events are added
         * and fail to be scheduled.
         *
         * We invoke the lower level commit callback to lock the resource.
         *
         * We do not need to do all of this in case we are called to
         * validate an event group (assign == NULL).
         */
        if (!unsched && assign) {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
                        e->hw.flags |= PERF_X86_EVENT_COMMITTED;
                        if (x86_pmu.commit_scheduling)
                                x86_pmu.commit_scheduling(cpuc, i, assign[i]);
                }
        } else {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
                        /*
                         * do not put_constraint() on committed events,
                         * because they are good to go
                         */
                        if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
                                continue;

                        /*
                         * release events that failed scheduling
                         */
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, e);
                }
        }

        if (x86_pmu.stop_scheduling)
                x86_pmu.stop_scheduling(cpuc);

        return unsched ? -EINVAL : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events, or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
                hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
        }
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        /*
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                         */
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;

                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                /*
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        if (hwc->state & PERF_HES_ARCH)
                                continue;

                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        if (x86_pmu.limit_period)
                left = x86_pmu.limit_period(event, left);

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
            local64_read(&hwc->prev_count) != (u64)-left) {
                /*
                 * The hw event starts counting from this event offset,
                 * mark it to be able to extract future deltas:
                 */
                local64_set(&hwc->prev_count, (u64)-left);

                wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
        }

        /*
         * Due to an erratum on certain CPUs we need a second write to
         * be sure the register is updated properly.
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}
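
/*
 * Sketch (illustrative, not from the source): PMCs count upward and
 * raise a PMI on overflow, so programming the counter with the two's
 * complement of the period, truncated to the counter width, yields an
 * interrupt after exactly 'left' more events. E.g. left = 1000 on a
 * 48-bit counter:
 *
 *      (u64)(-1000) & ((1ULL << 48) - 1) == 0xfffffffffc18
 *
 * which overflows after 1000 increments.
 */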

void x86_pmu_enable_event(struct perf_event *event)
{
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        if (ret < 0)
                goto out;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole.
         */
        if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                goto done_collect;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                goto out;
        /*
         * copy the new assignment now that we know it is possible;
         * it will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
        /*
         * Commit the collect_events() state. See x86_pmu_del() and
         * x86_pmu_*_txn().
         */
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        ret = 0;
out:
        return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs, debugctl;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                if (x86_pmu.pebs_constraints) {
                        rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
                        pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
                }
                if (x86_pmu.lbr_nr) {
                        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
                        pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
                }
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                x86_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;

        /*
         * event is descheduled
         */
        event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;

        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         *
         * XXX assumes any ->del() called during a TXN will only be on
         * an event added during that same TXN.
         */
        if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                return;

        /*
         * Not a TXN, therefore cleanup properly.
         */
        x86_pmu_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i])
                        break;
        }

        if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
                return;

        /* If we have a newly added event; make sure to decrease n_added. */
        if (i >= cpuc->n_events - cpuc->n_added)
                --cpuc->n_added;

        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(cpuc, event);

        /* Delete the array entry. */
        while (++i < cpuc->n_events) {
                cpuc->event_list[i-1] = cpuc->event_list[i];
                cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
        }
        --cpuc->n_events;

        perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
        u64 val;

        cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler.  As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This generic handler doesn't seem to have any issues where the
         * unmasking occurs so it was left at the top.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask)) {
                        /*
                         * Though we deactivated the counter, some CPUs
                         * might still deliver spurious interrupts that
                         * are in flight. Catch them:
                         */
                        if (__test_and_clear_bit(idx, cpuc->running))
                                handled++;
                        continue;
                }

                event = cpuc->events[idx];

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled++;
                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
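
/*
 * Note on the overflow test above (illustrative): counters are
 * programmed with -period, so a counter that has not yet wrapped still
 * has its top implemented bit (bit cntval_bits - 1) set after the sign
 * extension in x86_perf_event_update(). If that bit is set, the
 * counter did not overflow and the PMI belongs to some other counter.
 */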

void perf_events_lapic_init(void)
{
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        u64 start_clock;
        u64 finish_clock;
        int ret;

        /*
         * All PMUs/events that share this PMI handler should make sure to
         * increment active_events for their events.
         */
        if (!atomic_read(&active_events))
                return NMI_DONE;

        start_clock = sched_clock();
        ret = x86_pmu.handle_irq(regs);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);

        return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int i, ret = NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
                        cpuc->kfree_on_online[i] = NULL;
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                break;

        case CPU_STARTING:
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

        case CPU_ONLINE:
                for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
                        kfree(cpuc->kfree_on_online[i]);
                        cpuc->kfree_on_online[i] = NULL;
                }
                break;

        case CPU_DYING:
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;

        default:
                break;
        }

        return ret;
}

static void __init pmu_check_apic(void)
{
        if (boot_cpu_has(X86_FEATURE_APIC))
                return;

        x86_pmu.apic = 0;
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");

        /*
         * If we have a PMU initialized but no APIC
         * interrupts, we cannot sample hardware
         * events (user-space has to fall back and
         * sample via a hrtimer based software event):
         */
        pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

static struct attribute_group x86_pmu_format_group = {
        .name = "format",
        .attrs = NULL,
};

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * from the events_attr attributes.
 */
static void __init filter_events(struct attribute **attrs)
{
        struct device_attribute *d;
        struct perf_pmu_events_attr *pmu_attr;
        int offset = 0;
        int i, j;

        for (i = 0; attrs[i]; i++) {
                d = (struct device_attribute *)attrs[i];
                pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
                /* str trumps id */
                if (pmu_attr->event_str)
                        continue;
                if (x86_pmu.event_map(i + offset))
                        continue;

                for (j = i; attrs[j]; j++)
                        attrs[j] = attrs[j + 1];

                /* Check the shifted attr. */
                i--;

                /*
                 * event_map() is index based, the attrs array is organized
                 * by increasing event index. If we shift the events, then
                 * we need to compensate for the event_map(), otherwise
                 * we are looking up the wrong event in the map.
                 */
                offset++;
        }
}
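
/*
 * Worked example (illustrative): if x86_pmu.event_map(3) == 0, the
 * attr at index 3 is removed by shifting the tail left; offset becomes
 * 1, so the attr that now sits at index 3 is checked against
 * event_map(4), keeping attr positions and map indices in sync.
 */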

/* Merge two pointer arrays */
__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
{
        struct attribute **new;
        int j, i;

        for (j = 0; a[j]; j++)
                ;
        for (i = 0; b[i]; i++)
                j++;
        j++;

        new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
        if (!new)
                return NULL;

        j = 0;
        for (i = 0; a[i]; i++)
                new[j++] = a[i];
        for (i = 0; b[i]; i++)
                new[j++] = b[i];
        new[j] = NULL;

        return new;
}
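
/*
 * Example use (illustrative, with hypothetical arrays): given
 *      static struct attribute *a[] = { &x.attr, NULL };
 *      static struct attribute *b[] = { &y.attr, &z.attr, NULL };
 * merge_attr(a, b) returns a freshly kmalloc'ed
 *      { &x.attr, &y.attr, &z.attr, NULL }.
 * The first loop counts a's entries, the second adds b's, and the
 * final j++ reserves room for the NULL terminator.
 */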

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
        struct perf_pmu_events_attr *pmu_attr = \
                container_of(attr, struct perf_pmu_events_attr, attr);
        u64 config = x86_pmu.event_map(pmu_attr->id);

        /* string trumps id */
        if (pmu_attr->event_str)
                return sprintf(page, "%s", pmu_attr->event_str);

        return x86_pmu.events_sysfs_show(page, config);
}
EXPORT_SYMBOL_GPL(events_sysfs_show);

EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
EVENT_ATTR(instructions,                INSTRUCTIONS            );
EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
EVENT_ATTR(cache-misses,                CACHE_MISSES            );
EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );

1636 static struct attribute *empty_attrs;
1637
1638 static struct attribute *events_attr[] = {
1639         EVENT_PTR(CPU_CYCLES),
1640         EVENT_PTR(INSTRUCTIONS),
1641         EVENT_PTR(CACHE_REFERENCES),
1642         EVENT_PTR(CACHE_MISSES),
1643         EVENT_PTR(BRANCH_INSTRUCTIONS),
1644         EVENT_PTR(BRANCH_MISSES),
1645         EVENT_PTR(BUS_CYCLES),
1646         EVENT_PTR(STALLED_CYCLES_FRONTEND),
1647         EVENT_PTR(STALLED_CYCLES_BACKEND),
1648         EVENT_PTR(REF_CPU_CYCLES),
1649         NULL,
1650 };
1651
1652 static struct attribute_group x86_pmu_events_group = {
1653         .name = "events",
1654         .attrs = events_attr,
1655 };
1656
1657 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
1658 {
1659         u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1660         u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1661         bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1662         bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1663         bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
1664         bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
1665         ssize_t ret;
1666
1667         /*
1668          * We have a whole page to spend and only a little data
1669          * to write, so we can safely use sprintf().
1670          */
1671         ret = sprintf(page, "event=0x%02llx", event);
1672
1673         if (umask)
1674                 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1675
1676         if (edge)
1677                 ret += sprintf(page + ret, ",edge");
1678
1679         if (pc)
1680                 ret += sprintf(page + ret, ",pc");
1681
1682         if (any)
1683                 ret += sprintf(page + ret, ",any");
1684
1685         if (inv)
1686                 ret += sprintf(page + ret, ",inv");
1687
1688         if (cmask)
1689                 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1690
1691         ret += sprintf(page + ret, "\n");
1692
1693         return ret;
1694 }
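
/*
 * Example output (illustrative values): for event == 0xc0 and
 * config == 0x018001c0 (umask 0x01, inv set, cmask 0x01) this renders:
 *
 *	event=0xc0,umask=0x01,inv,cmask=0x01
 */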
1695
1696 static int __init init_hw_perf_events(void)
1697 {
1698         struct x86_pmu_quirk *quirk;
1699         int err;
1700
1701         pr_info("Performance Events: ");
1702
1703         switch (boot_cpu_data.x86_vendor) {
1704         case X86_VENDOR_INTEL:
1705                 err = intel_pmu_init();
1706                 break;
1707         case X86_VENDOR_AMD:
1708                 err = amd_pmu_init();
1709                 break;
1710         default:
1711                 err = -ENOTSUPP;
1712         }
1713         if (err != 0) {
1714                 pr_cont("no PMU driver, software events only.\n");
1715                 return 0;
1716         }
1717
1718         pmu_check_apic();
1719
1720         /* sanity check that the hardware exists or is emulated */
1721         if (!check_hw_exists())
1722                 return 0;
1723
1724         pr_cont("%s PMU driver.\n", x86_pmu.name);
1725
1726         x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1727
1728         for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1729                 quirk->func();
1730
1731         if (!x86_pmu.intel_ctrl)
1732                 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1733
1734         perf_events_lapic_init();
1735         register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1736
1737         unconstrained = (struct event_constraint)
1738                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1739                                    0, x86_pmu.num_counters, 0, 0);
1740
1741         x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1742
1743         if (x86_pmu.event_attrs)
1744                 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1745
1746         if (!x86_pmu.events_sysfs_show)
1747                 x86_pmu_events_group.attrs = &empty_attrs;
1748         else
1749                 filter_events(x86_pmu_events_group.attrs);
1750
1751         if (x86_pmu.cpu_events) {
1752                 struct attribute **tmp;
1753
1754                 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1755                 if (!WARN_ON(!tmp))
1756                         x86_pmu_events_group.attrs = tmp;
1757         }
1758
1759         pr_info("... version:                %d\n",     x86_pmu.version);
1760         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1761         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1762         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1763         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1764         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1765         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1766
1767         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1768         perf_cpu_notifier(x86_pmu_notifier);
1769
1770         return 0;
1771 }
1772 early_initcall(init_hw_perf_events);
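
/*
 * Illustrative dmesg output from the pr_info() calls above, for a
 * hypothetical 48-bit, 4-counter PMU with 3 fixed counters (actual
 * values vary by CPU):
 *
 *	Performance Events: Intel PMU driver.
 *	... version:                4
 *	... bit width:              48
 *	... generic registers:      4
 *	... value mask:             0000ffffffffffff
 *	... max period:             00007fffffffffff
 *	... fixed-purpose events:   3
 *	... event mask:             000000070000000f
 */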
1773
1774 static inline void x86_pmu_read(struct perf_event *event)
1775 {
1776         x86_perf_event_update(event);
1777 }
1778
1779 /*
1780  * Start group events scheduling transaction
1781  * Set the flag to make pmu::enable() skip the
1782  * schedulability test; it will be performed at commit time.
1783  *
1784  * We only support PERF_PMU_TXN_ADD transactions. Save the
1785  * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1786  * transactions.
1787  */
1788 static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1789 {
1790         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1791
1792         WARN_ON_ONCE(cpuc->txn_flags);          /* txn already in flight */
1793
1794         cpuc->txn_flags = txn_flags;
1795         if (txn_flags & ~PERF_PMU_TXN_ADD)
1796                 return;
1797
1798         perf_pmu_disable(pmu);
1799         __this_cpu_write(cpu_hw_events.n_txn, 0);
1800 }
1801
1802 /*
1803  * Stop group events scheduling transaction
1804  * Clear the flag and pmu::enable() will perform the
1805  * schedulability test.
1806  */
1807 static void x86_pmu_cancel_txn(struct pmu *pmu)
1808 {
1809         unsigned int txn_flags;
1810         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1811
1812         WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1813
1814         txn_flags = cpuc->txn_flags;
1815         cpuc->txn_flags = 0;
1816         if (txn_flags & ~PERF_PMU_TXN_ADD)
1817                 return;
1818
1819         /*
1820          * Truncate collected array by the number of events added in this
1821          * transaction. See x86_pmu_add() and x86_pmu_*_txn().
1822          */
1823         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1824         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1825         perf_pmu_enable(pmu);
1826 }
1827
1828 /*
1829  * Commit group events scheduling transaction
1830  * Perform the group schedulability test as a whole
1831  * Return 0 on success
1832  *
1833  * Does not cancel the transaction on failure; expects the caller to do this.
1834  */
1835 static int x86_pmu_commit_txn(struct pmu *pmu)
1836 {
1837         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1838         int assign[X86_PMC_IDX_MAX];
1839         int n, ret;
1840
1841         WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1842
1843         if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1844                 cpuc->txn_flags = 0;
1845                 return 0;
1846         }
1847
1848         n = cpuc->n_events;
1849
1850         if (!x86_pmu_initialized())
1851                 return -EAGAIN;
1852
1853         ret = x86_pmu.schedule_events(cpuc, n, assign);
1854         if (ret)
1855                 return ret;
1856
1857         /*
1858          * Copy the new assignment now that we know it is possible;
1859          * it will be used by hw_perf_enable().
1860          */
1861         memcpy(cpuc->assign, assign, n*sizeof(int));
1862
1863         cpuc->txn_flags = 0;
1864         perf_pmu_enable(pmu);
1865         return 0;
1866 }
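
/*
 * Transaction protocol sketch, as driven by the generic perf core when
 * scheduling an event group (see group_sched_in() in
 * kernel/events/core.c):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	for each event in the group:
 *		pmu->add(event, flags);	    // no schedulability test yet
 *	if (pmu->commit_txn(pmu))	    // test the group as a whole
 *		pmu->cancel_txn(pmu);	    // roll back n_added/n_events
 */
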
1867 /*
1868  * a fake_cpuc is used to validate event groups. Due to
1869  * the extra reg logic, we need to also allocate a fake
1870  * per_core and per_cpu structure. Otherwise, group events
1871  * using extra reg may conflict without the kernel being
1872  * able to catch this when the last event gets added to
1873  * the group.
1874  */
1875 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1876 {
1877         kfree(cpuc->shared_regs);
1878         kfree(cpuc);
1879 }
1880
1881 static struct cpu_hw_events *allocate_fake_cpuc(void)
1882 {
1883         struct cpu_hw_events *cpuc;
1884         int cpu = raw_smp_processor_id();
1885
1886         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1887         if (!cpuc)
1888                 return ERR_PTR(-ENOMEM);
1889
1890         /* only needed if we have extra_regs */
1891         if (x86_pmu.extra_regs) {
1892                 cpuc->shared_regs = allocate_shared_regs(cpu);
1893                 if (!cpuc->shared_regs)
1894                         goto error;
1895         }
1896         cpuc->is_fake = 1;
1897         return cpuc;
1898 error:
1899         free_fake_cpuc(cpuc);
1900         return ERR_PTR(-ENOMEM);
1901 }
1902
1903 /*
1904  * validate that we can schedule this event
1905  */
1906 static int validate_event(struct perf_event *event)
1907 {
1908         struct cpu_hw_events *fake_cpuc;
1909         struct event_constraint *c;
1910         int ret = 0;
1911
1912         fake_cpuc = allocate_fake_cpuc();
1913         if (IS_ERR(fake_cpuc))
1914                 return PTR_ERR(fake_cpuc);
1915
1916         c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
1917
1918         if (!c || !c->weight)
1919                 ret = -EINVAL;
1920
1921         if (x86_pmu.put_event_constraints)
1922                 x86_pmu.put_event_constraints(fake_cpuc, event);
1923
1924         free_fake_cpuc(fake_cpuc);
1925
1926         return ret;
1927 }
1928
1929 /*
1930  * validate a single event group
1931  *
1932  * validation includes:
1933  *      - events are compatible with each other
1934  *      - events do not compete for the same counter
1935  *      - number of events <= number of counters
1936  *
1937  * validation ensures the group can be loaded onto the
1938  * PMU if it was the only group available.
1939  */
1940 static int validate_group(struct perf_event *event)
1941 {
1942         struct perf_event *leader = event->group_leader;
1943         struct cpu_hw_events *fake_cpuc;
1944         int ret = -EINVAL, n;
1945
1946         fake_cpuc = allocate_fake_cpuc();
1947         if (IS_ERR(fake_cpuc))
1948                 return PTR_ERR(fake_cpuc);
1949         /*
1950          * The event is not yet connected with its
1951          * siblings; therefore we must first collect
1952          * the existing siblings, then add the new event
1953          * before we can simulate the scheduling.
1954          */
1955         n = collect_events(fake_cpuc, leader, true);
1956         if (n < 0)
1957                 goto out;
1958
1959         fake_cpuc->n_events = n;
1960         n = collect_events(fake_cpuc, event, false);
1961         if (n < 0)
1962                 goto out;
1963
1964         fake_cpuc->n_events = n;
1965
1966         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1967
1968 out:
1969         free_fake_cpuc(fake_cpuc);
1970         return ret;
1971 }
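
/*
 * Illustrative user-space trigger (hypothetical attrs): opening a
 * second event with group_fd pointing at a leader ends up here via
 * x86_pmu_event_init() -> validate_group():
 *
 *	leader = syscall(__NR_perf_event_open, &attr1, 0, -1, -1, 0);
 *	member = syscall(__NR_perf_event_open, &attr2, 0, -1, leader, 0);
 *
 * If the leader and member cannot be scheduled on the PMU together,
 * the second call fails with -EINVAL.
 */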
1972
1973 static int x86_pmu_event_init(struct perf_event *event)
1974 {
1975         struct pmu *tmp;
1976         int err;
1977
1978         switch (event->attr.type) {
1979         case PERF_TYPE_RAW:
1980         case PERF_TYPE_HARDWARE:
1981         case PERF_TYPE_HW_CACHE:
1982                 break;
1983
1984         default:
1985                 return -ENOENT;
1986         }
1987
1988         err = __x86_pmu_event_init(event);
1989         if (!err) {
1990                 /*
1991                  * we temporarily connect the event to its pmu
1992                  * such that validate_group() can classify
1993                  * it as an x86 event using is_x86_event()
1994                  */
1995                 tmp = event->pmu;
1996                 event->pmu = &pmu;
1997
1998                 if (event->group_leader != event)
1999                         err = validate_group(event);
2000                 else
2001                         err = validate_event(event);
2002
2003                 event->pmu = tmp;
2004         }
2005         if (err) {
2006                 if (event->destroy)
2007                         event->destroy(event);
2008         }
2009
2010         if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2011                 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2012
2013         return err;
2014 }
2015
2016 static void refresh_pce(void *ignored)
2017 {
2018         if (current->mm)
2019                 load_mm_cr4(current->mm);
2020 }
2021
2022 static void x86_pmu_event_mapped(struct perf_event *event)
2023 {
2024         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2025                 return;
2026
2027         if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2028                 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2029 }
2030
2031 static void x86_pmu_event_unmapped(struct perf_event *event)
2032 {
2033         if (!current->mm)
2034                 return;
2035
2036         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2037                 return;
2038
2039         if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2040                 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2041 }
2042
2043 static int x86_pmu_event_idx(struct perf_event *event)
2044 {
2045         int idx = event->hw.idx;
2046
2047         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2048                 return 0;
2049
2050         if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2051                 idx -= INTEL_PMC_IDX_FIXED;
2052                 idx |= 1 << 30;
2053         }
2054
2055         return idx + 1;
2056 }
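
/*
 * Encoding sketch: the value returned here is published to user-space
 * as the mmap control page's "index" field, and user-space passes
 * (index - 1) to RDPMC.  Fixed counters are flagged with bit 30,
 * matching the RDPMC ECX convention, so fixed counter 0 yields
 * (1 << 30) + 1 and is read as:
 *
 *	rdpmc with ECX = (1 << 30) | 0
 */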
2057
2058 static ssize_t get_attr_rdpmc(struct device *cdev,
2059                               struct device_attribute *attr,
2060                               char *buf)
2061 {
2062         return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2063 }
2064
2065 static ssize_t set_attr_rdpmc(struct device *cdev,
2066                               struct device_attribute *attr,
2067                               const char *buf, size_t count)
2068 {
2069         unsigned long val;
2070         ssize_t ret;
2071
2072         ret = kstrtoul(buf, 0, &val);
2073         if (ret)
2074                 return ret;
2075
2076         if (val > 2)
2077                 return -EINVAL;
2078
2079         if (x86_pmu.attr_rdpmc_broken)
2080                 return -ENOTSUPP;
2081
2082         if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2083                 /*
2084                  * Changing into or out of always available, aka
2085                  * perf-event-bypassing mode.  This path is extremely slow,
2086                  * but only root can trigger it, so it's okay.
2087                  */
2088                 if (val == 2)
2089                         static_key_slow_inc(&rdpmc_always_available);
2090                 else
2091                         static_key_slow_dec(&rdpmc_always_available);
2092                 on_each_cpu(refresh_pce, NULL, 1);
2093         }
2094
2095         x86_pmu.attr_rdpmc = val;
2096
2097         return count;
2098 }
2099
2100 static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
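
/*
 * Example (root only): toggle user-space RDPMC through the sysfs file
 * this attribute creates:
 *
 *	echo 0 > /sys/bus/event_source/devices/cpu/rdpmc   # disabled
 *	echo 1 > /sys/bus/event_source/devices/cpu/rdpmc   # default
 *	echo 2 > /sys/bus/event_source/devices/cpu/rdpmc   # always on
 */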
2101
2102 static struct attribute *x86_pmu_attrs[] = {
2103         &dev_attr_rdpmc.attr,
2104         NULL,
2105 };
2106
2107 static struct attribute_group x86_pmu_attr_group = {
2108         .attrs = x86_pmu_attrs,
2109 };
2110
2111 static const struct attribute_group *x86_pmu_attr_groups[] = {
2112         &x86_pmu_attr_group,
2113         &x86_pmu_format_group,
2114         &x86_pmu_events_group,
2115         NULL,
2116 };
2117
2118 static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
2119 {
2120         if (x86_pmu.sched_task)
2121                 x86_pmu.sched_task(ctx, sched_in);
2122 }
2123
2124 void perf_check_microcode(void)
2125 {
2126         if (x86_pmu.check_microcode)
2127                 x86_pmu.check_microcode();
2128 }
2129 EXPORT_SYMBOL_GPL(perf_check_microcode);
2130
2131 static struct pmu pmu = {
2132         .pmu_enable             = x86_pmu_enable,
2133         .pmu_disable            = x86_pmu_disable,
2134
2135         .attr_groups            = x86_pmu_attr_groups,
2136
2137         .event_init             = x86_pmu_event_init,
2138
2139         .event_mapped           = x86_pmu_event_mapped,
2140         .event_unmapped         = x86_pmu_event_unmapped,
2141
2142         .add                    = x86_pmu_add,
2143         .del                    = x86_pmu_del,
2144         .start                  = x86_pmu_start,
2145         .stop                   = x86_pmu_stop,
2146         .read                   = x86_pmu_read,
2147
2148         .start_txn              = x86_pmu_start_txn,
2149         .cancel_txn             = x86_pmu_cancel_txn,
2150         .commit_txn             = x86_pmu_commit_txn,
2151
2152         .event_idx              = x86_pmu_event_idx,
2153         .sched_task             = x86_pmu_sched_task,
2154         .task_ctx_size          = sizeof(struct x86_perf_task_context),
2155 };
2156
2157 void arch_perf_update_userpage(struct perf_event *event,
2158                                struct perf_event_mmap_page *userpg, u64 now)
2159 {
2160         struct cyc2ns_data *data;
2161
2162         userpg->cap_user_time = 0;
2163         userpg->cap_user_time_zero = 0;
2164         userpg->cap_user_rdpmc =
2165                 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
2166         userpg->pmc_width = x86_pmu.cntval_bits;
2167
2168         if (!sched_clock_stable())
2169                 return;
2170
2171         data = cyc2ns_read_begin();
2172
2173         /*
2174          * Internal timekeeping for enabled/running/stopped times
2175          * is always in the local_clock domain.
2176          */
2177         userpg->cap_user_time = 1;
2178         userpg->time_mult = data->cyc2ns_mul;
2179         userpg->time_shift = data->cyc2ns_shift;
2180         userpg->time_offset = data->cyc2ns_offset - now;
2181
2182         /*
2183          * cap_user_time_zero doesn't make sense when we're using a different
2184          * time base for the records.
2185          */
2186         if (!event->attr.use_clockid) {
2187                 userpg->cap_user_time_zero = 1;
2188                 userpg->time_zero = data->cyc2ns_offset;
2189         }
2190
2191         cyc2ns_read_end(data);
2192 }
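
/*
 * User-space conversion sketch (the canonical recipe documented in
 * include/uapi/linux/perf_event.h): with the fields set above, a TSC
 * value "cyc" is converted to nanoseconds as:
 *
 *	quot  = cyc >> time_shift;
 *	rem   = cyc & (((u64)1 << time_shift) - 1);
 *	delta = time_offset + quot * time_mult +
 *		((rem * time_mult) >> time_shift);
 */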
2193
2194 /*
2195  * callchain support
2196  */
2197
2198 static int backtrace_stack(void *data, char *name)
2199 {
2200         return 0;
2201 }
2202
2203 static int backtrace_address(void *data, unsigned long addr, int reliable)
2204 {
2205         struct perf_callchain_entry_ctx *entry = data;
2206
2207         return perf_callchain_store(entry, addr);
2208 }
2209
2210 static const struct stacktrace_ops backtrace_ops = {
2211         .stack                  = backtrace_stack,
2212         .address                = backtrace_address,
2213         .walk_stack             = print_context_stack_bp,
2214 };
2215
2216 void
2217 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2218 {
2219         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2220                 /* TODO: we don't support guest OS callchains yet */
2221                 return;
2222         }
2223
2224         perf_callchain_store(entry, regs->ip);
2225
2226         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
2227 }
2228
2229 static inline int
2230 valid_user_frame(const void __user *fp, unsigned long size)
2231 {
2232         return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2233 }
2234
2235 static unsigned long get_segment_base(unsigned int segment)
2236 {
2237         struct desc_struct *desc;
2238         int idx = segment >> 3;
2239
2240         if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2241 #ifdef CONFIG_MODIFY_LDT_SYSCALL
2242                 struct ldt_struct *ldt;
2243
2244                 if (idx >= LDT_ENTRIES)
2245                         return 0;
2246
2247                 /* IRQs are off, so this synchronizes with smp_store_release */
2248                 ldt = lockless_dereference(current->active_mm->context.ldt);
2249                 if (!ldt || idx >= ldt->size)
2250                         return 0;
2251
2252                 desc = &ldt->entries[idx];
2253 #else
2254                 return 0;
2255 #endif
2256         } else {
2257                 if (idx >= GDT_ENTRIES)
2258                         return 0;
2259
2260                 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2261         }
2262
2263         return get_desc_base(desc);
2264 }
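
/*
 * Selector decoding used above (x86 segment selector layout):
 *
 *	bits 15-3: descriptor table index   (hence "segment >> 3")
 *	bit     2: table indicator          (0 = GDT, 1 = LDT)
 *	bits  1-0: requested privilege level
 *
 * Note that valid table indices run 0..N-1, which is why the bounds
 * checks above use ">=".
 */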
2265
2266 #ifdef CONFIG_IA32_EMULATION
2267
2268 #include <asm/compat.h>
2269
2270 static inline int
2271 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2272 {
2273         /* 32-bit process in 64-bit kernel. */
2274         unsigned long ss_base, cs_base;
2275         struct stack_frame_ia32 frame;
2276         const void __user *fp;
2277
2278         if (!test_thread_flag(TIF_IA32))
2279                 return 0;
2280
2281         cs_base = get_segment_base(regs->cs);
2282         ss_base = get_segment_base(regs->ss);
2283
2284         fp = compat_ptr(ss_base + regs->bp);
2285         pagefault_disable();
2286         while (entry->nr < entry->max_stack) {
2287                 unsigned long bytes;
2288                 frame.next_frame     = 0;
2289                 frame.return_address = 0;
2290
2291                 if (!access_ok(VERIFY_READ, fp, 8))
2292                         break;
2293
2294                 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2295                 if (bytes != 0)
2296                         break;
2297                 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
2298                 if (bytes != 0)
2299                         break;
2300
2301                 if (!valid_user_frame(fp, sizeof(frame)))
2302                         break;
2303
2304                 perf_callchain_store(entry, cs_base + frame.return_address);
2305                 fp = compat_ptr(ss_base + frame.next_frame);
2306         }
2307         pagefault_enable();
2308         return 1;
2309 }
2310 #else
2311 static inline int
2312 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2313 {
2314         return 0;
2315 }
2316 #endif
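
/*
 * Frame layout assumed by the 32-bit walker above (matching the two
 * 4-byte copies it performs; see struct stack_frame_ia32):
 *
 *	struct stack_frame_ia32 {
 *		u32 next_frame;		// saved EBP of the caller
 *		u32 return_address;	// EIP to return to
 *	};
 */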
2317
2318 void
2319 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2320 {
2321         struct stack_frame frame;
2322         const unsigned long __user *fp;
2323
2324         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2325                 /* TODO: we don't support guest OS callchains yet */
2326                 return;
2327         }
2328
2329         /*
2330          * We don't know what to do with VM86 stacks; ignore them for now.
2331          */
2332         if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2333                 return;
2334
2335         fp = (unsigned long __user *)regs->bp;
2336
2337         perf_callchain_store(entry, regs->ip);
2338
2339         if (!current->mm)
2340                 return;
2341
2342         if (perf_callchain_user32(regs, entry))
2343                 return;
2344
2345         pagefault_disable();
2346         while (entry->nr < entry->max_stack) {
2347                 unsigned long bytes;
2348
2349                 frame.next_frame     = NULL;
2350                 frame.return_address = 0;
2351
2352                 if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
2353                         break;
2354
2355                 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
2356                 if (bytes != 0)
2357                         break;
2358                 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
2359                 if (bytes != 0)
2360                         break;
2361
2362                 if (!valid_user_frame(fp, sizeof(frame)))
2363                         break;
2364
2365                 perf_callchain_store(entry, frame.return_address);
2366                 fp = (void __user *)frame.next_frame;
2367         }
2368         pagefault_enable();
2369 }
2370
2371 /*
2372  * Deal with code segment offsets for the various execution modes:
2373  *
2374  *   VM86 - the good olde 16 bit days, where the linear address is
2375  *          20 bits and we use regs->ip + 0x10 * regs->cs.
2376  *
2377  *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
2378  *          to figure out what the 32bit base address is.
2379  *
2380  *    X32 - has TIF_X32 set, but is running in x86_64
2381  *
2382  * X86_64 - CS,DS,SS,ES are all zero based.
2383  */
2384 static unsigned long code_segment_base(struct pt_regs *regs)
2385 {
2386         /*
2387          * For IA32 we look at the GDT/LDT segment base to convert the
2388          * effective IP to a linear address.
2389          */
2390
2391 #ifdef CONFIG_X86_32
2392         /*
2393          * If we are in VM86 mode, add the segment offset to convert to a
2394          * linear address.
2395          */
2396         if (regs->flags & X86_VM_MASK)
2397                 return 0x10 * regs->cs;
2398
2399         if (user_mode(regs) && regs->cs != __USER_CS)
2400                 return get_segment_base(regs->cs);
2401 #else
2402         if (user_mode(regs) && !user_64bit_mode(regs) &&
2403             regs->cs != __USER32_CS)
2404                 return get_segment_base(regs->cs);
2405 #endif
2406         return 0;
2407 }
2408
2409 unsigned long perf_instruction_pointer(struct pt_regs *regs)
2410 {
2411         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
2412                 return perf_guest_cbs->get_guest_ip();
2413
2414         return regs->ip + code_segment_base(regs);
2415 }
2416
2417 unsigned long perf_misc_flags(struct pt_regs *regs)
2418 {
2419         int misc = 0;
2420
2421         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2422                 if (perf_guest_cbs->is_user_mode())
2423                         misc |= PERF_RECORD_MISC_GUEST_USER;
2424                 else
2425                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2426         } else {
2427                 if (user_mode(regs))
2428                         misc |= PERF_RECORD_MISC_USER;
2429                 else
2430                         misc |= PERF_RECORD_MISC_KERNEL;
2431         }
2432
2433         if (regs->flags & PERF_EFLAGS_EXACT)
2434                 misc |= PERF_RECORD_MISC_EXACT_IP;
2435
2436         return misc;
2437 }
2438
2439 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2440 {
2441         cap->version            = x86_pmu.version;
2442         cap->num_counters_gp    = x86_pmu.num_counters;
2443         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2444         cap->bit_width_gp       = x86_pmu.cntval_bits;
2445         cap->bit_width_fixed    = x86_pmu.cntval_bits;
2446         cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
2447         cap->events_mask_len    = x86_pmu.events_mask_len;
2448 }
2449 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
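
/*
 * Consumer sketch (illustrative): KVM queries these limits when
 * building the guest's CPUID 0xA leaf, e.g.:
 *
 *	struct x86_pmu_capability cap;
 *
 *	perf_get_x86_pmu_capability(&cap);
 *	// clamp the guest PMU to cap.num_counters_gp,
 *	// cap.bit_width_gp, etc.
 */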