/*
 * perf_event_intel_cstate.c: support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *      MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                       perf code: 0x00
 *                       Available model: SLM,AMT
 *                       Scope: Core (each processor core has an MSR)
 *      MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                             perf code: 0x01
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Core
 *      MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *                             perf code: 0x02
 *                             Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Core
 *      MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                             perf code: 0x03
 *                             Available model: SNB,IVB,HSW,BDW,SKL
 *                             Scope: Core
 *      MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *                             perf code: 0x00
 *                             Available model: SNB,IVB,HSW,BDW,SKL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *                             perf code: 0x01
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *                             perf code: 0x02
 *                             Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *                             perf code: 0x03
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *                             perf code: 0x04
 *                             Available model: HSW ULT only
 *                             Scope: Package (physical package)
 *      MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *                             perf code: 0x05
 *                             Available model: HSW ULT only
 *                             Scope: Package (physical package)
 *      MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                             perf code: 0x06
 *                             Available model: HSW ULT only
 *                             Scope: Package (physical package)
 *
 */

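/*
 * Usage sketch (event and PMU names as exported by this driver; counting
 * must be system-wide, hence -a):
 *
 *      perf stat -e cstate_core/c6-residency/ -a sleep 1
 *      perf stat -e cstate_pkg/c2-residency/ -a sleep 1
 *
 * The raw encoding uses the perf codes listed above, e.g.
 * cstate_core/event=0x02/ is equivalent to cstate_core/c6-residency/.
 */
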
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)         \
static ssize_t __cstate_##_var##_show(struct kobject *kobj,     \
                                struct kobj_attribute *attr,    \
                                char *page)                     \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct kobj_attribute format_attr_##_var =               \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)

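/*
 * For example, the DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * invocation below creates a read-only sysfs attribute named "event" (placed
 * in the PMU's "format" group) whose contents are "config:0-63", telling
 * tooling how the event selector is encoded in perf_event_attr::config.
 */
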
static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);

/* Model -> events mapping */
struct cstate_model {
        unsigned long           core_events;
        unsigned long           pkg_events;
        unsigned long           quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR   (1UL << 0)

struct perf_cstate_msr {
        u64     msr;
        struct  perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,          &evattr_cstate_core_c1 },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,    &evattr_cstate_core_c3 },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,    &evattr_cstate_core_c6 },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,    &evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,      &evattr_cstate_pkg_c2 },
        [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,      &evattr_cstate_pkg_c3 },
        [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,      &evattr_cstate_pkg_c6 },
        [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,      &evattr_cstate_pkg_c7 },
        [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,      &evattr_cstate_pkg_c8 },
        [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,      &evattr_cstate_pkg_c9 },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,    &evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};
static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

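/*
 * The "cpumask" sysfs attribute advertises the designated reader CPUs
 * (one per core for cstate_core, one per package for cstate_pkg); tools
 * such as perf open the events only on the advertised CPUs.
 */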
static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;
        int cpu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                if (!core_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_core_cpu_mask,
                                      topology_sibling_cpumask(event->cpu));
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                if (!pkg_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
                                      topology_core_cpumask(event->cpu));
        } else {
                return -ENOENT;
        }

        if (cpu >= nr_cpu_ids)
                return -ENODEV;

        /* Redirect the event to the designated reader CPU */
        event->cpu = cpu;
        event->hw.config = cfg;
        event->hw.idx = -1;
        return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}

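/*
 * Lockless delta accounting: re-read the MSR until prev_count can be
 * swapped from its observed value, so concurrent updaters (pmu->read()
 * and stop/del) never account the same delta twice.
 */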
static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = cstate_pmu_read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}

/*
 * Check if the exiting CPU is the designated reader. If so, migrate the
 * events to a valid target, if one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
        unsigned int target;

        if (has_cstate_core &&
            cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
                }
        }

        if (has_cstate_pkg &&
            cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

                target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
                }
        }
        return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
        unsigned int target;

        /*
         * If this is the first online thread of that core, set it in
         * the core cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_core_cpu_mask,
                                 topology_sibling_cpumask(cpu));

        if (has_cstate_core && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

        /*
         * If this is the first online thread of that package, set it
         * in the package cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_pkg_cpu_mask,
                                 topology_core_cpumask(cpu));
        if (has_cstate_pkg && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

        return 0;
}

static struct pmu cstate_core_pmu = {
        .attr_groups    = core_attr_groups,
        .name           = "cstate_core",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups    = pkg_attr_groups,
        .name           = "cstate_pkg",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static const struct cstate_model nhm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES) |
                                  BIT(PERF_CSTATE_PKG_C8_RES) |
                                  BIT(PERF_CSTATE_PKG_C9_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks                 = SLM_PKG_C6_USE_C7_MSR,
};

#define X86_CSTATES_MODEL(model, states)                                \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
        { },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into the sysfs
 * attributes. Return false if no events are available.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
                                    struct perf_cstate_msr *msr,
                                    struct attribute **attrs)
{
        bool found = false;
        unsigned int bit;
        u64 val;

        for (bit = 0; bit < max; bit++) {
                if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
                        *attrs++ = &msr[bit].attr->attr.attr;
                        found = true;
                } else {
                        msr[bit].attr = NULL;
                }
        }
        *attrs = NULL;

        return found;
}

static int __init cstate_probe(const struct cstate_model *cm)
{
        /* SLM has a different MSR for PKG C6 */
        if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

        has_cstate_core = cstate_probe_msr(cm->core_events,
                                           PERF_CSTATE_CORE_EVENT_MAX,
                                           core_msr, core_events_attrs);

        has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
                                          PERF_CSTATE_PKG_EVENT_MAX,
                                          pkg_msr, pkg_events_attrs);

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
        if (has_cstate_core)
                perf_pmu_unregister(&cstate_core_pmu);

        if (has_cstate_pkg)
                perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
        int err;

        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
                          "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
                          NULL);
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
                          "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (err) {
                        has_cstate_core = false;
                        pr_info("Failed to register cstate core pmu\n");
                        return err;
                }
        }

        if (has_cstate_pkg) {
                err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
                if (err) {
                        has_cstate_pkg = false;
                        pr_info("Failed to register cstate pkg pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }

        return err;
}

static int __init cstate_pmu_init(void)
{
        const struct x86_cpu_id *id;
        int err;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_cstates_match);
        if (!id)
                return -ENODEV;

        err = cstate_probe((const struct cstate_model *) id->driver_data);
        if (err)
                return err;

        return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
        cstate_cleanup();
}
module_exit(cstate_pmu_exit);