perf/x86/intel/rapl: Refactor the code some more
authorThomas Gleixner <tglx@linutronix.de>
Mon, 22 Feb 2016 22:19:24 +0000 (22:19 +0000)
committerIngo Molnar <mingo@kernel.org>
Mon, 29 Feb 2016 08:35:24 +0000 (09:35 +0100)
Split out code from init into separate functions. Tidy up the code and get rid
of pointless comments. I wish there were comments for code which is not
obvious....

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.588544679@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/intel/rapl.c

index f31e4b4..ba5043b 100644 (file)
@@ -110,7 +110,7 @@ static ssize_t __rapl_##_var##_show(struct kobject *kobj,   \
 static struct kobj_attribute format_attr_##_var =              \
        __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
 
-#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+#define RAPL_CNTR_WIDTH 32
 
 #define RAPL_EVENT_ATTR_STR(_name, v, str)                                     \
 static struct perf_pmu_events_attr event_attr_##v = {                          \
@@ -120,15 +120,16 @@ static struct perf_pmu_events_attr event_attr_##v = {                             \
 };
 
 struct rapl_pmu {
-       spinlock_t       lock;
-       int              n_active; /* number of active events */
-       struct list_head active_list;
-       struct pmu       *pmu; /* pointer to rapl_pmu_class */
-       ktime_t          timer_interval; /* in ktime_t unit */
-       struct hrtimer   hrtimer;
+       spinlock_t              lock;
+       int                     n_active;
+       struct list_head        active_list;
+       struct pmu              *pmu;
+       ktime_t                 timer_interval;
+       struct hrtimer          hrtimer;
 };
 
-static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;  /* 1/2^hw_unit Joule */
+ /* 1/2^hw_unit Joule */
+static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
 static struct pmu rapl_pmu_class;
 static cpumask_t rapl_cpu_mask;
 static int rapl_cntr_mask;
@@ -200,11 +201,6 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
                     HRTIMER_MODE_REL_PINNED);
 }
 
-static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
-{
-       hrtimer_cancel(&pmu->hrtimer);
-}
-
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
@@ -216,9 +212,8 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 
        spin_lock_irqsave(&pmu->lock, flags);
 
-       list_for_each_entry(event, &pmu->active_list, active_entry) {
+       list_for_each_entry(event, &pmu->active_list, active_entry)
                rapl_event_update(event);
-       }
 
        spin_unlock_irqrestore(&pmu->lock, flags);
 
@@ -275,7 +270,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
                WARN_ON_ONCE(pmu->n_active <= 0);
                pmu->n_active--;
                if (pmu->n_active == 0)
-                       rapl_stop_hrtimer(pmu);
+                       hrtimer_cancel(&pmu->hrtimer);
 
                list_del(&event->active_entry);
 
@@ -542,7 +537,7 @@ static void rapl_cpu_exit(int cpu)
                perf_pmu_migrate_context(pmu->pmu, cpu, target);
 
        /* cancel overflow polling timer for CPU */
-       rapl_stop_hrtimer(pmu);
+       hrtimer_cancel(&pmu->hrtimer);
 }
 
 static void rapl_cpu_init(int cpu)
@@ -698,6 +693,20 @@ static void __init rapl_advertise(void)
        }
 }
 
+static int __init rapl_prepare_cpus(void)
+{
+       unsigned int cpu;
+       int ret;
+
+       for_each_online_cpu(cpu) {
+               ret = rapl_cpu_prepare(cpu);
+               if (ret)
+                       return ret;
+               rapl_cpu_init(cpu);
+       }
+       return 0;
+}
+
 static void __init cleanup_rapl_pmus(void)
 {
        int cpu;
@@ -706,7 +715,7 @@ static void __init cleanup_rapl_pmus(void)
                kfree(per_cpu(rapl_pmu, cpu));
 }
 
-static const struct x86_cpu_id rapl_cpu_match[] = {
+static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
        [1] = {},
 };
@@ -714,15 +723,11 @@ static const struct x86_cpu_id rapl_cpu_match[] = {
 static int __init rapl_pmu_init(void)
 {
        void (*quirk)(void) = NULL;
-       int cpu, ret;
+       int ret;
 
-       /*
-        * check for Intel processor family 6
-        */
        if (!x86_match_cpu(rapl_cpu_match))
                return -ENODEV;
 
-       /* check supported CPU */
        switch (boot_cpu_data.x86_model) {
        case 42: /* Sandy Bridge */
        case 58: /* Ivy Bridge */
@@ -751,7 +756,6 @@ static int __init rapl_pmu_init(void)
                rapl_pmu_events_group.attrs = rapl_events_knl_attr;
                break;
        default:
-               /* unsupported */
                return -ENODEV;
        }
 
@@ -761,12 +765,9 @@ static int __init rapl_pmu_init(void)
 
        cpu_notifier_register_begin();
 
-       for_each_online_cpu(cpu) {
-               ret = rapl_cpu_prepare(cpu);
-               if (ret)
-                       goto out;
-               rapl_cpu_init(cpu);
-       }
+       ret = rapl_prepare_cpus();
+       if (ret)
+               goto out;
 
        ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
        if (ret)