/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL          (300 * HZ)

#define THERMAL_THROTTLING_EVENT        0
#define POWER_LIMIT_EVENT               1

/*
 * Current thermal event state:
 */
struct _thermal_state {
        bool                    new_event;
        int                     event;
        u64                     next_check;
        unsigned long           count;
        unsigned long           last_count;
};

struct thermal_state {
        struct _thermal_state core_throttle;
        struct _thermal_state core_power_limit;
        struct _thermal_state package_throttle;
        struct _thermal_state package_power_limit;
        struct _thermal_state core_thresh0;
        struct _thermal_state core_thresh1;
        struct _thermal_state pkg_thresh0;
        struct _thermal_state pkg_thresh1;
};

/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback to query rate control: returns true if the platform
 * callback implements its own rate control.
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);

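/*
 * Illustrative sketch (not part of this file): a platform driver that
 * consumes package threshold events would hook the callbacks above
 * roughly as below; pkg_temp_notify(), pkg_temp_rate_control() and
 * pkg_temp_work are hypothetical names standing in for the driver's
 * own functions and work item.
 *
 *      static int pkg_temp_notify(__u64 msr_val)
 *      {
 *              schedule_work(&pkg_temp_work);  // defer the real work
 *              return 0;
 *      }
 *
 *      static bool pkg_temp_rate_control(void)
 *      {
 *              return true;    // the driver rate-limits by itself
 *      }
 *
 *      platform_thermal_package_notify = pkg_temp_notify;
 *      platform_thermal_package_rate_control = pkg_temp_rate_control;
 */
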
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)                         \
        static DEVICE_ATTR(_name, 0444,                                 \
                           therm_throt_device_show_##_name,             \
                           NULL)

#define define_therm_throt_device_show_func(event, name)                \
                                                                        \
static ssize_t therm_throt_device_show_##event##_##name(                \
                        struct device *dev,                             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
{                                                                       \
        unsigned int cpu = dev->id;                                     \
        ssize_t ret;                                                    \
                                                                        \
        preempt_disable();      /* CPU hotplug */                       \
        if (cpu_online(cpu)) {                                          \
                ret = sprintf(buf, "%lu\n",                             \
                              per_cpu(thermal_state, cpu).event.name);  \
        } else                                                          \
                ret = 0;                                                \
        preempt_enable();                                               \
                                                                        \
        return ret;                                                     \
}

define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);

static struct attribute *thermal_throttle_attrs[] = {
        &dev_attr_core_throttle_count.attr,
        NULL
};

static struct attribute_group thermal_attr_group = {
        .attrs  = thermal_throttle_attrs,
        .name   = "thermal_throttle"
};
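
/*
 * The attributes above appear under each CPU device, e.g.
 * /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count;
 * reading one returns that CPU's cumulative event count (the value
 * shown below is illustrative):
 *
 *      $ cat /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 *      12
 */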
#endif /* CONFIG_SYSFS */

#define CORE_LEVEL      0
#define PACKAGE_LEVEL   1

/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (boolean); the
 *             thermal interrupt normally fires both when the thermal
 *             event begins and once the event has ended.
 * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT
 * @level: CORE_LEVEL or PACKAGE_LEVEL
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be logged further, i.e. still in
 *              "timeout" from the previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        bool old_event;
        u64 now;
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

        now = get_jiffies_64();
        if (level == CORE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->core_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
                        return 0;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
                        return 0;
        } else
                return 0;

        old_event = state->new_event;
        state->new_event = new_event;

        if (new_event)
                state->count++;

        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
                return 0;

        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;

        /* if we just entered the thermal event */
        if (new_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
                return 1;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
                return 1;
        }

        return 0;
}
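
/*
 * Rate-limiting example: with CHECK_INTERVAL = 300 * HZ (i.e. 300
 * seconds), a CPU that throttles continuously logs at most one "clock
 * throttled" message per event type and level every five minutes,
 * while state->count still accumulates every event for the sysfs
 * counters above.
 */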

static int thresh_event_valid(int level, int event)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
        u64 now = get_jiffies_64();

        if (level == PACKAGE_LEVEL)
                state = (event == 0) ? &pstate->pkg_thresh0 :
                                                &pstate->pkg_thresh1;
        else
                state = (event == 0) ? &pstate->core_thresh0 :
                                                &pstate->core_thresh1;

        if (time_before64(now, state->next_check))
                return 0;

        state->next_check = now + CHECK_INTERVAL;

        return 1;
}

static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
        int_pln_enable = true;

        return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);
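
/*
 * Usage note: power-limit notification interrupts are left disabled
 * unless "int_pln_enable" is passed on the kernel command line, e.g.:
 *
 *      vmlinuz ... int_pln_enable
 */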

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct device *dev,
                                unsigned int cpu)
{
        int err;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
        if (err)
                return err;

        if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_core_power_limit_count.attr,
                                              thermal_attr_group.name);
        if (cpu_has(c, X86_FEATURE_PTS)) {
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_package_throttle_count.attr,
                                              thermal_attr_group.name);
                if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        err = sysfs_add_file_to_group(&dev->kobj,
                                        &dev_attr_package_power_limit_count.attr,
                                        thermal_attr_group.name);
        }

        return err;
}

static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
        int err = 0;

        dev = get_cpu_device(cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
                err = thermal_throttle_add_dev(dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mutex_lock(&therm_cpu_lock);
                thermal_throttle_remove_dev(dev);
                mutex_unlock(&therm_cpu_lock);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
        .notifier_call = thermal_throttle_cpu_callback,
};

static __init int thermal_throttle_init_device(void)
{
        unsigned int cpu = 0;
        int err;

        if (!atomic_read(&therm_throt_en))
                return 0;

        register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&therm_cpu_lock);
#endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
                err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
                WARN_ON(err);
        }
#ifdef CONFIG_HOTPLUG_CPU
        mutex_unlock(&therm_cpu_lock);
#endif

        return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

static void notify_package_thresholds(__u64 msr_val)
{
        bool notify_thres_0 = false;
        bool notify_thres_1 = false;

        if (!platform_thermal_package_notify)
                return;

        /* lower threshold check */
        if (msr_val & THERM_LOG_THRESHOLD0)
                notify_thres_0 = true;
        /* higher threshold check */
        if (msr_val & THERM_LOG_THRESHOLD1)
                notify_thres_1 = true;

        if (!notify_thres_0 && !notify_thres_1)
                return;

        if (platform_thermal_package_rate_control &&
                platform_thermal_package_rate_control()) {
                /* Rate control is implemented in callback */
                platform_thermal_package_notify(msr_val);
                return;
        }

        /* lower threshold reached */
        if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
                platform_thermal_package_notify(msr_val);
        /* higher threshold reached */
        if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
                platform_thermal_package_notify(msr_val);
}

static void notify_thresholds(__u64 msr_val)
{
        /*
         * Check whether the interrupt handler is defined;
         * otherwise simply return.
         */
        if (!platform_thermal_notify)
                return;

        /* lower threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD0) &&
                        thresh_event_valid(CORE_LEVEL, 0))
                platform_thermal_notify(msr_val);
        /* higher threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD1) &&
                        thresh_event_valid(CORE_LEVEL, 1))
                platform_thermal_notify(msr_val);
}
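
/*
 * Flow of the handler below: read IA32_THERM_STATUS, forward any core
 * threshold crossings to the platform callback, rate-limit and log
 * PROCHOT transitions (plus core power-limit events when enabled),
 * then repeat the same steps at package level if the CPU supports
 * package thermal status (PTS).
 */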

/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
        __u64 msr_val;

        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

        /* Check for violation of core thermal thresholds */
        notify_thresholds(msr_val);

        if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                                THERMAL_THROTTLING_EVENT,
                                CORE_LEVEL) != 0)
                mce_log_therm_throt_event(msr_val);

        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        CORE_LEVEL);

        if (this_cpu_has(X86_FEATURE_PTS)) {
                rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                /* check violations of package thermal thresholds */
                notify_package_thresholds(msr_val);
                therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                        THERMAL_THROTTLING_EVENT,
                                        PACKAGE_LEVEL);
                if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                        therm_throt_process(msr_val &
                                        PACKAGE_THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        PACKAGE_LEVEL);
        }
}

static void unexpected_thermal_interrupt(void)
{
        printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
                        smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

static inline void __smp_thermal_interrupt(void)
{
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
}

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
        entering_irq();
        __smp_thermal_interrupt();
        exiting_ack_irq();
}

asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
{
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
        __smp_thermal_interrupt();
        trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
        exiting_ack_irq();
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
        if (!cpu_has_apic)
                return 0;
        if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
                return 0;
        return 1;
}

void __init mcheck_intel_therm_init(void)
{
        /*
         * This function is called only on the boot CPU. Save the initial
         * thermal LVT value on the BSP; it is used later to restore the
         * BIOS-programmed thermal LVT entry on each AP.
         */
        if (intel_thermal_supported(&boot_cpu_data))
                lvtthmr_init = apic_read(APIC_LVTTHMR);
}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        int tm2 = 0;
        u32 l, h;

        if (!intel_thermal_supported(c))
                return;

        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
         * since it might be delivered via SMI already:
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);

        h = lvtthmr_init;
        /*
         * The initial value of the thermal LVT entry on all APs always reads
         * 0x10000: the APs are woken up by the BSP issuing an INIT-SIPI-SIPI
         * sequence, and receiving the INIT IPI resets the LVT registers to 0
         * except for the mask bits, which are set to 1.
         * If the BIOS takes over the thermal interrupt and sets its delivery
         * mode to SMI (not fixed), restore the value the BIOS programmed on
         * the AP, using the BSP's value saved above, since the BIOS always
         * sets the same value for all threads/cores.
         */
        if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
                apic_write(APIC_LVTTHMR, lvtthmr_init);

        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }

        /* Check whether a vector already exists */
        if (h & APIC_VECTOR_MASK) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal LVT vector (%#x) already installed\n",
                       cpu, (h & APIC_VECTOR_MASK));
                return;
        }

        /* early Pentium M models use a different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
                        rdmsr(MSR_THERM2_CTL, l, h);
                        if (l & MSR_THERM2_CTL_TM_SELECT)
                                tm2 = 1;
                } else if (l & MSR_IA32_MISC_ENABLE_TM2)
                        tm2 = 1;
        }

        /* We'll mask the thermal vector in the lapic until we're ready: */
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);

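        /*
         * Enable the low/high threshold interrupts. The power-limit
         * notification (PLN) bit is set only when the CPU has PLN and
         * "int_pln_enable" was given on the command line; otherwise it
         * is explicitly cleared, as PLN events can fire frequently
         * enough on some systems to flood the log.
         */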
        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                        (l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
        else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                        l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
        else
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
                if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                (l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE))
                                & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
                else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE
                                | PACKAGE_THERM_INT_PLN_ENABLE), h);
                else
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE), h);
        }

        smp_thermal_vector = intel_thermal_interrupt;

        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

        /* Unmask the thermal vector: */
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

        printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
                       tm2 ? "TM2" : "TM1");

        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
}