/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * CMCI storm detection backoff counter
 *
 * During a storm, we reset this counter to INITIAL_CHECK_INTERVAL in case
 * we've encountered an error. If not, we decrement it by one. We signal
 * the end of the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

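/*
 * Storm-detection tunables: more than CMCI_STORM_THRESHOLD interrupts
 * within one CMCI_STORM_INTERVAL counts as a storm; while the storm
 * lasts the owned banks are polled every CMCI_STORM_INTERVAL instead
 * of raising interrupts.
 */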
#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

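/*
 * Per-CPU storm state machine: NONE -> ACTIVE when a storm is detected,
 * ACTIVE -> SUBSIDED once the poll timer sees no more events, and back
 * to NONE when every CPU's storm has subsided.
 */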
enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;

static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * setup is vendor keyed and this makes sure none of the
	 * backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

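/*
 * Local machine check (LMCE) lets a single CPU take a machine check
 * without broadcasting it to all CPUs. It needs both CPU support
 * advertised in MCG_CAP and a BIOS opt-in via IA32_FEATURE_CONTROL.
 */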
static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);
	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
		   (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEATURE_CONTROL without which touching MCG_EXT_CTL will
	 * generate a #GP fault.
	 */
	rdmsrl(MSR_IA32_FEATURE_CONTROL, tmp);
	if ((tmp & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE)) ==
		   (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE))
		return true;
	return false;
}

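/*
 * Poll the CMCI-owned banks while a storm is in progress. Returns true
 * if this CPU is in storm mode and the owned banks were polled here.
 */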
bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/* Reset the counter if we've logged an error in the last poll */
	if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);
	return true;
}

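/*
 * A CPU going offline can no longer take part in storm accounting:
 * drop its contribution to the global count and reset its state.
 */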
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);
	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

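/*
 * Pick the next poll interval: stay at CMCI_STORM_INTERVAL while a
 * storm is active, and step the state machine back towards interrupt
 * mode once all CPUs have seen their storms subside.
 */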
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */
	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}

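/*
 * Clear MCI_CTL2_CMCI_EN on every bank this CPU owns so that a storm
 * no longer raises interrupts; the banks are picked up by polling.
 */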
static void cmci_storm_disable_banks(void)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = this_cpu_ptr(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
		val &= ~MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

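/*
 * Count interrupts per CMCI_STORM_INTERVAL window. Once more than
 * CMCI_STORM_THRESHOLD arrive within one window, declare a storm:
 * disable CMCI on the owned banks and let the poll timer take over.
 */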
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_storm_disable_banks();
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_STORM_INTERVAL);
	this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;

	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i, banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;
	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/* Reenable CMCI on this CPU in case a CPU down failed. */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

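/*
 * Disable CMCI for a single bank on this CPU, e.g. when the bank is
 * switched to firmware-first handling.
 */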
void cmci_disable_bank(int bank)
{
	unsigned long flags;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

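/* Set up the CMCI interrupt vector and take ownership of banks on this CPU. */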
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * On CPU #0 this runs while the APIC is still disabled, but
	 * that's OK because only the vector is set up here. We recheck
	 * the banks for CPU #0 later to make sure we don't miss any
	 * events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

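/* Opt in to LMCE if the CPU and BIOS support it and it is not already enabled. */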
void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;
	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
	intel_init_lmce();
}