/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>       /* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

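/*
 * Module parameters; on the kernel command line these take a "processor."
 * prefix, e.g. "processor.max_cstate=1" (the form the DMI notice below
 * suggests for overrides):
 *
 *  max_cstate       - deepest C-state the driver may use
 *  nocst            - ignore _CST and fall back to FADT/P_BLK data
 *  bm_check_disable - skip the bus-master activity check before C3
 *  latency_factor   - multiplier on a state's exit latency used to
 *                     derive its target residency
 */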
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
                                                                acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
        return boot_option_idle_override == IDLE_POLL ||
                boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

static struct dmi_system_id processor_power_dmi_table[] = {
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        { set_max_cstate, "Pavilion zv5000", {
          DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
          DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
         (void *)1},
        { set_max_cstate, "Asus L8400B", {
          DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
          DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
         (void *)1},
        {},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
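        /*
         * safe_halt() re-enables interrupts before halting (STI; HLT on
         * x86), so disable them again on the way out to keep the
         * contract stated above.
         */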
        if (!tif_need_resched()) {
                safe_halt();
                local_irq_disable();
        }
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
                return;

        if (amd_e400_c1e_detected)
                type = ACPI_STATE_C1;

        /*
         * Check if one of the previous states already marked the lapic
         * unstable
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pwr->timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
        struct acpi_processor *pr = (struct acpi_processor *) arg;
        unsigned long reason;

        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

        clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
        smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
                                 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
        int state = cx - pr->power.states;

        if (state >= pr->power.timer_broadcast_on_state) {
                unsigned long reason;

                reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
                clockevents_notify(reason, &pr->id);
        }
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

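/*
 * BM_RLD is preserved across system sleep because firmware may change it
 * during the transition, while this driver expects it to be set once and
 * left set (see acpi_processor_power_verify_c3()).
 */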
static int acpi_processor_suspend(void)
{
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
        return 0;
}

static void acpi_processor_resume(void)
{
        u32 resumed_bm_rld = 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
        if (resumed_bm_rld == saved_bm_rld)
                return;

        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
        .suspend = acpi_processor_suspend,
        .resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
        register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
        unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
        case X86_VENDOR_INTEL:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        return;

                /* fall through */
        default:
                /* TSC could halt in idle, so notify users */
                if (state > ACPI_STATE_C1)
                        mark_tsc_unstable("TSC halts in idle");
        }
}
#else
static void tsc_check_state(int state) { }
#endif

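/*
 * Extract C2/C3 data from the FADT and the processor's P_BLK.  Per the
 * ACPI specification, a read of P_BLK+4 (P_LVL2) enters C2 and a read
 * of P_BLK+5 (P_LVL3) enters C3, which is where the +4/+5 offsets below
 * come from.
 */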
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

        /*
         * FADT specified C2 latency must be less than or equal to
         * 100 microseconds.
         */
        if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
                /* invalidate C2 */
                pr->power.states[ACPI_STATE_C2].address = 0;
        }

        /*
         * FADT supplied C3 latency must be less than or equal to
         * 1000 microseconds.
         */
        if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
                /* invalidate C3 */
                pr->power.states[ACPI_STATE_C3].address = 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
                pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status;
        u64 count;
        int current_count;
        int i, ret = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        if (nocst)
                return -ENODEV;

        current_count = 0;

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
                ret = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
                ret = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

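        /*
         * Per the ACPI specification, each _CST entry after the count is
         * itself a 4-element package:
         *
         *   Package {
         *           Register,   // Buffer: how to enter the state
         *           Type,       // Integer: 1 = C1, 2 = C2, 3 = C3
         *           Latency,    // Integer: worst-case exit latency, us
         *           Power       // Integer: average power consumption, mW
         *   }
         *
         * which is exactly the shape the loop below validates.
         */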
        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                /* There should be an easy way to extract an integer... */
                obj = &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;
                /*
                 * Some buggy BIOSes won't list C1 in _CST -
                 * Let acpi_processor_get_power_info_default() handle them later
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        current_count++;

                cx.address = reg->address;
                cx.index = current_count + 1;

                cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe
                                        (pr->id, &cx, reg) == 0) {
                                cx.entry_method = ACPI_CSTATE_FFH;
                        } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
                                 * Otherwise, ignore this info and continue.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        } else {
                                continue;
                        }
                        if (cx.type == ACPI_STATE_C1 &&
                            (boot_option_idle_override == IDLE_NOMWAIT)) {
                                /*
                                 * In most cases the C1 space_id obtained from
                                 * _CST object is FIXED_HARDWARE access mode.
                                 * But when the option of idle=halt is added,
                                 * the entry_method type should be changed from
                                 * CSTATE_FFH to CSTATE_HALT.
                                 * When the option of idle=nomwait is added,
                                 * the C1 entry_method type should be
                                 * CSTATE_HALT.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        }
                } else {
                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
                                 cx.address);
                }

                if (cx.type == ACPI_STATE_C1)
                        cx.valid = 1;

                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        printk(KERN_WARNING
                               "Limiting number of power states to max (%d)\n",
                               ACPI_PROCESSOR_MAX_POWER);
                        printk(KERN_WARNING
                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                ret = -EFAULT;

end:
        kfree(buffer.pointer);

        return ret;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag = -1;
        static int bm_control_flag = -1;

        if (!cx->address)
                return;

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (bm_check_flag == -1) {
                /* Determine whether bm_check is needed based on CPU */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
                bm_control_flag = pr->flags.bm_control;
        } else {
                pr->flags.bm_check = bm_check_flag;
                pr->flags.bm_control = bm_control_flag;
        }

        if (pr->flags.bm_check) {
                if (!pr->flags.bm_control) {
                        if (pr->flags.has_cst != 1) {
                                /* bus mastering control is necessary */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support requires BM control\n"));
                                return;
                        } else {
                                /* Here we enter C3 without bus mastering */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support without BM control\n"));
                        }
                }
        } else {
                /*
                 * WBINVD should be set in the FADT for C3 to be
                 * supported when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy.
         */
        cx->valid = 1;

        /*
         * On older chipsets, BM_RLD needs to be set
         * in order for Bus Master activity to wake the
         * system from C3.  Newer chipsets handle DMA
         * during C3 automatically and BM_RLD is a NOP.
         * In either case, the proper way to
         * handle BM_RLD is to set it and leave it set.
         */
        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        if (!cx->address)
                                break;
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }
                if (!cx->valid)
                        continue;

                lapic_timer_check_state(i, pr, cx);
                tsc_check_state(cx->type);
                working++;
        }

        lapic_timer_propagate_broadcast(pr);

        return working;
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
        u32 bm_status = 0;

        if (bm_check_disable)
                return 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status) {
                acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
        } else if (errata.piix4.bmisx) {
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always
                 * reflect the true state of bus mastering activity,
                 * forcing us to manually check the BMIDEA bit of each
                 * IDE channel.
                 */
                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                        bm_status = 1;
        }
        return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * The caller disables interrupts before the call and re-enables them
 * after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
        /* Don't trace irqs off for idle */
        stop_critical_timings();
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
        } else if (cx->entry_method == ACPI_CSTATE_HALT) {
                acpi_safe_halt();
        } else {
                /* IO port based C-state */
                inb(cx->address);
                /*
                 * Dummy wait op - must do something useless after P_LVL2
                 * read because chipsets cannot guarantee that STPCLK#
                 * signal gets asserted in time to freeze execution
                 * properly.
                 */
                inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
        start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        pr = __this_cpu_read(processors);

        if (unlikely(!pr))
                return -EINVAL;

        lapic_timer_state_broadcast(pr, cx, 1);
        acpi_idle_do_entry(cx);

        lapic_timer_state_broadcast(pr, cx, 0);

        return index;
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        ACPI_FLUSH_CPU_CACHE();

        while (1) {
                if (cx->entry_method == ACPI_CSTATE_HALT)
                        safe_halt();
                else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        inb(cx->address);
                        /* See comment in acpi_idle_do_entry() */
                        inl(acpi_gbl_FADT.xpm_timer_block.address);
                } else
                        return -ENODEV;
        }

        /* Never reached */
        return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        pr = __this_cpu_read(processors);

        if (unlikely(!pr))
                return -EINVAL;

#ifdef CONFIG_HOTPLUG_CPU
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
#endif

        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
         */
        lapic_timer_state_broadcast(pr, cx, 1);

        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();

        /* Tell the scheduler that we are going deep-idle: */
        sched_clock_idle_sleep_event();
        acpi_idle_do_entry(cx);

        sched_clock_idle_wakeup_event(0);

        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
}

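/*
 * c3_cpu_count tracks how many CPUs are currently in C3, so that bus
 * master arbitration is only disabled (ARB_DIS) once every online CPU
 * has entered C3; c3_lock serializes updates to the counter.
 */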
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        pr = __this_cpu_read(processors);

        if (unlikely(!pr))
                return -EINVAL;

#ifdef CONFIG_HOTPLUG_CPU
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
#endif

        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
                if (drv->safe_state_index >= 0) {
                        return drv->states[drv->safe_state_index].enter(dev,
                                                drv, drv->safe_state_index);
                } else {
                        acpi_safe_halt();
                        return -EBUSY;
                }
        }

        acpi_unlazy_tlb(smp_processor_id());

        /* Tell the scheduler that we are going deep-idle: */
        sched_clock_idle_sleep_event();
        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
         */
        lapic_timer_state_broadcast(pr, cx, 1);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if (pr->flags.bm_check && pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                raw_spin_unlock(&c3_lock);
        } else if (!pr->flags.bm_check) {
                ACPI_FLUSH_CPU_CACHE();
        }

        acpi_idle_do_entry(cx);

        /* Re-enable bus master arbitration */
        if (pr->flags.bm_check && pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }

        sched_clock_idle_wakeup_event(0);

        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
}

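/*
 * Only the name and owner are fixed here; the state table and state
 * count are filled in per-platform by acpi_processor_setup_cpuidle_states().
 */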
struct cpuidle_driver acpi_idle_driver = {
        .name =         "acpi_idle",
        .owner =        THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures the CPUIDLE
 * device, i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
{
        int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;

        if (!pr->flags.power_setup_done)
                return -EINVAL;

        if (pr->flags.power == 0)
                return -EINVAL;

        if (!dev)
                return -EINVAL;

        dev->cpu = pr->id;

        if (max_cstate == 0)
                max_cstate = 1;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                per_cpu(acpi_cstate[count], dev->cpu) = cx;

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        if (!count)
                return -EINVAL;

        return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data, i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
        int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (!pr->flags.power_setup_done)
                return -EINVAL;

        if (pr->flags.power == 0)
                return -EINVAL;

        drv->safe_state_index = -1;
        for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
                drv->states[i].name[0] = '\0';
                drv->states[i].desc[0] = '\0';
        }

        if (max_cstate == 0)
                max_cstate = 1;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                state = &drv->states[count];
                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;

                state->flags = 0;
                switch (cx->type) {
                case ACPI_STATE_C1:
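                        /*
                         * C1 entered through HALT or an I/O port rather
                         * than FFH/MWAIT has no reliable residency
                         * measurement, so flag its timing as invalid.
                         */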
                        if (cx->entry_method != ACPI_CSTATE_FFH)
                                state->flags |= CPUIDLE_FLAG_TIME_INVALID;

                        state->enter = acpi_idle_enter_c1;
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
                        break;

                case ACPI_STATE_C2:
                        state->enter = acpi_idle_enter_simple;
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
                        break;

                case ACPI_STATE_C3:
                        state->enter = pr->flags.bm_check ?
                                       acpi_idle_enter_bm :
                                       acpi_idle_enter_simple;
                        break;
                }

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        drv->state_count = count;

        if (!count)
                return -EINVAL;

        return 0;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
        int ret = 0;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (nocst)
                return -ENODEV;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        dev = per_cpu(acpi_cpuidle_device, pr->id);
        cpuidle_pause_and_lock();
        cpuidle_disable_device(dev);
        acpi_processor_get_power_info(pr);
        if (pr->flags.power) {
                acpi_processor_setup_cpuidle_cx(pr, dev);
                ret = cpuidle_enable_device(dev);
        }
        cpuidle_resume_and_unlock();

        return ret;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int cpu;
        struct acpi_processor *_pr;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (nocst)
                return -ENODEV;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /*
         * FIXME:  Design the ACPI notification to make it once per
         * system instead of once per-cpu.  This condition is a hack
         * to make the code that updates C-States be called once.
         */

        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

                /* Protect against cpu-hotplug */
                get_online_cpus();
                cpuidle_pause_and_lock();

                /* Disable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        dev = per_cpu(acpi_cpuidle_device, cpu);
                        cpuidle_disable_device(dev);
                }

                /* Populate Updated C-state information */
                acpi_processor_get_power_info(pr);
                acpi_processor_setup_cpuidle_states(pr);

                /* Enable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        acpi_processor_get_power_info(_pr);
                        if (_pr->flags.power) {
                                dev = per_cpu(acpi_cpuidle_device, cpu);
                                acpi_processor_setup_cpuidle_cx(_pr, dev);
                                cpuidle_enable_device(dev);
                        }
                }
                cpuidle_resume_and_unlock();
                put_online_cpus();
        }

        return 0;
}

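/*
 * Number of CPUs that registered a cpuidle device with acpi_idle_driver;
 * the driver itself is unregistered when the last device goes away
 * (see acpi_processor_power_exit()).
 */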
static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
        acpi_status status;
        int retval;
        struct cpuidle_device *dev;
        static int first_run;

        if (disabled_by_idle_boot_param())
                return 0;

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                max_cstate = acpi_processor_cstate_check(max_cstate);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
        }

        if (acpi_gbl_FADT.cst_control && !nocst) {
                status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                            acpi_gbl_FADT.cst_control, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Notifying BIOS of _CST ability failed"));
                }
        }

        acpi_processor_get_power_info(pr);
        pr->flags.power_setup_done = 1;

        /*
         * Install the idle handler if processor power management is supported.
         * Note that the previously set idle handler will be used on
         * platforms that only support C1.
         */
        if (pr->flags.power) {
                /* Register acpi_idle_driver if not already registered */
                if (!acpi_processor_registered) {
                        acpi_processor_setup_cpuidle_states(pr);
                        retval = cpuidle_register_driver(&acpi_idle_driver);
                        if (retval)
                                return retval;
                        printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                                        acpi_idle_driver.name);
                }

                dev = kzalloc(sizeof(*dev), GFP_KERNEL);
                if (!dev)
                        return -ENOMEM;
                per_cpu(acpi_cpuidle_device, pr->id) = dev;

                acpi_processor_setup_cpuidle_cx(pr, dev);

                /*
                 * Register the per-cpu cpuidle_device; the cpuidle driver
                 * must already be registered before registering the device.
                 */
                retval = cpuidle_register_device(dev);
                if (retval) {
                        if (acpi_processor_registered == 0)
                                cpuidle_unregister_driver(&acpi_idle_driver);
                        return retval;
                }
                acpi_processor_registered++;
        }
        return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
        struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

        if (disabled_by_idle_boot_param())
                return 0;

        if (pr->flags.power) {
                cpuidle_unregister_device(dev);
                acpi_processor_registered--;
                if (acpi_processor_registered == 0)
                        cpuidle_unregister_driver(&acpi_idle_driver);
        }

        pr->flags.power_setup_done = 0;
        return 0;
}