/* drivers/acpi/osl.c */
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region(struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	/* Bound the formatted output so it cannot overflow the static buffer. */
	vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0)
		printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
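
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller pairs acpi_os_map_iomem() with acpi_os_unmap_iomem() around a
 * short window of MMIO access.  The helper name and the u32-sized access
 * are hypothetical; the block is guarded out so it does not affect the
 * build.
 */
#if 0
static u32 example_read_mapped_dword(acpi_physical_address phys)
{
	void __iomem *virt;
	u32 val;

	virt = acpi_os_map_iomem(phys, sizeof(u32));
	if (!virt)
		return 0;

	val = readl(virt);
	acpi_os_unmap_iomem(virt, sizeof(u32));
	return val;
}
#endif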

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
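
/*
 * Illustrative sketch (hypothetical helper, guarded out of the build):
 * pre-mapping a register described by a generic address structure and
 * then reading it through acpi_os_read_memory(), which can reuse the
 * permanent mapping even from contexts where ioremap() is not allowed.
 */
#if 0
static u64 example_read_gas(struct acpi_generic_address *gas)
{
	u64 addr, val = 0;

	/* copy out the address to cope with possible misalignment */
	memcpy(&addr, &gas->address, sizeof(addr));

	if (acpi_os_map_generic_address(gas))
		return 0;

	acpi_os_read_memory(addr, &val, gas->bit_width);
	acpi_os_unmap_generic_address(gas);
	return val;
}
#endif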

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand.
 * Returns a 64-bit free-running, monotonically increasing timer
 * with 100ns granularity.
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}
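
/*
 * Illustrative sketch (hypothetical, guarded out of the build): measuring
 * an interval in the 100ns units that acpi_os_get_timer() returns, the
 * same units the AML Timer operand uses.
 */
#if 0
static u64 example_elapsed_timer_ticks(void)
{
	u64 start = acpi_os_get_timer();

	udelay(50);				/* some work */
	return acpi_os_get_timer() - start;	/* roughly 500 * 100ns */
}
#endif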

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
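
/*
 * Illustrative sketch (hypothetical port numbers, guarded out of the
 * build): an index/data register pair driven through the port helpers
 * above, the usual pattern for AML SystemIO accesses.
 */
#if 0
static u32 example_indexed_port_read(u8 index)
{
	u32 val;

	acpi_os_write_port(0x1000, index, 8);	/* hypothetical index port */
	acpi_os_read_port(0x1001, &val, 8);	/* hypothetical data port */
	return val;
}
#endif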

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
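
/*
 * Illustrative sketch (hypothetical callback, guarded out of the build):
 * deferring work to the kacpid workqueue through acpi_os_execute().  The
 * DPC structure above is allocated per call, so several of these can be
 * in flight at once.
 */
#if 0
static void example_deferred_fn(void *context)
{
	pr_info("deferred ACPI work ran with context %p\n", context);
}

static void example_schedule_deferred(void)
{
	if (ACPI_FAILURE(acpi_os_execute(OSL_GPE_HANDLER,
					 example_deferred_fn, NULL)))
		pr_err("failed to queue deferred ACPI work\n");
}
#endif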

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}
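
/*
 * Illustrative sketch (hypothetical work item, guarded out of the build):
 * queueing driver work on the ordered hotplug workqueue so it serializes
 * with ACPI hotplug events instead of running on kacpid_wq.
 */
#if 0
static void example_hp_fn(struct work_struct *work)
{
	/* runs ordered with respect to other hotplug work */
}

static DECLARE_WORK(example_hp_work, example_hp_fn);

static void example_queue_hotplug(void)
{
	if (!acpi_queue_hotplug_work(&example_hp_work))
		pr_err("example hotplug work was already queued\n");
}
#endif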

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
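
/*
 * Illustrative sketch (guarded out of the build): the create/wait/signal/
 * delete life cycle ACPICA drives through the semaphore wrappers above.
 */
#if 0
static void example_semaphore_cycle(void)
{
	acpi_handle sem;

	if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
		return;

	if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100 /* ms */)))
		acpi_os_signal_semaphore(sem, 1);

	acpi_os_delete_semaphore(sem);
}
#endif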

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * I/O ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a driver trying to access such a resource will not load
 *   - lax              (1)
 *     -> a driver trying to access such a resource will load, but a
 *        warning is logged that something might go wrong
 *   - no               (0)
 *     -> ACPI OperationRegion resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/*
 * Check for resource conflicts between ACPI OperationRegions and native
 * drivers.
 */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end   = start + n - 1,
		.name  = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
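
/*
 * Illustrative sketch (hypothetical driver and port range, guarded out of
 * the build): a native driver checking for OperationRegion conflicts
 * before claiming an I/O range.  With acpi_enforce_resources=strict a
 * conflict makes acpi_check_region() return -EBUSY.
 */
#if 0
static int example_claim_ports(void)
{
	int ret = acpi_check_region(0x1000, 8, "example");

	if (ret < 0)
		return ret;	/* conflicts with an ACPI OperationRegion */

	if (!request_region(0x1000, 8, "example"))
		return -EBUSY;
	return 0;
}
#endif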

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
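
/*
 * Illustrative sketch (guarded out of the build): the acquire/release
 * pairing expected by the spinlock wrappers above; the returned flags
 * must be passed back on release.
 */
#if 0
static void example_lock_cycle(acpi_spinlock lock)
{
	acpi_cpu_flags flags = acpi_os_acquire_lock(lock);

	/* critical section */
	acpi_os_release_lock(lock, flags);
}
#endif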

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
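
/*
 * Illustrative sketch (guarded out of the build): the cache life cycle as
 * ACPICA drives it through the wrappers above.  Allocation goes through
 * acpi_os_acquire_object(), which on Linux maps to kmem_cache_zalloc().
 */
#if 0
static void example_cache_cycle(void)
{
	acpi_cache_t *cache;
	void *obj;

	if (ACPI_FAILURE(acpi_os_create_cache("example", 64, 16, &cache)))
		return;

	obj = kmem_cache_zalloc(cache, GFP_KERNEL);
	if (obj)
		acpi_os_release_object(cache, obj);

	acpi_os_delete_cache(cache);
}
#endif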
1608
static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
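
/*
 * Illustrative sketch (hypothetical callback, guarded out of the build):
 * registering a prepare-sleep hook; upstream, Xen uses this interface to
 * notify the hypervisor before entering a sleep state.
 */
#if 0
static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
{
	/* return < 0 to abort (AE_ERROR), > 0 to skip the PM1 writes */
	return 0;
}

static void example_register_sleep_hook(void)
{
	acpi_os_set_prepare_sleep(example_prepare_sleep);
}
#endif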

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
					     val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}