/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa = 0;

#ifdef CONFIG_KEXEC
	if (acpi_rsdp && !secure_modules())
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

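/*
 * Illustrative only (not part of the original file): a minimal sketch of the
 * intended pairing of the mapping helpers above, assuming the caller already
 * knows a valid physical address and length ("table_pa" and "table_len" are
 * invented names).  Each successful acpi_os_map_iomem() takes a reference on
 * the underlying page mapping, so it must be balanced by an
 * acpi_os_unmap_iomem() with the same size:
 *
 *	void __iomem *virt = acpi_os_map_iomem(table_pa, table_len);
 *
 *	if (virt) {
 *		u32 sig = readl(virt);	-- read the first dword of the region
 *		acpi_os_unmap_iomem(virt, table_len);
 *	}
 */
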
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

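/*
 * Worked example (illustrative only, not part of the original file): at
 * 100 ns per tick, an AML Timer delta of 10,000 ticks corresponds to 1 ms,
 * and the 64-bit counter (2^64 * 100 ns, roughly 58,000 years) never wraps
 * in practice.
 */
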
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

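/*
 * Illustrative only (not part of the original file): a 32-bit read of a
 * memory-mapped register through the OSL, assuming "reg_pa" is a valid
 * physical address.  The fast path above finds an existing permanent mapping
 * under the RCU read lock; otherwise a temporary ioremap() is set up and
 * torn down around the single access:
 *
 *	u64 val;
 *
 *	if (acpi_os_read_memory(reg_pa, &val, 32) == AE_OK)
 *		-- the low 32 bits of val hold the register contents
 */
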
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

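/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a debugger front-end module might plug into the hooks above.  The ops field
 * names match the function pointers fetched by the helpers; the "my_dbg_*"
 * callback names are invented:
 *
 *	static const struct acpi_debugger_ops my_dbg_ops = {
 *		.create_thread		 = my_dbg_create_thread,
 *		.read_cmd		 = my_dbg_read_cmd,
 *		.write_log		 = my_dbg_write_log,
 *		.wait_command_ready	 = my_dbg_wait_command_ready,
 *		.notify_command_complete = my_dbg_notify_command_complete,
 *	};
 *
 *	ret = acpi_register_debugger(THIS_MODULE, &my_dbg_ops);
 *	...
 *	acpi_unregister_debugger(&my_dbg_ops);
 *
 * Registration fails with -EBUSY if another front-end is already installed.
 */
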
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

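/*
 * Illustrative only (not part of the original file): how ACPICA-side callers
 * are expected to use acpi_os_execute().  The callback and context names are
 * invented; the type argument selects the workqueue (kacpi_notify_wq or
 * kacpid_wq) as implemented above:
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, my_ctx);
 *	if (ACPI_FAILURE(status))
 *		-- my_notify_fn will not run and no memory is leaked
 */
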
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

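/*
 * Illustrative only (not part of the original file): the full OSL semaphore
 * lifecycle as ACPICA drives it -- create, wait with a timeout in
 * milliseconds, signal, delete:
 *
 *	acpi_handle sem;
 *
 *	if (ACPI_SUCCESS(acpi_os_create_semaphore(1, 1, &sem))) {
 *		if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100))) {
 *			-- critical section
 *			acpi_os_signal_semaphore(sem, 1);
 *		}
 *		acpi_os_delete_semaphore(sem);
 *	}
 *
 * Passing ACPI_WAIT_FOREVER as the timeout blocks indefinitely.
 */
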
/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

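/*
 * Illustrative only (not part of the original file): a legacy driver probing
 * a fixed I/O range might check for AML conflicts before claiming it (the
 * 0x295 base, the length of 8 and the "mydrv" name are invented values):
 *
 *	if (acpi_check_region(0x295, 8, "mydrv"))
 *		return -EBUSY;	-- strict mode: an OperationRegion overlaps
 *	if (!request_region(0x295, 8, "mydrv"))
 *		return -EBUSY;
 *
 * With acpi_enforce_resources=lax the check only logs a warning and
 * returns 0.
 */
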
/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

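/*
 * Illustrative only (not part of the original file): the acquire/release
 * pair above mirrors spin_lock_irqsave()/spin_unlock_irqrestore(), except
 * that the saved IRQ flags are returned to ACPICA rather than stored in a
 * caller-provided variable:
 *
 *	acpi_cpu_flags flags = acpi_os_acquire_lock(lockp);
 *	-- critical section, local interrupts disabled
 *	acpi_os_release_lock(lockp, flags);
 */
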
#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

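/*
 * Illustrative only (not part of the original file): the cache interface
 * above maps straight onto kmem_cache, so its expected lifecycle looks like
 * this ("Acpi-Example" and "obj" are invented; real cache names and objects
 * come from ACPICA):
 *
 *	acpi_cache_t *cache;
 *
 *	if (ACPI_SUCCESS(acpi_os_create_cache("Acpi-Example", 64, 16, &cache))) {
 *		-- objects allocated elsewhere are returned with:
 *		acpi_os_release_object(cache, obj);
 *		acpi_os_purge_cache(cache);	-- drop cached objects
 *		acpi_os_delete_cache(cache);
 *	}
 */
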
static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

1658
1659acpi_status acpi_os_terminate(void)
1660{
1661 if (acpi_irq_handler) {
23fe3630 1662 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
d362edaf
MS
1663 acpi_irq_handler);
1664 }
1665
1666 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1667 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1668 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1669 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
a4714a89
RW
1670 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1671 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
d362edaf
MS
1672
1673 destroy_workqueue(kacpid_wq);
1674 destroy_workqueue(kacpi_notify_wq);
1675 destroy_workqueue(kacpi_hotplug_wq);
1676
1677 return AE_OK;
1678}
09f98a82
TL
1679
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

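/*
 * Illustrative only (not part of the original file): platform code can
 * interpose on the final step of the sleep transition by registering a
 * callback here; "my_prepare_sleep" is an invented name.  Per the status
 * mapping above, a negative return aborts the transition (AE_ERROR), a
 * positive one skips the default PM1 control writes (AE_CTRL_SKIP), and 0
 * proceeds normally:
 *
 *	static int my_prepare_sleep(u8 state, u32 pm1a_ctrl, u32 pm1b_ctrl)
 *	{
 *		return 0;
 *	}
 *
 *	acpi_os_set_prepare_sleep(my_prepare_sleep);
 */
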
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}