drivers/acpi/osl.c
1 /*
2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3 *
4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 */
25
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
43
44 #include <asm/io.h>
45 #include <asm/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
47
48 #include "internal.h"
49
50 #define _COMPONENT ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52
53 struct acpi_os_dpc {
54 acpi_osd_exec_callback function;
55 void *context;
56 struct work_struct work;
57 };
58
59 #ifdef ENABLE_DEBUGGER
60 #include <linux/kdb.h>
61
62 /* stuff for debugger support */
63 int acpi_in_debugger;
64 EXPORT_SYMBOL(acpi_in_debugger);
65 #endif /*ENABLE_DEBUGGER */
66
67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
68 u32 pm1b_ctrl);
69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
70 u32 val_b);
71
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76 static struct workqueue_struct *kacpi_hotplug_wq;
77 static bool acpi_os_initialized;
78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
79
80 /*
81 * This list of permanent mappings is for memory that may be accessed from
82 * interrupt context, where we can't do the ioremap().
83 */
84 struct acpi_ioremap {
85 struct list_head list;
86 void __iomem *virt;
87 acpi_physical_address phys;
88 acpi_size size;
89 unsigned long refcount;
90 };
91
92 static LIST_HEAD(acpi_ioremaps);
93 static DEFINE_MUTEX(acpi_ioremap_lock);
94
95 static void __init acpi_osi_setup_late(void);
96
97 /*
98 * The story of _OSI(Linux)
99 *
100 * From pre-history through Linux-2.6.22,
101 * Linux responded TRUE upon a BIOS OSI(Linux) query.
102 *
103 * Unfortunately, reference BIOS writers got wind of this
104 * and put OSI(Linux) in their example code, quickly exposing
105 * this string as ill-conceived and opening the door to
106 * an unbounded number of BIOS incompatibilities.
107 *
108 * For example, OSI(Linux) was used on resume to re-POST a
109 * video card on one system, because Linux at that time
110 * could not do a speedy restore in its native driver.
111 * But then upon gaining quick native restore capability,
112 * Linux has no way to tell the BIOS to skip the time-consuming
113 * POST -- putting Linux at a permanent performance disadvantage.
114 * On another system, the BIOS writer used OSI(Linux)
115 * to infer native OS support for IPMI! On other systems,
116 * OSI(Linux) simply got in the way of Linux claiming to
117 * be compatible with other operating systems, exposing
118 * BIOS issues such as skipped device initialization.
119 *
120 * So "Linux" turned out to be a really poor chose of
121 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
122 *
123 * BIOS writers should NOT query _OSI(Linux) on future systems.
124 * Linux will complain on the console when it sees it, and return FALSE.
125 * Getting Linux to return TRUE for your system requires
126 * a kernel source update to add a DMI entry,
127 * or booting with "acpi_osi=Linux".
128 */
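/*
 * Illustrative sketch (not part of this file) of the DMI-entry route
 * mentioned above; the callback forwards to acpi_dmi_osi_linux() and the
 * vendor/product strings are hypothetical:
 *
 *	static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);	// enable _OSI(Linux) for this box
 *		return 0;
 *	}
 *
 *	static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
 *		{
 *			.callback = dmi_enable_osi_linux,
 *			.ident = "Example Laptop 1234",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 1234"),
 *			},
 *		},
 *		{}
 *	};
 */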
129
130 static struct osi_linux {
131 unsigned int enable:1;
132 unsigned int dmi:1;
133 unsigned int cmdline:1;
134 unsigned int default_disabling:1;
135 } osi_linux = {0, 0, 0, 0};
136
137 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
138 {
139 if (!strcmp("Linux", interface)) {
140
141 printk_once(KERN_NOTICE FW_BUG PREFIX
142 "BIOS _OSI(Linux) query %s%s\n",
143 osi_linux.enable ? "honored" : "ignored",
144 osi_linux.cmdline ? " via cmdline" :
145 osi_linux.dmi ? " via DMI" : "");
146 }
147
148 if (!strcmp("Darwin", interface)) {
149 /*
150 * Apple firmware will behave poorly if it receives positive
151 * answers to "Darwin" and any other OS. Respond positively
152 * to Darwin and then disable all other vendor strings.
153 */
154 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
155 supported = ACPI_UINT32_MAX;
156 }
157
158 return supported;
159 }
160
161 static void __init acpi_request_region (struct acpi_generic_address *gas,
162 unsigned int length, char *desc)
163 {
164 u64 addr;
165
166 /* Handle possible alignment issues */
167 memcpy(&addr, &gas->address, sizeof(addr));
168 if (!addr || !length)
169 return;
170
171 /* Resources are never freed */
172 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
173 request_region(addr, length, desc);
174 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
175 request_mem_region(addr, length, desc);
176 }
177
178 static int __init acpi_reserve_resources(void)
179 {
180 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
181 "ACPI PM1a_EVT_BLK");
182
183 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
184 "ACPI PM1b_EVT_BLK");
185
186 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
187 "ACPI PM1a_CNT_BLK");
188
189 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
190 "ACPI PM1b_CNT_BLK");
191
192 if (acpi_gbl_FADT.pm_timer_length == 4)
193 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
194
195 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
196 "ACPI PM2_CNT_BLK");
197
198 /* Length of GPE blocks must be a multiple of 2 */
199
200 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
201 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
202 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
203
204 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
205 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
206 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
207
208 return 0;
209 }
210 fs_initcall_sync(acpi_reserve_resources);
211
212 void acpi_os_printf(const char *fmt, ...)
213 {
214 va_list args;
215 va_start(args, fmt);
216 acpi_os_vprintf(fmt, args);
217 va_end(args);
218 }
219 EXPORT_SYMBOL(acpi_os_printf);
220
221 void acpi_os_vprintf(const char *fmt, va_list args)
222 {
223 static char buffer[512];
224
225 vsnprintf(buffer, sizeof(buffer), fmt, args);
226
227 #ifdef ENABLE_DEBUGGER
228 if (acpi_in_debugger) {
229 kdb_printf("%s", buffer);
230 } else {
231 printk(KERN_CONT "%s", buffer);
232 }
233 #else
234 if (acpi_debugger_write_log(buffer) < 0)
235 printk(KERN_CONT "%s", buffer);
236 #endif
237 }
238
239 #ifdef CONFIG_KEXEC
240 static unsigned long acpi_rsdp;
241 static int __init setup_acpi_rsdp(char *arg)
242 {
243 if (kstrtoul(arg, 16, &acpi_rsdp))
244 return -EINVAL;
245 return 0;
246 }
247 early_param("acpi_rsdp", setup_acpi_rsdp);
248 #endif
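/*
 * Example (hypothetical address): a kexec'ed kernel that cannot rediscover
 * the RSDP on its own can be handed its physical address on the command
 * line, which acpi_os_get_root_pointer() below honors first:
 *
 *	acpi_rsdp=0x7fe386a0
 */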
249
250 acpi_physical_address __init acpi_os_get_root_pointer(void)
251 {
252 #ifdef CONFIG_KEXEC
253 if (acpi_rsdp)
254 return acpi_rsdp;
255 #endif
256
257 if (efi_enabled(EFI_CONFIG_TABLES)) {
258 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
259 return efi.acpi20;
260 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
261 return efi.acpi;
262 else {
263 printk(KERN_ERR PREFIX
264 "System description tables not found\n");
265 return 0;
266 }
267 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
268 acpi_physical_address pa = 0;
269
270 acpi_find_root_pointer(&pa);
271 return pa;
272 }
273
274 return 0;
275 }
276
277 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
278 static struct acpi_ioremap *
279 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
280 {
281 struct acpi_ioremap *map;
282
283 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
284 if (map->phys <= phys &&
285 phys + size <= map->phys + map->size)
286 return map;
287
288 return NULL;
289 }
290
291 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
292 static void __iomem *
293 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
294 {
295 struct acpi_ioremap *map;
296
297 map = acpi_map_lookup(phys, size);
298 if (map)
299 return map->virt + (phys - map->phys);
300
301 return NULL;
302 }
303
304 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
305 {
306 struct acpi_ioremap *map;
307 void __iomem *virt = NULL;
308
309 mutex_lock(&acpi_ioremap_lock);
310 map = acpi_map_lookup(phys, size);
311 if (map) {
312 virt = map->virt + (phys - map->phys);
313 map->refcount++;
314 }
315 mutex_unlock(&acpi_ioremap_lock);
316 return virt;
317 }
318 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
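/*
 * Usage sketch (illustrative, not from this file): callers can look up and
 * pin an already-existing ACPI mapping instead of creating a second one; a
 * NULL return means no such mapping covers the range. The address and size
 * are hypothetical:
 *
 *	void __iomem *p = acpi_os_get_iomem(0xfed40000, 4);
 *
 *	if (p) {
 *		u32 val = readl(p);
 *		acpi_os_unmap_iomem(p, 4);	// drop the reference taken above
 *	}
 */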
319
320 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
321 static struct acpi_ioremap *
322 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
323 {
324 struct acpi_ioremap *map;
325
326 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
327 if (map->virt <= virt &&
328 virt + size <= map->virt + map->size)
329 return map;
330
331 return NULL;
332 }
333
334 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
335 /* ioremap will take care of cache attributes */
336 #define should_use_kmap(pfn) 0
337 #else
338 #define should_use_kmap(pfn) page_is_ram(pfn)
339 #endif
340
341 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
342 {
343 unsigned long pfn;
344
345 pfn = pg_off >> PAGE_SHIFT;
346 if (should_use_kmap(pfn)) {
347 if (pg_sz > PAGE_SIZE)
348 return NULL;
349 return (void __iomem __force *)kmap(pfn_to_page(pfn));
350 } else
351 return acpi_os_ioremap(pg_off, pg_sz);
352 }
353
354 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
355 {
356 unsigned long pfn;
357
358 pfn = pg_off >> PAGE_SHIFT;
359 if (should_use_kmap(pfn))
360 kunmap(pfn_to_page(pfn));
361 else
362 iounmap(vaddr);
363 }
364
365 /**
366 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
367 * @phys: Start of the physical address range to map.
368 * @size: Size of the physical address range to map.
369 *
370 * Look up the given physical address range in the list of existing ACPI memory
371 * mappings. If found, get a reference to it and return a pointer to it (its
372 * virtual address). If not found, map it, add it to that list and return a
373 * pointer to it.
374 *
375 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
376 * routine simply calls __acpi_map_table() to get the job done.
377 */
378 void __iomem *__init_refok
379 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
380 {
381 struct acpi_ioremap *map;
382 void __iomem *virt;
383 acpi_physical_address pg_off;
384 acpi_size pg_sz;
385
386 if (phys > ULONG_MAX) {
387 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
388 return NULL;
389 }
390
391 if (!acpi_gbl_permanent_mmap)
392 return __acpi_map_table((unsigned long)phys, size);
393
394 mutex_lock(&acpi_ioremap_lock);
395 /* Check if there's a suitable mapping already. */
396 map = acpi_map_lookup(phys, size);
397 if (map) {
398 map->refcount++;
399 goto out;
400 }
401
402 map = kzalloc(sizeof(*map), GFP_KERNEL);
403 if (!map) {
404 mutex_unlock(&acpi_ioremap_lock);
405 return NULL;
406 }
407
408 pg_off = round_down(phys, PAGE_SIZE);
409 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
410 virt = acpi_map(pg_off, pg_sz);
411 if (!virt) {
412 mutex_unlock(&acpi_ioremap_lock);
413 kfree(map);
414 return NULL;
415 }
416
417 INIT_LIST_HEAD(&map->list);
418 map->virt = virt;
419 map->phys = pg_off;
420 map->size = pg_sz;
421 map->refcount = 1;
422
423 list_add_tail_rcu(&map->list, &acpi_ioremaps);
424
425 out:
426 mutex_unlock(&acpi_ioremap_lock);
427 return map->virt + (phys - map->phys);
428 }
429 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
430
431 void *__init_refok
432 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
433 {
434 return (void *)acpi_os_map_iomem(phys, size);
435 }
436 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
437
438 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
439 {
440 if (!--map->refcount)
441 list_del_rcu(&map->list);
442 }
443
444 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
445 {
446 if (!map->refcount) {
447 synchronize_rcu_expedited();
448 acpi_unmap(map->phys, map->virt);
449 kfree(map);
450 }
451 }
452
453 /**
454 * acpi_os_unmap_iomem - Drop a memory mapping reference.
455 * @virt: Start of the address range to drop a reference to.
456 * @size: Size of the address range to drop a reference to.
457 *
458 * Look up the given virtual address range in the list of existing ACPI memory
459 * mappings, drop a reference to it and unmap it if there are no more active
460 * references to it.
461 *
462 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
463 * routine simply calls __acpi_unmap_table() to get the job done. Since
464 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
465 * here.
466 */
467 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
468 {
469 struct acpi_ioremap *map;
470
471 if (!acpi_gbl_permanent_mmap) {
472 __acpi_unmap_table(virt, size);
473 return;
474 }
475
476 mutex_lock(&acpi_ioremap_lock);
477 map = acpi_map_lookup_virt(virt, size);
478 if (!map) {
479 mutex_unlock(&acpi_ioremap_lock);
480 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
481 return;
482 }
483 acpi_os_drop_map_ref(map);
484 mutex_unlock(&acpi_ioremap_lock);
485
486 acpi_os_map_cleanup(map);
487 }
488 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
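/*
 * Usage sketch (illustrative, not from this file) of the map/unmap pair;
 * the physical address and length are hypothetical:
 *
 *	void __iomem *virt;
 *
 *	virt = acpi_os_map_iomem(0xfed40000, 0x80);
 *	if (!virt)
 *		return AE_NO_MEMORY;
 *	// ... access the region with readb()/readl() ...
 *	acpi_os_unmap_iomem(virt, 0x80);
 */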
489
490 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
491 {
492 return acpi_os_unmap_iomem((void __iomem *)virt, size);
493 }
494 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
495
496 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
497 {
498 if (!acpi_gbl_permanent_mmap)
499 __acpi_unmap_table(virt, size);
500 }
501
502 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
503 {
504 u64 addr;
505 void __iomem *virt;
506
507 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
508 return 0;
509
510 /* Handle possible alignment issues */
511 memcpy(&addr, &gas->address, sizeof(addr));
512 if (!addr || !gas->bit_width)
513 return -EINVAL;
514
515 virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
516 if (!virt)
517 return -EIO;
518
519 return 0;
520 }
521 EXPORT_SYMBOL(acpi_os_map_generic_address);
522
523 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
524 {
525 u64 addr;
526 struct acpi_ioremap *map;
527
528 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
529 return;
530
531 /* Handle possible alignment issues */
532 memcpy(&addr, &gas->address, sizeof(addr));
533 if (!addr || !gas->bit_width)
534 return;
535
536 mutex_lock(&acpi_ioremap_lock);
537 map = acpi_map_lookup(addr, gas->bit_width / 8);
538 if (!map) {
539 mutex_unlock(&acpi_ioremap_lock);
540 return;
541 }
542 acpi_os_drop_map_ref(map);
543 mutex_unlock(&acpi_ioremap_lock);
544
545 acpi_os_map_cleanup(map);
546 }
547 EXPORT_SYMBOL(acpi_os_unmap_generic_address);
548
549 #ifdef ACPI_FUTURE_USAGE
550 acpi_status
551 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
552 {
553 if (!phys || !virt)
554 return AE_BAD_PARAMETER;
555
556 *phys = virt_to_phys(virt);
557
558 return AE_OK;
559 }
560 #endif
561
562 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
563 static bool acpi_rev_override;
564
565 int __init acpi_rev_override_setup(char *str)
566 {
567 acpi_rev_override = true;
568 return 1;
569 }
570 __setup("acpi_rev_override", acpi_rev_override_setup);
571 #else
572 #define acpi_rev_override false
573 #endif
574
575 #define ACPI_MAX_OVERRIDE_LEN 100
576
577 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
578
579 acpi_status
580 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
581 char **new_val)
582 {
583 if (!init_val || !new_val)
584 return AE_BAD_PARAMETER;
585
586 *new_val = NULL;
587 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
588 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
589 acpi_os_name);
590 *new_val = acpi_os_name;
591 }
592
593 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
594 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
595 *new_val = (char *)5;
596 }
597
598 return AE_OK;
599 }
600
601 static irqreturn_t acpi_irq(int irq, void *dev_id)
602 {
603 u32 handled;
604
605 handled = (*acpi_irq_handler) (acpi_irq_context);
606
607 if (handled) {
608 acpi_irq_handled++;
609 return IRQ_HANDLED;
610 } else {
611 acpi_irq_not_handled++;
612 return IRQ_NONE;
613 }
614 }
615
616 acpi_status
617 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
618 void *context)
619 {
620 unsigned int irq;
621
622 acpi_irq_stats_init();
623
624 /*
625 * ACPI interrupts different from the SCI in our copy of the FADT are
626 * not supported.
627 */
628 if (gsi != acpi_gbl_FADT.sci_interrupt)
629 return AE_BAD_PARAMETER;
630
631 if (acpi_irq_handler)
632 return AE_ALREADY_ACQUIRED;
633
634 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
635 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
636 gsi);
637 return AE_OK;
638 }
639
640 acpi_irq_handler = handler;
641 acpi_irq_context = context;
642 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
643 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
644 acpi_irq_handler = NULL;
645 return AE_NOT_ACQUIRED;
646 }
647 acpi_sci_irq = irq;
648
649 return AE_OK;
650 }
651
652 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
653 {
654 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
655 return AE_BAD_PARAMETER;
656
657 free_irq(acpi_sci_irq, acpi_irq);
658 acpi_irq_handler = NULL;
659 acpi_sci_irq = INVALID_ACPI_IRQ;
660
661 return AE_OK;
662 }
663
664 /*
665 * Running in interpreter thread context, safe to sleep
666 */
667
668 void acpi_os_sleep(u64 ms)
669 {
670 msleep(ms);
671 }
672
673 void acpi_os_stall(u32 us)
674 {
675 while (us) {
676 u32 delay = 1000;
677
678 if (delay > us)
679 delay = us;
680 udelay(delay);
681 touch_nmi_watchdog();
682 us -= delay;
683 }
684 }
685
686 /*
687 * Support ACPI 3.0 AML Timer operand
688 * Returns 64-bit free-running, monotonically increasing timer
689 * with 100ns granularity
690 */
691 u64 acpi_os_get_timer(void)
692 {
693 u64 time_ns = ktime_to_ns(ktime_get());
694 do_div(time_ns, 100);
695 return time_ns;
696 }
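/*
 * Worked example: ktime_get() yields nanoseconds and one AML Timer tick is
 * 100 ns, so one second (1,000,000,000 ns) becomes 10,000,000 ticks after
 * the do_div() by 100.
 */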
697
698 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
699 {
700 u32 dummy;
701
702 if (!value)
703 value = &dummy;
704
705 *value = 0;
706 if (width <= 8) {
707 *(u8 *) value = inb(port);
708 } else if (width <= 16) {
709 *(u16 *) value = inw(port);
710 } else if (width <= 32) {
711 *(u32 *) value = inl(port);
712 } else {
713 BUG();
714 }
715
716 return AE_OK;
717 }
718
719 EXPORT_SYMBOL(acpi_os_read_port);
720
721 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
722 {
723 if (width <= 8) {
724 outb(value, port);
725 } else if (width <= 16) {
726 outw(value, port);
727 } else if (width <= 32) {
728 outl(value, port);
729 } else {
730 BUG();
731 }
732
733 return AE_OK;
734 }
735
736 EXPORT_SYMBOL(acpi_os_write_port);
737
738 acpi_status
739 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
740 {
741 void __iomem *virt_addr;
742 unsigned int size = width / 8;
743 bool unmap = false;
744 u64 dummy;
745
746 rcu_read_lock();
747 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
748 if (!virt_addr) {
749 rcu_read_unlock();
750 virt_addr = acpi_os_ioremap(phys_addr, size);
751 if (!virt_addr)
752 return AE_BAD_ADDRESS;
753 unmap = true;
754 }
755
756 if (!value)
757 value = &dummy;
758
759 switch (width) {
760 case 8:
761 *(u8 *) value = readb(virt_addr);
762 break;
763 case 16:
764 *(u16 *) value = readw(virt_addr);
765 break;
766 case 32:
767 *(u32 *) value = readl(virt_addr);
768 break;
769 case 64:
770 *(u64 *) value = readq(virt_addr);
771 break;
772 default:
773 BUG();
774 }
775
776 if (unmap)
777 iounmap(virt_addr);
778 else
779 rcu_read_unlock();
780
781 return AE_OK;
782 }
783
784 acpi_status
785 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
786 {
787 void __iomem *virt_addr;
788 unsigned int size = width / 8;
789 bool unmap = false;
790
791 rcu_read_lock();
792 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
793 if (!virt_addr) {
794 rcu_read_unlock();
795 virt_addr = acpi_os_ioremap(phys_addr, size);
796 if (!virt_addr)
797 return AE_BAD_ADDRESS;
798 unmap = true;
799 }
800
801 switch (width) {
802 case 8:
803 writeb(value, virt_addr);
804 break;
805 case 16:
806 writew(value, virt_addr);
807 break;
808 case 32:
809 writel(value, virt_addr);
810 break;
811 case 64:
812 writeq(value, virt_addr);
813 break;
814 default:
815 BUG();
816 }
817
818 if (unmap)
819 iounmap(virt_addr);
820 else
821 rcu_read_unlock();
822
823 return AE_OK;
824 }
825
826 acpi_status
827 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
828 u64 *value, u32 width)
829 {
830 int result, size;
831 u32 value32;
832
833 if (!value)
834 return AE_BAD_PARAMETER;
835
836 switch (width) {
837 case 8:
838 size = 1;
839 break;
840 case 16:
841 size = 2;
842 break;
843 case 32:
844 size = 4;
845 break;
846 default:
847 return AE_ERROR;
848 }
849
850 result = raw_pci_read(pci_id->segment, pci_id->bus,
851 PCI_DEVFN(pci_id->device, pci_id->function),
852 reg, size, &value32);
853 *value = value32;
854
855 return (result ? AE_ERROR : AE_OK);
856 }
857
858 acpi_status
859 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
860 u64 value, u32 width)
861 {
862 int result, size;
863
864 switch (width) {
865 case 8:
866 size = 1;
867 break;
868 case 16:
869 size = 2;
870 break;
871 case 32:
872 size = 4;
873 break;
874 default:
875 return AE_ERROR;
876 }
877
878 result = raw_pci_write(pci_id->segment, pci_id->bus,
879 PCI_DEVFN(pci_id->device, pci_id->function),
880 reg, size, value);
881
882 return (result ? AE_ERROR : AE_OK);
883 }
884
885 static void acpi_os_execute_deferred(struct work_struct *work)
886 {
887 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
888
889 dpc->function(dpc->context);
890 kfree(dpc);
891 }
892
893 #ifdef CONFIG_ACPI_DEBUGGER
894 static struct acpi_debugger acpi_debugger;
895 static bool acpi_debugger_initialized;
896
897 int acpi_register_debugger(struct module *owner,
898 const struct acpi_debugger_ops *ops)
899 {
900 int ret = 0;
901
902 mutex_lock(&acpi_debugger.lock);
903 if (acpi_debugger.ops) {
904 ret = -EBUSY;
905 goto err_lock;
906 }
907
908 acpi_debugger.owner = owner;
909 acpi_debugger.ops = ops;
910
911 err_lock:
912 mutex_unlock(&acpi_debugger.lock);
913 return ret;
914 }
915 EXPORT_SYMBOL(acpi_register_debugger);
916
917 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
918 {
919 mutex_lock(&acpi_debugger.lock);
920 if (ops == acpi_debugger.ops) {
921 acpi_debugger.ops = NULL;
922 acpi_debugger.owner = NULL;
923 }
924 mutex_unlock(&acpi_debugger.lock);
925 }
926 EXPORT_SYMBOL(acpi_unregister_debugger);
927
928 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
929 {
930 int ret;
931 int (*func)(acpi_osd_exec_callback, void *);
932 struct module *owner;
933
934 if (!acpi_debugger_initialized)
935 return -ENODEV;
936 mutex_lock(&acpi_debugger.lock);
937 if (!acpi_debugger.ops) {
938 ret = -ENODEV;
939 goto err_lock;
940 }
941 if (!try_module_get(acpi_debugger.owner)) {
942 ret = -ENODEV;
943 goto err_lock;
944 }
945 func = acpi_debugger.ops->create_thread;
946 owner = acpi_debugger.owner;
947 mutex_unlock(&acpi_debugger.lock);
948
949 ret = func(function, context);
950
951 mutex_lock(&acpi_debugger.lock);
952 module_put(owner);
953 err_lock:
954 mutex_unlock(&acpi_debugger.lock);
955 return ret;
956 }
957
958 ssize_t acpi_debugger_write_log(const char *msg)
959 {
960 ssize_t ret;
961 ssize_t (*func)(const char *);
962 struct module *owner;
963
964 if (!acpi_debugger_initialized)
965 return -ENODEV;
966 mutex_lock(&acpi_debugger.lock);
967 if (!acpi_debugger.ops) {
968 ret = -ENODEV;
969 goto err_lock;
970 }
971 if (!try_module_get(acpi_debugger.owner)) {
972 ret = -ENODEV;
973 goto err_lock;
974 }
975 func = acpi_debugger.ops->write_log;
976 owner = acpi_debugger.owner;
977 mutex_unlock(&acpi_debugger.lock);
978
979 ret = func(msg);
980
981 mutex_lock(&acpi_debugger.lock);
982 module_put(owner);
983 err_lock:
984 mutex_unlock(&acpi_debugger.lock);
985 return ret;
986 }
987
988 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
989 {
990 ssize_t ret;
991 ssize_t (*func)(char *, size_t);
992 struct module *owner;
993
994 if (!acpi_debugger_initialized)
995 return -ENODEV;
996 mutex_lock(&acpi_debugger.lock);
997 if (!acpi_debugger.ops) {
998 ret = -ENODEV;
999 goto err_lock;
1000 }
1001 if (!try_module_get(acpi_debugger.owner)) {
1002 ret = -ENODEV;
1003 goto err_lock;
1004 }
1005 func = acpi_debugger.ops->read_cmd;
1006 owner = acpi_debugger.owner;
1007 mutex_unlock(&acpi_debugger.lock);
1008
1009 ret = func(buffer, buffer_length);
1010
1011 mutex_lock(&acpi_debugger.lock);
1012 module_put(owner);
1013 err_lock:
1014 mutex_unlock(&acpi_debugger.lock);
1015 return ret;
1016 }
1017
1018 int acpi_debugger_wait_command_ready(void)
1019 {
1020 int ret;
1021 int (*func)(bool, char *, size_t);
1022 struct module *owner;
1023
1024 if (!acpi_debugger_initialized)
1025 return -ENODEV;
1026 mutex_lock(&acpi_debugger.lock);
1027 if (!acpi_debugger.ops) {
1028 ret = -ENODEV;
1029 goto err_lock;
1030 }
1031 if (!try_module_get(acpi_debugger.owner)) {
1032 ret = -ENODEV;
1033 goto err_lock;
1034 }
1035 func = acpi_debugger.ops->wait_command_ready;
1036 owner = acpi_debugger.owner;
1037 mutex_unlock(&acpi_debugger.lock);
1038
1039 ret = func(acpi_gbl_method_executing,
1040 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
1041
1042 mutex_lock(&acpi_debugger.lock);
1043 module_put(owner);
1044 err_lock:
1045 mutex_unlock(&acpi_debugger.lock);
1046 return ret;
1047 }
1048
1049 int acpi_debugger_notify_command_complete(void)
1050 {
1051 int ret;
1052 int (*func)(void);
1053 struct module *owner;
1054
1055 if (!acpi_debugger_initialized)
1056 return -ENODEV;
1057 mutex_lock(&acpi_debugger.lock);
1058 if (!acpi_debugger.ops) {
1059 ret = -ENODEV;
1060 goto err_lock;
1061 }
1062 if (!try_module_get(acpi_debugger.owner)) {
1063 ret = -ENODEV;
1064 goto err_lock;
1065 }
1066 func = acpi_debugger.ops->notify_command_complete;
1067 owner = acpi_debugger.owner;
1068 mutex_unlock(&acpi_debugger.lock);
1069
1070 ret = func();
1071
1072 mutex_lock(&acpi_debugger.lock);
1073 module_put(owner);
1074 err_lock:
1075 mutex_unlock(&acpi_debugger.lock);
1076 return ret;
1077 }
1078
1079 int __init acpi_debugger_init(void)
1080 {
1081 mutex_init(&acpi_debugger.lock);
1082 acpi_debugger_initialized = true;
1083 return 0;
1084 }
1085 #endif
1086
1087 /*******************************************************************************
1088 *
1089 * FUNCTION: acpi_os_execute
1090 *
1091 * PARAMETERS: Type - Type of the callback
1092 * Function - Function to be executed
1093 * Context - Function parameters
1094 *
1095 * RETURN: Status
1096 *
1097 * DESCRIPTION: Depending on type, either queues function for deferred execution or
1098 * immediately executes function on a separate thread.
1099 *
1100 ******************************************************************************/
1101
1102 acpi_status acpi_os_execute(acpi_execute_type type,
1103 acpi_osd_exec_callback function, void *context)
1104 {
1105 acpi_status status = AE_OK;
1106 struct acpi_os_dpc *dpc;
1107 struct workqueue_struct *queue;
1108 int ret;
1109 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1110 "Scheduling function [%p(%p)] for deferred execution.\n",
1111 function, context));
1112
1113 if (type == OSL_DEBUGGER_MAIN_THREAD) {
1114 ret = acpi_debugger_create_thread(function, context);
1115 if (ret) {
1116 pr_err("Call to kthread_create() failed.\n");
1117 status = AE_ERROR;
1118 }
1119 goto out_thread;
1120 }
1121
1122 /*
1123 * Allocate/initialize DPC structure. Note that this memory will be
1124 * freed by the callee. The kernel handles the work_struct list in a
1125 * way that allows us to also free its memory inside the callee.
1126 * Because we may want to schedule several tasks with different
1127 * parameters we can't use the approach some kernel code uses of
1128 * having a static work_struct.
1129 */
1130
1131 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1132 if (!dpc)
1133 return AE_NO_MEMORY;
1134
1135 dpc->function = function;
1136 dpc->context = context;
1137
1138 /*
1139 * To prevent lockdep from complaining unnecessarily, make sure that
1140 * there is a different static lockdep key for each workqueue by using
1141 * INIT_WORK() for each of them separately.
1142 */
1143 if (type == OSL_NOTIFY_HANDLER) {
1144 queue = kacpi_notify_wq;
1145 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1146 } else if (type == OSL_GPE_HANDLER) {
1147 queue = kacpid_wq;
1148 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1149 } else {
1150 pr_err("Unsupported os_execute type %d.\n", type);
1151 status = AE_ERROR;
1152 }
1153
1154 if (ACPI_FAILURE(status))
1155 goto err_workqueue;
1156
1157 /*
1158 * On some machines, a software-initiated SMI causes corruption unless
1159 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
1160 * typically it's done in GPE-related methods that are run via
1161 * workqueues, so we can avoid the known corruption cases by always
1162 * queueing on CPU 0.
1163 */
1164 ret = queue_work_on(0, queue, &dpc->work);
1165 if (!ret) {
1166 printk(KERN_ERR PREFIX
1167 "Call to queue_work() failed.\n");
1168 status = AE_ERROR;
1169 }
1170 err_workqueue:
1171 if (ACPI_FAILURE(status))
1172 kfree(dpc);
1173 out_thread:
1174 return status;
1175 }
1176 EXPORT_SYMBOL(acpi_os_execute);
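/*
 * Usage sketch (illustrative, not from this file): deferring work to the
 * kacpid workqueue through this interface; the callback and its context
 * are hypothetical:
 *
 *	static void example_gpe_callback(void *context)
 *	{
 *		// runs later, in process context, queued on CPU 0
 *	}
 *
 *	acpi_status st = acpi_os_execute(OSL_GPE_HANDLER,
 *					 example_gpe_callback, NULL);
 *	if (ACPI_FAILURE(st))
 *		pr_err("deferred execution failed\n");
 */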
1177
1178 void acpi_os_wait_events_complete(void)
1179 {
1180 /*
1181 * Make sure the GPE handler or the fixed event handler is not used
1182 * on another CPU after removal.
1183 */
1184 if (acpi_sci_irq_valid())
1185 synchronize_hardirq(acpi_sci_irq);
1186 flush_workqueue(kacpid_wq);
1187 flush_workqueue(kacpi_notify_wq);
1188 }
1189
1190 struct acpi_hp_work {
1191 struct work_struct work;
1192 struct acpi_device *adev;
1193 u32 src;
1194 };
1195
1196 static void acpi_hotplug_work_fn(struct work_struct *work)
1197 {
1198 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1199
1200 acpi_os_wait_events_complete();
1201 acpi_device_hotplug(hpw->adev, hpw->src);
1202 kfree(hpw);
1203 }
1204
1205 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1206 {
1207 struct acpi_hp_work *hpw;
1208
1209 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1210 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1211 adev, src));
1212
1213 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1214 if (!hpw)
1215 return AE_NO_MEMORY;
1216
1217 INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1218 hpw->adev = adev;
1219 hpw->src = src;
1220 /*
1221 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
1222 * the hotplug code may call driver .remove() functions, which may
1223 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1224 * these workqueues.
1225 */
1226 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
1227 kfree(hpw);
1228 return AE_ERROR;
1229 }
1230 return AE_OK;
1231 }
1232
1233 bool acpi_queue_hotplug_work(struct work_struct *work)
1234 {
1235 return queue_work(kacpi_hotplug_wq, work);
1236 }
1237
1238 acpi_status
1239 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1240 {
1241 struct semaphore *sem = NULL;
1242
1243 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1244 if (!sem)
1245 return AE_NO_MEMORY;
1246
1247 sema_init(sem, initial_units);
1248
1249 *handle = (acpi_handle *) sem;
1250
1251 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1252 *handle, initial_units));
1253
1254 return AE_OK;
1255 }
1256
1257 /*
1258 * TODO: A better way to delete semaphores? Linux doesn't have a
1259 * 'delete_semaphore()' function -- may result in an invalid
1260 * pointer dereference for non-synchronized consumers. Should
1261 * we at least check for blocked threads and signal/cancel them?
1262 */
1263
1264 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1265 {
1266 struct semaphore *sem = (struct semaphore *)handle;
1267
1268 if (!sem)
1269 return AE_BAD_PARAMETER;
1270
1271 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1272
1273 BUG_ON(!list_empty(&sem->wait_list));
1274 kfree(sem);
1275 sem = NULL;
1276
1277 return AE_OK;
1278 }
1279
1280 /*
1281 * TODO: Support for units > 1?
1282 */
1283 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1284 {
1285 acpi_status status = AE_OK;
1286 struct semaphore *sem = (struct semaphore *)handle;
1287 long jiffies;
1288 int ret = 0;
1289
1290 if (!acpi_os_initialized)
1291 return AE_OK;
1292
1293 if (!sem || (units < 1))
1294 return AE_BAD_PARAMETER;
1295
1296 if (units > 1)
1297 return AE_SUPPORT;
1298
1299 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1300 handle, units, timeout));
1301
1302 if (timeout == ACPI_WAIT_FOREVER)
1303 jiffies = MAX_SCHEDULE_TIMEOUT;
1304 else
1305 jiffies = msecs_to_jiffies(timeout);
1306
1307 ret = down_timeout(sem, jiffies);
1308 if (ret)
1309 status = AE_TIME;
1310
1311 if (ACPI_FAILURE(status)) {
1312 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1313 "Failed to acquire semaphore[%p|%d|%d], %s",
1314 handle, units, timeout,
1315 acpi_format_exception(status)));
1316 } else {
1317 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1318 "Acquired semaphore[%p|%d|%d]", handle,
1319 units, timeout));
1320 }
1321
1322 return status;
1323 }
1324
1325 /*
1326 * TODO: Support for units > 1?
1327 */
1328 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1329 {
1330 struct semaphore *sem = (struct semaphore *)handle;
1331
1332 if (!acpi_os_initialized)
1333 return AE_OK;
1334
1335 if (!sem || (units < 1))
1336 return AE_BAD_PARAMETER;
1337
1338 if (units > 1)
1339 return AE_SUPPORT;
1340
1341 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1342 units));
1343
1344 up(sem);
1345
1346 return AE_OK;
1347 }
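/*
 * Lifecycle sketch (illustrative, not from this file): how ACPICA's
 * create/wait/signal/delete hooks pair up; the units and timeout values
 * are hypothetical:
 *
 *	acpi_handle sem;
 *
 *	if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
 *		return AE_NO_MEMORY;
 *	if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100))) {	// 100 ms
 *		// ... critical section ...
 *		acpi_os_signal_semaphore(sem, 1);
 *	}
 *	acpi_os_delete_semaphore(sem);
 */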
1348
1349 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1350 {
1351 #ifdef ENABLE_DEBUGGER
1352 if (acpi_in_debugger) {
1353 u32 chars;
1354
1355 kdb_read(buffer, buffer_length);
1356
357 /* remove the trailing CR that kdb includes */
1358 chars = strlen(buffer) - 1;
1359 buffer[chars] = '\0';
1360 }
1361 #else
1362 int ret;
1363
1364 ret = acpi_debugger_read_cmd(buffer, buffer_length);
1365 if (ret < 0)
1366 return AE_ERROR;
1367 if (bytes_read)
1368 *bytes_read = ret;
1369 #endif
1370
1371 return AE_OK;
1372 }
1373 EXPORT_SYMBOL(acpi_os_get_line);
1374
1375 acpi_status acpi_os_wait_command_ready(void)
1376 {
1377 int ret;
1378
1379 ret = acpi_debugger_wait_command_ready();
1380 if (ret < 0)
1381 return AE_ERROR;
1382 return AE_OK;
1383 }
1384
1385 acpi_status acpi_os_notify_command_complete(void)
1386 {
1387 int ret;
1388
1389 ret = acpi_debugger_notify_command_complete();
1390 if (ret < 0)
1391 return AE_ERROR;
1392 return AE_OK;
1393 }
1394
1395 acpi_status acpi_os_signal(u32 function, void *info)
1396 {
1397 switch (function) {
1398 case ACPI_SIGNAL_FATAL:
1399 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1400 break;
1401 case ACPI_SIGNAL_BREAKPOINT:
1402 /*
1403 * AML Breakpoint
1404 * ACPI spec. says to treat it as a NOP unless
1405 * you are debugging. So if/when we integrate
1406 * AML debugger into the kernel debugger its
1407 * hook will go here. But until then it is
1408 * not useful to print anything on breakpoints.
1409 */
1410 break;
1411 default:
1412 break;
1413 }
1414
1415 return AE_OK;
1416 }
1417
1418 static int __init acpi_os_name_setup(char *str)
1419 {
1420 char *p = acpi_os_name;
1421 int count = ACPI_MAX_OVERRIDE_LEN - 1;
1422
1423 if (!str || !*str)
1424 return 0;
1425
1426 for (; count-- && *str; str++) {
1427 if (isalnum(*str) || *str == ' ' || *str == ':')
1428 *p++ = *str;
1429 else if (*str == '\'' || *str == '"')
1430 continue;
1431 else
1432 break;
1433 }
1434 *p = 0;
1435
1436 return 1;
1437
1438 }
1439
1440 __setup("acpi_os_name=", acpi_os_name_setup);
1441
1442 #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
1443 #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */
1444
1445 struct osi_setup_entry {
1446 char string[OSI_STRING_LENGTH_MAX];
1447 bool enable;
1448 };
1449
1450 static struct osi_setup_entry
1451 osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
1452 {"Module Device", true},
1453 {"Processor Device", true},
1454 {"3.0 _SCP Extensions", true},
1455 {"Processor Aggregator Device", true},
1456 };
1457
1458 void __init acpi_osi_setup(char *str)
1459 {
1460 struct osi_setup_entry *osi;
1461 bool enable = true;
1462 int i;
1463
1464 if (!acpi_gbl_create_osi_method)
1465 return;
1466
1467 if (str == NULL || *str == '\0') {
1468 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1469 acpi_gbl_create_osi_method = FALSE;
1470 return;
1471 }
1472
1473 if (*str == '!') {
1474 str++;
1475 if (*str == '\0') {
1476 osi_linux.default_disabling = 1;
1477 return;
1478 } else if (*str == '*') {
1479 acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
1480 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1481 osi = &osi_setup_entries[i];
1482 osi->enable = false;
1483 }
1484 return;
1485 }
1486 enable = false;
1487 }
1488
1489 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1490 osi = &osi_setup_entries[i];
1491 if (!strcmp(osi->string, str)) {
1492 osi->enable = enable;
1493 break;
1494 } else if (osi->string[0] == '\0') {
1495 osi->enable = enable;
1496 strscpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1497 break;
1498 }
1499 }
1500 }
1501
1502 static void __init set_osi_linux(unsigned int enable)
1503 {
1504 if (osi_linux.enable != enable)
1505 osi_linux.enable = enable;
1506
1507 if (osi_linux.enable)
1508 acpi_osi_setup("Linux");
1509 else
1510 acpi_osi_setup("!Linux");
1511
1512 return;
1513 }
1514
1515 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1516 {
1517 osi_linux.cmdline = 1; /* cmdline set the default and override DMI */
1518 osi_linux.dmi = 0;
1519 set_osi_linux(enable);
1520
1521 return;
1522 }
1523
1524 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1525 {
1526 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1527
1528 if (enable == -1)
1529 return;
1530
1531 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
1532 set_osi_linux(enable);
1533
1534 return;
1535 }
1536
1537 /*
1538 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1539 *
1540 * empty string disables _OSI
1541 * string starting with '!' disables that string
1542 * otherwise string is added to list, augmenting built-in strings
1543 */
1544 static void __init acpi_osi_setup_late(void)
1545 {
1546 struct osi_setup_entry *osi;
1547 char *str;
1548 int i;
1549 acpi_status status;
1550
1551 if (osi_linux.default_disabling) {
1552 status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
1553
1554 if (ACPI_SUCCESS(status))
1555 printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
1556 }
1557
1558 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1559 osi = &osi_setup_entries[i];
1560 str = osi->string;
1561
1562 if (*str == '\0')
1563 break;
1564 if (osi->enable) {
1565 status = acpi_install_interface(str);
1566
1567 if (ACPI_SUCCESS(status))
1568 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1569 } else {
1570 status = acpi_remove_interface(str);
1571
1572 if (ACPI_SUCCESS(status))
1573 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1574 }
1575 }
1576 }
1577
1578 static int __init osi_setup(char *str)
1579 {
1580 if (str && !strcmp("Linux", str))
1581 acpi_cmdline_osi_linux(1);
1582 else if (str && !strcmp("!Linux", str))
1583 acpi_cmdline_osi_linux(0);
1584 else
1585 acpi_osi_setup(str);
1586
1587 return 1;
1588 }
1589
1590 __setup("acpi_osi=", osi_setup);
1591
1592 /*
1593 * Disable the auto-serialization of methods that create named objects.
1594 *
1595 * This feature is enabled by default. It marks the AML control methods
1596 * that contain the opcodes to create named objects as "Serialized".
1597 */
1598 static int __init acpi_no_auto_serialize_setup(char *str)
1599 {
1600 acpi_gbl_auto_serialize_methods = FALSE;
1601 pr_info("ACPI: auto-serialization disabled\n");
1602
1603 return 1;
1604 }
1605
1606 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1607
1608 /* Check for resource interference between native drivers and ACPI
1609 * OperationRegions (SystemIO and SystemMemory only).
1610 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1611 * in arbitrary AML code and can interfere with legacy drivers.
1612 * acpi_enforce_resources= can be set to:
1613 *
1614 * - strict (default) (2)
1615 * -> a driver trying to access such a resource will not load
1616 * - lax (1)
1617 * -> a driver trying to access such a resource will load, but you
1618 * get a system message that something might go wrong...
1619 *
1620 * - no (0)
1621 * -> ACPI Operation Region resources will not be registered
1622 *
1623 */
1624 #define ENFORCE_RESOURCES_STRICT 2
1625 #define ENFORCE_RESOURCES_LAX 1
1626 #define ENFORCE_RESOURCES_NO 0
1627
1628 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1629
1630 static int __init acpi_enforce_resources_setup(char *str)
1631 {
1632 if (str == NULL || *str == '\0')
1633 return 0;
1634
1635 if (!strcmp("strict", str))
1636 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1637 else if (!strcmp("lax", str))
1638 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1639 else if (!strcmp("no", str))
1640 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1641
1642 return 1;
1643 }
1644
1645 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1646
1647 /* Check for resource conflicts between ACPI OperationRegions and native
1648 * drivers */
1649 int acpi_check_resource_conflict(const struct resource *res)
1650 {
1651 acpi_adr_space_type space_id;
1652 acpi_size length;
1653 u8 warn = 0;
1654 int clash = 0;
1655
1656 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1657 return 0;
1658 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1659 return 0;
1660
1661 if (res->flags & IORESOURCE_IO)
1662 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1663 else
1664 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1665
1666 length = resource_size(res);
1667 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1668 warn = 1;
1669 clash = acpi_check_address_range(space_id, res->start, length, warn);
1670
1671 if (clash) {
1672 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1673 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1674 printk(KERN_NOTICE "ACPI: This conflict may"
1675 " cause random problems and system"
1676 " instability\n");
1677 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1678 " for this device, you should use it instead of"
1679 " the native driver\n");
1680 }
1681 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1682 return -EBUSY;
1683 }
1684 return 0;
1685 }
1686 EXPORT_SYMBOL(acpi_check_resource_conflict);
1687
1688 int acpi_check_region(resource_size_t start, resource_size_t n,
1689 const char *name)
1690 {
1691 struct resource res = {
1692 .start = start,
1693 .end = start + n - 1,
1694 .name = name,
1695 .flags = IORESOURCE_IO,
1696 };
1697
1698 return acpi_check_resource_conflict(&res);
1699 }
1700 EXPORT_SYMBOL(acpi_check_region);
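/*
 * Usage sketch (illustrative, not from this file): a legacy driver checks
 * its I/O range against ACPI OperationRegions before claiming it; the port
 * base, length and name are hypothetical:
 *
 *	if (acpi_check_region(0x0295, 8, "example-hwmon"))
 *		return -EBUSY;	// conflict found and enforcement is strict
 *	if (!request_region(0x0295, 8, "example-hwmon"))
 *		return -EBUSY;
 */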
1701
1702 /*
1703 * Let drivers know whether the resource checks are effective
1704 */
1705 int acpi_resources_are_enforced(void)
1706 {
1707 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1708 }
1709 EXPORT_SYMBOL(acpi_resources_are_enforced);
1710
1711 bool acpi_osi_is_win8(void)
1712 {
1713 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
1714 }
1715 EXPORT_SYMBOL(acpi_osi_is_win8);
1716
1717 /*
1718 * Deallocate the memory for a spinlock.
1719 */
1720 void acpi_os_delete_lock(acpi_spinlock handle)
1721 {
1722 ACPI_FREE(handle);
1723 }
1724
1725 /*
1726 * Acquire a spinlock.
1727 *
1728 * handle is a pointer to the spinlock_t.
1729 */
1730
1731 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1732 {
1733 acpi_cpu_flags flags;
1734 spin_lock_irqsave(lockp, flags);
1735 return flags;
1736 }
1737
1738 /*
1739 * Release a spinlock. See above.
1740 */
1741
1742 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1743 {
1744 spin_unlock_irqrestore(lockp, flags);
1745 }
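/*
 * Usage sketch (illustrative, not from this file): ACPICA saves the
 * returned flags and hands them back on release:
 *
 *	acpi_cpu_flags flags;
 *
 *	flags = acpi_os_acquire_lock(lockp);
 *	// ... critical section, local interrupts disabled ...
 *	acpi_os_release_lock(lockp, flags);
 */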
1746
1747 #ifndef ACPI_USE_LOCAL_CACHE
1748
1749 /*******************************************************************************
1750 *
1751 * FUNCTION: acpi_os_create_cache
1752 *
1753 * PARAMETERS: name - Ascii name for the cache
1754 * size - Size of each cached object
1755 * depth - Maximum depth of the cache (in objects) <ignored>
1756 * cache - Where the new cache object is returned
1757 *
1758 * RETURN: status
1759 *
1760 * DESCRIPTION: Create a cache object
1761 *
1762 ******************************************************************************/
1763
1764 acpi_status
1765 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1766 {
1767 *cache = kmem_cache_create(name, size, 0, 0, NULL);
1768 if (*cache == NULL)
1769 return AE_ERROR;
1770 else
1771 return AE_OK;
1772 }
1773
1774 /*******************************************************************************
1775 *
1776 * FUNCTION: acpi_os_purge_cache
1777 *
1778 * PARAMETERS: Cache - Handle to cache object
1779 *
1780 * RETURN: Status
1781 *
1782 * DESCRIPTION: Free all objects within the requested cache.
1783 *
1784 ******************************************************************************/
1785
1786 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1787 {
1788 kmem_cache_shrink(cache);
1789 return (AE_OK);
1790 }
1791
1792 /*******************************************************************************
1793 *
1794 * FUNCTION: acpi_os_delete_cache
1795 *
1796 * PARAMETERS: Cache - Handle to cache object
1797 *
1798 * RETURN: Status
1799 *
1800 * DESCRIPTION: Free all objects within the requested cache and delete the
1801 * cache object.
1802 *
1803 ******************************************************************************/
1804
1805 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1806 {
1807 kmem_cache_destroy(cache);
1808 return (AE_OK);
1809 }
1810
1811 /*******************************************************************************
1812 *
1813 * FUNCTION: acpi_os_release_object
1814 *
1815 * PARAMETERS: Cache - Handle to cache object
1816 * Object - The object to be released
1817 *
1818 * RETURN: None
1819 *
1820 * DESCRIPTION: Release an object to the specified cache. If cache is full,
1821 * the object is deleted.
1822 *
1823 ******************************************************************************/
1824
1825 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1826 {
1827 kmem_cache_free(cache, object);
1828 return (AE_OK);
1829 }
1830 #endif
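/*
 * Lifecycle sketch (illustrative, not from this file): how ACPICA pairs
 * these cache hooks; the cache name and object size are hypothetical, and
 * the allocation side (acpi_os_acquire_object()) is defined elsewhere in
 * the Linux OSL headers:
 *
 *	acpi_cache_t *cache;
 *	void *obj;
 *
 *	if (ACPI_FAILURE(acpi_os_create_cache("Acpi-Example", 64, 16, &cache)))
 *		return AE_ERROR;
 *	obj = acpi_os_acquire_object(cache);
 *	acpi_os_release_object(cache, obj);
 *	acpi_os_delete_cache(cache);
 */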
1831
1832 static int __init acpi_no_static_ssdt_setup(char *s)
1833 {
1834 acpi_gbl_disable_ssdt_table_install = TRUE;
1835 pr_info("ACPI: static SSDT installation disabled\n");
1836
1837 return 0;
1838 }
1839
1840 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
1841
1842 static int __init acpi_disable_return_repair(char *s)
1843 {
1844 printk(KERN_NOTICE PREFIX
1845 "ACPI: Predefined validation mechanism disabled\n");
1846 acpi_gbl_disable_auto_repair = TRUE;
1847
1848 return 1;
1849 }
1850
1851 __setup("acpica_no_return_repair", acpi_disable_return_repair);
1852
1853 acpi_status __init acpi_os_initialize(void)
1854 {
1855 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1856 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1857 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1858 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1859 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
1860 /*
1861 * Use acpi_os_map_generic_address to pre-map the reset
1862 * register if it's in system memory.
1863 */
1864 int rv;
1865
1866 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1867 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
1868 }
1869 acpi_os_initialized = true;
1870
1871 return AE_OK;
1872 }
1873
1874 acpi_status __init acpi_os_initialize1(void)
1875 {
1876 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1877 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1878 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1879 BUG_ON(!kacpid_wq);
1880 BUG_ON(!kacpi_notify_wq);
1881 BUG_ON(!kacpi_hotplug_wq);
1882 acpi_install_interface_handler(acpi_osi_handler);
1883 acpi_osi_setup_late();
1884 return AE_OK;
1885 }
1886
1887 acpi_status acpi_os_terminate(void)
1888 {
1889 if (acpi_irq_handler) {
1890 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
1891 acpi_irq_handler);
1892 }
1893
1894 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1895 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1896 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1897 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1898 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1899 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
1900
1901 destroy_workqueue(kacpid_wq);
1902 destroy_workqueue(kacpi_notify_wq);
1903 destroy_workqueue(kacpi_hotplug_wq);
1904
1905 return AE_OK;
1906 }
1907
1908 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1909 u32 pm1b_control)
1910 {
1911 int rc = 0;
1912 if (__acpi_os_prepare_sleep)
1913 rc = __acpi_os_prepare_sleep(sleep_state,
1914 pm1a_control, pm1b_control);
1915 if (rc < 0)
1916 return AE_ERROR;
1917 else if (rc > 0)
1918 return AE_CTRL_SKIP;
1919
1920 return AE_OK;
1921 }
1922
1923 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1924 u32 pm1a_ctrl, u32 pm1b_ctrl))
1925 {
1926 __acpi_os_prepare_sleep = func;
1927 }
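/*
 * Usage sketch (illustrative, not from this file): a hypervisor guest can
 * hook the final sleep-state entry; the callback name is hypothetical.
 * Per acpi_os_prepare_sleep() above, returning > 0 skips the PM1 control
 * writes (AE_CTRL_SKIP) and < 0 aborts with AE_ERROR:
 *
 *	static int example_prepare_sleep(u8 sleep_state, u32 pm1a, u32 pm1b)
 *	{
 *		// ask the hypervisor to perform the state change instead
 *		return 1;
 *	}
 *
 *	acpi_os_set_prepare_sleep(example_prepare_sleep);
 */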
1928
1929 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1930 u32 val_b)
1931 {
1932 int rc = 0;
1933 if (__acpi_os_prepare_extended_sleep)
1934 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1935 val_a, val_b);
1936 if (rc < 0)
1937 return AE_ERROR;
1938 else if (rc > 0)
1939 return AE_CTRL_SKIP;
1940
1941 return AE_OK;
1942 }
1943
1944 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
1945 u32 val_a, u32 val_b))
1946 {
1947 __acpi_os_prepare_extended_sleep = func;
1948 }