/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"
#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif                          /* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
                                               u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
        struct list_head list;
        void __iomem *virt;
        acpi_physical_address phys;
        acpi_size size;
        unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region(struct acpi_generic_address *gas,
                                       unsigned int length, char *desc)
{
        u64 addr;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !length)
                return;

        /* Resources are never freed */
        if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                request_region(addr, length, desc);
        else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block,
                            acpi_gbl_FADT.pm1_event_length, "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block,
                            acpi_gbl_FADT.pm1_event_length, "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block,
                            acpi_gbl_FADT.pm1_control_length, "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block,
                            acpi_gbl_FADT.pm1_control_length, "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block,
                            acpi_gbl_FADT.pm2_control_length, "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                                    acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                                    acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);
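
/*
 * Illustrative sketch (not part of the original file): acpi_os_printf()
 * accepts printk-style level prefixes, which acpi_os_vprintf() below
 * detects via printk_get_level(); messages without a level are emitted
 * as continuations.
 */
static void __maybe_unused acpi_osl_printf_example(void)
{
        acpi_os_printf(KERN_WARNING "ACPI: example warning, status %d\n", -5);
}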

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        /* Bound the write; an oversized message must not overrun the buffer. */
        vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#else
        if (acpi_debugger_write_log(buffer) < 0) {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
        return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        acpi_physical_address pa = 0;

#ifdef CONFIG_KEXEC
        if (acpi_rsdp && !secure_modules())
                return acpi_rsdp;
#endif

        if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                pr_err(PREFIX "System description tables not found\n");
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_find_root_pointer(&pa);
        }

        return pa;
}
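
/*
 * Illustrative sketch (not part of the original file): with CONFIG_KEXEC,
 * a kexec'ed kernel can be handed the RSDP location directly instead of
 * re-discovering it, via the early parameter handled above. The address
 * below is a hypothetical example; kexec tooling normally supplies it
 * from the first kernel's tables:
 *
 *      acpi_rsdp=0x7fe386a0
 */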

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->phys <= phys &&
                    phys + size <= map->phys + map->size)
                        return map;

        return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;

        map = acpi_map_lookup(phys, size);
        if (map)
                return map->virt + (phys - map->phys);

        return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;
        void __iomem *virt = NULL;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(phys, size);
        if (map) {
                virt = map->virt + (phys - map->phys);
                map->refcount++;
        }
        mutex_unlock(&acpi_ioremap_lock);
        return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->virt <= virt &&
                    virt + size <= map->virt + map->size)
                        return map;

        return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;
        void __iomem *virt;
        acpi_physical_address pg_off;
        acpi_size pg_sz;

        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }

        if (!acpi_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);

        mutex_lock(&acpi_ioremap_lock);
        /* Check if there's a suitable mapping already. */
        map = acpi_map_lookup(phys, size);
        if (map) {
                map->refcount++;
                goto out;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return NULL;
        }

        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
        virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
                return NULL;
        }

        INIT_LIST_HEAD(&map->list);
        map->virt = virt;
        map->phys = pg_off;
        map->size = pg_sz;
        map->refcount = 1;

        list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
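
/*
 * Illustrative sketch (not part of the original file): a typical map,
 * access, unmap sequence built on the helpers above.  The physical
 * address is a hypothetical example.
 */
static u32 __maybe_unused acpi_osl_map_example(void)
{
        void __iomem *virt;
        u32 val;

        virt = acpi_os_map_iomem(0xfed40000, 4);
        if (!virt)
                return 0;

        val = readl(virt);

        /* Drops the reference; the range is unmapped at refcount zero. */
        acpi_os_unmap_iomem(virt, 4);
        return val;
}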

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
        if (!--map->refcount)
                list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
        if (!map->refcount) {
                synchronize_rcu_expedited();
                acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        if (!acpi_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
        return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        void __iomem *virt;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return -EINVAL;

        virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
        if (!virt)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        struct acpi_ioremap *map;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
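
/*
 * Illustrative sketch (not part of the original file): pre-mapping a
 * hypothetical SystemMemory generic address so that later accesses, even
 * from interrupt context, can find it in the permanent-mapping list.
 */
static int __maybe_unused acpi_osl_gas_example(void)
{
        struct acpi_generic_address gas = {
                .space_id  = ACPI_ADR_SPACE_SYSTEM_MEMORY,
                .bit_width = 32,
                .address   = 0xfed40000,        /* hypothetical */
        };

        return acpi_os_map_generic_address(&gas);
}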

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
        acpi_rev_override = true;
        return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override       false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
                printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
                *new_val = (char *)5;
        }

        return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else {
                acpi_irq_not_handled++;
                return IRQ_NONE;
        }
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * ACPI interrupts different from the SCI in our copy of the FADT are
         * not supported.
         */
        if (gsi != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        if (acpi_irq_handler)
                return AE_ALREADY_ACQUIRED;

        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;
        }
        acpi_sci_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
        if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
                return AE_BAD_PARAMETER;

        free_irq(acpi_sci_irq, acpi_irq);
        acpi_irq_handler = NULL;
        acpi_sci_irq = INVALID_ACPI_IRQ;

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
        msleep(ms);
}

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}
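
/*
 * Illustrative sketch (not part of the original file): AML Sleep() is
 * serviced by acpi_os_sleep() and may reschedule, while AML Stall() is
 * serviced by acpi_os_stall() and busy-waits, so the latter also works
 * where sleeping is not allowed.
 */
static void __maybe_unused acpi_osl_delay_example(void)
{
        acpi_os_sleep(10);      /* about 10 ms, may sleep */
        acpi_os_stall(50);      /* 50 us busy-wait */
}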

/*
 * Support ACPI 3.0 AML Timer operand.  Returns a 64-bit free-running,
 * monotonically increasing timer with 100 ns granularity.
 */
u64 acpi_os_get_timer(void)
{
        u64 time_ns = ktime_to_ns(ktime_get());
        do_div(time_ns, 100);
        return time_ns;
}
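
/*
 * Worked example (not part of the original file): the value returned
 * above is in 100 ns units, so a delta of 250,000 ticks corresponds to
 * 250,000 * 100 ns = 25 ms.
 */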

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);
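
/*
 * Illustrative sketch (not part of the original file): an 8-bit read of
 * a hypothetical I/O port; the width argument selects inb/inw/inl in
 * acpi_os_read_port() above.
 */
static u32 __maybe_unused acpi_osl_port_example(void)
{
        u32 val = 0;

        acpi_os_read_port(0x80, &val, 8);       /* hypothetical port */
        return val;
}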

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;
        u64 dummy;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
                *(u64 *) value = readq(virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}
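
/*
 * Illustrative sketch (not part of the original file): a 32-bit read of
 * a hypothetical physical address.  The lookup above runs under
 * rcu_read_lock(), so this is interrupt-safe provided the range was
 * pre-mapped, e.g. with acpi_os_map_generic_address().
 */
static u64 __maybe_unused acpi_osl_mem_read_example(void)
{
        u64 val = 0;

        acpi_os_read_memory(0xfed40000, &val, 32);
        return val;
}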

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        case 64:
                writeq(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
                               u64 *value, u32 width)
{
        int result, size;
        u32 value32;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                              PCI_DEVFN(pci_id->device, pci_id->function),
                              reg, size, &value32);
        *value = value32;

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
                                u64 value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                               PCI_DEVFN(pci_id->device, pci_id->function),
                               reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
                           const struct acpi_debugger_ops *ops)
{
        int ret = 0;

        mutex_lock(&acpi_debugger.lock);
        if (acpi_debugger.ops) {
                ret = -EBUSY;
                goto err_lock;
        }

        acpi_debugger.owner = owner;
        acpi_debugger.ops = ops;

err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
        mutex_lock(&acpi_debugger.lock);
        if (ops == acpi_debugger.ops) {
                acpi_debugger.ops = NULL;
                acpi_debugger.owner = NULL;
        }
        mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
        int ret;
        int (*func)(acpi_osd_exec_callback, void *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->create_thread;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(function, context);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
        ssize_t ret;
        ssize_t (*func)(const char *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->write_log;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(msg);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
        ssize_t ret;
        ssize_t (*func)(char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->read_cmd;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(buffer, buffer_length);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_wait_command_ready(void)
{
        int ret;
        int (*func)(bool, char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->wait_command_ready;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(acpi_gbl_method_executing,
                   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_notify_command_complete(void)
{
        int ret;
        int (*func)(void);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->notify_command_complete;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func();

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int __init acpi_debugger_init(void)
{
        mutex_init(&acpi_debugger.lock);
        acpi_debugger_initialized = true;
        return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred
 *              execution or immediately executes function on a separate
 *              thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        int ret;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (type == OSL_DEBUGGER_MAIN_THREAD) {
                ret = acpi_debugger_create_thread(function, context);
                if (ret) {
                        pr_err("Call to kthread_create() failed.\n");
                        status = AE_ERROR;
                }
                goto out_thread;
        }

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * To prevent lockdep from complaining unnecessarily, make sure that
         * there is a different static lockdep key for each workqueue by using
         * INIT_WORK() for each of them separately.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                queue = kacpi_notify_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else if (type == OSL_GPE_HANDLER) {
                queue = kacpid_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else {
                pr_err("Unsupported os_execute type %d.\n", type);
                status = AE_ERROR;
        }

        if (ACPI_FAILURE(status))
                goto err_workqueue;

        /*
         * On some machines, a software-initiated SMI causes corruption unless
         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
         * typically it's done in GPE-related methods that are run via
         * workqueues, so we can avoid the known corruption cases by always
         * queueing on CPU 0.
         */
        ret = queue_work_on(0, queue, &dpc->work);
        if (!ret) {
                printk(KERN_ERR PREFIX "Call to queue_work() failed.\n");
                status = AE_ERROR;
        }
err_workqueue:
        if (ACPI_FAILURE(status))
                kfree(dpc);
out_thread:
        return status;
}
EXPORT_SYMBOL(acpi_os_execute);
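
/*
 * Illustrative sketch (not part of the original file): deferring a
 * hypothetical callback the way the ACPICA core does for notify
 * handlers; it will run from kacpi_notify_wq, queued on CPU 0.
 */
static void __maybe_unused acpi_osl_example_callback(void *context)
{
        pr_info("example deferred callback, context %p\n", context);
}

static void __maybe_unused acpi_osl_execute_example(void)
{
        acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_osl_example_callback, NULL);
}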

void acpi_os_wait_events_complete(void)
{
        /*
         * Make sure the GPE handler or the fixed event handler is not used
         * on another CPU after removal.
         */
        if (acpi_sci_irq_valid())
                synchronize_hardirq(acpi_sci_irq);
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
        struct work_struct work;
        struct acpi_device *adev;
        u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

        acpi_os_wait_events_complete();
        acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
        struct acpi_hp_work *hpw;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling hotplug event (%p, %u) for deferred execution.\n",
                          adev, src));

        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;

        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
        hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
         * the hotplug code may call driver .remove() functions, which may
         * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
         * these workqueues.
         */
        if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
                kfree(hpw);
                return AE_ERROR;
        }
        return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
        return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
        struct semaphore *sem = NULL;

        sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}
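
/*
 * Illustrative sketch (not part of the original file): the semaphore
 * lifecycle as the ACPICA core drives it; create with one unit, wait
 * with a 100 ms timeout, signal, delete.
 */
static void __maybe_unused acpi_osl_sem_example(void)
{
        acpi_handle sem;

        if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
                return;

        if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))
                acpi_os_signal_semaphore(sem, 1);

        acpi_os_delete_semaphore(sem);
}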

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, buffer_length);

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#else
        int ret;

        ret = acpi_debugger_read_cmd(buffer, buffer_length);
        if (ret < 0)
                return AE_ERROR;
        if (bytes_read)
                *bytes_read = ret;
#endif

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
        int ret;

        ret = acpi_debugger_wait_command_ready();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
        int ret;

        ret = acpi_debugger_notify_command_complete();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
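
/*
 * Illustrative sketch (not part of the original file): overriding the
 * _OS_ string reported to AML, e.g. for firmware that only cooperates
 * with a particular OS name, via the boot parameter handled above:
 *
 *      acpi_os_name="Microsoft Windows NT"
 */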

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
        acpi_gbl_auto_serialize_methods = FALSE;
        pr_info("ACPI: auto-serialization disabled\n");

        return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
        if (str == NULL || *str == '\0')
                return 0;

        if (!strcmp("strict", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
        else if (!strcmp("lax", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
        else if (!strcmp("no", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_NO;

        return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
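
/*
 * Illustrative sketch (not part of the original file): letting a native
 * driver bind despite an overlapping OperationRegion, at the cost of a
 * warning, via the boot parameter handled above:
 *
 *      acpi_enforce_resources=lax
 */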

/*
 * Check for resource conflicts between ACPI OperationRegions and native
 * drivers.
 */
int acpi_check_resource_conflict(const struct resource *res)
{
        acpi_adr_space_type space_id;
        acpi_size length;
        u8 warn = 0;
        int clash = 0;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
        if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
                return 0;

        if (res->flags & IORESOURCE_IO)
                space_id = ACPI_ADR_SPACE_SYSTEM_IO;
        else
                space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

        length = resource_size(res);
        if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
                warn = 1;
        clash = acpi_check_address_range(space_id, res->start, length, warn);

        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
                        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                                printk(KERN_NOTICE "ACPI: This conflict may"
                                       " cause random problems and system"
                                       " instability\n");
                        printk(KERN_INFO "ACPI: If an ACPI driver is available"
                               " for this device, you should use it instead of"
                               " the native driver\n");
                }
                if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
                        return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = {
                .start = start,
                .end = start + n - 1,
                .name = name,
                .flags = IORESOURCE_IO,
        };

        return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
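
/*
 * Illustrative sketch (not part of the original file): a native driver
 * checking a hypothetical 8-port I/O range for ACPI conflicts before
 * requesting it.
 */
static int __maybe_unused acpi_osl_check_example(void)
{
        return acpi_check_region(0x295, 8, "example-hwmon");
}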

/*
 * Let drivers know whether the resource checks are effective.
 */
int acpi_resources_are_enforced(void)
{
        return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
        acpi_cpu_flags flags;

        spin_lock_irqsave(lockp, flags);
        return flags;
}

/*
 * Release a spinlock.  See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
        spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name            - Ascii name for the cache
 *              size            - Size of each cached object
 *              depth           - Maximum depth of the cache (in objects) <ignored>
 *              cache           - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
        if (*cache == NULL)
                return AE_ERROR;
        else
                return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
        kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
        kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *              Object          - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}
#endif
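
/*
 * Illustrative sketch (not part of the original file, and assuming the
 * !ACPI_USE_LOCAL_CACHE build above): the cache lifecycle as the ACPICA
 * core uses it for its internal object allocations; name and sizes are
 * hypothetical.
 */
static void __maybe_unused acpi_osl_cache_example(void)
{
        acpi_cache_t *cache;

        if (ACPI_FAILURE(acpi_os_create_cache("example-cache", 64, 16, &cache)))
                return;

        acpi_os_purge_cache(cache);
        acpi_os_delete_cache(cache);
}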

static int __init acpi_no_static_ssdt_setup(char *s)
{
        acpi_gbl_disable_ssdt_table_install = TRUE;
        pr_info("ACPI: static SSDT installation disabled\n");

        return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
        printk(KERN_NOTICE PREFIX "Predefined validation mechanism disabled\n");
        acpi_gbl_disable_auto_repair = TRUE;

        return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
                /*
                 * Use acpi_os_map_generic_address to pre-map the reset
                 * register if it's in system memory.
                 */
                int rv;

                rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
                pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
        }
        acpi_os_initialized = true;

        return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        BUG_ON(!kacpi_hotplug_wq);
        acpi_osi_init();
        return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                 acpi_irq_handler);
        }

        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
                acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);
        destroy_workqueue(kacpi_hotplug_wq);

        return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
                                  u32 pm1b_control)
{
        int rc = 0;

        if (__acpi_os_prepare_sleep)
                rc = __acpi_os_prepare_sleep(sleep_state,
                                             pm1a_control, pm1b_control);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                                           u32 pm1a_ctrl, u32 pm1b_ctrl))
{
        __acpi_os_prepare_sleep = func;
}

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
                                           u32 val_b)
{
        int rc = 0;

        if (__acpi_os_prepare_extended_sleep)
                rc = __acpi_os_prepare_extended_sleep(sleep_state,
                                                      val_a, val_b);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
                                                    u32 val_a, u32 val_b))
{
        __acpi_os_prepare_extended_sleep = func;
}
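
/*
 * Illustrative sketch (not part of the original file): how a platform
 * layer might hook the final sleep-register write via
 * acpi_os_set_prepare_sleep().  A positive return makes
 * acpi_os_prepare_sleep() report AE_CTRL_SKIP, telling ACPICA to skip
 * its own PM1x control write.  The S3 policy below is hypothetical.
 */
static int __maybe_unused example_prepare_sleep(u8 sleep_state,
                                                u32 pm1a_ctrl, u32 pm1b_ctrl)
{
        /* Hypothetical policy: take over S3 entry, pass everything else. */
        return sleep_state == ACPI_STATE_S3 ? 1 : 0;
}

static void __maybe_unused acpi_osl_sleep_hook_example(void)
{
        acpi_os_set_prepare_sleep(example_prepare_sleep);
}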