/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */
5 #include <linux/init.h>
6 #include <linux/kernel.h>
7 #include <linux/moduleparam.h>
8 #include <linux/acpi.h>
12 #define _COMPONENT ACPI_SYSTEM_COMPONENT
13 ACPI_MODULE_NAME("sysfs");
15 #ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/modules/acpi/parameters/debug_layer
 * /sys/modules/acpi/parameters/debug_level
 * /sys/modules/acpi/parameters/trace_method_name
 * /sys/modules/acpi/parameters/trace_state
 * /sys/modules/acpi/parameters/trace_debug_layer
 * /sys/modules/acpi/parameters/trace_debug_level
 */
34 #define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
36 static const struct acpi_dlayer acpi_debug_layers
[] = {
37 ACPI_DEBUG_INIT(ACPI_UTILITIES
),
38 ACPI_DEBUG_INIT(ACPI_HARDWARE
),
39 ACPI_DEBUG_INIT(ACPI_EVENTS
),
40 ACPI_DEBUG_INIT(ACPI_TABLES
),
41 ACPI_DEBUG_INIT(ACPI_NAMESPACE
),
42 ACPI_DEBUG_INIT(ACPI_PARSER
),
43 ACPI_DEBUG_INIT(ACPI_DISPATCHER
),
44 ACPI_DEBUG_INIT(ACPI_EXECUTER
),
45 ACPI_DEBUG_INIT(ACPI_RESOURCES
),
46 ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER
),
47 ACPI_DEBUG_INIT(ACPI_OS_SERVICES
),
48 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER
),
49 ACPI_DEBUG_INIT(ACPI_COMPILER
),
50 ACPI_DEBUG_INIT(ACPI_TOOLS
),
52 ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT
),
53 ACPI_DEBUG_INIT(ACPI_AC_COMPONENT
),
54 ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT
),
55 ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT
),
56 ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT
),
57 ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT
),
58 ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT
),
59 ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT
),
60 ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT
),
61 ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT
),
62 ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT
),
63 ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT
),
64 ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT
),
65 ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT
),
68 static const struct acpi_dlevel acpi_debug_levels
[] = {
69 ACPI_DEBUG_INIT(ACPI_LV_INIT
),
70 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT
),
71 ACPI_DEBUG_INIT(ACPI_LV_INFO
),
72 ACPI_DEBUG_INIT(ACPI_LV_REPAIR
),
73 ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT
),
75 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES
),
76 ACPI_DEBUG_INIT(ACPI_LV_PARSE
),
77 ACPI_DEBUG_INIT(ACPI_LV_LOAD
),
78 ACPI_DEBUG_INIT(ACPI_LV_DISPATCH
),
79 ACPI_DEBUG_INIT(ACPI_LV_EXEC
),
80 ACPI_DEBUG_INIT(ACPI_LV_NAMES
),
81 ACPI_DEBUG_INIT(ACPI_LV_OPREGION
),
82 ACPI_DEBUG_INIT(ACPI_LV_BFIELD
),
83 ACPI_DEBUG_INIT(ACPI_LV_TABLES
),
84 ACPI_DEBUG_INIT(ACPI_LV_VALUES
),
85 ACPI_DEBUG_INIT(ACPI_LV_OBJECTS
),
86 ACPI_DEBUG_INIT(ACPI_LV_RESOURCES
),
87 ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS
),
88 ACPI_DEBUG_INIT(ACPI_LV_PACKAGE
),
90 ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS
),
91 ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS
),
92 ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS
),
94 ACPI_DEBUG_INIT(ACPI_LV_MUTEX
),
95 ACPI_DEBUG_INIT(ACPI_LV_THREADS
),
96 ACPI_DEBUG_INIT(ACPI_LV_IO
),
97 ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS
),
99 ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE
),
100 ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO
),
101 ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES
),
102 ACPI_DEBUG_INIT(ACPI_LV_EVENTS
),
105 static int param_get_debug_layer(char *buffer
, const struct kernel_param
*kp
)
110 result
= sprintf(buffer
, "%-25s\tHex SET\n", "Description");
112 for (i
= 0; i
< ARRAY_SIZE(acpi_debug_layers
); i
++) {
113 result
+= sprintf(buffer
+ result
, "%-25s\t0x%08lX [%c]\n",
114 acpi_debug_layers
[i
].name
,
115 acpi_debug_layers
[i
].value
,
116 (acpi_dbg_layer
& acpi_debug_layers
[i
].value
)
120 sprintf(buffer
+ result
, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
122 (acpi_dbg_layer
& ACPI_ALL_DRIVERS
) ==
123 ACPI_ALL_DRIVERS
? '*' : (acpi_dbg_layer
& ACPI_ALL_DRIVERS
)
126 sprintf(buffer
+ result
,
127 "--\ndebug_layer = 0x%08X ( * = enabled)\n",
133 static int param_get_debug_level(char *buffer
, const struct kernel_param
*kp
)
138 result
= sprintf(buffer
, "%-25s\tHex SET\n", "Description");
140 for (i
= 0; i
< ARRAY_SIZE(acpi_debug_levels
); i
++) {
141 result
+= sprintf(buffer
+ result
, "%-25s\t0x%08lX [%c]\n",
142 acpi_debug_levels
[i
].name
,
143 acpi_debug_levels
[i
].value
,
144 (acpi_dbg_level
& acpi_debug_levels
[i
].value
)
148 sprintf(buffer
+ result
, "--\ndebug_level = 0x%08X (* = enabled)\n",
154 static const struct kernel_param_ops param_ops_debug_layer
= {
155 .set
= param_set_uint
,
156 .get
= param_get_debug_layer
,
159 static const struct kernel_param_ops param_ops_debug_level
= {
160 .set
= param_set_uint
,
161 .get
= param_get_debug_level
,
164 module_param_cb(debug_layer
, ¶m_ops_debug_layer
, &acpi_dbg_layer
, 0644);
165 module_param_cb(debug_level
, ¶m_ops_debug_level
, &acpi_dbg_level
, 0644);
167 static char trace_method_name
[1024];
169 int param_set_trace_method_name(const char *val
, const struct kernel_param
*kp
)
172 bool is_abs_path
= true;
177 if ((is_abs_path
&& strlen(val
) > 1023) ||
178 (!is_abs_path
&& strlen(val
) > 1022)) {
179 pr_err("%s: string parameter too long\n", kp
->name
);
184 * It's not safe to update acpi_gbl_trace_method_name without
185 * having the tracer stopped, so we save the original tracer
186 * state and disable it.
188 saved_flags
= acpi_gbl_trace_flags
;
189 (void)acpi_debug_trace(NULL
,
190 acpi_gbl_trace_dbg_level
,
191 acpi_gbl_trace_dbg_layer
,
194 /* This is a hack. We can't kmalloc in early boot. */
196 strcpy(trace_method_name
, val
);
198 trace_method_name
[0] = '\\';
199 strcpy(trace_method_name
+1, val
);
202 /* Restore the original tracer state */
203 (void)acpi_debug_trace(trace_method_name
,
204 acpi_gbl_trace_dbg_level
,
205 acpi_gbl_trace_dbg_layer
,
211 static int param_get_trace_method_name(char *buffer
, const struct kernel_param
*kp
)
213 return scnprintf(buffer
, PAGE_SIZE
, "%s", acpi_gbl_trace_method_name
);
216 static const struct kernel_param_ops param_ops_trace_method
= {
217 .set
= param_set_trace_method_name
,
218 .get
= param_get_trace_method_name
,
221 static const struct kernel_param_ops param_ops_trace_attrib
= {
222 .set
= param_set_uint
,
223 .get
= param_get_uint
,
226 module_param_cb(trace_method_name
, ¶m_ops_trace_method
, &trace_method_name
, 0644);
227 module_param_cb(trace_debug_layer
, ¶m_ops_trace_attrib
, &acpi_gbl_trace_dbg_layer
, 0644);
228 module_param_cb(trace_debug_level
, ¶m_ops_trace_attrib
, &acpi_gbl_trace_dbg_level
, 0644);
230 static int param_set_trace_state(const char *val
, struct kernel_param
*kp
)
233 const char *method
= trace_method_name
;
236 /* So "xxx-once" comparison should go prior than "xxx" comparison */
237 #define acpi_compare_param(val, key) \
238 strncmp((val), (key), sizeof(key) - 1)
240 if (!acpi_compare_param(val
, "enable")) {
242 flags
= ACPI_TRACE_ENABLED
;
243 } else if (!acpi_compare_param(val
, "disable"))
245 else if (!acpi_compare_param(val
, "method-once"))
246 flags
= ACPI_TRACE_ENABLED
| ACPI_TRACE_ONESHOT
;
247 else if (!acpi_compare_param(val
, "method"))
248 flags
= ACPI_TRACE_ENABLED
;
249 else if (!acpi_compare_param(val
, "opcode-once"))
250 flags
= ACPI_TRACE_ENABLED
| ACPI_TRACE_ONESHOT
| ACPI_TRACE_OPCODE
;
251 else if (!acpi_compare_param(val
, "opcode"))
252 flags
= ACPI_TRACE_ENABLED
| ACPI_TRACE_OPCODE
;
256 status
= acpi_debug_trace(method
,
257 acpi_gbl_trace_dbg_level
,
258 acpi_gbl_trace_dbg_layer
,
260 if (ACPI_FAILURE(status
))
266 static int param_get_trace_state(char *buffer
, struct kernel_param
*kp
)
268 if (!(acpi_gbl_trace_flags
& ACPI_TRACE_ENABLED
))
269 return sprintf(buffer
, "disable");
271 if (acpi_gbl_trace_method_name
) {
272 if (acpi_gbl_trace_flags
& ACPI_TRACE_ONESHOT
)
273 return sprintf(buffer
, "method-once");
275 return sprintf(buffer
, "method");
277 return sprintf(buffer
, "enable");
282 module_param_call(trace_state
, param_set_trace_state
, param_get_trace_state
,
284 #endif /* CONFIG_ACPI_DEBUG */
287 /* /sys/modules/acpi/parameters/aml_debug_output */
289 module_param_named(aml_debug_output
, acpi_gbl_enable_aml_debug_object
,
291 MODULE_PARM_DESC(aml_debug_output
,
292 "To enable/disable the ACPI Debug Object output.");
294 /* /sys/module/acpi/parameters/acpica_version */
295 static int param_get_acpica_version(char *buffer
, struct kernel_param
*kp
)
299 result
= sprintf(buffer
, "%x", ACPI_CA_VERSION
);
304 module_param_call(acpica_version
, NULL
, param_get_acpica_version
, NULL
, 0444);
307 * ACPI table sysfs I/F:
308 * /sys/firmware/acpi/tables/
309 * /sys/firmware/acpi/tables/dynamic/
312 static LIST_HEAD(acpi_table_attr_list
);
313 static struct kobject
*tables_kobj
;
314 static struct kobject
*dynamic_tables_kobj
;
315 static struct kobject
*hotplug_kobj
;
317 #define ACPI_MAX_TABLE_INSTANCES 999
318 #define ACPI_INST_SIZE 4 /* including trailing 0 */
320 struct acpi_table_attr
{
321 struct bin_attribute attr
;
322 char name
[ACPI_NAME_SIZE
];
324 char filename
[ACPI_NAME_SIZE
+ACPI_INST_SIZE
];
325 struct list_head node
;
328 static ssize_t
acpi_table_show(struct file
*filp
, struct kobject
*kobj
,
329 struct bin_attribute
*bin_attr
, char *buf
,
330 loff_t offset
, size_t count
)
332 struct acpi_table_attr
*table_attr
=
333 container_of(bin_attr
, struct acpi_table_attr
, attr
);
334 struct acpi_table_header
*table_header
= NULL
;
338 status
= acpi_get_table(table_attr
->name
, table_attr
->instance
,
340 if (ACPI_FAILURE(status
))
343 rc
= memory_read_from_buffer(buf
, count
, &offset
, table_header
,
344 table_header
->length
);
345 acpi_put_table(table_header
);
349 static int acpi_table_attr_init(struct kobject
*tables_obj
,
350 struct acpi_table_attr
*table_attr
,
351 struct acpi_table_header
*table_header
)
353 struct acpi_table_header
*header
= NULL
;
354 struct acpi_table_attr
*attr
= NULL
;
355 char instance_str
[ACPI_INST_SIZE
];
357 sysfs_attr_init(&table_attr
->attr
.attr
);
358 ACPI_MOVE_NAME(table_attr
->name
, table_header
->signature
);
360 list_for_each_entry(attr
, &acpi_table_attr_list
, node
) {
361 if (ACPI_COMPARE_NAME(table_attr
->name
, attr
->name
))
362 if (table_attr
->instance
< attr
->instance
)
363 table_attr
->instance
= attr
->instance
;
365 table_attr
->instance
++;
366 if (table_attr
->instance
> ACPI_MAX_TABLE_INSTANCES
) {
367 pr_warn("%4.4s: too many table instances\n",
372 ACPI_MOVE_NAME(table_attr
->filename
, table_header
->signature
);
373 table_attr
->filename
[ACPI_NAME_SIZE
] = '\0';
374 if (table_attr
->instance
> 1 || (table_attr
->instance
== 1 &&
376 (table_header
->signature
, 2, &header
))) {
377 snprintf(instance_str
, sizeof(instance_str
), "%u",
378 table_attr
->instance
);
379 strcat(table_attr
->filename
, instance_str
);
382 table_attr
->attr
.size
= table_header
->length
;
383 table_attr
->attr
.read
= acpi_table_show
;
384 table_attr
->attr
.attr
.name
= table_attr
->filename
;
385 table_attr
->attr
.attr
.mode
= 0400;
387 return sysfs_create_bin_file(tables_obj
, &table_attr
->attr
);
390 acpi_status
acpi_sysfs_table_handler(u32 event
, void *table
, void *context
)
392 struct acpi_table_attr
*table_attr
;
395 case ACPI_TABLE_EVENT_INSTALL
:
397 kzalloc(sizeof(struct acpi_table_attr
), GFP_KERNEL
);
401 if (acpi_table_attr_init(dynamic_tables_kobj
,
402 table_attr
, table
)) {
406 list_add_tail(&table_attr
->node
, &acpi_table_attr_list
);
408 case ACPI_TABLE_EVENT_LOAD
:
409 case ACPI_TABLE_EVENT_UNLOAD
:
410 case ACPI_TABLE_EVENT_UNINSTALL
:
412 * we do not need to do anything right now
413 * because the table is not deleted from the
414 * global table list when unloading it.
418 return AE_BAD_PARAMETER
;
423 static int acpi_tables_sysfs_init(void)
425 struct acpi_table_attr
*table_attr
;
426 struct acpi_table_header
*table_header
= NULL
;
431 tables_kobj
= kobject_create_and_add("tables", acpi_kobj
);
435 dynamic_tables_kobj
= kobject_create_and_add("dynamic", tables_kobj
);
436 if (!dynamic_tables_kobj
)
437 goto err_dynamic_tables
;
439 for (table_index
= 0;; table_index
++) {
440 status
= acpi_get_table_by_index(table_index
, &table_header
);
442 if (status
== AE_BAD_PARAMETER
)
445 if (ACPI_FAILURE(status
))
448 table_attr
= kzalloc(sizeof(*table_attr
), GFP_KERNEL
);
452 ret
= acpi_table_attr_init(tables_kobj
,
453 table_attr
, table_header
);
458 list_add_tail(&table_attr
->node
, &acpi_table_attr_list
);
461 kobject_uevent(tables_kobj
, KOBJ_ADD
);
462 kobject_uevent(dynamic_tables_kobj
, KOBJ_ADD
);
466 kobject_put(tables_kobj
);
472 * Detailed ACPI IRQ counters:
473 * /sys/firmware/acpi/interrupts/
476 u32 acpi_irq_handled
;
477 u32 acpi_irq_not_handled
;
480 #define COUNT_SCI 1 /* acpi_irq_handled */
481 #define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
482 #define COUNT_ERROR 3 /* other */
483 #define NUM_COUNTERS_EXTRA 4
485 struct event_counter
{
490 static struct event_counter
*all_counters
;
492 static u32 num_counters
;
493 static struct attribute
**all_attrs
;
494 static u32 acpi_gpe_count
;
496 static struct attribute_group interrupt_stats_attr_group
= {
497 .name
= "interrupts",
500 static struct kobj_attribute
*counter_attrs
;
502 static void delete_gpe_attr_array(void)
504 struct event_counter
*tmp
= all_counters
;
512 for (i
= 0; i
< num_gpes
; i
++)
513 kfree(counter_attrs
[i
].attr
.name
);
515 kfree(counter_attrs
);
522 static void gpe_count(u32 gpe_number
)
529 if (gpe_number
< num_gpes
)
530 all_counters
[gpe_number
].count
++;
532 all_counters
[num_gpes
+ ACPI_NUM_FIXED_EVENTS
+
533 COUNT_ERROR
].count
++;
538 static void fixed_event_count(u32 event_number
)
543 if (event_number
< ACPI_NUM_FIXED_EVENTS
)
544 all_counters
[num_gpes
+ event_number
].count
++;
546 all_counters
[num_gpes
+ ACPI_NUM_FIXED_EVENTS
+
547 COUNT_ERROR
].count
++;
552 static void acpi_global_event_handler(u32 event_type
, acpi_handle device
,
553 u32 event_number
, void *context
)
555 if (event_type
== ACPI_EVENT_TYPE_GPE
)
556 gpe_count(event_number
);
558 if (event_type
== ACPI_EVENT_TYPE_FIXED
)
559 fixed_event_count(event_number
);
562 static int get_status(u32 index
, acpi_event_status
*status
,
567 if (index
>= num_gpes
+ ACPI_NUM_FIXED_EVENTS
)
570 if (index
< num_gpes
) {
571 result
= acpi_get_gpe_device(index
, handle
);
573 ACPI_EXCEPTION((AE_INFO
, AE_NOT_FOUND
,
574 "Invalid GPE 0x%x", index
));
577 result
= acpi_get_gpe_status(*handle
, index
, status
);
578 } else if (index
< (num_gpes
+ ACPI_NUM_FIXED_EVENTS
))
579 result
= acpi_get_event_status(index
- num_gpes
, status
);
584 static ssize_t
counter_show(struct kobject
*kobj
,
585 struct kobj_attribute
*attr
, char *buf
)
587 int index
= attr
- counter_attrs
;
590 acpi_event_status status
;
593 all_counters
[num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_SCI
].count
=
595 all_counters
[num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_SCI_NOT
].count
=
596 acpi_irq_not_handled
;
597 all_counters
[num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_GPE
].count
=
599 size
= sprintf(buf
, "%8u", all_counters
[index
].count
);
601 /* "gpe_all" or "sci" */
602 if (index
>= num_gpes
+ ACPI_NUM_FIXED_EVENTS
)
605 result
= get_status(index
, &status
, &handle
);
609 if (status
& ACPI_EVENT_FLAG_ENABLE_SET
)
610 size
+= sprintf(buf
+ size
, " EN");
612 size
+= sprintf(buf
+ size
, " ");
613 if (status
& ACPI_EVENT_FLAG_STATUS_SET
)
614 size
+= sprintf(buf
+ size
, " STS");
616 size
+= sprintf(buf
+ size
, " ");
618 if (!(status
& ACPI_EVENT_FLAG_HAS_HANDLER
))
619 size
+= sprintf(buf
+ size
, " invalid ");
620 else if (status
& ACPI_EVENT_FLAG_ENABLED
)
621 size
+= sprintf(buf
+ size
, " enabled ");
622 else if (status
& ACPI_EVENT_FLAG_WAKE_ENABLED
)
623 size
+= sprintf(buf
+ size
, " wake_enabled");
625 size
+= sprintf(buf
+ size
, " disabled ");
626 if (status
& ACPI_EVENT_FLAG_MASKED
)
627 size
+= sprintf(buf
+ size
, " masked ");
629 size
+= sprintf(buf
+ size
, " unmasked");
632 size
+= sprintf(buf
+ size
, "\n");
633 return result
? result
: size
;
637 * counter_set() sets the specified counter.
638 * setting the total "sci" file to any value clears all counters.
639 * enable/disable/clear a gpe/fixed event in user space.
641 static ssize_t
counter_set(struct kobject
*kobj
,
642 struct kobj_attribute
*attr
, const char *buf
,
645 int index
= attr
- counter_attrs
;
646 acpi_event_status status
;
651 if (index
== num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_SCI
) {
653 for (i
= 0; i
< num_counters
; ++i
)
654 all_counters
[i
].count
= 0;
656 acpi_irq_handled
= 0;
657 acpi_irq_not_handled
= 0;
661 /* show the event status for both GPEs and Fixed Events */
662 result
= get_status(index
, &status
, &handle
);
666 if (!(status
& ACPI_EVENT_FLAG_HAS_HANDLER
)) {
667 printk(KERN_WARNING PREFIX
668 "Can not change Invalid GPE/Fixed Event status\n");
672 if (index
< num_gpes
) {
673 if (!strcmp(buf
, "disable\n") &&
674 (status
& ACPI_EVENT_FLAG_ENABLED
))
675 result
= acpi_disable_gpe(handle
, index
);
676 else if (!strcmp(buf
, "enable\n") &&
677 !(status
& ACPI_EVENT_FLAG_ENABLED
))
678 result
= acpi_enable_gpe(handle
, index
);
679 else if (!strcmp(buf
, "clear\n") &&
680 (status
& ACPI_EVENT_FLAG_STATUS_SET
))
681 result
= acpi_clear_gpe(handle
, index
);
682 else if (!strcmp(buf
, "mask\n"))
683 result
= acpi_mask_gpe(handle
, index
, TRUE
);
684 else if (!strcmp(buf
, "unmask\n"))
685 result
= acpi_mask_gpe(handle
, index
, FALSE
);
686 else if (!kstrtoul(buf
, 0, &tmp
))
687 all_counters
[index
].count
= tmp
;
690 } else if (index
< num_gpes
+ ACPI_NUM_FIXED_EVENTS
) {
691 int event
= index
- num_gpes
;
692 if (!strcmp(buf
, "disable\n") &&
693 (status
& ACPI_EVENT_FLAG_ENABLE_SET
))
694 result
= acpi_disable_event(event
, ACPI_NOT_ISR
);
695 else if (!strcmp(buf
, "enable\n") &&
696 !(status
& ACPI_EVENT_FLAG_ENABLE_SET
))
697 result
= acpi_enable_event(event
, ACPI_NOT_ISR
);
698 else if (!strcmp(buf
, "clear\n") &&
699 (status
& ACPI_EVENT_FLAG_STATUS_SET
))
700 result
= acpi_clear_event(event
);
701 else if (!kstrtoul(buf
, 0, &tmp
))
702 all_counters
[index
].count
= tmp
;
706 all_counters
[index
].count
= strtoul(buf
, NULL
, 0);
708 if (ACPI_FAILURE(result
))
711 return result
? result
: size
;
715 * A Quirk Mechanism for GPE Flooding Prevention:
717 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
718 * flooding typically cannot be detected and automatically prevented by
719 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
720 * the AML tables. This normally indicates a feature gap in Linux, thus
721 * instead of providing endless quirk tables, we provide a boot parameter
722 * for those who want this quirk. For example, if the users want to prevent
723 * the GPE flooding for GPE 00, they need to specify the following boot
726 * The masking status can be modified by the following runtime controlling
728 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
732 * Currently, the GPE flooding prevention only supports to mask the GPEs
733 * numbered from 00 to 7f.
735 #define ACPI_MASKABLE_GPE_MAX 0x80
737 static u64 __initdata acpi_masked_gpes
;
739 static int __init
acpi_gpe_set_masked_gpes(char *val
)
743 if (kstrtou8(val
, 0, &gpe
) || gpe
> ACPI_MASKABLE_GPE_MAX
)
745 acpi_masked_gpes
|= ((u64
)1<<gpe
);
749 __setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes
);
751 void __init
acpi_gpe_apply_masked_gpes(void)
758 gpe
< min_t(u8
, ACPI_MASKABLE_GPE_MAX
, acpi_current_gpe_count
);
760 if (acpi_masked_gpes
& ((u64
)1<<gpe
)) {
761 status
= acpi_get_gpe_device(gpe
, &handle
);
762 if (ACPI_SUCCESS(status
)) {
763 pr_info("Masking GPE 0x%x.\n", gpe
);
764 (void)acpi_mask_gpe(handle
, gpe
, TRUE
);
770 void acpi_irq_stats_init(void)
778 num_gpes
= acpi_current_gpe_count
;
779 num_counters
= num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ NUM_COUNTERS_EXTRA
;
781 all_attrs
= kzalloc(sizeof(struct attribute
*) * (num_counters
+ 1),
783 if (all_attrs
== NULL
)
786 all_counters
= kzalloc(sizeof(struct event_counter
) * (num_counters
),
788 if (all_counters
== NULL
)
791 status
= acpi_install_global_event_handler(acpi_global_event_handler
, NULL
);
792 if (ACPI_FAILURE(status
))
795 counter_attrs
= kzalloc(sizeof(struct kobj_attribute
) * (num_counters
),
797 if (counter_attrs
== NULL
)
800 for (i
= 0; i
< num_counters
; ++i
) {
805 sprintf(buffer
, "gpe%02X", i
);
806 else if (i
== num_gpes
+ ACPI_EVENT_PMTIMER
)
807 sprintf(buffer
, "ff_pmtimer");
808 else if (i
== num_gpes
+ ACPI_EVENT_GLOBAL
)
809 sprintf(buffer
, "ff_gbl_lock");
810 else if (i
== num_gpes
+ ACPI_EVENT_POWER_BUTTON
)
811 sprintf(buffer
, "ff_pwr_btn");
812 else if (i
== num_gpes
+ ACPI_EVENT_SLEEP_BUTTON
)
813 sprintf(buffer
, "ff_slp_btn");
814 else if (i
== num_gpes
+ ACPI_EVENT_RTC
)
815 sprintf(buffer
, "ff_rt_clk");
816 else if (i
== num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_GPE
)
817 sprintf(buffer
, "gpe_all");
818 else if (i
== num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_SCI
)
819 sprintf(buffer
, "sci");
820 else if (i
== num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_SCI_NOT
)
821 sprintf(buffer
, "sci_not");
822 else if (i
== num_gpes
+ ACPI_NUM_FIXED_EVENTS
+ COUNT_ERROR
)
823 sprintf(buffer
, "error");
825 sprintf(buffer
, "bug%02X", i
);
827 name
= kstrdup(buffer
, GFP_KERNEL
);
831 sysfs_attr_init(&counter_attrs
[i
].attr
);
832 counter_attrs
[i
].attr
.name
= name
;
833 counter_attrs
[i
].attr
.mode
= 0644;
834 counter_attrs
[i
].show
= counter_show
;
835 counter_attrs
[i
].store
= counter_set
;
837 all_attrs
[i
] = &counter_attrs
[i
].attr
;
840 interrupt_stats_attr_group
.attrs
= all_attrs
;
841 if (!sysfs_create_group(acpi_kobj
, &interrupt_stats_attr_group
))
845 delete_gpe_attr_array();
849 static void __exit
interrupt_stats_exit(void)
851 sysfs_remove_group(acpi_kobj
, &interrupt_stats_attr_group
);
853 delete_gpe_attr_array();
859 acpi_show_profile(struct device
*dev
, struct device_attribute
*attr
,
862 return sprintf(buf
, "%d\n", acpi_gbl_FADT
.preferred_profile
);
865 static const struct device_attribute pm_profile_attr
=
866 __ATTR(pm_profile
, S_IRUGO
, acpi_show_profile
, NULL
);
868 static ssize_t
hotplug_enabled_show(struct kobject
*kobj
,
869 struct kobj_attribute
*attr
, char *buf
)
871 struct acpi_hotplug_profile
*hotplug
= to_acpi_hotplug_profile(kobj
);
873 return sprintf(buf
, "%d\n", hotplug
->enabled
);
876 static ssize_t
hotplug_enabled_store(struct kobject
*kobj
,
877 struct kobj_attribute
*attr
,
878 const char *buf
, size_t size
)
880 struct acpi_hotplug_profile
*hotplug
= to_acpi_hotplug_profile(kobj
);
883 if (kstrtouint(buf
, 10, &val
) || val
> 1)
886 acpi_scan_hotplug_enabled(hotplug
, val
);
890 static struct kobj_attribute hotplug_enabled_attr
=
891 __ATTR(enabled
, S_IRUGO
| S_IWUSR
, hotplug_enabled_show
,
892 hotplug_enabled_store
);
894 static struct attribute
*hotplug_profile_attrs
[] = {
895 &hotplug_enabled_attr
.attr
,
899 static struct kobj_type acpi_hotplug_profile_ktype
= {
900 .sysfs_ops
= &kobj_sysfs_ops
,
901 .default_attrs
= hotplug_profile_attrs
,
904 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile
*hotplug
,
912 error
= kobject_init_and_add(&hotplug
->kobj
,
913 &acpi_hotplug_profile_ktype
, hotplug_kobj
, "%s", name
);
917 kobject_uevent(&hotplug
->kobj
, KOBJ_ADD
);
921 pr_err(PREFIX
"Unable to add hotplug profile '%s'\n", name
);
924 static ssize_t
force_remove_show(struct kobject
*kobj
,
925 struct kobj_attribute
*attr
, char *buf
)
927 return sprintf(buf
, "%d\n", 0);
930 static ssize_t
force_remove_store(struct kobject
*kobj
,
931 struct kobj_attribute
*attr
,
932 const char *buf
, size_t size
)
937 ret
= strtobool(buf
, &val
);
942 pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
948 static const struct kobj_attribute force_remove_attr
=
949 __ATTR(force_remove
, S_IRUGO
| S_IWUSR
, force_remove_show
,
952 int __init
acpi_sysfs_init(void)
956 result
= acpi_tables_sysfs_init();
960 hotplug_kobj
= kobject_create_and_add("hotplug", acpi_kobj
);
964 result
= sysfs_create_file(hotplug_kobj
, &force_remove_attr
.attr
);
968 result
= sysfs_create_file(acpi_kobj
, &pm_profile_attr
.attr
);