]>
Commit | Line | Data |
---|---|---|
d334a491 HY |
1 | /* |
2 | * APEI Generic Hardware Error Source support | |
3 | * | |
4 | * Generic Hardware Error Source provides a way to report platform | |
5 | * hardware errors (such as that from chipset). It works in so called | |
6 | * "Firmware First" mode, that is, hardware errors are reported to | |
7 | * firmware firstly, then reported to Linux by firmware. This way, | |
8 | * some non-standard hardware error registers or non-standard hardware | |
9 | * link can be checked by firmware to produce more hardware error | |
10 | * information for Linux. | |
11 | * | |
12 | * For more information about Generic Hardware Error Source, please | |
13 | * refer to ACPI Specification version 4.0, section 17.3.2.6 | |
14 | * | |
67eb2e99 | 15 | * Copyright 2010,2011 Intel Corp. |
d334a491 HY |
16 | * Author: Huang Ying <ying.huang@intel.com> |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License version | |
20 | * 2 as published by the Free Software Foundation; | |
21 | * | |
22 | * This program is distributed in the hope that it will be useful, | |
23 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
24 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
25 | * GNU General Public License for more details. | |
d334a491 HY |
26 | */ |
27 | ||
28 | #include <linux/kernel.h> | |
020bf066 | 29 | #include <linux/moduleparam.h> |
d334a491 HY |
30 | #include <linux/init.h> |
31 | #include <linux/acpi.h> | |
32 | #include <linux/io.h> | |
33 | #include <linux/interrupt.h> | |
81e88fdc | 34 | #include <linux/timer.h> |
d334a491 HY |
35 | #include <linux/cper.h> |
36 | #include <linux/kdebug.h> | |
7ad6e943 HY |
37 | #include <linux/platform_device.h> |
38 | #include <linux/mutex.h> | |
32c361f5 | 39 | #include <linux/ratelimit.h> |
81e88fdc | 40 | #include <linux/vmalloc.h> |
67eb2e99 HY |
41 | #include <linux/irq_work.h> |
42 | #include <linux/llist.h> | |
43 | #include <linux/genalloc.h> | |
a654e5ee HY |
44 | #include <linux/pci.h> |
45 | #include <linux/aer.h> | |
44a69f61 | 46 | #include <linux/nmi.h> |
e6017571 | 47 | #include <linux/sched/clock.h> |
297b64c7 TB |
48 | #include <linux/uuid.h> |
49 | #include <linux/ras.h> | |
40e06415 | 50 | |
42aa5604 | 51 | #include <acpi/actbl1.h> |
40e06415 | 52 | #include <acpi/ghes.h> |
9dae3d0d | 53 | #include <acpi/apei.h> |
81e88fdc | 54 | #include <asm/tlbflush.h> |
297b64c7 | 55 | #include <ras/ras_event.h> |
d334a491 HY |
56 | |
57 | #include "apei-internal.h" | |
58 | ||
59 | #define GHES_PFX "GHES: " | |
60 | ||
61 | #define GHES_ESTATUS_MAX_SIZE 65536 | |
67eb2e99 HY |
62 | #define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536 |
63 | ||
64 | #define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3 | |
65 | ||
152cef40 HY |
66 | /* This is just an estimation for memory pool allocation */ |
67 | #define GHES_ESTATUS_CACHE_AVG_SIZE 512 | |
68 | ||
69 | #define GHES_ESTATUS_CACHES_SIZE 4 | |
70 | ||
70cb6e1d | 71 | #define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL |
152cef40 HY |
72 | /* Prevent too many caches are allocated because of RCU */ |
73 | #define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2) | |
74 | ||
75 | #define GHES_ESTATUS_CACHE_LEN(estatus_len) \ | |
76 | (sizeof(struct ghes_estatus_cache) + (estatus_len)) | |
77 | #define GHES_ESTATUS_FROM_CACHE(estatus_cache) \ | |
0a00fd5e | 78 | ((struct acpi_hest_generic_status *) \ |
152cef40 HY |
79 | ((struct ghes_estatus_cache *)(estatus_cache) + 1)) |
80 | ||
67eb2e99 HY |
81 | #define GHES_ESTATUS_NODE_LEN(estatus_len) \ |
82 | (sizeof(struct ghes_estatus_node) + (estatus_len)) | |
88f074f4 | 83 | #define GHES_ESTATUS_FROM_NODE(estatus_node) \ |
0a00fd5e | 84 | ((struct acpi_hest_generic_status *) \ |
67eb2e99 | 85 | ((struct ghes_estatus_node *)(estatus_node) + 1)) |
d334a491 | 86 | |
42aa5604 TB |
87 | static inline bool is_hest_type_generic_v2(struct ghes *ghes) |
88 | { | |
89 | return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; | |
90 | } | |
91 | ||
020bf066 PG |
92 | /* |
93 | * This driver isn't really modular, however for the time being, | |
94 | * continuing to use module_param is the easiest way to remain | |
95 | * compatible with existing boot arg use cases. | |
96 | */ | |
90ab5ee9 | 97 | bool ghes_disable; |
b6a95016 HY |
98 | module_param_named(disable, ghes_disable, bool, 0); |
99 | ||
d334a491 | 100 | /* |
81e88fdc HY |
101 | * All error sources notified with SCI shares one notifier function, |
102 | * so they need to be linked and checked one by one. This is applied | |
103 | * to NMI too. | |
d334a491 | 104 | * |
81e88fdc HY |
105 | * RCU is used for these lists, so ghes_list_mutex is only used for |
106 | * list changing, not for traversing. | |
d334a491 HY |
107 | */ |
108 | static LIST_HEAD(ghes_sci); | |
7ad6e943 | 109 | static DEFINE_MUTEX(ghes_list_mutex); |
d334a491 | 110 | |
81e88fdc HY |
111 | /* |
112 | * Because the memory area used to transfer hardware error information | |
113 | * from BIOS to Linux can be determined only in NMI, IRQ or timer | |
114 | * handler, but general ioremap can not be used in atomic context, so | |
115 | * a special version of atomic ioremap is implemented for that. | |
116 | */ | |
117 | ||
118 | /* | |
594c7255 TN |
119 | * Two virtual pages are used, one for IRQ/PROCESS context, the other for |
120 | * NMI context (optionally). | |
81e88fdc | 121 | */ |
594c7255 | 122 | #define GHES_IOREMAP_PAGES 2 |
594c7255 TN |
123 | #define GHES_IOREMAP_IRQ_PAGE(base) (base) |
124 | #define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE) | |
81e88fdc HY |
125 | |
126 | /* virtual memory area for atomic ioremap */ | |
127 | static struct vm_struct *ghes_ioremap_area; | |
128 | /* | |
129 | * These 2 spinlock is used to prevent atomic ioremap virtual memory | |
130 | * area from being mapped simultaneously. | |
131 | */ | |
132 | static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); | |
133 | static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); | |
134 | ||
67eb2e99 HY |
135 | static struct gen_pool *ghes_estatus_pool; |
136 | static unsigned long ghes_estatus_pool_size_request; | |
67eb2e99 | 137 | |
8f7c31f6 | 138 | static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; |
152cef40 HY |
139 | static atomic_t ghes_estatus_cache_alloced; |
140 | ||
2fb5853e JZZ |
141 | static int ghes_panic_timeout __read_mostly = 30; |
142 | ||
81e88fdc HY |
143 | static int ghes_ioremap_init(void) |
144 | { | |
145 | ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, | |
146 | VM_IOREMAP, VMALLOC_START, VMALLOC_END); | |
147 | if (!ghes_ioremap_area) { | |
148 | pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); | |
149 | return -ENOMEM; | |
150 | } | |
151 | ||
152 | return 0; | |
153 | } | |
154 | ||
/* Release the virtual area reserved by ghes_ioremap_init(). */
static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
159 | ||
/*
 * Map @pfn at the page reserved for NMI context.  Caller must hold
 * ghes_ioremap_lock_nmi; there is exactly one NMI page, so concurrent
 * mappings would clobber each other.
 */
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;
	phys_addr_t paddr;
	pgprot_t prot;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);

	paddr = pfn << PAGE_SHIFT;
	/* Use the memory attributes firmware reported for this region. */
	prot = arch_apei_get_mem_attribute(paddr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);

	return (void __iomem *)vaddr;
}
174 | ||
/*
 * Map @pfn at the page reserved for IRQ/process context.  Caller must
 * hold ghes_ioremap_lock_irq (with IRQs disabled).
 */
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr, paddr;
	pgprot_t prot;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);

	paddr = pfn << PAGE_SHIFT;
	/* Use the memory attributes firmware reported for this region. */
	prot = arch_apei_get_mem_attribute(paddr);

	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);

	return (void __iomem *)vaddr;
}
189 | ||
190 | static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) | |
191 | { | |
192 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | |
193 | void *base = ghes_ioremap_area->addr; | |
194 | ||
195 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); | |
196 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | |
594c7255 | 197 | arch_apei_flush_tlb_one(vaddr); |
81e88fdc HY |
198 | } |
199 | ||
200 | static void ghes_iounmap_irq(void __iomem *vaddr_ptr) | |
201 | { | |
202 | unsigned long vaddr = (unsigned long __force)vaddr_ptr; | |
203 | void *base = ghes_ioremap_area->addr; | |
204 | ||
205 | BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); | |
206 | unmap_kernel_range_noflush(vaddr, PAGE_SIZE); | |
594c7255 | 207 | arch_apei_flush_tlb_one(vaddr); |
81e88fdc HY |
208 | } |
209 | ||
67eb2e99 HY |
/* Create the lock-less memory pool used for NMI-safe estatus storage. */
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}
217 | ||
/*
 * gen_pool_for_each_chunk() callback: each pool chunk is exactly one
 * page (see ghes_estatus_pool_expand()), so free it as such.
 */
static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}
224 | ||
/* Free every page backing the pool, then destroy the pool itself. */
static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
231 | ||
232 | static int ghes_estatus_pool_expand(unsigned long len) | |
233 | { | |
234 | unsigned long i, pages, size, addr; | |
235 | int ret; | |
236 | ||
237 | ghes_estatus_pool_size_request += PAGE_ALIGN(len); | |
238 | size = gen_pool_size(ghes_estatus_pool); | |
239 | if (size >= ghes_estatus_pool_size_request) | |
240 | return 0; | |
241 | pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE; | |
242 | for (i = 0; i < pages; i++) { | |
243 | addr = __get_free_page(GFP_KERNEL); | |
244 | if (!addr) | |
245 | return -ENOMEM; | |
246 | ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1); | |
247 | if (ret) | |
248 | return ret; | |
249 | } | |
250 | ||
251 | return 0; | |
252 | } | |
253 | ||
42aa5604 TB |
254 | static int map_gen_v2(struct ghes *ghes) |
255 | { | |
256 | return apei_map_generic_address(&ghes->generic_v2->read_ack_register); | |
257 | } | |
258 | ||
259 | static void unmap_gen_v2(struct ghes *ghes) | |
260 | { | |
261 | apei_unmap_generic_address(&ghes->generic_v2->read_ack_register); | |
262 | } | |
263 | ||
d334a491 HY |
/*
 * Allocate and initialize a struct ghes for one HEST generic error
 * source: map the read-ack register (GHESv2 only) and the error status
 * address, and allocate a bounce buffer sized from the source's
 * error_block_length (clamped to GHES_ESTATUS_MAX_SIZE).
 *
 * Returns the new ghes, or an ERR_PTR on failure.
 */
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);

	ghes->generic = generic;
	/* GHESv2 sources additionally carry a read-ack register to map. */
	if (is_hest_type_generic_v2(ghes)) {
		rc = map_gen_v2(ghes);
		if (rc)
			goto err_free;
	}

	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_unmap_read_ack_addr;
	error_block_length = generic->error_block_length;
	/* Clamp a firmware-provided length that is implausibly large. */
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap_status_addr;
	}

	return ghes;

err_unmap_status_addr:
	apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}
309 | ||
/* Tear down a struct ghes: free the bounce buffer and unmap registers. */
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
}
317 | ||
d334a491 HY |
318 | static inline int ghes_severity(int severity) |
319 | { | |
320 | switch (severity) { | |
ad4ecef2 HY |
321 | case CPER_SEV_INFORMATIONAL: |
322 | return GHES_SEV_NO; | |
323 | case CPER_SEV_CORRECTED: | |
324 | return GHES_SEV_CORRECTED; | |
325 | case CPER_SEV_RECOVERABLE: | |
326 | return GHES_SEV_RECOVERABLE; | |
327 | case CPER_SEV_FATAL: | |
328 | return GHES_SEV_PANIC; | |
d334a491 | 329 | default: |
25985edc | 330 | /* Unknown, go panic */ |
ad4ecef2 | 331 | return GHES_SEV_PANIC; |
d334a491 HY |
332 | } |
333 | } | |
334 | ||
81e88fdc HY |
/*
 * Copy @len bytes between @buffer and physical address @paddr, one
 * page-bounded piece at a time, via the reserved atomic ioremap pages.
 * Direction: @from_phys != 0 reads physical memory into @buffer.
 *
 * Safe in NMI, IRQ and process context: NMI uses its own page guarded
 * by a raw spinlock, everything else shares the IRQ page guarded by an
 * irqsave spinlock, so the two paths cannot deadlock each other.
 */
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		/* Copy at most up to the end of the current page. */
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
371 | ||
/*
 * Read the error status block of @ghes from firmware-owned memory into
 * ghes->estatus and validate it.  The fixed-size header is fetched
 * first to learn the total length, then the variable-length remainder.
 *
 * Returns 0 on success, -ENOENT if no error is pending, -EIO on read
 * or validation failure.  @silent suppresses the warnings.
 */
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
				   "Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	/* block_status == 0 means firmware has nothing for us. */
	if (!ghes->estatus->block_status)
		return -ENOENT;

	/* Remember where to clear block_status once we're done. */
	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	/* Header checked out: now fetch the payload it announced. */
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}
419 | ||
/*
 * Tell firmware the error status block was consumed by zeroing
 * block_status in the firmware-owned buffer — but only if we actually
 * read one (GHES_TO_CLEAR tracks that).
 */
static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}
429 | ||
0a00fd5e | 430 | static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev) |
cf870c70 NR |
431 | { |
432 | #ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE | |
433 | unsigned long pfn; | |
ca104edc | 434 | int flags = -1; |
cf870c70 | 435 | int sec_sev = ghes_severity(gdata->error_severity); |
bbcc2e7b | 436 | struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); |
cf870c70 | 437 | |
ca104edc CG |
438 | if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) |
439 | return; | |
440 | ||
441 | pfn = mem_err->physical_addr >> PAGE_SHIFT; | |
442 | if (!pfn_valid(pfn)) { | |
443 | pr_warn_ratelimited(FW_WARN GHES_PFX | |
444 | "Invalid address in generic error data: %#llx\n", | |
445 | mem_err->physical_addr); | |
446 | return; | |
cf870c70 | 447 | } |
ca104edc CG |
448 | |
449 | /* iff following two events can be handled properly by now */ | |
450 | if (sec_sev == GHES_SEV_CORRECTED && | |
451 | (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED)) | |
452 | flags = MF_SOFT_OFFLINE; | |
453 | if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) | |
454 | flags = 0; | |
455 | ||
456 | if (flags != -1) | |
457 | memory_failure_queue(pfn, 0, flags); | |
cf870c70 NR |
458 | #endif |
459 | } | |
460 | ||
/*
 * Walk every section of a validated error status block and dispatch it
 * to the matching handler: platform memory errors to EDAC / arch
 * reporting / memory-failure, PCIe errors to AER recovery (when
 * compiled in), and everything else to the non-standard event log.
 */
static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;
	guid_t *sec_type;
	guid_t *fru_id = &NULL_UUID_LE;
	char *fru_text = "";

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_type = (guid_t *)gdata->section_type;
		sec_sev = ghes_severity(gdata->error_severity);
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;

		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;

		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

			ghes_edac_report_mem_error(ghes, sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

			/* Only queue AER recovery when device and AER info are valid. */
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(gdata->error_severity);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}

		}
#endif
		else {
			void *err = acpi_hest_get_payload(gdata);

			log_non_standard_event(sec_type, fru_id, fru_text,
					       sec_sev, err,
					       gdata->error_data_length);
		}
	}
}
d334a491 | 529 | |
67eb2e99 HY |
/*
 * Print one error status block to the kernel log with a sequence-
 * numbered HW_ERR prefix so multi-line output can be correlated.  If
 * @pfx is NULL, the log level is derived from the record's severity
 * (warning for corrected or better, error otherwise).
 */
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}
551 | ||
152cef40 HY |
552 | static int ghes_print_estatus(const char *pfx, |
553 | const struct acpi_hest_generic *generic, | |
0a00fd5e | 554 | const struct acpi_hest_generic_status *estatus) |
5588340d HY |
555 | { |
556 | /* Not more than 2 messages every 5 seconds */ | |
67eb2e99 HY |
557 | static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2); |
558 | static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2); | |
559 | struct ratelimit_state *ratelimit; | |
5588340d | 560 | |
67eb2e99 HY |
561 | if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED) |
562 | ratelimit = &ratelimit_corrected; | |
563 | else | |
564 | ratelimit = &ratelimit_uncorrected; | |
152cef40 | 565 | if (__ratelimit(ratelimit)) { |
67eb2e99 | 566 | __ghes_print_estatus(pfx, generic, estatus); |
152cef40 HY |
567 | return 1; |
568 | } | |
569 | return 0; | |
570 | } | |
571 | ||
572 | /* | |
573 | * GHES error status reporting throttle, to report more kinds of | |
574 | * errors, instead of just most frequently occurred errors. | |
575 | */ | |
0a00fd5e | 576 | static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus) |
152cef40 HY |
577 | { |
578 | u32 len; | |
579 | int i, cached = 0; | |
580 | unsigned long long now; | |
581 | struct ghes_estatus_cache *cache; | |
0a00fd5e | 582 | struct acpi_hest_generic_status *cache_estatus; |
152cef40 | 583 | |
88f074f4 | 584 | len = cper_estatus_len(estatus); |
152cef40 HY |
585 | rcu_read_lock(); |
586 | for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) { | |
587 | cache = rcu_dereference(ghes_estatus_caches[i]); | |
588 | if (cache == NULL) | |
589 | continue; | |
590 | if (len != cache->estatus_len) | |
591 | continue; | |
592 | cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); | |
593 | if (memcmp(estatus, cache_estatus, len)) | |
594 | continue; | |
595 | atomic_inc(&cache->count); | |
596 | now = sched_clock(); | |
597 | if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC) | |
598 | cached = 1; | |
599 | break; | |
600 | } | |
601 | rcu_read_unlock(); | |
602 | return cached; | |
603 | } | |
604 | ||
/*
 * Allocate a throttle-cache entry from the NMI-safe estatus pool and
 * copy @estatus into it.  Total live entries are bounded by
 * GHES_ESTATUS_CACHE_ALLOCED_MAX so RCU-deferred frees cannot exhaust
 * the pool.  Returns NULL on failure.
 */
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	/* The estatus copy lives immediately after the cache header. */
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}
634 | ||
/* Return a cache entry's memory to the pool and drop the live count. */
static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}
644 | ||
645 | static void ghes_estatus_cache_rcu_free(struct rcu_head *head) | |
646 | { | |
647 | struct ghes_estatus_cache *cache; | |
648 | ||
649 | cache = container_of(head, struct ghes_estatus_cache, rcu); | |
650 | ghes_estatus_cache_free(cache); | |
651 | } | |
652 | ||
/*
 * Insert @estatus into the throttle cache.  Eviction picks, in order:
 * an empty slot, an expired entry, or the entry with the longest
 * average period between hits.  Publication uses cmpxchg + RCU so
 * concurrent readers never observe a half-written entry; the loser of
 * a cmpxchg race simply frees its new entry.
 */
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
698 | ||
42aa5604 TB |
/*
 * Write the GHESv2 Read Ack register to tell firmware the error status
 * block has been consumed and its memory may be reused: keep the bits
 * selected by read_ack_preserve, then OR in the read_ack_write value.
 */
static int ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
{
	int rc;
	u64 val = 0;

	rc = apei_read(&val, &gv2->read_ack_register);
	if (rc)
		return rc;

	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
	val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;

	return apei_write(val, &gv2->read_ack_register);
}
713 | ||
2fb5853e JZZ |
/*
 * Fatal error path: print the record at emergency level and panic.
 * A panic timeout is forced so the machine reboots and the error
 * makes it into the persisted log.
 */
static void __ghes_panic(struct ghes *ghes)
{
	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);

	/* reboot to log the error! */
	if (!panic_timeout)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}
723 | ||
d334a491 HY |
/*
 * Process one error source (poll/IRQ/SCI path): read and validate the
 * status block, panic on fatal severity, throttle + print, dispatch
 * the sections, then acknowledge (GHESv2 only) and clear the block.
 * Returns 0 on success or a negative error code.
 */
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;

	if (ghes_severity(ghes->estatus->error_severity) >= GHES_SEV_PANIC) {
		__ghes_panic(ghes);
	}

	/* Only print — and cache — records the throttle hasn't seen recently. */
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);

	/*
	 * GHESv2 type HEST entries introduce support for error acknowledgment,
	 * so only acknowledge the error if this support is present.
	 */
	if (is_hest_type_generic_v2(ghes)) {
		rc = ghes_ack_error(ghes->generic_v2);
		if (rc)
			/*
			 * NOTE(review): returning here skips
			 * ghes_clear_estatus() — confirm that is intended
			 * when the ack write fails.
			 */
			return rc;
	}
out:
	ghes_clear_estatus(ghes);
	return rc;
}
755 | ||
81e88fdc HY |
/*
 * (Re-)arm the polling timer from the source's poll interval.  A zero
 * interval from firmware effectively disables the source, with a
 * warning.  Expiry is rounded to reduce timer wakeups.
 */
static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}
770 | ||
771 | static void ghes_poll_func(unsigned long data) | |
772 | { | |
773 | struct ghes *ghes = (void *)data; | |
774 | ||
775 | ghes_proc(ghes); | |
776 | if (!(ghes->flags & GHES_EXITING)) | |
777 | ghes_add_timer(ghes); | |
778 | } | |
779 | ||
780 | static irqreturn_t ghes_irq_func(int irq, void *data) | |
781 | { | |
782 | struct ghes *ghes = data; | |
783 | int rc; | |
784 | ||
785 | rc = ghes_proc(ghes); | |
786 | if (rc) | |
787 | return IRQ_NONE; | |
788 | ||
789 | return IRQ_HANDLED; | |
790 | } | |
791 | ||
d334a491 HY |
/*
 * SCI notifier: all SCI-notified sources share this one callback, so
 * walk the whole list under RCU and report NOTIFY_OK if any source had
 * a pending error.
 */
static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}
807 | ||
44a69f61 TN |
808 | static struct notifier_block ghes_notifier_sci = { |
809 | .notifier_call = ghes_notify_sci, | |
810 | }; | |
811 | ||
7edda088 TB |
812 | #ifdef CONFIG_ACPI_APEI_SEA |
813 | static LIST_HEAD(ghes_sea); | |
814 | ||
/*
 * Process every SEA-notified error source.  Called from the ARM
 * synchronous-external-abort handling path.
 */
void ghes_notify_sea(void)
{
	struct ghes *ghes;

	/*
	 * synchronize_rcu() will wait for nmi_exit(), so no need to
	 * rcu_read_lock().
	 */
	list_for_each_entry_rcu(ghes, &ghes_sea, list) {
		ghes_proc(ghes);
	}
}
827 | ||
/* Register a source on the SEA list; the mutex serializes list updates only. */
static void ghes_sea_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_add_rcu(&ghes->list, &ghes_sea);
	mutex_unlock(&ghes_list_mutex);
}
834 | ||
/* Unregister from the SEA list, then wait out any in-flight RCU readers. */
static void ghes_sea_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	mutex_unlock(&ghes_list_mutex);
	synchronize_rcu();
}
842 | #else /* CONFIG_ACPI_APEI_SEA */ | |
/* Stub when CONFIG_ACPI_APEI_SEA is off: SEA registration is an error. */
static inline void ghes_sea_add(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n",
	       ghes->generic->header.source_id);
}
848 | ||
849 | static inline void ghes_sea_remove(struct ghes *ghes) | |
850 | { | |
851 | pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n", | |
852 | ghes->generic->header.source_id); | |
853 | } | |
854 | #endif /* CONFIG_ACPI_APEI_SEA */ | |
855 | ||
44a69f61 TN |
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * printk is not safe in NMI context. So in NMI handler, we allocate
 * required memory from lock-less memory allocator
 * (ghes_estatus_pool), save estatus into it, put them into lock-less
 * list (ghes_estatus_llist), then delay printk into IRQ context via
 * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
 * required pool size by all NMI error source.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

/*
 * NMI may be triggered on any CPU, so ghes_in_nmi is used for
 * having only one concurrent reader.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

/* Error sources using the NMI notification type. */
static LIST_HEAD(ghes_nmi);
875 | ||
67eb2e99 HY |
876 | static void ghes_proc_in_irq(struct irq_work *irq_work) |
877 | { | |
46d12f0b | 878 | struct llist_node *llnode, *next; |
67eb2e99 | 879 | struct ghes_estatus_node *estatus_node; |
152cef40 | 880 | struct acpi_hest_generic *generic; |
0a00fd5e | 881 | struct acpi_hest_generic_status *estatus; |
67eb2e99 HY |
882 | u32 len, node_len; |
883 | ||
46d12f0b | 884 | llnode = llist_del_all(&ghes_estatus_llist); |
67eb2e99 HY |
885 | /* |
886 | * Because the time order of estatus in list is reversed, | |
887 | * revert it back to proper order. | |
888 | */ | |
8d21d4c9 | 889 | llnode = llist_reverse_order(llnode); |
67eb2e99 HY |
890 | while (llnode) { |
891 | next = llnode->next; | |
892 | estatus_node = llist_entry(llnode, struct ghes_estatus_node, | |
893 | llnode); | |
894 | estatus = GHES_ESTATUS_FROM_NODE(estatus_node); | |
88f074f4 | 895 | len = cper_estatus_len(estatus); |
67eb2e99 | 896 | node_len = GHES_ESTATUS_NODE_LEN(len); |
21480547 | 897 | ghes_do_proc(estatus_node->ghes, estatus); |
152cef40 HY |
898 | if (!ghes_estatus_cached(estatus)) { |
899 | generic = estatus_node->generic; | |
900 | if (ghes_print_estatus(NULL, generic, estatus)) | |
901 | ghes_estatus_cache_add(generic, estatus); | |
902 | } | |
67eb2e99 HY |
903 | gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, |
904 | node_len); | |
905 | llnode = next; | |
906 | } | |
907 | } | |
908 | ||
46d12f0b HY |
909 | static void ghes_print_queued_estatus(void) |
910 | { | |
911 | struct llist_node *llnode; | |
912 | struct ghes_estatus_node *estatus_node; | |
913 | struct acpi_hest_generic *generic; | |
0a00fd5e | 914 | struct acpi_hest_generic_status *estatus; |
46d12f0b HY |
915 | u32 len, node_len; |
916 | ||
917 | llnode = llist_del_all(&ghes_estatus_llist); | |
918 | /* | |
919 | * Because the time order of estatus in list is reversed, | |
920 | * revert it back to proper order. | |
921 | */ | |
8d21d4c9 | 922 | llnode = llist_reverse_order(llnode); |
46d12f0b HY |
923 | while (llnode) { |
924 | estatus_node = llist_entry(llnode, struct ghes_estatus_node, | |
925 | llnode); | |
926 | estatus = GHES_ESTATUS_FROM_NODE(estatus_node); | |
88f074f4 | 927 | len = cper_estatus_len(estatus); |
46d12f0b HY |
928 | node_len = GHES_ESTATUS_NODE_LEN(len); |
929 | generic = estatus_node->generic; | |
930 | ghes_print_estatus(NULL, generic, estatus); | |
931 | llnode = llnode->next; | |
932 | } | |
933 | } | |
934 | ||
11568496 BP |
/* Save estatus for further processing in IRQ context */
static void __process_error(struct ghes *ghes)
{
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	struct ghes_estatus_node *node;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	/* Already reported recently - nothing new worth queueing. */
	if (ghes_estatus_cached(ghes->estatus))
		return;

	len = cper_estatus_len(ghes->estatus);
	node_len = GHES_ESTATUS_NODE_LEN(len);

	/* NMI-safe lock-less allocation; drop the record when the pool is dry. */
	node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
	if (!node)
		return;

	node->ghes = ghes;
	node->generic = ghes->generic;
	estatus = GHES_ESTATUS_FROM_NODE(node);
	memcpy(estatus, ghes->estatus, len);
	llist_add(&node->llnode, &ghes_estatus_llist);
#endif
}
960 | ||
/*
 * NMI handler for NMI-notified GHES error sources.  Runs in NMI context:
 * no sleeping, no printk - records are queued and real processing is
 * deferred to IRQ context via ghes_proc_irq_work.
 */
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes;
	int sev, ret = NMI_DONE;

	/* Allow only one CPU into the handler at a time. */
	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			/* No (valid) record from this source - skip it. */
			ghes_clear_estatus(ghes);
			continue;
		} else {
			ret = NMI_HANDLED;
		}

		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev >= GHES_SEV_PANIC) {
			/* Flush previously queued records, then go down. */
			oops_begin();
			ghes_print_queued_estatus();
			__ghes_panic(ghes);
		}

		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;

		/* Queue the record; printing happens later in IRQ context. */
		__process_error(ghes);
		ghes_clear_estatus(ghes);
	}

#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	if (ret == NMI_HANDLED)
		irq_work_queue(&ghes_proc_irq_work);
#endif
	atomic_dec(&ghes_in_nmi);
	return ret;
}
998 | ||
67eb2e99 HY |
999 | static unsigned long ghes_esource_prealloc_size( |
1000 | const struct acpi_hest_generic *generic) | |
1001 | { | |
1002 | unsigned long block_length, prealloc_records, prealloc_size; | |
1003 | ||
1004 | block_length = min_t(unsigned long, generic->error_block_length, | |
1005 | GHES_ESTATUS_MAX_SIZE); | |
1006 | prealloc_records = max_t(unsigned long, | |
1007 | generic->records_to_preallocate, 1); | |
1008 | prealloc_size = min_t(unsigned long, block_length * prealloc_records, | |
1009 | GHES_ESOURCE_PREALLOC_MAX_SIZE); | |
1010 | ||
1011 | return prealloc_size; | |
1012 | } | |
1013 | ||
44a69f61 TN |
/* Reduce the requested estatus-pool size when an NMI source goes away. */
static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}
1018 | ||
/*
 * Register an NMI-notified error source; the shared NMI handler is
 * installed when the first such source appears.
 */
static void ghes_nmi_add(struct ghes *ghes)
{
	unsigned long len;

	/* Grow the lock-less pool before this source can raise NMIs. */
	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_expand(len);
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}
1031 | ||
/*
 * Unregister an NMI-notified error source; the shared NMI handler is
 * removed when the last such source is gone.
 */
static void ghes_nmi_remove(struct ghes *ghes)
{
	unsigned long len;

	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with NMI handler, ghes can only be
	 * freed after NMI handler finishes.
	 */
	synchronize_rcu();
	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_shrink(len);
}
1049 | ||
/* One-time setup of the NMI -> IRQ-context deferral machinery. */
static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
/*
 * NMI notification compiled out: ghes_probe() rejects NMI sources
 * before these can run, so reaching them is a programming error - BUG().
 */
static inline void ghes_nmi_add(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_remove(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_init_cxt(void)
{
}
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1073 | ||
da095fd3 | 1074 | static int ghes_probe(struct platform_device *ghes_dev) |
d334a491 HY |
1075 | { |
1076 | struct acpi_hest_generic *generic; | |
1077 | struct ghes *ghes = NULL; | |
44a69f61 | 1078 | |
7ad6e943 | 1079 | int rc = -EINVAL; |
d334a491 | 1080 | |
1dd6b20e | 1081 | generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; |
d334a491 | 1082 | if (!generic->enabled) |
7ad6e943 | 1083 | return -ENODEV; |
d334a491 | 1084 | |
81e88fdc HY |
1085 | switch (generic->notify.type) { |
1086 | case ACPI_HEST_NOTIFY_POLLED: | |
1087 | case ACPI_HEST_NOTIFY_EXTERNAL: | |
1088 | case ACPI_HEST_NOTIFY_SCI: | |
44a69f61 | 1089 | break; |
7edda088 TB |
1090 | case ACPI_HEST_NOTIFY_SEA: |
1091 | if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) { | |
1092 | pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n", | |
1093 | generic->header.source_id); | |
1094 | rc = -ENOTSUPP; | |
1095 | goto err; | |
1096 | } | |
1097 | break; | |
81e88fdc | 1098 | case ACPI_HEST_NOTIFY_NMI: |
44a69f61 TN |
1099 | if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) { |
1100 | pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n", | |
1101 | generic->header.source_id); | |
1102 | goto err; | |
1103 | } | |
81e88fdc HY |
1104 | break; |
1105 | case ACPI_HEST_NOTIFY_LOCAL: | |
1106 | pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", | |
d334a491 HY |
1107 | generic->header.source_id); |
1108 | goto err; | |
81e88fdc HY |
1109 | default: |
1110 | pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", | |
1111 | generic->notify.type, generic->header.source_id); | |
1112 | goto err; | |
d334a491 | 1113 | } |
81e88fdc HY |
1114 | |
1115 | rc = -EIO; | |
1116 | if (generic->error_block_length < | |
0a00fd5e | 1117 | sizeof(struct acpi_hest_generic_status)) { |
81e88fdc HY |
1118 | pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", |
1119 | generic->error_block_length, | |
d334a491 HY |
1120 | generic->header.source_id); |
1121 | goto err; | |
1122 | } | |
1123 | ghes = ghes_new(generic); | |
1124 | if (IS_ERR(ghes)) { | |
1125 | rc = PTR_ERR(ghes); | |
1126 | ghes = NULL; | |
1127 | goto err; | |
1128 | } | |
21480547 MCC |
1129 | |
1130 | rc = ghes_edac_register(ghes, &ghes_dev->dev); | |
1131 | if (rc < 0) | |
1132 | goto err; | |
1133 | ||
81e88fdc HY |
1134 | switch (generic->notify.type) { |
1135 | case ACPI_HEST_NOTIFY_POLLED: | |
7237c75b GT |
1136 | setup_deferrable_timer(&ghes->timer, ghes_poll_func, |
1137 | (unsigned long)ghes); | |
81e88fdc HY |
1138 | ghes_add_timer(ghes); |
1139 | break; | |
1140 | case ACPI_HEST_NOTIFY_EXTERNAL: | |
1141 | /* External interrupt vector is GSI */ | |
a98d4f64 WY |
1142 | rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq); |
1143 | if (rc) { | |
81e88fdc HY |
1144 | pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", |
1145 | generic->header.source_id); | |
21480547 | 1146 | goto err_edac_unreg; |
81e88fdc | 1147 | } |
a98d4f64 WY |
1148 | rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes); |
1149 | if (rc) { | |
81e88fdc HY |
1150 | pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", |
1151 | generic->header.source_id); | |
21480547 | 1152 | goto err_edac_unreg; |
81e88fdc HY |
1153 | } |
1154 | break; | |
1155 | case ACPI_HEST_NOTIFY_SCI: | |
7ad6e943 | 1156 | mutex_lock(&ghes_list_mutex); |
d334a491 HY |
1157 | if (list_empty(&ghes_sci)) |
1158 | register_acpi_hed_notifier(&ghes_notifier_sci); | |
1159 | list_add_rcu(&ghes->list, &ghes_sci); | |
7ad6e943 | 1160 | mutex_unlock(&ghes_list_mutex); |
81e88fdc | 1161 | break; |
7edda088 TB |
1162 | case ACPI_HEST_NOTIFY_SEA: |
1163 | ghes_sea_add(ghes); | |
1164 | break; | |
81e88fdc | 1165 | case ACPI_HEST_NOTIFY_NMI: |
44a69f61 | 1166 | ghes_nmi_add(ghes); |
81e88fdc HY |
1167 | break; |
1168 | default: | |
1169 | BUG(); | |
d334a491 | 1170 | } |
7ad6e943 | 1171 | platform_set_drvdata(ghes_dev, ghes); |
d334a491 HY |
1172 | |
1173 | return 0; | |
21480547 MCC |
1174 | err_edac_unreg: |
1175 | ghes_edac_unregister(ghes); | |
d334a491 | 1176 | err: |
7ad6e943 | 1177 | if (ghes) { |
d334a491 | 1178 | ghes_fini(ghes); |
7ad6e943 HY |
1179 | kfree(ghes); |
1180 | } | |
d334a491 HY |
1181 | return rc; |
1182 | } | |
1183 | ||
/*
 * Tear down one error source: detach its notification mechanism first
 * so no new events arrive, then free the ghes once all readers finish.
 */
static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	/* Tell in-flight handlers this source is on its way out. */
	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		/* Wait for RCU readers of ghes_sci before freeing ghes. */
		synchronize_rcu();
		break;
	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_remove(ghes);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}
1229 | ||
7ad6e943 HY |
/* Platform driver bound to the "GHES" devices created by the HEST parser. */
static struct platform_driver ghes_platform_driver = {
	.driver = {
		.name = "GHES",
	},
	.probe = ghes_probe,
	.remove = ghes_remove,
};
1237 | ||
d334a491 HY |
1238 | static int __init ghes_init(void) |
1239 | { | |
81e88fdc HY |
1240 | int rc; |
1241 | ||
d334a491 HY |
1242 | if (acpi_disabled) |
1243 | return -ENODEV; | |
1244 | ||
1245 | if (hest_disable) { | |
1246 | pr_info(GHES_PFX "HEST is not enabled!\n"); | |
1247 | return -EINVAL; | |
1248 | } | |
1249 | ||
b6a95016 HY |
1250 | if (ghes_disable) { |
1251 | pr_info(GHES_PFX "GHES is not enabled!\n"); | |
1252 | return -EINVAL; | |
1253 | } | |
1254 | ||
44a69f61 | 1255 | ghes_nmi_init_cxt(); |
67eb2e99 | 1256 | |
81e88fdc HY |
1257 | rc = ghes_ioremap_init(); |
1258 | if (rc) | |
1259 | goto err; | |
1260 | ||
67eb2e99 | 1261 | rc = ghes_estatus_pool_init(); |
81e88fdc HY |
1262 | if (rc) |
1263 | goto err_ioremap_exit; | |
1264 | ||
152cef40 HY |
1265 | rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * |
1266 | GHES_ESTATUS_CACHE_ALLOCED_MAX); | |
1267 | if (rc) | |
1268 | goto err_pool_exit; | |
1269 | ||
67eb2e99 HY |
1270 | rc = platform_driver_register(&ghes_platform_driver); |
1271 | if (rc) | |
1272 | goto err_pool_exit; | |
1273 | ||
9fb0bfe1 HY |
1274 | rc = apei_osc_setup(); |
1275 | if (rc == 0 && osc_sb_apei_support_acked) | |
1276 | pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n"); | |
1277 | else if (rc == 0 && !osc_sb_apei_support_acked) | |
1278 | pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n"); | |
1279 | else if (rc && osc_sb_apei_support_acked) | |
1280 | pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); | |
1281 | else | |
1282 | pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); | |
1283 | ||
81e88fdc | 1284 | return 0; |
67eb2e99 HY |
1285 | err_pool_exit: |
1286 | ghes_estatus_pool_exit(); | |
81e88fdc HY |
1287 | err_ioremap_exit: |
1288 | ghes_ioremap_exit(); | |
1289 | err: | |
1290 | return rc; | |
d334a491 | 1291 | } |
020bf066 | 1292 | device_initcall(ghes_init); |