// SPDX-License-Identifier: GPL-2.0-only
/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * the firmware first, and the firmware then reports them to Linux.
 * This way, the firmware can inspect non-standard hardware error
 * registers or a non-standard hardware link to produce more hardware
 * error information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/arm_sdei.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>

#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>

#include "apei-internal.h"

#define GHES_PFX "GHES: "

#define GHES_ESTATUS_MAX_SIZE 65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimate used for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE 512

#define GHES_ESTATUS_CACHES_SIZE 4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

#define GHES_VENDOR_ENTRY_LEN(gdata_len)			\
	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)		\
	((struct acpi_hest_generic_data *)			\
	 ((struct ghes_vendor_record_entry *)(vendor_entry) + 1))

/*
 * NMI-like notifications vary by architecture; before the compiler can
 * prune unused static functions it needs a value for these enums.
 */
#ifndef CONFIG_ARM_SDE_INTERFACE
#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
#endif

static inline bool is_hest_type_generic_v2(struct ghes *ghes)
{
	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
}

/*
 * This driver isn't really modular; however, for the time being,
 * continuing to use module_param is the easiest way to remain
 * compatible with existing boot arg use cases.
 */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

/*
 * All error sources notified with HED (Hardware Error Device) share a
 * single notifier callback, so they need to be linked and checked one
 * by one. This holds true for NMI too.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changes, not for traversal.
 */
static LIST_HEAD(ghes_hed);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * The memory area used to transfer hardware error information from
 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but a regular ioremap can not be used in atomic context,
 * so the fixmap is used instead.
 *
 * This spinlock is used to prevent the fixmap entry from being used
 * by more than one context at a time.
 */
static DEFINE_SPINLOCK(ghes_notify_lock_irq);

struct ghes_vendor_record_entry {
	struct work_struct work;
	int error_severity;
	char vendor_record[];
};

static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;

static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_panic_timeout __read_mostly = 30;

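/*
 * Map a single page at the given PFN into the caller's dedicated fixmap
 * slot, using the memory attributes the architecture reports for that
 * physical address. ghes_unmap() sanity-checks that the virtual address
 * really belongs to the expected slot before clearing it.
 */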
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
	phys_addr_t paddr;
	pgprot_t prot;

	paddr = PFN_PHYS(pfn);
	prot = arch_apei_get_mem_attribute(paddr);
	__set_fixmap(fixmap_idx, paddr, prot);

	return (void __iomem *) __fix_to_virt(fixmap_idx);
}

static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
{
	int _idx = virt_to_fix((unsigned long)vaddr);

	WARN_ON_ONCE(fixmap_idx != _idx);
	clear_fixmap(fixmap_idx);
}

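/*
 * Pre-size the estatus memory pool: room for the estatus caches plus a
 * per-error-source preallocation, backed by a vmalloc'ed region handed
 * to a gen_pool so that later allocations can be served from contexts
 * where the regular allocators cannot be used (see the NMI-like
 * handlers below).
 */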
int ghes_estatus_pool_init(int num_ghes)
{
	unsigned long addr, len;
	int rc;

	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;

	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);

	ghes_estatus_pool_size_request = PAGE_ALIGN(len);
	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
	if (!addr)
		goto err_pool_alloc;

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
		goto err_pool_add;

	return 0;

err_pool_add:
	vfree((void *)addr);

err_pool_alloc:
	gen_pool_destroy(ghes_estatus_pool);

	return -ENOMEM;
}

static int map_gen_v2(struct ghes *ghes)
{
	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
}

static void unmap_gen_v2(struct ghes *ghes)
{
	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
}

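/*
 * GHESv2 read-ack handshake: read the Read Ack register, keep the bits
 * selected by read_ack_preserve, OR in the read_ack_write value (both
 * shifted to the register's bit offset) and write the result back, so
 * the firmware knows the error status block has been consumed.
 */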
static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
{
	int rc;
	u64 val = 0;

	rc = apei_read(&val, &gv2->read_ack_register);
	if (rc)
		return;

	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
	val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;

	apei_write(val, &gv2->read_ack_register);
}

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);

	ghes->generic = generic;
	if (is_hest_type_generic_v2(ghes)) {
		rc = map_gen_v2(ghes);
		if (rc)
			goto err_free;
	}

	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_unmap_read_ack_addr;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warn(FW_WARN GHES_PFX
			"Error status block length is too long: %u for "
			"generic hardware error source: %d.\n",
			error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap_status_addr;
	}

	return ghes;

err_unmap_status_addr:
	apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
}

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}

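/*
 * Copy between a kernel buffer and a physical address range one page at
 * a time, mapping each page through the caller's fixmap slot so this is
 * safe in NMI, IRQ and timer context where ioremap() is not.
 */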
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys,
				  enum fixed_addresses fixmap_idx)
{
	void __iomem *vaddr;
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		ghes_unmap(vaddr, fixmap_idx);
	}
}

/* Check that the top-level record header has an appropriate size. */
static int __ghes_check_estatus(struct ghes *ghes,
				struct acpi_hest_generic_status *estatus)
{
	u32 len = cper_estatus_len(estatus);

	if (len < sizeof(*estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
		return -EIO;
	}

	if (len > ghes->generic->error_block_length) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
		return -EIO;
	}

	if (cper_estatus_check_header(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
		return -EIO;
	}

	return 0;
}

/* Read the CPER block, returning its address and its header in estatus. */
static int __ghes_peek_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	struct acpi_hest_generic *g = ghes->generic;
	int rc;

	rc = apei_read(buf_paddr, &g->error_status_address);
	if (rc) {
		*buf_paddr = 0;
		pr_warn_ratelimited(FW_WARN GHES_PFX
			"Failed to read error status block address for hardware error source: %d.\n",
			g->header.source_id);
		return -EIO;
	}
	if (!*buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
			      fixmap_idx);
	if (!estatus->block_status) {
		*buf_paddr = 0;
		return -ENOENT;
	}

	return 0;
}

static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
			       size_t buf_len)
{
	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
	if (cper_estatus_check(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Failed to read error status block!\n");
		return -EIO;
	}

	return 0;
}

static int ghes_read_estatus(struct ghes *ghes,
			     struct acpi_hest_generic_status *estatus,
			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	int rc;

	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
	if (rc)
		return rc;

	rc = __ghes_check_estatus(ghes, estatus);
	if (rc)
		return rc;

	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
				   cper_estatus_len(estatus));
}

static void ghes_clear_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	estatus->block_status = 0;

	if (!buf_paddr)
		return;

	ghes_copy_tofrom_phys(estatus, buf_paddr,
			      sizeof(estatus->block_status), 0,
			      fixmap_idx);

	/*
	 * GHESv2 type HEST entries introduce support for error acknowledgment,
	 * so only acknowledge the error if this support is present.
	 */
	if (is_hest_type_generic_v2(ghes))
		ghes_ack_error(ghes->generic_v2);
}

/*
 * Called as task_work before returning to user-space.
 * Ensure any queued work has been done before we return to the context that
 * triggered the notification.
 */
static void ghes_kick_task_work(struct callback_head *head)
{
	struct acpi_hest_generic_status *estatus;
	struct ghes_estatus_node *estatus_node;
	u32 node_len;

	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		memory_failure_queue_kick(estatus_node->task_work_cpu);

	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
}

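/*
 * Queue memory failure handling for a platform memory error section,
 * provided the record carries a valid physical address. Returns true if
 * work was actually queued, so the caller knows task_work is pending.
 */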
static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
				       int sev)
{
	unsigned long pfn;
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		return false;

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return false;

	pfn = mem_err->physical_addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",
				    mem_err->physical_addr);
		return false;
	}

	/* Only the following two cases can be handled properly for now. */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1) {
		memory_failure_queue(pfn, flags);
		return true;
	}

	return false;
}

/*
 * PCIe AER errors need to be sent to the AER driver for reporting and
 * recovery. The GHES severities map to the following AER severities and
 * require the following handling:
 *
 * GHES_SEV_CORRECTED -> AER_CORRECTABLE
 *     These need to be reported by the AER driver but no recovery is
 *     necessary.
 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 *     These both need to be reported and recovered from by the AER driver.
 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 * panic.
 */
static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
{
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
		unsigned int devfn;
		int aer_severity;

		devfn = PCI_DEVFN(pcie_err->device_id.device,
				  pcie_err->device_id.function);
		aer_severity = cper_severity_to_aer(gdata->error_severity);

		/*
		 * If firmware reset the component to contain
		 * the error, we must reinitialize it before
		 * use, so treat it as a fatal AER error.
		 */
		if (gdata->flags & CPER_SEC_RESET)
			aer_severity = AER_FATAL;

		aer_recover_queue(pcie_err->device_id.segment,
				  pcie_err->device_id.bus,
				  devfn, aer_severity,
				  (struct aer_capability_regs *)
				  pcie_err->aer_info);
	}
#endif
}

static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);

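/*
 * Vendor (non-standard) error records are handed to interested drivers
 * through this blocking notifier chain, called in process context with
 * the GHES severity as the action and the copied generic data entry as
 * the payload. A hypothetical consumer might register like this:
 *
 *	static int my_vendor_notify(struct notifier_block *nb,
 *				    unsigned long severity, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		// inspect gdata->section_type and the payload here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vendor_nb = {
 *		.notifier_call = my_vendor_notify,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_vendor_nb);
 */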
int ghes_register_vendor_record_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);

void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);

static void ghes_vendor_record_work_func(struct work_struct *work)
{
	struct ghes_vendor_record_entry *entry;
	struct acpi_hest_generic_data *gdata;
	u32 len;

	entry = container_of(work, struct ghes_vendor_record_entry, work);
	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);

	blocking_notifier_call_chain(&vendor_record_notify_list,
				     entry->error_severity, gdata);

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
}

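/*
 * Copy the non-standard record into pool memory and schedule a work item
 * so the blocking vendor notifier chain runs in process context rather
 * than in the (possibly atomic) notification handler.
 */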
static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
					  int sev)
{
	struct acpi_hest_generic_data *copied_gdata;
	struct ghes_vendor_record_entry *entry;
	u32 len;

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
	if (!entry)
		return;

	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
	entry->error_severity = sev;

	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
	schedule_work(&entry->work);
}

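/*
 * Walk every section of an error status block and dispatch it by
 * section type GUID: platform memory errors, PCIe/AER errors, ARM
 * processor errors, or vendor-specific records. Returns true if memory
 * failure handling was queued and task_work should be armed.
 */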
static bool ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;
	guid_t *sec_type;
	const guid_t *fru_id = &guid_null;
	char *fru_text = "";
	bool queued = false;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_type = (guid_t *)gdata->section_type;
		sec_sev = ghes_severity(gdata->error_severity);
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;

		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;

		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

			ghes_edac_report_mem_error(sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			queued = ghes_handle_memory_failure(gdata, sev);
		}
		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			ghes_handle_aer(gdata);
		}
		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
			struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);

			log_arm_hw_error(err);
		} else {
			void *err = acpi_hest_get_payload(gdata);

			ghes_defer_non_standard_event(gdata, sev);
			log_non_standard_event(sec_type, fru_id, fru_text,
					       sec_sev, err,
					       gdata->error_data_length);
		}
	}

	return queued;
}

static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}

/*
 * GHES error status reporting throttle, to report more kinds of
 * errors instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}

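/*
 * Insert a new estatus into the cache array: pick an empty slot, an
 * expired entry, or the entry with the longest average period between
 * hits as the victim, and swap the new cache in with cmpxchg(). The
 * displaced entry is freed after an RCU grace period.
 */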
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into the array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}

static void __ghes_panic(struct ghes *ghes,
			 struct acpi_hest_generic_status *estatus,
			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);

	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);

	/* reboot to log the error! */
	if (!panic_timeout)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}

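/*
 * Process one error source from IRQ-like context: read the error status
 * block, panic on fatal errors, print it (rate limited and de-duplicated
 * through the estatus cache), handle its sections, then clear and
 * acknowledge the block.
 */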
static int ghes_proc(struct ghes *ghes)
{
	struct acpi_hest_generic_status *estatus = ghes->estatus;
	u64 buf_paddr;
	int rc;

	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
	if (rc)
		goto out;

	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);

	if (!ghes_estatus_cached(estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, estatus))
			ghes_estatus_cache_add(ghes->generic, estatus);
	}
	ghes_do_proc(ghes, estatus);

out:
	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);

	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(struct timer_list *t)
{
	struct ghes *ghes = from_timer(ghes, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	rc = ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
			   void *data)
{
	struct ghes *ghes;
	unsigned long flags;
	int ret = NOTIFY_DONE;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);

	return ret;
}

static struct notifier_block ghes_notifier_hed = {
	.notifier_call = ghes_notify_hed,
};

/*
 * Handlers for CPER records may not be NMI safe. For example,
 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
 * the estatus, which is then added to ghes_estatus_llist. irq_work_queue()
 * causes ghes_proc_in_irq() to run in IRQ context, where each estatus in
 * ghes_estatus_llist is processed.
 *
 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
 * to suppress frequent messages.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

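/*
 * Drain the lock-less list filled by the NMI-like handlers and process
 * each queued estatus in IRQ context; arm task_work when memory failure
 * handling was queued for the current user task.
 */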
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	bool task_work_pending;
	u32 len, node_len;
	int ret;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the estatus entries in the list are in reverse time
	 * order, restore the proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}

		if (task_work_pending && current->mm != &init_mm) {
			estatus_node->task_work.func = ghes_kick_task_work;
			estatus_node->task_work_cpu = smp_processor_id();
			ret = task_work_add(current, &estatus_node->task_work,
					    TWA_RESUME);
			if (ret)
				estatus_node->task_work.func = NULL;
		}

		if (!estatus_node->task_work.func)
			gen_pool_free(ghes_estatus_pool,
				      (unsigned long)estatus_node, node_len);

		llnode = next;
	}
}

static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the estatus entries in the list are in reverse time
	 * order, restore the proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}

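/*
 * Read one error source from NMI-like context: peek and sanity-check
 * the header, copy the full block into a pool-allocated node, panic
 * right here on fatal errors, and otherwise queue the node on
 * ghes_estatus_llist for later processing in IRQ context.
 */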
static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
				       enum fixed_addresses fixmap_idx)
{
	struct acpi_hest_generic_status *estatus, tmp_header;
	struct ghes_estatus_node *estatus_node;
	u32 len, node_len;
	u64 buf_paddr;
	int sev, rc;

	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
		return -EOPNOTSUPP;

	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
	if (rc) {
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
		return rc;
	}

	rc = __ghes_check_estatus(ghes, &tmp_header);
	if (rc) {
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
		return rc;
	}

	len = cper_estatus_len(&tmp_header);
	node_len = GHES_ESTATUS_NODE_LEN(len);
	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
	if (!estatus_node)
		return -ENOMEM;

	estatus_node->ghes = ghes;
	estatus_node->generic = ghes->generic;
	estatus_node->task_work.func = NULL;
	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);

	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
		rc = -ENOENT;
		goto no_work;
	}

	sev = ghes_severity(estatus->error_severity);
	if (sev >= GHES_SEV_PANIC) {
		ghes_print_queued_estatus();
		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
	}

	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);

	/* This error has been reported before, don't process it again. */
	if (ghes_estatus_cached(estatus))
		goto no_work;

	llist_add(&estatus_node->llnode, &ghes_estatus_llist);

	return rc;

no_work:
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
		      node_len);

	return rc;
}

static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
				       enum fixed_addresses fixmap_idx)
{
	int ret = -ENOENT;
	struct ghes *ghes;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, rcu_list, list) {
		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
			ret = 0;
	}
	rcu_read_unlock();

	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
		irq_work_queue(&ghes_proc_irq_work);

	return ret;
}

#ifdef CONFIG_ACPI_APEI_SEA
static LIST_HEAD(ghes_sea);

/*
 * Return 0 only if one of the SEA error sources successfully reported an error
 * record sent from the firmware.
 */
int ghes_notify_sea(void)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
	int rv;

	raw_spin_lock(&ghes_notify_lock_sea);
	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
	raw_spin_unlock(&ghes_notify_lock_sea);

	return rv;
}

static void ghes_sea_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_add_rcu(&ghes->list, &ghes_sea);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_sea_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	mutex_unlock(&ghes_list_mutex);
	synchronize_rcu();
}
#else /* CONFIG_ACPI_APEI_SEA */
static inline void ghes_sea_add(struct ghes *ghes) { }
static inline void ghes_sea_remove(struct ghes *ghes) { }
#endif /* CONFIG_ACPI_APEI_SEA */

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to allow
 * only one concurrent reader.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static LIST_HEAD(ghes_nmi);

static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
	int ret = NMI_DONE;

	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	raw_spin_lock(&ghes_notify_lock_nmi);
	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
		ret = NMI_HANDLED;
	raw_spin_unlock(&ghes_notify_lock_nmi);

	atomic_dec(&ghes_in_nmi);
	return ret;
}

static void ghes_nmi_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_nmi_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with the NMI handler, ghes can only be
	 * freed after the NMI handler finishes.
	 */
	synchronize_rcu();
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
static inline void ghes_nmi_add(struct ghes *ghes) { }
static inline void ghes_nmi_remove(struct ghes *ghes) { }
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */

static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}

static int __ghes_sdei_callback(struct ghes *ghes,
				enum fixed_addresses fixmap_idx)
{
	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
		irq_work_queue(&ghes_proc_irq_work);

		return 0;
	}

	return -ENOENT;
}

static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
				     void *arg)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
	struct ghes *ghes = arg;
	int err;

	raw_spin_lock(&ghes_notify_lock_sdei_normal);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_normal);

	return err;
}

static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
				       void *arg)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
	struct ghes *ghes = arg;
	int err;

	raw_spin_lock(&ghes_notify_lock_sdei_critical);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_critical);

	return err;
}

static int apei_sdei_register_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
				  ghes_sdei_critical_callback);
}

static int apei_sdei_unregister_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_unregister_ghes(ghes);
}

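/*
 * Bind one HEST generic error source to this platform driver: validate
 * the notification type against the kernel configuration, allocate the
 * struct ghes, hook up the matching notification mechanism (timer, IRQ,
 * HED notifier, SEA, NMI or SDEI), register with EDAC, and process any
 * error that is already pending.
 */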
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long flags;

	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		break;

	case ACPI_HEST_NOTIFY_SEA:
		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
				generic->header.source_id);
			rc = -ENOTSUPP;
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			generic->header.source_id);
		goto err;
	default:
		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			generic->error_block_length, generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		timer_setup(&ghes->timer, ghes_poll_func, 0);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
				 "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		break;

	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_hed))
			register_acpi_hed_notifier(&ghes_notifier_hed);
		list_add_rcu(&ghes->list, &ghes_hed);
		mutex_unlock(&ghes_list_mutex);
		break;

	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_add(ghes);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_add(ghes);
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_register_ghes(ghes);
		if (rc)
			goto err;
		break;
	default:
		BUG();
	}

	platform_set_drvdata(ghes_dev, ghes);

	ghes_edac_register(ghes, &ghes_dev->dev);

	/* Handle any pending errors right away */
	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);

	return 0;

err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}

static int ghes_remove(struct platform_device *ghes_dev)
{
	int rc;
	struct ghes *ghes;
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;

	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_hed))
			unregister_acpi_hed_notifier(&ghes_notifier_hed);
		mutex_unlock(&ghes_list_mutex);
		synchronize_rcu();
		break;

	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_remove(ghes);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_unregister_ghes(ghes);
		if (rc)
			return rc;
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}

static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};

static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	switch (hest_disable) {
	case HEST_NOT_FOUND:
		return -ENODEV;
	case HEST_DISABLED:
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	default:
		break;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	ghes_nmi_init_cxt();

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err:
	return rc;
}
device_initcall(ghes_init);