xref: /linux/drivers/acpi/apei/ghes.c (revision 021bc4b9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * APEI Generic Hardware Error Source support
4  *
5  * Generic Hardware Error Source provides a way to report platform
6  * hardware errors (such as those from the chipset). It works in the
7  * so-called "Firmware First" mode: hardware errors are reported to
8  * firmware first, and firmware then reports them to Linux. This way,
9  * firmware can inspect non-standard hardware error registers or
10  * non-standard hardware links to produce richer hardware error
11  * information for Linux.
12  *
13  * For more information about Generic Hardware Error Source, please
14  * refer to ACPI Specification version 4.0, section 17.3.2.6
15  *
16  * Copyright 2010,2011 Intel Corp.
17  *   Author: Huang Ying <ying.huang@intel.com>
18  */
19 
20 #include <linux/arm_sdei.h>
21 #include <linux/kernel.h>
22 #include <linux/moduleparam.h>
23 #include <linux/init.h>
24 #include <linux/acpi.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/timer.h>
28 #include <linux/cper.h>
29 #include <linux/cxl-event.h>
30 #include <linux/platform_device.h>
31 #include <linux/mutex.h>
32 #include <linux/ratelimit.h>
33 #include <linux/vmalloc.h>
34 #include <linux/irq_work.h>
35 #include <linux/llist.h>
36 #include <linux/genalloc.h>
37 #include <linux/pci.h>
38 #include <linux/pfn.h>
39 #include <linux/aer.h>
40 #include <linux/nmi.h>
41 #include <linux/sched/clock.h>
42 #include <linux/uuid.h>
43 #include <linux/ras.h>
44 #include <linux/task_work.h>
45 
46 #include <acpi/actbl1.h>
47 #include <acpi/ghes.h>
48 #include <acpi/apei.h>
49 #include <asm/fixmap.h>
50 #include <asm/tlbflush.h>
51 #include <ras/ras_event.h>
52 
53 #include "apei-internal.h"
54 
55 #define GHES_PFX	"GHES: "
56 
57 #define GHES_ESTATUS_MAX_SIZE		65536
58 #define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
59 
60 #define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
61 
62 /* This is just an estimate for memory pool allocation */
63 #define GHES_ESTATUS_CACHE_AVG_SIZE	512
64 
65 #define GHES_ESTATUS_CACHES_SIZE	4
66 
67 #define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
68 /* Prevent too many caches from being allocated because of RCU */
69 #define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
70 
71 #define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
72 	(sizeof(struct ghes_estatus_cache) + (estatus_len))
73 #define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
74 	((struct acpi_hest_generic_status *)				\
75 	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
76 
77 #define GHES_ESTATUS_NODE_LEN(estatus_len)			\
78 	(sizeof(struct ghes_estatus_node) + (estatus_len))
79 #define GHES_ESTATUS_FROM_NODE(estatus_node)			\
80 	((struct acpi_hest_generic_status *)				\
81 	 ((struct ghes_estatus_node *)(estatus_node) + 1))
82 
83 #define GHES_VENDOR_ENTRY_LEN(gdata_len)                               \
84 	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
85 #define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)                     \
86 	((struct acpi_hest_generic_data *)                              \
87 	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
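/*
 * Each *_LEN/*_FROM_* pair above assumes the same layout: a small
 * bookkeeping header immediately followed by its payload in a single
 * allocation. A minimal sketch of the intended use (mirroring what
 * ghes_in_nmi_queue_one_entry() does below):
 *
 *	u32 len = cper_estatus_len(estatus);
 *	u32 node_len = GHES_ESTATUS_NODE_LEN(len);
 *	struct ghes_estatus_node *node;
 *
 *	node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
 *	if (node)
 *		memcpy(GHES_ESTATUS_FROM_NODE(node), estatus, len);
 */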
88 
89 /*
90  *  NMI-like notifications vary by architecture; before the compiler can prune
91  *  unused static functions, it needs a value for these enums.
92  */
93 #ifndef CONFIG_ARM_SDE_INTERFACE
94 #define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
95 #define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
96 #endif
97 
98 static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);
99 
100 static inline bool is_hest_type_generic_v2(struct ghes *ghes)
101 {
102 	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
103 }
104 
105 /*
106  * A platform may describe one error source for the handling of synchronous
107  * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
108  * or External Interrupt). On x86, the HEST notifications are always
109  * asynchronous, so only SEA on ARM is delivered as a synchronous
110  * notification.
111  */
112 static inline bool is_hest_sync_notify(struct ghes *ghes)
113 {
114 	u8 notify_type = ghes->generic->notify.type;
115 
116 	return notify_type == ACPI_HEST_NOTIFY_SEA;
117 }
118 
119 /*
120  * This driver isn't really modular; however, for the time being,
121  * continuing to use module_param is the easiest way to remain
122  * compatible with existing boot arg use cases.
123  */
124 bool ghes_disable;
125 module_param_named(disable, ghes_disable, bool, 0);
126 
127 /*
128  * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
129  * check.
130  */
131 static bool ghes_edac_force_enable;
132 module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);
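/*
 * Since this driver is built-in, both parameters are given on the
 * kernel command line with the "ghes." prefix, e.g.:
 *
 *	ghes.disable=1
 *	ghes.edac_force_enable=1
 */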
133 
134 /*
135  * All error sources notified with HED (Hardware Error Device) share a
136  * single notifier callback, so they need to be linked and checked one
137  * by one. This holds true for NMI too.
138  *
139  * RCU is used for these lists, so ghes_list_mutex is only used for
140  * list changing, not for traversing.
141  */
142 static LIST_HEAD(ghes_hed);
143 static DEFINE_MUTEX(ghes_list_mutex);
144 
145 /*
146  * A list of GHES devices which are given to the corresponding EDAC driver
147  * ghes_edac for further use.
148  */
149 static LIST_HEAD(ghes_devs);
150 static DEFINE_MUTEX(ghes_devs_mutex);
151 
152 /*
153  * The memory area used to transfer hardware error information from
154  * BIOS to Linux can be determined only in an NMI, IRQ or timer
155  * handler, where the ordinary ioremap can not be used (atomic
156  * context), so the fixmap is used instead.
157  *
158  * This spinlock is used to prevent the fixmap entry from being used
159  * simultaneously.
160  */
161 static DEFINE_SPINLOCK(ghes_notify_lock_irq);
162 
163 struct ghes_vendor_record_entry {
164 	struct work_struct work;
165 	int error_severity;
166 	char vendor_record[];
167 };
168 
169 static struct gen_pool *ghes_estatus_pool;
170 
171 static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
172 static atomic_t ghes_estatus_cache_alloced;
173 
174 static int ghes_panic_timeout __read_mostly = 30;
175 
176 static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
177 {
178 	phys_addr_t paddr;
179 	pgprot_t prot;
180 
181 	paddr = PFN_PHYS(pfn);
182 	prot = arch_apei_get_mem_attribute(paddr);
183 	__set_fixmap(fixmap_idx, paddr, prot);
184 
185 	return (void __iomem *) __fix_to_virt(fixmap_idx);
186 }
187 
188 static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
189 {
190 	int _idx = virt_to_fix((unsigned long)vaddr);
191 
192 	WARN_ON_ONCE(fixmap_idx != _idx);
193 	clear_fixmap(fixmap_idx);
194 }
195 
196 int ghes_estatus_pool_init(unsigned int num_ghes)
197 {
198 	unsigned long addr, len;
199 	int rc;
200 
201 	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
202 	if (!ghes_estatus_pool)
203 		return -ENOMEM;
204 
205 	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
206 	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
207 
208 	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
209 	if (!addr)
210 		goto err_pool_alloc;
211 
212 	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
213 	if (rc)
214 		goto err_pool_add;
215 
216 	return 0;
217 
218 err_pool_add:
219 	vfree((void *)addr);
220 
221 err_pool_alloc:
222 	gen_pool_destroy(ghes_estatus_pool);
223 
224 	return -ENOMEM;
225 }
226 
227 /**
228  * ghes_estatus_pool_region_free - free previously allocated memory
229  *				   from the ghes_estatus_pool.
230  * @addr: address of memory to free.
231  * @size: size of memory to free.
234  */
235 void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
236 {
237 	gen_pool_free(ghes_estatus_pool, addr, size);
238 }
239 EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
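/*
 * A minimal sketch of the intended pairing, assuming "regs" points to
 * a buffer that was handed out from ghes_estatus_pool, such as the
 * aer_info copy allocated in ghes_handle_aer() below (at this revision
 * the PCIe AER recovery path is such a consumer, freeing the buffer
 * once the record has been processed):
 *
 *	struct aer_capability_regs *regs = get_queued_aer_regs();
 *
 *	ghes_estatus_pool_region_free((unsigned long)regs, sizeof(*regs));
 *
 * (get_queued_aer_regs() is a hypothetical stand-in for however the
 * consumer dequeues the record.)
 */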
240 
241 static int map_gen_v2(struct ghes *ghes)
242 {
243 	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
244 }
245 
246 static void unmap_gen_v2(struct ghes *ghes)
247 {
248 	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
249 }
250 
251 static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
252 {
253 	int rc;
254 	u64 val = 0;
255 
256 	rc = apei_read(&val, &gv2->read_ack_register);
257 	if (rc)
258 		return;
259 
260 	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
261 	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;
262 
263 	apei_write(val, &gv2->read_ack_register);
264 }
265 
266 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
267 {
268 	struct ghes *ghes;
269 	unsigned int error_block_length;
270 	int rc;
271 
272 	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
273 	if (!ghes)
274 		return ERR_PTR(-ENOMEM);
275 
276 	ghes->generic = generic;
277 	if (is_hest_type_generic_v2(ghes)) {
278 		rc = map_gen_v2(ghes);
279 		if (rc)
280 			goto err_free;
281 	}
282 
283 	rc = apei_map_generic_address(&generic->error_status_address);
284 	if (rc)
285 		goto err_unmap_read_ack_addr;
286 	error_block_length = generic->error_block_length;
287 	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
288 		pr_warn(FW_WARN GHES_PFX
289 			"Error status block length is too long: %u for "
290 			"generic hardware error source: %d.\n",
291 			error_block_length, generic->header.source_id);
292 		error_block_length = GHES_ESTATUS_MAX_SIZE;
293 	}
294 	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
295 	if (!ghes->estatus) {
296 		rc = -ENOMEM;
297 		goto err_unmap_status_addr;
298 	}
299 
300 	return ghes;
301 
302 err_unmap_status_addr:
303 	apei_unmap_generic_address(&generic->error_status_address);
304 err_unmap_read_ack_addr:
305 	if (is_hest_type_generic_v2(ghes))
306 		unmap_gen_v2(ghes);
307 err_free:
308 	kfree(ghes);
309 	return ERR_PTR(rc);
310 }
311 
312 static void ghes_fini(struct ghes *ghes)
313 {
314 	kfree(ghes->estatus);
315 	apei_unmap_generic_address(&ghes->generic->error_status_address);
316 	if (is_hest_type_generic_v2(ghes))
317 		unmap_gen_v2(ghes);
318 }
319 
320 static inline int ghes_severity(int severity)
321 {
322 	switch (severity) {
323 	case CPER_SEV_INFORMATIONAL:
324 		return GHES_SEV_NO;
325 	case CPER_SEV_CORRECTED:
326 		return GHES_SEV_CORRECTED;
327 	case CPER_SEV_RECOVERABLE:
328 		return GHES_SEV_RECOVERABLE;
329 	case CPER_SEV_FATAL:
330 		return GHES_SEV_PANIC;
331 	default:
332 		/* Unknown, go panic */
333 		return GHES_SEV_PANIC;
334 	}
335 }
336 
337 static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
338 				  int from_phys,
339 				  enum fixed_addresses fixmap_idx)
340 {
341 	void __iomem *vaddr;
342 	u64 offset;
343 	u32 trunk;
344 
345 	while (len > 0) {
346 		offset = paddr - (paddr & PAGE_MASK);
347 		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
348 		trunk = PAGE_SIZE - offset;
349 		trunk = min(trunk, len);
350 		if (from_phys)
351 			memcpy_fromio(buffer, vaddr + offset, trunk);
352 		else
353 			memcpy_toio(vaddr + offset, buffer, trunk);
354 		len -= trunk;
355 		paddr += trunk;
356 		buffer += trunk;
357 		ghes_unmap(vaddr, fixmap_idx);
358 	}
359 }
360 
361 /* Check that the top-level record header has an appropriate size. */
362 static int __ghes_check_estatus(struct ghes *ghes,
363 				struct acpi_hest_generic_status *estatus)
364 {
365 	u32 len = cper_estatus_len(estatus);
366 
367 	if (len < sizeof(*estatus)) {
368 		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
369 		return -EIO;
370 	}
371 
372 	if (len > ghes->generic->error_block_length) {
373 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
374 		return -EIO;
375 	}
376 
377 	if (cper_estatus_check_header(estatus)) {
378 		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
379 		return -EIO;
380 	}
381 
382 	return 0;
383 }
384 
385 /* Read the CPER block's address into buf_paddr and its header into estatus. */
386 static int __ghes_peek_estatus(struct ghes *ghes,
387 			       struct acpi_hest_generic_status *estatus,
388 			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
389 {
390 	struct acpi_hest_generic *g = ghes->generic;
391 	int rc;
392 
393 	rc = apei_read(buf_paddr, &g->error_status_address);
394 	if (rc) {
395 		*buf_paddr = 0;
396 		pr_warn_ratelimited(FW_WARN GHES_PFX
397 "Failed to read error status block address for hardware error source: %d.\n",
398 				   g->header.source_id);
399 		return -EIO;
400 	}
401 	if (!*buf_paddr)
402 		return -ENOENT;
403 
404 	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
405 			      fixmap_idx);
406 	if (!estatus->block_status) {
407 		*buf_paddr = 0;
408 		return -ENOENT;
409 	}
410 
411 	return 0;
412 }
413 
414 static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
415 			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
416 			       size_t buf_len)
417 {
418 	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
419 	if (cper_estatus_check(estatus)) {
420 		pr_warn_ratelimited(FW_WARN GHES_PFX
421 				    "Failed to read error status block!\n");
422 		return -EIO;
423 	}
424 
425 	return 0;
426 }
427 
428 static int ghes_read_estatus(struct ghes *ghes,
429 			     struct acpi_hest_generic_status *estatus,
430 			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
431 {
432 	int rc;
433 
434 	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
435 	if (rc)
436 		return rc;
437 
438 	rc = __ghes_check_estatus(ghes, estatus);
439 	if (rc)
440 		return rc;
441 
442 	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
443 				   cper_estatus_len(estatus));
444 }
445 
446 static void ghes_clear_estatus(struct ghes *ghes,
447 			       struct acpi_hest_generic_status *estatus,
448 			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
449 {
450 	estatus->block_status = 0;
451 
452 	if (!buf_paddr)
453 		return;
454 
455 	ghes_copy_tofrom_phys(estatus, buf_paddr,
456 			      sizeof(estatus->block_status), 0,
457 			      fixmap_idx);
458 
459 	/*
460 	 * GHESv2 type HEST entries introduce support for error acknowledgment,
461 	 * so only acknowledge the error if this support is present.
462 	 */
463 	if (is_hest_type_generic_v2(ghes))
464 		ghes_ack_error(ghes->generic_v2);
465 }
466 
467 /*
468  * Called as task_work before returning to user-space.
469  * Ensure any queued work has been done before we return to the context that
470  * triggered the notification.
471  */
472 static void ghes_kick_task_work(struct callback_head *head)
473 {
474 	struct acpi_hest_generic_status *estatus;
475 	struct ghes_estatus_node *estatus_node;
476 	u32 node_len;
477 
478 	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
479 	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
480 		memory_failure_queue_kick(estatus_node->task_work_cpu);
481 
482 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
483 	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
484 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
485 }
486 
487 static bool ghes_do_memory_failure(u64 physical_addr, int flags)
488 {
489 	unsigned long pfn;
490 
491 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
492 		return false;
493 
494 	pfn = PHYS_PFN(physical_addr);
495 	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
496 		pr_warn_ratelimited(FW_WARN GHES_PFX
497 		"Invalid address in generic error data: %#llx\n",
498 		physical_addr);
499 		return false;
500 	}
501 
502 	memory_failure_queue(pfn, flags);
503 	return true;
504 }
505 
506 static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
507 				       int sev, bool sync)
508 {
509 	int flags = -1;
510 	int sec_sev = ghes_severity(gdata->error_severity);
511 	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
512 
513 	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
514 		return false;
515 
516 	/* Only the following two cases can be handled properly for now. */
517 	if (sec_sev == GHES_SEV_CORRECTED &&
518 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
519 		flags = MF_SOFT_OFFLINE;
520 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
521 		flags = sync ? MF_ACTION_REQUIRED : 0;
522 
523 	if (flags != -1)
524 		return ghes_do_memory_failure(mem_err->physical_addr, flags);
525 
526 	return false;
527 }
528 
529 static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
530 				       int sev, bool sync)
531 {
532 	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
533 	int flags = sync ? MF_ACTION_REQUIRED : 0;
534 	bool queued = false;
535 	int sec_sev, i;
536 	char *p;
537 
538 	log_arm_hw_error(err);
539 
540 	sec_sev = ghes_severity(gdata->error_severity);
541 	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
542 		return false;
543 
544 	p = (char *)(err + 1);
545 	for (i = 0; i < err->err_info_num; i++) {
546 		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
547 		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
548 		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
549 		const char *error_type = "unknown error";
550 
551 		/*
552 		 * The field (err_info->error_info & BIT(26)) is always set to 1 by
553 		 * some old firmware of HiSilicon Kunpeng920. We assume that firmware
554 		 * won't mix corrected errors into an uncorrected section, so we do
555 		 * not filter out 'corrected' errors here.
556 		 */
557 		if (is_cache && has_pa) {
558 			queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
559 			p += err_info->length;
560 			continue;
561 		}
562 
563 		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
564 			error_type = cper_proc_error_type_strs[err_info->type];
565 
566 		pr_warn_ratelimited(FW_WARN GHES_PFX
567 				    "Unhandled processor error type: %s\n",
568 				    error_type);
569 		p += err_info->length;
570 	}
571 
572 	return queued;
573 }
574 
575 /*
576  * PCIe AER errors need to be sent to the AER driver for reporting and
577  * recovery. The GHES severities map to the following AER severities and
578  * require the following handling:
579  *
580  * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
581  *     These need to be reported by the AER driver but no recovery is
582  *     necessary.
583  * GHES_SEV_RECOVERABLE -> AER_NONFATAL
584  * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
585  *     These both need to be reported and recovered from by the AER driver.
586  * GHES_SEV_PANIC does not make it to this handling since the kernel must
587  *     panic.
588  */
589 static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
590 {
591 #ifdef CONFIG_ACPI_APEI_PCIEAER
592 	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
593 
594 	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
595 	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
596 		unsigned int devfn;
597 		int aer_severity;
598 		u8 *aer_info;
599 
600 		devfn = PCI_DEVFN(pcie_err->device_id.device,
601 				  pcie_err->device_id.function);
602 		aer_severity = cper_severity_to_aer(gdata->error_severity);
603 
604 		/*
605 		 * If firmware reset the component to contain
606 		 * the error, we must reinitialize it before
607 		 * use, so treat it as a fatal AER error.
608 		 */
609 		if (gdata->flags & CPER_SEC_RESET)
610 			aer_severity = AER_FATAL;
611 
612 		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
613 						  sizeof(struct aer_capability_regs));
614 		if (!aer_info)
615 			return;
616 		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
617 
618 		aer_recover_queue(pcie_err->device_id.segment,
619 				  pcie_err->device_id.bus,
620 				  devfn, aer_severity,
621 				  (struct aer_capability_regs *)
622 				  aer_info);
623 	}
624 #endif
625 }
626 
627 static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
628 
629 int ghes_register_vendor_record_notifier(struct notifier_block *nb)
630 {
631 	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
632 }
633 EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
634 
635 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
636 {
637 	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
638 }
639 EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
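/*
 * A minimal sketch of a consumer (all "my_*" names are hypothetical).
 * The notifier is invoked from process context by
 * ghes_vendor_record_work_func() below, with the GHES severity as the
 * 'event' argument and a copy of the generic data section as 'data':
 *
 *	static int my_vendor_notify(struct notifier_block *nb,
 *				    unsigned long severity, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		if (guid_equal((guid_t *)gdata->section_type, &my_sec_guid))
 *			my_handle_record(gdata, severity);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_vendor_notify,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_nb);
 */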
640 
641 static void ghes_vendor_record_work_func(struct work_struct *work)
642 {
643 	struct ghes_vendor_record_entry *entry;
644 	struct acpi_hest_generic_data *gdata;
645 	u32 len;
646 
647 	entry = container_of(work, struct ghes_vendor_record_entry, work);
648 	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
649 
650 	blocking_notifier_call_chain(&vendor_record_notify_list,
651 				     entry->error_severity, gdata);
652 
653 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
654 	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
655 }
656 
657 static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
658 					  int sev)
659 {
660 	struct acpi_hest_generic_data *copied_gdata;
661 	struct ghes_vendor_record_entry *entry;
662 	u32 len;
663 
664 	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
665 	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
666 	if (!entry)
667 		return;
668 
669 	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
670 	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
671 	entry->error_severity = sev;
672 
673 	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
674 	schedule_work(&entry->work);
675 }
676 
677 /*
678  * Only a single callback can be registered for CXL CPER events.
679  */
680 static DECLARE_RWSEM(cxl_cper_rw_sem);
681 static cxl_cper_callback cper_callback;
682 
683 static void cxl_cper_post_event(enum cxl_event_type event_type,
684 				struct cxl_cper_event_rec *rec)
685 {
686 	if (rec->hdr.length <= sizeof(rec->hdr) ||
687 	    rec->hdr.length > sizeof(*rec)) {
688 		pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
689 		       rec->hdr.length);
690 		return;
691 	}
692 
693 	if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
694 		pr_err(FW_WARN "CXL CPER invalid event\n");
695 		return;
696 	}
697 
698 	guard(rwsem_read)(&cxl_cper_rw_sem);
699 	if (cper_callback)
700 		cper_callback(event_type, rec);
701 }
702 
703 int cxl_cper_register_callback(cxl_cper_callback callback)
704 {
705 	guard(rwsem_write)(&cxl_cper_rw_sem);
706 	if (cper_callback)
707 		return -EINVAL;
708 	cper_callback = callback;
709 	return 0;
710 }
711 EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
712 
713 int cxl_cper_unregister_callback(cxl_cper_callback callback)
714 {
715 	guard(rwsem_write)(&cxl_cper_rw_sem);
716 	if (callback != cper_callback)
717 		return -EINVAL;
718 	cper_callback = NULL;
719 	return 0;
720 }
721 EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
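/*
 * A minimal registration sketch (the "my_*" names are hypothetical);
 * only one callback may be registered at a time, so a second
 * registration fails with -EINVAL:
 *
 *	static void my_cxl_cper_cb(enum cxl_event_type type,
 *				   struct cxl_cper_event_rec *rec)
 *	{
 *		my_trace_cxl_event(type, rec);
 *	}
 *
 *	if (cxl_cper_register_callback(my_cxl_cper_cb))
 *		pr_warn("CXL CPER callback already registered\n");
 *	...
 *	cxl_cper_unregister_callback(my_cxl_cper_cb);
 */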
722 
723 static bool ghes_do_proc(struct ghes *ghes,
724 			 const struct acpi_hest_generic_status *estatus)
725 {
726 	int sev, sec_sev;
727 	struct acpi_hest_generic_data *gdata;
728 	guid_t *sec_type;
729 	const guid_t *fru_id = &guid_null;
730 	char *fru_text = "";
731 	bool queued = false;
732 	bool sync = is_hest_sync_notify(ghes);
733 
734 	sev = ghes_severity(estatus->error_severity);
735 	apei_estatus_for_each_section(estatus, gdata) {
736 		sec_type = (guid_t *)gdata->section_type;
737 		sec_sev = ghes_severity(gdata->error_severity);
738 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
739 			fru_id = (guid_t *)gdata->fru_id;
740 
741 		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
742 			fru_text = gdata->fru_text;
743 
744 		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
745 			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
746 
747 			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
748 
749 			arch_apei_report_mem_error(sev, mem_err);
750 			queued = ghes_handle_memory_failure(gdata, sev, sync);
751 		}
752 		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
753 			ghes_handle_aer(gdata);
754 		}
755 		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
756 			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
757 		} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
758 			struct cxl_cper_event_rec *rec =
759 				acpi_hest_get_payload(gdata);
760 
761 			cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
762 		} else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
763 			struct cxl_cper_event_rec *rec =
764 				acpi_hest_get_payload(gdata);
765 
766 			cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
767 		} else if (guid_equal(sec_type,
768 				      &CPER_SEC_CXL_MEM_MODULE_GUID)) {
769 			struct cxl_cper_event_rec *rec =
770 				acpi_hest_get_payload(gdata);
771 
772 			cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
773 		} else {
774 			void *err = acpi_hest_get_payload(gdata);
775 
776 			ghes_defer_non_standard_event(gdata, sev);
777 			log_non_standard_event(sec_type, fru_id, fru_text,
778 					       sec_sev, err,
779 					       gdata->error_data_length);
780 		}
781 	}
782 
783 	return queued;
784 }
785 
786 static void __ghes_print_estatus(const char *pfx,
787 				 const struct acpi_hest_generic *generic,
788 				 const struct acpi_hest_generic_status *estatus)
789 {
790 	static atomic_t seqno;
791 	unsigned int curr_seqno;
792 	char pfx_seq[64];
793 
794 	if (pfx == NULL) {
795 		if (ghes_severity(estatus->error_severity) <=
796 		    GHES_SEV_CORRECTED)
797 			pfx = KERN_WARNING;
798 		else
799 			pfx = KERN_ERR;
800 	}
801 	curr_seqno = atomic_inc_return(&seqno);
802 	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
803 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
804 	       pfx_seq, generic->header.source_id);
805 	cper_estatus_print(pfx_seq, estatus);
806 }
807 
808 static int ghes_print_estatus(const char *pfx,
809 			      const struct acpi_hest_generic *generic,
810 			      const struct acpi_hest_generic_status *estatus)
811 {
812 	/* Not more than 2 messages every 5 seconds */
813 	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
814 	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
815 	struct ratelimit_state *ratelimit;
816 
817 	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
818 		ratelimit = &ratelimit_corrected;
819 	else
820 		ratelimit = &ratelimit_uncorrected;
821 	if (__ratelimit(ratelimit)) {
822 		__ghes_print_estatus(pfx, generic, estatus);
823 		return 1;
824 	}
825 	return 0;
826 }
827 
828 /*
829  * GHES error status reporting throttle, so that more kinds of errors
830  * can be reported, instead of just the most frequently occurring ones.
831  */
832 static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
833 {
834 	u32 len;
835 	int i, cached = 0;
836 	unsigned long long now;
837 	struct ghes_estatus_cache *cache;
838 	struct acpi_hest_generic_status *cache_estatus;
839 
840 	len = cper_estatus_len(estatus);
841 	rcu_read_lock();
842 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
843 		cache = rcu_dereference(ghes_estatus_caches[i]);
844 		if (cache == NULL)
845 			continue;
846 		if (len != cache->estatus_len)
847 			continue;
848 		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
849 		if (memcmp(estatus, cache_estatus, len))
850 			continue;
851 		atomic_inc(&cache->count);
852 		now = sched_clock();
853 		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
854 			cached = 1;
855 		break;
856 	}
857 	rcu_read_unlock();
858 	return cached;
859 }
860 
861 static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
862 	struct acpi_hest_generic *generic,
863 	struct acpi_hest_generic_status *estatus)
864 {
865 	int alloced;
866 	u32 len, cache_len;
867 	struct ghes_estatus_cache *cache;
868 	struct acpi_hest_generic_status *cache_estatus;
869 
870 	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
871 	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
872 		atomic_dec(&ghes_estatus_cache_alloced);
873 		return NULL;
874 	}
875 	len = cper_estatus_len(estatus);
876 	cache_len = GHES_ESTATUS_CACHE_LEN(len);
877 	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
878 	if (!cache) {
879 		atomic_dec(&ghes_estatus_cache_alloced);
880 		return NULL;
881 	}
882 	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
883 	memcpy(cache_estatus, estatus, len);
884 	cache->estatus_len = len;
885 	atomic_set(&cache->count, 0);
886 	cache->generic = generic;
887 	cache->time_in = sched_clock();
888 	return cache;
889 }
890 
891 static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
892 {
893 	struct ghes_estatus_cache *cache;
894 	u32 len;
895 
896 	cache = container_of(head, struct ghes_estatus_cache, rcu);
897 	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
898 	len = GHES_ESTATUS_CACHE_LEN(len);
899 	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
900 	atomic_dec(&ghes_estatus_cache_alloced);
901 }
902 
903 static void
904 ghes_estatus_cache_add(struct acpi_hest_generic *generic,
905 		       struct acpi_hest_generic_status *estatus)
906 {
907 	unsigned long long now, duration, period, max_period = 0;
908 	struct ghes_estatus_cache *cache, *new_cache;
909 	struct ghes_estatus_cache __rcu *victim;
910 	int i, slot = -1, count;
911 
912 	new_cache = ghes_estatus_cache_alloc(generic, estatus);
913 	if (!new_cache)
914 		return;
915 
916 	rcu_read_lock();
917 	now = sched_clock();
918 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
919 		cache = rcu_dereference(ghes_estatus_caches[i]);
920 		if (cache == NULL) {
921 			slot = i;
922 			break;
923 		}
924 		duration = now - cache->time_in;
925 		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
926 			slot = i;
927 			break;
928 		}
929 		count = atomic_read(&cache->count);
930 		period = duration;
931 		do_div(period, (count + 1));
932 		if (period > max_period) {
933 			max_period = period;
934 			slot = i;
935 		}
936 	}
937 	rcu_read_unlock();
938 
939 	if (slot != -1) {
940 		/*
941 		 * Use release semantics to ensure that ghes_estatus_cached()
942 		 * running on another CPU will see the updated cache fields if
943 		 * it can see the new value of the pointer.
944 		 */
945 		victim = xchg_release(&ghes_estatus_caches[slot],
946 				      RCU_INITIALIZER(new_cache));
947 
948 		/*
949 		 * At this point, victim may point to a cached item different
950 		 * from the one based on which we selected the slot. Instead of
951 		 * going to the loop again to pick another slot, let's just
952 		 * drop the other item anyway: this may cause a false cache
953 		 * miss later on, but that won't cause any problems.
954 		 */
955 		if (victim)
956 			call_rcu(&unrcu_pointer(victim)->rcu,
957 				 ghes_estatus_cache_rcu_free);
958 	}
959 }
960 
961 static void __ghes_panic(struct ghes *ghes,
962 			 struct acpi_hest_generic_status *estatus,
963 			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
964 {
965 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
966 
967 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
968 
969 	/* reboot to log the error! */
970 	if (!panic_timeout)
971 		panic_timeout = ghes_panic_timeout;
972 	panic("Fatal hardware error!");
973 }
974 
975 static int ghes_proc(struct ghes *ghes)
976 {
977 	struct acpi_hest_generic_status *estatus = ghes->estatus;
978 	u64 buf_paddr;
979 	int rc;
980 
981 	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
982 	if (rc)
983 		goto out;
984 
985 	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
986 		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
987 
988 	if (!ghes_estatus_cached(estatus)) {
989 		if (ghes_print_estatus(NULL, ghes->generic, estatus))
990 			ghes_estatus_cache_add(ghes->generic, estatus);
991 	}
992 	ghes_do_proc(ghes, estatus);
993 
994 out:
995 	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
996 
997 	return rc;
998 }
999 
1000 static void ghes_add_timer(struct ghes *ghes)
1001 {
1002 	struct acpi_hest_generic *g = ghes->generic;
1003 	unsigned long expire;
1004 
1005 	if (!g->notify.poll_interval) {
1006 		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
1007 			g->header.source_id);
1008 		return;
1009 	}
1010 	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
1011 	ghes->timer.expires = round_jiffies_relative(expire);
1012 	add_timer(&ghes->timer);
1013 }
1014 
1015 static void ghes_poll_func(struct timer_list *t)
1016 {
1017 	struct ghes *ghes = from_timer(ghes, t, timer);
1018 	unsigned long flags;
1019 
1020 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1021 	ghes_proc(ghes);
1022 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1023 	if (!(ghes->flags & GHES_EXITING))
1024 		ghes_add_timer(ghes);
1025 }
1026 
1027 static irqreturn_t ghes_irq_func(int irq, void *data)
1028 {
1029 	struct ghes *ghes = data;
1030 	unsigned long flags;
1031 	int rc;
1032 
1033 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1034 	rc = ghes_proc(ghes);
1035 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1036 	if (rc)
1037 		return IRQ_NONE;
1038 
1039 	return IRQ_HANDLED;
1040 }
1041 
1042 static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
1043 			   void *data)
1044 {
1045 	struct ghes *ghes;
1046 	unsigned long flags;
1047 	int ret = NOTIFY_DONE;
1048 
1049 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1050 	rcu_read_lock();
1051 	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
1052 		if (!ghes_proc(ghes))
1053 			ret = NOTIFY_OK;
1054 	}
1055 	rcu_read_unlock();
1056 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1057 
1058 	return ret;
1059 }
1060 
1061 static struct notifier_block ghes_notifier_hed = {
1062 	.notifier_call = ghes_notify_hed,
1063 };
1064 
1065 /*
1066  * Handlers for CPER records may not be NMI safe. For example,
1067  * memory_failure_queue() takes spinlocks and calls schedule_work_on().
1068  * In any NMI-like handler, memory from ghes_estatus_pool is used to save
1069  * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
1070  * ghes_proc_in_irq() to run in IRQ context where each estatus in
1071  * ghes_estatus_llist is processed.
1072  *
1073  * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
1074  * to suppress frequent messages.
1075  */
1076 static struct llist_head ghes_estatus_llist;
1077 static struct irq_work ghes_proc_irq_work;
1078 
1079 static void ghes_proc_in_irq(struct irq_work *irq_work)
1080 {
1081 	struct llist_node *llnode, *next;
1082 	struct ghes_estatus_node *estatus_node;
1083 	struct acpi_hest_generic *generic;
1084 	struct acpi_hest_generic_status *estatus;
1085 	bool task_work_pending;
1086 	u32 len, node_len;
1087 	int ret;
1088 
1089 	llnode = llist_del_all(&ghes_estatus_llist);
1090 	/*
1091 	 * The estatus entries were added to the list in reverse time
1092 	 * order; revert the list back to the proper order.
1093 	 */
1094 	llnode = llist_reverse_order(llnode);
1095 	while (llnode) {
1096 		next = llnode->next;
1097 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1098 					   llnode);
1099 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1100 		len = cper_estatus_len(estatus);
1101 		node_len = GHES_ESTATUS_NODE_LEN(len);
1102 		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
1103 		if (!ghes_estatus_cached(estatus)) {
1104 			generic = estatus_node->generic;
1105 			if (ghes_print_estatus(NULL, generic, estatus))
1106 				ghes_estatus_cache_add(generic, estatus);
1107 		}
1108 
1109 		if (task_work_pending && current->mm) {
1110 			estatus_node->task_work.func = ghes_kick_task_work;
1111 			estatus_node->task_work_cpu = smp_processor_id();
1112 			ret = task_work_add(current, &estatus_node->task_work,
1113 					    TWA_RESUME);
1114 			if (ret)
1115 				estatus_node->task_work.func = NULL;
1116 		}
1117 
1118 		if (!estatus_node->task_work.func)
1119 			gen_pool_free(ghes_estatus_pool,
1120 				      (unsigned long)estatus_node, node_len);
1121 
1122 		llnode = next;
1123 	}
1124 }
1125 
1126 static void ghes_print_queued_estatus(void)
1127 {
1128 	struct llist_node *llnode;
1129 	struct ghes_estatus_node *estatus_node;
1130 	struct acpi_hest_generic *generic;
1131 	struct acpi_hest_generic_status *estatus;
1132 
1133 	llnode = llist_del_all(&ghes_estatus_llist);
1134 	/*
1135 	 * The estatus entries were added to the list in reverse time
1136 	 * order; revert the list back to the proper order.
1137 	 */
1138 	llnode = llist_reverse_order(llnode);
1139 	while (llnode) {
1140 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
1141 					   llnode);
1142 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1143 		generic = estatus_node->generic;
1144 		ghes_print_estatus(NULL, generic, estatus);
1145 		llnode = llnode->next;
1146 	}
1147 }
1148 
1149 static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
1150 				       enum fixed_addresses fixmap_idx)
1151 {
1152 	struct acpi_hest_generic_status *estatus, tmp_header;
1153 	struct ghes_estatus_node *estatus_node;
1154 	u32 len, node_len;
1155 	u64 buf_paddr;
1156 	int sev, rc;
1157 
1158 	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
1159 		return -EOPNOTSUPP;
1160 
1161 	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
1162 	if (rc) {
1163 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1164 		return rc;
1165 	}
1166 
1167 	rc = __ghes_check_estatus(ghes, &tmp_header);
1168 	if (rc) {
1169 		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1170 		return rc;
1171 	}
1172 
1173 	len = cper_estatus_len(&tmp_header);
1174 	node_len = GHES_ESTATUS_NODE_LEN(len);
1175 	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
1176 	if (!estatus_node)
1177 		return -ENOMEM;
1178 
1179 	estatus_node->ghes = ghes;
1180 	estatus_node->generic = ghes->generic;
1181 	estatus_node->task_work.func = NULL;
1182 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
1183 
1184 	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
1185 		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
1186 		rc = -ENOENT;
1187 		goto no_work;
1188 	}
1189 
1190 	sev = ghes_severity(estatus->error_severity);
1191 	if (sev >= GHES_SEV_PANIC) {
1192 		ghes_print_queued_estatus();
1193 		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
1194 	}
1195 
1196 	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
1197 
1198 	/* This error has been reported before, don't process it again. */
1199 	if (ghes_estatus_cached(estatus))
1200 		goto no_work;
1201 
1202 	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
1203 
1204 	return rc;
1205 
1206 no_work:
1207 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
1208 		      node_len);
1209 
1210 	return rc;
1211 }
1212 
1213 static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
1214 				       enum fixed_addresses fixmap_idx)
1215 {
1216 	int ret = -ENOENT;
1217 	struct ghes *ghes;
1218 
1219 	rcu_read_lock();
1220 	list_for_each_entry_rcu(ghes, rcu_list, list) {
1221 		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
1222 			ret = 0;
1223 	}
1224 	rcu_read_unlock();
1225 
1226 	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
1227 		irq_work_queue(&ghes_proc_irq_work);
1228 
1229 	return ret;
1230 }
1231 
1232 #ifdef CONFIG_ACPI_APEI_SEA
1233 static LIST_HEAD(ghes_sea);
1234 
1235 /*
1236  * Return 0 only if one of the SEA error sources successfully reported an error
1237  * record sent from the firmware.
1238  */
1239 int ghes_notify_sea(void)
1240 {
1241 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
1242 	int rv;
1243 
1244 	raw_spin_lock(&ghes_notify_lock_sea);
1245 	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
1246 	raw_spin_unlock(&ghes_notify_lock_sea);
1247 
1248 	return rv;
1249 }
1250 
1251 static void ghes_sea_add(struct ghes *ghes)
1252 {
1253 	mutex_lock(&ghes_list_mutex);
1254 	list_add_rcu(&ghes->list, &ghes_sea);
1255 	mutex_unlock(&ghes_list_mutex);
1256 }
1257 
1258 static void ghes_sea_remove(struct ghes *ghes)
1259 {
1260 	mutex_lock(&ghes_list_mutex);
1261 	list_del_rcu(&ghes->list);
1262 	mutex_unlock(&ghes_list_mutex);
1263 	synchronize_rcu();
1264 }
1265 #else /* CONFIG_ACPI_APEI_SEA */
1266 static inline void ghes_sea_add(struct ghes *ghes) { }
1267 static inline void ghes_sea_remove(struct ghes *ghes) { }
1268 #endif /* CONFIG_ACPI_APEI_SEA */
1269 
1270 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
1271 /*
1272  * NMI may be triggered on any CPU, so ghes_in_nmi is used to
1273  * allow only one concurrent reader.
1274  */
1275 static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
1276 
1277 static LIST_HEAD(ghes_nmi);
1278 
1279 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1280 {
1281 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
1282 	int ret = NMI_DONE;
1283 
1284 	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
1285 		return ret;
1286 
1287 	raw_spin_lock(&ghes_notify_lock_nmi);
1288 	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
1289 		ret = NMI_HANDLED;
1290 	raw_spin_unlock(&ghes_notify_lock_nmi);
1291 
1292 	atomic_dec(&ghes_in_nmi);
1293 	return ret;
1294 }
1295 
1296 static void ghes_nmi_add(struct ghes *ghes)
1297 {
1298 	mutex_lock(&ghes_list_mutex);
1299 	if (list_empty(&ghes_nmi))
1300 		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
1301 	list_add_rcu(&ghes->list, &ghes_nmi);
1302 	mutex_unlock(&ghes_list_mutex);
1303 }
1304 
1305 static void ghes_nmi_remove(struct ghes *ghes)
1306 {
1307 	mutex_lock(&ghes_list_mutex);
1308 	list_del_rcu(&ghes->list);
1309 	if (list_empty(&ghes_nmi))
1310 		unregister_nmi_handler(NMI_LOCAL, "ghes");
1311 	mutex_unlock(&ghes_list_mutex);
1312 	/*
1313 	 * To synchronize with NMI handler, ghes can only be
1314 	 * freed after NMI handler finishes.
1315 	 */
1316 	synchronize_rcu();
1317 }
1318 #else /* CONFIG_HAVE_ACPI_APEI_NMI */
1319 static inline void ghes_nmi_add(struct ghes *ghes) { }
1320 static inline void ghes_nmi_remove(struct ghes *ghes) { }
1321 #endif /* CONFIG_HAVE_ACPI_APEI_NMI */
1322 
1323 static void ghes_nmi_init_cxt(void)
1324 {
1325 	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1326 }
1327 
1328 static int __ghes_sdei_callback(struct ghes *ghes,
1329 				enum fixed_addresses fixmap_idx)
1330 {
1331 	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
1332 		irq_work_queue(&ghes_proc_irq_work);
1333 
1334 		return 0;
1335 	}
1336 
1337 	return -ENOENT;
1338 }
1339 
1340 static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
1341 				      void *arg)
1342 {
1343 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
1344 	struct ghes *ghes = arg;
1345 	int err;
1346 
1347 	raw_spin_lock(&ghes_notify_lock_sdei_normal);
1348 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
1349 	raw_spin_unlock(&ghes_notify_lock_sdei_normal);
1350 
1351 	return err;
1352 }
1353 
1354 static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
1355 				       void *arg)
1356 {
1357 	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
1358 	struct ghes *ghes = arg;
1359 	int err;
1360 
1361 	raw_spin_lock(&ghes_notify_lock_sdei_critical);
1362 	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
1363 	raw_spin_unlock(&ghes_notify_lock_sdei_critical);
1364 
1365 	return err;
1366 }
1367 
1368 static int apei_sdei_register_ghes(struct ghes *ghes)
1369 {
1370 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1371 		return -EOPNOTSUPP;
1372 
1373 	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
1374 				 ghes_sdei_critical_callback);
1375 }
1376 
1377 static int apei_sdei_unregister_ghes(struct ghes *ghes)
1378 {
1379 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
1380 		return -EOPNOTSUPP;
1381 
1382 	return sdei_unregister_ghes(ghes);
1383 }
1384 
1385 static int ghes_probe(struct platform_device *ghes_dev)
1386 {
1387 	struct acpi_hest_generic *generic;
1388 	struct ghes *ghes = NULL;
1389 	unsigned long flags;
1390 
1391 	int rc = -EINVAL;
1392 
1393 	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
1394 	if (!generic->enabled)
1395 		return -ENODEV;
1396 
1397 	switch (generic->notify.type) {
1398 	case ACPI_HEST_NOTIFY_POLLED:
1399 	case ACPI_HEST_NOTIFY_EXTERNAL:
1400 	case ACPI_HEST_NOTIFY_SCI:
1401 	case ACPI_HEST_NOTIFY_GSIV:
1402 	case ACPI_HEST_NOTIFY_GPIO:
1403 		break;
1404 
1405 	case ACPI_HEST_NOTIFY_SEA:
1406 		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
1407 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
1408 				generic->header.source_id);
1409 			rc = -ENOTSUPP;
1410 			goto err;
1411 		}
1412 		break;
1413 	case ACPI_HEST_NOTIFY_NMI:
1414 		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
1415 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
1416 				generic->header.source_id);
1417 			goto err;
1418 		}
1419 		break;
1420 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1421 		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
1422 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
1423 				generic->header.source_id);
1424 			goto err;
1425 		}
1426 		break;
1427 	case ACPI_HEST_NOTIFY_LOCAL:
1428 		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
1429 			generic->header.source_id);
1430 		goto err;
1431 	default:
1432 		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
1433 			generic->notify.type, generic->header.source_id);
1434 		goto err;
1435 	}
1436 
1437 	rc = -EIO;
1438 	if (generic->error_block_length <
1439 	    sizeof(struct acpi_hest_generic_status)) {
1440 		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
1441 			generic->error_block_length, generic->header.source_id);
1442 		goto err;
1443 	}
1444 	ghes = ghes_new(generic);
1445 	if (IS_ERR(ghes)) {
1446 		rc = PTR_ERR(ghes);
1447 		ghes = NULL;
1448 		goto err;
1449 	}
1450 
1451 	switch (generic->notify.type) {
1452 	case ACPI_HEST_NOTIFY_POLLED:
1453 		timer_setup(&ghes->timer, ghes_poll_func, 0);
1454 		ghes_add_timer(ghes);
1455 		break;
1456 	case ACPI_HEST_NOTIFY_EXTERNAL:
1457 		/* External interrupt vector is GSI */
1458 		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
1459 		if (rc) {
1460 			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
1461 			       generic->header.source_id);
1462 			goto err;
1463 		}
1464 		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
1465 				 "GHES IRQ", ghes);
1466 		if (rc) {
1467 			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
1468 			       generic->header.source_id);
1469 			goto err;
1470 		}
1471 		break;
1472 
1473 	case ACPI_HEST_NOTIFY_SCI:
1474 	case ACPI_HEST_NOTIFY_GSIV:
1475 	case ACPI_HEST_NOTIFY_GPIO:
1476 		mutex_lock(&ghes_list_mutex);
1477 		if (list_empty(&ghes_hed))
1478 			register_acpi_hed_notifier(&ghes_notifier_hed);
1479 		list_add_rcu(&ghes->list, &ghes_hed);
1480 		mutex_unlock(&ghes_list_mutex);
1481 		break;
1482 
1483 	case ACPI_HEST_NOTIFY_SEA:
1484 		ghes_sea_add(ghes);
1485 		break;
1486 	case ACPI_HEST_NOTIFY_NMI:
1487 		ghes_nmi_add(ghes);
1488 		break;
1489 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1490 		rc = apei_sdei_register_ghes(ghes);
1491 		if (rc)
1492 			goto err;
1493 		break;
1494 	default:
1495 		BUG();
1496 	}
1497 
1498 	platform_set_drvdata(ghes_dev, ghes);
1499 
1500 	ghes->dev = &ghes_dev->dev;
1501 
1502 	mutex_lock(&ghes_devs_mutex);
1503 	list_add_tail(&ghes->elist, &ghes_devs);
1504 	mutex_unlock(&ghes_devs_mutex);
1505 
1506 	/* Handle any pending errors right away */
1507 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
1508 	ghes_proc(ghes);
1509 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
1510 
1511 	return 0;
1512 
1513 err:
1514 	if (ghes) {
1515 		ghes_fini(ghes);
1516 		kfree(ghes);
1517 	}
1518 	return rc;
1519 }
1520 
1521 static int ghes_remove(struct platform_device *ghes_dev)
1522 {
1523 	int rc;
1524 	struct ghes *ghes;
1525 	struct acpi_hest_generic *generic;
1526 
1527 	ghes = platform_get_drvdata(ghes_dev);
1528 	generic = ghes->generic;
1529 
1530 	ghes->flags |= GHES_EXITING;
1531 	switch (generic->notify.type) {
1532 	case ACPI_HEST_NOTIFY_POLLED:
1533 		timer_shutdown_sync(&ghes->timer);
1534 		break;
1535 	case ACPI_HEST_NOTIFY_EXTERNAL:
1536 		free_irq(ghes->irq, ghes);
1537 		break;
1538 
1539 	case ACPI_HEST_NOTIFY_SCI:
1540 	case ACPI_HEST_NOTIFY_GSIV:
1541 	case ACPI_HEST_NOTIFY_GPIO:
1542 		mutex_lock(&ghes_list_mutex);
1543 		list_del_rcu(&ghes->list);
1544 		if (list_empty(&ghes_hed))
1545 			unregister_acpi_hed_notifier(&ghes_notifier_hed);
1546 		mutex_unlock(&ghes_list_mutex);
1547 		synchronize_rcu();
1548 		break;
1549 
1550 	case ACPI_HEST_NOTIFY_SEA:
1551 		ghes_sea_remove(ghes);
1552 		break;
1553 	case ACPI_HEST_NOTIFY_NMI:
1554 		ghes_nmi_remove(ghes);
1555 		break;
1556 	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
1557 		rc = apei_sdei_unregister_ghes(ghes);
1558 		if (rc)
1559 			return rc;
1560 		break;
1561 	default:
1562 		BUG();
1563 		break;
1564 	}
1565 
1566 	ghes_fini(ghes);
1567 
1568 	mutex_lock(&ghes_devs_mutex);
1569 	list_del(&ghes->elist);
1570 	mutex_unlock(&ghes_devs_mutex);
1571 
1572 	kfree(ghes);
1573 
1574 	return 0;
1575 }
1576 
1577 static struct platform_driver ghes_platform_driver = {
1578 	.driver		= {
1579 		.name	= "GHES",
1580 	},
1581 	.probe		= ghes_probe,
1582 	.remove		= ghes_remove,
1583 };
1584 
1585 void __init acpi_ghes_init(void)
1586 {
1587 	int rc;
1588 
1589 	sdei_init();
1590 
1591 	if (acpi_disabled)
1592 		return;
1593 
1594 	switch (hest_disable) {
1595 	case HEST_NOT_FOUND:
1596 		return;
1597 	case HEST_DISABLED:
1598 		pr_info(GHES_PFX "HEST is not enabled!\n");
1599 		return;
1600 	default:
1601 		break;
1602 	}
1603 
1604 	if (ghes_disable) {
1605 		pr_info(GHES_PFX "GHES is not enabled!\n");
1606 		return;
1607 	}
1608 
1609 	ghes_nmi_init_cxt();
1610 
1611 	rc = platform_driver_register(&ghes_platform_driver);
1612 	if (rc)
1613 		return;
1614 
1615 	rc = apei_osc_setup();
1616 	if (rc == 0 && osc_sb_apei_support_acked)
1617 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1618 	else if (rc == 0 && !osc_sb_apei_support_acked)
1619 		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1620 	else if (rc && osc_sb_apei_support_acked)
1621 		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1622 	else
1623 		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1624 }
1625 
1626 /*
1627  * Known x86 systems that prefer GHES error reporting:
1628  */
1629 static struct acpi_platform_list plat_list[] = {
1630 	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
1631 	{ } /* End */
1632 };
1633 
1634 struct list_head *ghes_get_devices(void)
1635 {
1636 	int idx = -1;
1637 
1638 	if (IS_ENABLED(CONFIG_X86)) {
1639 		idx = acpi_match_platform_list(plat_list);
1640 		if (idx < 0) {
1641 			if (!ghes_edac_force_enable)
1642 				return NULL;
1643 
1644 			pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
1645 		}
1646 	} else if (list_empty(&ghes_devs)) {
1647 		return NULL;
1648 	}
1649 
1650 	return &ghes_devs;
1651 }
1652 EXPORT_SYMBOL_GPL(ghes_get_devices);
1653 
1654 void ghes_register_report_chain(struct notifier_block *nb)
1655 {
1656 	atomic_notifier_chain_register(&ghes_report_chain, nb);
1657 }
1658 EXPORT_SYMBOL_GPL(ghes_register_report_chain);
1659 
1660 void ghes_unregister_report_chain(struct notifier_block *nb)
1661 {
1662 	atomic_notifier_chain_unregister(&ghes_report_chain, nb);
1663 }
1664 EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
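/*
 * A minimal sketch of a report-chain consumer (the "my_*" names are
 * hypothetical). As in ghes_do_proc() above, the chain is called with
 * the GHES severity as 'event' and a struct cper_sec_mem_err pointer
 * as 'data', possibly from atomic context:
 *
 *	static int my_mem_err_notify(struct notifier_block *nb,
 *				     unsigned long sev, void *data)
 *	{
 *		struct cper_sec_mem_err *mem_err = data;
 *
 *		my_count_mem_error(sev, mem_err);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_report_nb = {
 *		.notifier_call = my_mem_err_notify,
 *	};
 *
 *	ghes_register_report_chain(&my_report_nb);
 */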
1665