// SPDX-License-Identifier: GPL-2.0-only
/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows reporting errors (for example from the chipset) to the
 * operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file provides common functions used by more than one APEI
 * table, including the interpreter framework for ERST and EINJ, and
 * resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

#define APEI_EXEC_PRESERVE_REGISTER	0x1
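
/*
 * An action entry with this flag set must not clobber bits outside its
 * mask: __apei_exec_write_register() below performs a read-modify-write
 * instead of a plain write for such entries.
 */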

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
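
/*
 * Illustrative call sequence for the interpreter (a sketch only; the
 * concrete ins_table, action_table and action values come from the
 * ERST/EINJ drivers):
 *
 *	apei_exec_ctx_init(&ctx, ins_table, nr_ins, action_table, nr_entries);
 *	apei_exec_collect_resources(&ctx, &resources);
 *	apei_resources_request(&resources, "some driver");
 *	apei_exec_pre_map_gars(&ctx);
 *	__apei_exec_run(&ctx, action, false);
 */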

int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
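
/*
 * Note: apei_exec_read_register_value() leaves a boolean in ctx->value:
 * 1 if the register contents (masked and shifted by
 * __apei_exec_read_register()) equal entry->value, 0 otherwise.
 */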

int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	int rc;

	ctx->value = entry->value;
	rc = apei_exec_write_register(ctx, entry);

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action. Go through the whole action table
 * and execute all instructions belonging to the action.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to be executed, and
	 * an instruction's "run" function may change "ctx->ip" to
	 * implement "goto" semantics.
	 */
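	/*
	 * A "run" function that has set "ctx->ip" itself returns
	 * APEI_EXEC_SET_IP so that the increment below is skipped; if
	 * "ctx->ip" then points before the current position, the scan
	 * of the action table restarts from the beginning.
	 */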
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warn(FW_WARN APEI_PFX
					"Invalid action table, unknown instruction type: %d\n",
					entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warn(FW_WARN APEI_PFX
				"Invalid action table, unknown instruction type: %d\n",
				ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return apei_map_generic_address(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in the action table to make it possible to access
 * them from the NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

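	/*
	 * If mapping fails partway through, unmap whatever has been
	 * mapped so far: "end" holds the index of the failing entry, so
	 * a copy of the context restricted to the already-processed
	 * entries is handed to apei_exec_post_unmap_gars().
	 */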
	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};
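
/*
 * Ranges are half-open, [start, end): "end" is the first address past
 * the range, so the length is always "end - start".
 */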

/* Collect all resources requested, to avoid conflict */
static struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
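
/*
 * Every successfully requested range is merged into apei_resources_all,
 * and apei_resources_request() subtracts this set from a new request
 * first, so ranges already claimed by another APEI table are not
 * requested (and do not conflict) a second time.
 */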

static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}
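
/*
 * Worked example for apei_res_add(): with [0x100, 0x108) already on the
 * list, adding start=0x104, size=0xc first finds the overlap, unlinks
 * the existing node, widens it to [0x100, 0x110), rescans the list for
 * further overlaps with the widened range, and finally re-inserts the
 * single merged node.
 */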

static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}
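
/*
 * For each range in res_list1, apei_res_sub() handles four cases
 * against every range in res_list2: no overlap (nothing to do), fully
 * covered (the res_list1 node is deleted), res_list2 range strictly
 * inside (the node is split in two around it), or partial overlap
 * (the node is trimmed at one end).
 */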

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entries and trigger table
 * entries), so common resources are subtracted from the trigger table
 * resources before the second request.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;
	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}

int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
				     void *data), void *data);
static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}

/*
 * The I/O memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or with the I/O memory/ports of devices.
 */
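
/*
 * Illustrative pairing of request and release (a sketch only; the
 * description string and error handling depend on the caller):
 *
 *	apei_resources_init(&res);
 *	apei_exec_collect_resources(&ctx, &res);
 *	rc = apei_resources_request(&res, "APEI XYZ");
 *	...
 *	apei_resources_release(&res);
 *	apei_resources_fini(&res);
 */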
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses the ACPI NVS region, which has already
	 * been marked busy, so exclude it from the APEI resources to
	 * avoid a false conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	goto arch_res_fini;

err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
				u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}
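	/*
	 * GAR access size codes 1..4 map to access widths of 8, 16, 32
	 * and 64 bits respectively, i.e. 1 << (code + 2).
	 */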
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
	    *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	return 0;
}

int apei_map_generic_address(struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	/* IO space doesn't need mapping */
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		return 0;

	if (!acpi_os_map_generic_address(reg))
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);

/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					       val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
						val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * The same register may be used by multiple instructions in the GARs,
 * so resources are collected before being requested.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);

void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);

int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);