xref: /linux/arch/riscv/kernel/alternative.c (revision 6c8c1406)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 version
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>

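/*
 * Snapshot of the running CPU implementation's identity, gathered once
 * before patching. patch_func points at the vendor-specific errata
 * patcher selected from the vendor ID, or is NULL when no matching
 * errata support is built in.
 */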
struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
			   unsigned long archid, unsigned long impid,
			   unsigned int stage);
};

static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
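	/*
	 * In M-mode the machine-level ID CSRs can be read directly;
	 * otherwise they are only accessible through the SBI firmware
	 * interface.
	 */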
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
	cpu_mfr_info->arch_id = sbi_get_marchid();
	cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif

	switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_SIFIVE
	case SIFIVE_VENDOR_ID:
		cpu_mfr_info->patch_func = sifive_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_THEAD
	case THEAD_VENDOR_ID:
		cpu_mfr_info->patch_func = thead_errata_patch_func;
		break;
#endif
	default:
		cpu_mfr_info->patch_func = NULL;
	}
}
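
/*
 * The stage argument passed around below tells a patch function at which
 * point patching is happening (RISCV_ALTERNATIVES_EARLY_BOOT, _BOOT or
 * _MODULE), so that work which is not yet possible at that stage can be
 * deferred.
 *
 * A vendor patch function walks the alt_entry table and rewrites the
 * original code when the running implementation needs the workaround.
 * A minimal sketch, not part of this file: it assumes the alt_entry
 * field names from asm/alternative.h at this revision, and the
 * EXAMPLE_VENDOR_ID / erratum_applies() names are purely illustrative:
 *
 *	static void __init_or_module example_errata_patch_func(struct alt_entry *begin,
 *								struct alt_entry *end,
 *								unsigned long archid,
 *								unsigned long impid,
 *								unsigned int stage)
 *	{
 *		struct alt_entry *alt;
 *
 *		for (alt = begin; alt < end; alt++) {
 *			if (alt->vendor_id != EXAMPLE_VENDOR_ID)
 *				continue;
 *			if (!erratum_applies(alt->errata_id, archid, impid, stage))
 *				continue;
 *			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
 *		}
 *	}
 */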

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
						 struct alt_entry *end,
						 unsigned int stage)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);

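	/*
	 * ISA-extension (cpufeature) based patching runs first for every
	 * region; vendor-specific errata patching below runs only when a
	 * patch function was selected for this CPU's vendor.
	 */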
	riscv_cpufeature_patch_func(begin, end, stage);

	if (!cpu_mfr_info.patch_func)
		return;

	cpu_mfr_info.patch_func(begin, end,
				cpu_mfr_info.arch_id,
				cpu_mfr_info.imp_id,
				stage);
}

void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

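	/*
	 * __alt_start/__alt_end (asm/sections.h) bracket the kernel's own
	 * alternative entries, collected by the linker script.
	 */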
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_BOOT);
}

/*
 * apply_early_boot_alternatives() is called from setup_vm() with the MMU off.
 *
 * The following requirements must be honoured for it to work correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for alternative.o in kernel/Makefile.
 */
void __init apply_early_boot_alternatives(void)
{
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_EARLY_BOOT);
#endif
}

#ifdef CONFIG_MODULES
void apply_module_alternatives(void *start, size_t length)
{
	_apply_alternatives((struct alt_entry *)start,
			    (struct alt_entry *)(start + length),
			    RISCV_ALTERNATIVES_MODULE);
}
#endif
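
/*
 * apply_module_alternatives() is invoked by the module loader once a
 * module's sections are in their final locations, with that module's
 * .alternative section as the range to patch. Roughly, the call site in
 * arch/riscv/kernel/module.c looks like the simplified sketch below
 * (find_section() is a static helper in that file):
 *
 *	int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 *			    struct module *me)
 *	{
 *		const Elf_Shdr *s;
 *
 *		s = find_section(hdr, sechdrs, ".alternative");
 *		if (s)
 *			apply_module_alternatives((void *)s->sh_addr, s->sh_size);
 *
 *		return 0;
 *	}
 */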