// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2008-2011
 * Graeme Russ, <graeme.russ@gmail.com>
 *
 * (C) Copyright 2002
 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Alex Zuepke <azu@sysgo.de>
 *
 * Part of this file is adapted from coreboot
 * src/arch/x86/lib/cpu.c
 */

#define LOG_CATEGORY	UCLASS_CPU

#include <common.h>
#include <bootstage.h>
#include <command.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <init.h>
#include <irq.h>
#include <log.h>
#include <malloc.h>
#include <syscon.h>
#include <acpi/acpi_s3.h>
#include <acpi/acpi_table.h>
#include <asm/acpi.h>
#include <asm/control_regs.h>
#include <asm/coreboot_tables.h>
#include <asm/cpu.h>
#include <asm/global_data.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
#include <asm/mrccache.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/post.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
#include <asm/tables.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_TPL_BUILD
static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};
#endif

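/*
 * Weak default for the pre-Linux cleanup hook: park the application
 * processors and stash the bootstage timing records at the configured
 * address so a later phase can pick them up.
 */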
int __weak x86_cleanup_before_linux(void)
{
	int ret;

	ret = mp_park_aps();
	if (ret)
		return log_msg_ret("park", ret);
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
			CONFIG_BOOTSTAGE_STASH_SIZE);

	return 0;
}

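/* Enable the CPU caches; boards may override this via the weak init_cache() alias */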
int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));

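/*
 * The address arguments are ignored: wbinvd writes back and invalidates
 * every cache line, so the whole cache is flushed regardless of the range.
 */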
void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}

/*
 * Define these stubs to allow ehci-hcd to build. On x86, DMA is coherent
 * with the CPU caches, so no range flush or invalidate is needed here.
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
	enable_caches();
}

void dcache_disable(void)
{
	disable_caches();
}

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 1;
}

#ifndef CONFIG_TPL_BUILD
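/* Map an X86_VENDOR_* value to a human-readable name, if one is known */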
const char *cpu_vendor_name(int vendor)
{
	const char *name;
	name = "<invalid cpu vendor>";
	if (vendor < ARRAY_SIZE(x86_vendor_name) &&
	    x86_vendor_name[vendor])
		name = x86_vendor_name[vendor];

	return name;
}
#endif

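/*
 * Read the processor brand string into @name, which must be at least
 * CPU_MAX_NAME_LEN bytes. The returned pointer skips the leading spaces
 * that Intel uses to right-justify the string.
 */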
char *cpu_get_name(char *name)
{
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/* CPUID leaves 0x80000002..0x80000004 together return the 48-byte brand string */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces. */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}

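/* Default implementation of print_cpuinfo(): report CPU mode, vendor and device ID */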
int default_print_cpuinfo(void)
{
	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		debug("ACPI previous sleep state: %s\n",
		      acpi_ss_string(gd->arch.prev_sleep_state));
	}

	return 0;
}

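/* Write the progress value to the POST code port (typically I/O port 0x80) */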
void show_boot_progress(int val)
{
	outb(val, POST_PORT);
}

#if !defined(CONFIG_SYS_COREBOOT) && !defined(CONFIG_EFI_STUB)
/*
 * Implement a weak default function for boards that need to do some final init
 * before the system is ready.
 */
__weak void board_final_init(void)
{
}

/*
 * Implement a weak default function for boards that need to do some final
 * processing before booting the OS.
 */
__weak void board_final_cleanup(void)
{
}

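/*
 * Final board init: handle an ACPI S3 resume if one is in progress, write
 * the x86 configuration tables and, when ACPI tables are generated, switch
 * the hardware into ACPI mode.
 */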
int last_stage_init(void)
{
	struct acpi_fadt __maybe_unused *fadt;
	int ret;

	board_final_init();

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		fadt = acpi_find_fadt();

		if (fadt && gd->arch.prev_sleep_state == ACPI_S3)
			acpi_resume(fadt);
	}

	ret = write_tables();
	if (ret) {
		log_err("Failed to write tables\n");
		return log_msg_ret("table", ret);
	}

	if (IS_ENABLED(CONFIG_GENERATE_ACPI_TABLE)) {
		fadt = acpi_find_fadt();

		/* Don't touch ACPI hardware on HW reduced platforms */
		if (fadt && !(fadt->flags & ACPI_FADT_HW_REDUCED_ACPI)) {
			/*
			 * Rather than waiting for the OSPM to ask us to
			 * switch to ACPI mode, do it ourselves, since no
			 * SMI will be triggered.
			 */
			enter_acpi_mode(fadt->pm1a_cnt_blk);
		}
	}

	/*
	 * TODO(sjg@chromium.org): Move this to bootm_announce_and_cleanup()
	 * once APL FSP-S at 0x200000 does not overlap with the bzimage at
	 * 0x100000.
	 */
	board_final_cleanup();

	return 0;
}
#endif

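/*
 * Start the additional CPUs when SMP is enabled; otherwise just probe the
 * boot CPU's driver so the CPU uclass is available.
 */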
static int x86_init_cpus(void)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		debug("Init additional CPUs\n");
		x86_mp_init();
	} else {
		struct udevice *dev;

		/*
		 * This causes the cpu-x86 driver to be probed.
		 * We don't check the return value here because we want to
		 * allow boards which have not yet been converted to the CPU
		 * uclass driver to boot.
		 */
		uclass_first_device(UCLASS_CPU, &dev);
	}

	return 0;
}

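/*
 * Post-relocation CPU init: bring up secondary CPUs and probe the
 * northbridge, PCH, LPC and pin-control devices if they are present.
 */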
int cpu_init_r(void)
{
	struct udevice *dev;
	int ret;

	if (!ll_boot_init()) {
		uclass_first_device(UCLASS_PCI, &dev);
		return 0;
	}

	ret = x86_init_cpus();
	if (ret)
		return ret;

	/*
	 * Set up the northbridge, PCH and LPC if available. Note that these
	 * may have had some limited pre-relocation init if they were probed
	 * before relocation, but this is post relocation.
	 */
	uclass_first_device(UCLASS_NORTHBRIDGE, &dev);
	uclass_first_device(UCLASS_PCH, &dev);
	uclass_first_device(UCLASS_LPC, &dev);

	/* Set up pin control if available */
	ret = syscon_get_by_driver_data(X86_SYSCON_PINCONF, &dev);
	debug("%s, pinctrl=%p, ret=%d\n", __func__, dev, ret);

	return 0;
}

#ifndef CONFIG_EFI_STUB
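/*
 * Reserve x86-specific memory regions as configured: the MRC cache, the
 * SeaBIOS high tables and the ACPI S3 resume area. Also snapshot the ITSS
 * IRQ polarities before FSP-S runs.
 */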
int reserve_arch(void)
{
	struct udevice *itss;
	int ret;

	if (IS_ENABLED(CONFIG_ENABLE_MRC_CACHE))
		mrccache_reserve();

	if (IS_ENABLED(CONFIG_SEABIOS))
		high_table_reserve();

	if (IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) {
		acpi_s3_reserve();

		if (IS_ENABLED(CONFIG_HAVE_FSP)) {
			/*
			 * Save stack address to CMOS so that at next S3 boot,
			 * we can use it as the stack address for fsp_continue()
			 */
			fsp_save_s3_stack();
		}
	}
	ret = irq_first_device_type(X86_IRQT_ITSS, &itss);
	if (!ret) {
		/*
		 * Snapshot the current GPIO IRQ polarities. FSP-S is about to
		 * run and will set a default policy that doesn't honour boards'
		 * requirements
		 */
		irq_snapshot_polarities(itss);
	}

	return 0;
}
#endif

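/*
 * Scan a memory range for the coreboot table, identified by its "LBIO"
 * signature on a 16-byte boundary. Returns the address of the signature,
 * or -ENOENT if it is not found.
 */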
long detect_coreboot_table_at(ulong start, ulong size)
{
	u32 *ptr, *end;

	size /= 4;
	for (ptr = (void *)start, end = ptr + size; ptr < end; ptr += 4) {
		if (*ptr == 0x4f49424c) /* "LBIO" */
			return (long)ptr;
	}

	return -ENOENT;
}

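/*
 * Search the two standard locations for the coreboot table and return its
 * address, or a negative error code if no table was found.
 */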
long locate_coreboot_table(void)
{
	long addr;

	/* We look for LBIO in the first 4K of RAM and again at 960KB */
	addr = detect_coreboot_table_at(0x0, 0x1000);
	if (addr < 0)
		addr = detect_coreboot_table_at(0xf0000, 0x1000);

	return addr;
}