// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>
#include <soc.h>

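/*
 * Look up the TI System Firmware (TI-SCI) device in the driver model and
 * return its TI-SCI handle. Panics if the firmware device cannot be
 * obtained, since SYSFW services are required from here on.
 */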
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated, we manually add a '\0' terminator if needed.
	 * Further note that we intentionally avoid the extended printf()
	 * formatter '%.*s' here so as not to require a more full-featured
	 * printf() implementation.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

void mmr_unlock(phys_addr_t base, u32 partition)
{
	/* Translate the base address to the requested partition */
	phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;

	/* Unlock the partition, if locked, using the two-step kick sequence */
	writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
	writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
}

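/*
 * Returns true when the ROM extended boot data carries a valid header
 * magic and reports more than one loaded component, i.e. ROM has already
 * loaded System Firmware for us.
 */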
bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
{
	if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
		return false;

	return data->num_components > 1;
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
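/*
 * Bring up the serial device selected by CONFIG_K3_EARLY_CONS_IDX so that
 * console output is available before the regular console is initialized.
 */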
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#ifdef CONFIG_SYS_K3_SPL_ATF

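/*
 * Initialize the SPL environment and export the storage interface
 * variables consumed by the firmware loader, based on the device we
 * booted from.
 */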
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
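/*
 * Load a remote core firmware image via the filesystem loader. The image
 * name and load address are taken from the environment variables named by
 * name_fw and name_loadaddr. Returns the number of bytes loaded, or 0 if
 * nothing was loaded.
 */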
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

__weak void start_non_linux_remote_cores(void)
{
}

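/*
 * Hand over from R5 SPL: release exclusive devices, optionally load the
 * MCU R5F core 0 firmware image, then load and start ATF on a Cortex-A
 * core (remoteproc device 1). If a valid R5F ELF image was loaded, jump
 * into it; otherwise release this core's resources and idle in WFE.
 */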
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();
	start_non_linux_remote_cores();
	size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
			     &loadaddr);

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure the DT reflects the same.
	 */
	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (!(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}

	image_entry_noargs_t image_entry =
		(image_entry_noargs_t)load_elf_image_phdr(loadaddr);

	image_entry();
}
#endif

#if defined(CONFIG_OF_LIBFDT)
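/*
 * Query the MSMC SRAM range from System Firmware and describe it in the
 * device tree as an "mmio-sram" node under parent_path. Subnodes that lie
 * outside the available range, or that describe the sysfw and l3cache
 * reservations, are deleted.
 */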
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* find or create the "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x, size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

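/*
 * Set the "status" property of the node at node_path to "disabled" so the
 * OS will not use it.
 */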
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(void)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
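/* Print the SoC family and revision reported by the SOC uclass driver */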
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC:   ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	ret = soc_get_family(soc, name, 64);
	if (!ret) {
		printf("%s ", name);
	}

	ret = soc_get_revision(soc, name, 64);
	if (!ret) {
		printf("%s\n", name);
	}

	return 0;
}
#endif

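/*
 * Identify the SoC by reading the part number field of the WKUP JTAG ID
 * register.
 */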
bool soc_is_j721e(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J721E;
}

bool soc_is_j7200(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J7200;
}

#ifdef CONFIG_ARM64
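/*
 * Flush the loaded kernel image out of the data cache before control is
 * handed over to Linux.
 */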
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and later
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write-back/write-allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid these two conditions, disable the linefill optimization
	 * in the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

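/*
 * Iterate over the given firewall list and, for every region whose control
 * field is non-zero, clear it via TI-SCI to disable the firewall.
 */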
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}

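/*
 * Place the MMU/TLB table just below the top of usable DDR (capped to the
 * 32-bit address space) and enable the data cache for SPL.
 */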
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif