/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <assert.h>

#include <common/desc_image_load.h>
#include <dcfg.h>
#ifdef POLICY_FUSE_PROVISION
#include <fuse_io.h>
#endif
#include <mmu_def.h>
#include <plat_common.h>
#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
#include <plat_nv_storage.h>
#endif

#pragma weak bl2_el3_early_platform_setup
#pragma weak bl2_el3_plat_arch_setup
#pragma weak bl2_el3_plat_prepare_exit

static dram_regions_info_t dram_regions_info = {0};

/*******************************************************************************
 * Return the pointer to the 'dram_regions_info' structure of the DRAM.
 * This structure is populated after init_ddr().
 ******************************************************************************/
dram_regions_info_t *get_dram_regions_info(void)
{
	return &dram_regions_info;
}

#ifdef DDR_INIT
static void populate_dram_regions_info(void)
{
	long long dram_remain_size = dram_regions_info.total_dram_size;
	uint8_t reg_id = 0U;

	dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR;
	dram_regions_info.region[reg_id].size =
			dram_remain_size > NXP_DRAM0_MAX_SIZE ?
				NXP_DRAM0_MAX_SIZE : dram_remain_size;

	if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) {
		ERROR("Incorrect DRAM0 size is defined in platform_def.h\n");
	}

	dram_remain_size -= dram_regions_info.region[reg_id].size;
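	/*
	 * Carve the secure DRAM and the SP shared DRAM out of region 0 so
	 * they are not reported as usable memory to the later boot stages.
	 */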
	dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

	assert(dram_regions_info.region[reg_id].size > 0);

	/* Reducing total dram size by 66MB */
	dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE)
	if (dram_remain_size > 0) {
		reg_id++;
		dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM1_MAX_SIZE ?
					NXP_DRAM1_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE)
	if (dram_remain_size > 0) {
		reg_id++;
		dram_regions_info.region[reg_id].addr = NXP_DRAM2_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM2_MAX_SIZE ?
					NXP_DRAM2_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
	reg_id++;
	dram_regions_info.num_dram_regions = reg_id;
}
#endif

#ifdef IMAGE_BL32
/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t ls_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL32 image.
	 */
	return 0U;
}
#endif

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

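	/*
	 * Enter BL33 in HYP mode when the Virtualization Extensions are
	 * implemented, otherwise fall back to SVC mode.
	 */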
	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

void bl2_el3_early_platform_setup(u_register_t arg0 __unused,
				  u_register_t arg1 __unused,
				  u_register_t arg2 __unused,
				  u_register_t arg3 __unused)
{
	/*
	 * SoC specific early init
	 * Any errata handling or SoC specific early initialization can
	 * be done here
	 * Set Counter Base Frequency in CNTFID0 and in cntfrq_el0.
	 * Initialize the interconnect.
	 * Enable coherency for primary CPU cluster
	 */
	soc_early_init();

	/* Initialise the IO layer and register platform IO devices */
	plat_io_setup();

	if (dram_regions_info.total_dram_size > 0) {
		populate_dram_regions_info();
	}

#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
	read_nv_app_data();
#if DEBUG
	const nv_app_data_t *nv_app_data = get_nv_data();

	INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag);
	INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag);
#endif
#endif
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void ls_bl2_el3_plat_arch_setup(void)
{
	unsigned int flags = 0U;
	/* Set up the page tables covering the BL2 memory regions */
	ls_setup_page_tables(
#if SEPARATE_RW_AND_NOLOAD
			      BL2_START,
			      BL2_LIMIT - BL2_START,
#else
			      BL2_BASE,
			      (unsigned long)(&__BL2_END__) - BL2_BASE,
#endif
			      BL_CODE_BASE,
			      BL_CODE_END,
			      BL_RO_DATA_BASE,
			      BL_RO_DATA_END
#if USE_COHERENT_MEM
			      , BL_COHERENT_RAM_BASE,
			      BL_COHERENT_RAM_END
#endif
			      );

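	/*
	 * If DDR has not been initialized yet (no DRAM region populated),
	 * map the translation tables as non-cacheable memory.
	 */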
	if ((dram_regions_info.region[0].addr == 0)
		&& (dram_regions_info.total_dram_size == 0)) {
		flags = XLAT_TABLE_NC;
	}

#ifdef AARCH32
	enable_mmu_secure(0);
#else
	enable_mmu_el3(flags);
#endif
}

void bl2_el3_plat_arch_setup(void)
{
	ls_bl2_el3_plat_arch_setup();
}

void bl2_platform_setup(void)
{
	/*
	 * Perform platform setup before loading the image.
	 */
}

/* Handling image information by platform. */
int ls_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);

	assert(bl_mem_params);

	switch (image_id) {
	case BL31_IMAGE_ID:
		bl_mem_params->ep_info.args.arg3 =
					(u_register_t) &dram_regions_info;

		/* Pass the value of PORSR1 register in Argument 4 */
		bl_mem_params->ep_info.args.arg4 =
					(u_register_t)read_reg_porsr1();
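		/*
		 * Flush dram_regions_info to main memory so that BL31 reads
		 * a coherent copy before it enables its data cache.
		 */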
		flush_dcache_range((uintptr_t)&dram_regions_info,
				sizeof(dram_regions_info));
		break;
#if defined(AARCH64) && defined(IMAGE_BL32)
	case BL32_IMAGE_ID:
		bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
		break;
#endif
	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry();
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return ls_bl2_handle_post_image_load(image_id);
}

void bl2_el3_plat_prepare_exit(void)
{
	return soc_bl2_prepare_exit();
}

/* Called to do the dynamic initialization required
 * before loading the next image.
 */
void bl2_plat_preload_setup(void)
{

	soc_preload_setup();

	if (dram_regions_info.total_dram_size < NXP_DRAM0_SIZE) {
		NOTICE("ERROR: DRAM0 Size is not correctly configured.\n");
		assert(false);
	}

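	/*
	 * If DDR has been initialized but the region layout has not been
	 * populated yet, populate it now and map the DDR regions dynamically.
	 */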
	if ((dram_regions_info.region[0].addr == 0)
		&& (dram_regions_info.total_dram_size > 0)) {
		populate_dram_regions_info();

		mmap_add_ddr_region_dynamically();
	}

	/* setup the memory region access permissions */
	soc_mem_access();

#ifdef POLICY_FUSE_PROVISION
	fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ);
#endif
}