/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
7 #include <stdbool.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <string.h>
11
12 #include <common/debug.h>
13 #include <common/runtime_svc.h>
14 #include <context.h>
15 #include <lib/coreboot.h>
16 #include <lib/utils_def.h>
17 #include <lib/xlat_tables/xlat_tables_v2.h>
18 #include <smccc_helpers.h>
19 #include <tools_share/uuid.h>
20
21 #include <qti_plat.h>
22 #include <qti_secure_io_cfg.h>
23 #include <qtiseclib_interface.h>
24 /*
25 * SIP service - SMC function IDs for SiP Service queries
26 *
27 */
28 #define QTI_SIP_SVC_CALL_COUNT_ID U(0x0200ff00)
29 #define QTI_SIP_SVC_UID_ID U(0x0200ff01)
30 /* 0x8200ff02 is reserved*/
31 #define QTI_SIP_SVC_VERSION_ID U(0x0200ff03)
32
33 /*
34 * Syscall's to allow Non Secure world accessing peripheral/IO memory
35 * those are secure/proteced BUT not required to be secure.
36 */
37 #define QTI_SIP_SVC_SECURE_IO_READ_ID U(0x02000501)
38 #define QTI_SIP_SVC_SECURE_IO_WRITE_ID U(0x02000502)
39
40 /*
41 * Syscall's to assigns a list of intermediate PAs from a
42 * source Virtual Machine (VM) to a destination VM.
43 */
44 #define QTI_SIP_SVC_MEM_ASSIGN_ID U(0x02000C16)
45
46 #define QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID U(0x1)
47 #define QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID U(0x2)
48 #define QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID U(0x1117)
49
50 #define QTI_SIP_SVC_CALL_COUNT U(0x3)
51 #define QTI_SIP_SVC_VERSION_MAJOR U(0x0)
52 #define QTI_SIP_SVC_VERSION_MINOR U(0x0)
53
54 #define QTI_VM_LAST U(44)
55 #define SIZE4K U(0x1000)
56 #define QTI_VM_MAX_LIST_SIZE U(0x20)
57
58 #define FUNCID_OEN_NUM_MASK ((FUNCID_OEN_MASK << FUNCID_OEN_SHIFT)\
59 |(FUNCID_NUM_MASK << FUNCID_NUM_SHIFT))
60
61 enum {
62 QTI_SIP_SUCCESS = 0,
63 QTI_SIP_NOT_SUPPORTED = -1,
64 QTI_SIP_PREEMPTED = -2,
65 QTI_SIP_INVALID_PARAM = -3,
66 };
67
68 /* QTI SiP Service UUID */
69 DEFINE_SVC_UUID2(qti_sip_svc_uid,
70 0x43864748, 0x217f, 0x41ad, 0xaa, 0x5a,
71 0xba, 0xe7, 0x0f, 0xa5, 0x52, 0xaf);
72
/*
 * Check whether a non-secure secure-IO read/write to the given physical
 * address is permitted, i.e. the address is present in the platform
 * allow-list qti_secure_io_allowed_regs.
 *
 * Returns true when the address is allow-listed, false otherwise.
 */
static bool qti_is_secure_io_access_allowed(u_register_t addr)
{
	unsigned int i;

	/* Unsigned index: ARRAY_SIZE() yields size_t, avoid sign-compare. */
	for (i = 0U; i < ARRAY_SIZE(qti_secure_io_allowed_regs); i++) {
		if ((uintptr_t) addr == qti_secure_io_allowed_regs[i]) {
			return true;
		}
	}

	return false;
}
85
/*
 * Validate all arguments of a mem-assign request before forwarding them
 * to qtiseclib:
 *  - all list pointers non-NULL, all counts non-zero and below their
 *    respective upper bounds (QTI_VM_LAST / QTI_VM_MAX_LIST_SIZE),
 *  - every memory region 4KB aligned in address and size, non-empty,
 *    free of address-range overflow, and located in coreboot RAM or
 *    RESERVED memory,
 *  - every source and destination VM id below QTI_VM_LAST.
 *
 * Returns true when every check passes, false otherwise.
 */
bool qti_mem_assign_validate_param(memprot_info_t *mem_info,
				   u_register_t u_num_mappings,
				   uint32_t *source_vm_list,
				   u_register_t src_vm_list_cnt,
				   memprot_dst_vm_perm_info_t *dest_vm_list,
				   u_register_t dst_vm_list_cnt)
{
	/* u_register_t index: bounds are u_register_t, avoid sign-compare. */
	u_register_t i;

	if (!source_vm_list || !dest_vm_list || !mem_info
	    || (src_vm_list_cnt == 0)
	    || (src_vm_list_cnt >= QTI_VM_LAST) || (dst_vm_list_cnt == 0)
	    || (dst_vm_list_cnt >= QTI_VM_LAST) || (u_num_mappings == 0)
	    || u_num_mappings > QTI_VM_MAX_LIST_SIZE) {
		ERROR("vm count is 0 or more then QTI_VM_LAST or empty list\n");
		ERROR("source_vm_list %p dest_vm_list %p mem_info %p src_vm_list_cnt %u dst_vm_list_cnt %u u_num_mappings %u\n",
		      source_vm_list, dest_vm_list, mem_info,
		      (unsigned int)src_vm_list_cnt,
		      (unsigned int)dst_vm_list_cnt,
		      (unsigned int)u_num_mappings);
		return false;
	}
	for (i = 0; i < u_num_mappings; i++) {
		/* Address and size must both be 4KB granular, size > 0. */
		if ((mem_info[i].mem_addr & (SIZE4K - 1))
		    || (mem_info[i].mem_size == 0)
		    || (mem_info[i].mem_size & (SIZE4K - 1))) {
			ERROR("mem_info passed buffer 0x%x or size 0x%x is not 4k aligned\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}

		/* Reject regions whose end address wraps around. */
		if ((mem_info[i].mem_addr + mem_info[i].mem_size) <
		    mem_info[i].mem_addr) {
			ERROR("overflow in mem_addr 0x%x add mem_size 0x%x\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}
		/* Region must lie in coreboot RAM or RESERVED memory. */
		coreboot_memory_t mem_type = coreboot_get_memory_type(
						mem_info[i].mem_addr,
						mem_info[i].mem_size);
		if (mem_type != CB_MEM_RAM && mem_type != CB_MEM_RESERVED) {
			ERROR("memory region not in CB MEM RAM or RESERVED area: region start 0x%x size 0x%x\n",
			      (unsigned int)mem_info[i].mem_addr,
			      (unsigned int)mem_info[i].mem_size);
			return false;
		}
	}
	for (i = 0; i < src_vm_list_cnt; i++) {
		if (source_vm_list[i] >= QTI_VM_LAST) {
			ERROR("source_vm_list[%u] 0x%x is more then QTI_VM_LAST\n",
			      (unsigned int)i, (unsigned int)source_vm_list[i]);
			return false;
		}
	}
	for (i = 0; i < dst_vm_list_cnt; i++) {
		if (dest_vm_list[i].dst_vm >= QTI_VM_LAST) {
			ERROR("dest_vm_list[%u] 0x%x is more then QTI_VM_LAST\n",
			      (unsigned int)i,
			      (unsigned int)dest_vm_list[i].dst_vm);
			return false;
		}
	}
	return true;
}
151
/*
 * Handle QTI_SIP_SVC_MEM_ASSIGN_ID: assign a list of intermediate PAs
 * from source Virtual Machines (VMs) to destination VMs.
 *
 * Register arguments:
 *   x1    - parameter id, must be QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID
 *   x2/x3 - NS address/size of the memprot_info_t array
 *   x4    - NS address of the source VM id list
 *   x5    - (read from the saved GP context) NS address of an
 *           indirect-argument buffer holding {src list size,
 *           dest list addr, dest list size}
 *
 * All NS buffers are dynamically mapped read-only, copied into local
 * (EL3-owned) storage, validated and unmapped before qtiseclib is
 * invoked, so the lib never operates on NS-writable memory.
 *
 * Returns via SMC_RET2: (QTI_SIP_SUCCESS, 0) on success, otherwise
 * (QTI_SIP_INVALID_PARAM, ret).
 */
static uintptr_t qti_sip_mem_assign(void *handle, uint32_t smc_cc,
				    u_register_t x1,
				    u_register_t x2,
				    u_register_t x3, u_register_t x4)
{
	uintptr_t dyn_map_start = 0, dyn_map_end = 0;
	size_t dyn_map_size = 0;
	u_register_t x6, x7;
	int ret = QTI_SIP_NOT_SUPPORTED;
	u_register_t x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);

	if (smc_cc == SMC_32) {
		x5 = (uint32_t) x5;
	}
	/* Validate input arg count & retrieve arg3-6 from NS Buffer. */
	if ((x1 != QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID) || (x5 == 0x0)) {
		ERROR("invalid mem_assign param id or no mapping info\n");
		goto unmap_return;
	}

	/* Map NS Buffer holding the indirect arguments. */
	dyn_map_start = x5;
	dyn_map_size =
	    (smc_cc ==
	     SMC_32) ? (sizeof(uint32_t) * 4) : (sizeof(uint64_t) * 4);
	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
					(MT_NS | MT_RO_DATA)) != 0) {
		ERROR("map failed for params NS Buffer %x %x\n",
		      (unsigned int)dyn_map_start, (unsigned int)dyn_map_size);
		goto unmap_return;
	}
	/* Retrieve indirect args. */
	if (smc_cc == SMC_32) {
		x6 = *((uint32_t *) x5 + 1);
		x7 = *((uint32_t *) x5 + 2);
		x5 = *(uint32_t *) x5;
	} else {
		x6 = *((uint64_t *) x5 + 1);
		x7 = *((uint64_t *) x5 + 2);
		x5 = *(uint64_t *) x5;
	}
	/* Un-Map NS Buffer. */
	if (qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size) != 0) {
		ERROR("unmap failed for params NS Buffer %x %x\n",
		      (unsigned int)dyn_map_start, (unsigned int)dyn_map_size);
		goto unmap_return;
	}

	/*
	 * Map NS Buffers.
	 * arg0,2,4 points to buffers & arg1,3,5 hold sizes.
	 * MAP api's fail to map if it's already mapped. Let's
	 * find lowest start & highest end address, then map once.
	 */
	dyn_map_start = MIN(x2, x4);
	dyn_map_start = MIN(dyn_map_start, x6);
	dyn_map_end = MAX((x2 + x3), (x4 + x5));
	dyn_map_end = MAX(dyn_map_end, (x6 + x7));
	dyn_map_size = dyn_map_end - dyn_map_start;

	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
					(MT_NS | MT_RO_DATA)) != 0) {
		ERROR("map failed for params NS Buffer2 %x %x\n",
		      (unsigned int)dyn_map_start, (unsigned int)dyn_map_size);
		goto unmap_return;
	}
	memprot_info_t *mem_info_p = (memprot_info_t *) x2;
	uint32_t u_num_mappings = x3 / sizeof(memprot_info_t);
	uint32_t *source_vm_list_p = (uint32_t *) x4;
	uint32_t src_vm_list_cnt = x5 / sizeof(uint32_t);
	memprot_dst_vm_perm_info_t *dest_vm_list_p =
	    (memprot_dst_vm_perm_info_t *) x6;
	uint32_t dst_vm_list_cnt =
	    x7 / sizeof(memprot_dst_vm_perm_info_t);
	if (qti_mem_assign_validate_param(mem_info_p, u_num_mappings,
					  source_vm_list_p, src_vm_list_cnt,
					  dest_vm_list_p,
					  dst_vm_list_cnt) != true) {
		ERROR("Param validation failed\n");
		goto unmap_return;
	}

	/*
	 * Copy everything into local storage; the validation above
	 * guarantees all counts fit the local array bounds.
	 */
	memprot_info_t mem_info[QTI_VM_MAX_LIST_SIZE];

	for (uint32_t i = 0; i < u_num_mappings; i++) {
		mem_info[i].mem_addr = mem_info_p[i].mem_addr;
		mem_info[i].mem_size = mem_info_p[i].mem_size;
	}

	memprot_dst_vm_perm_info_t dest_vm_list[QTI_VM_LAST];

	for (uint32_t i = 0; i < dst_vm_list_cnt; i++) {
		dest_vm_list[i].dst_vm = dest_vm_list_p[i].dst_vm;
		dest_vm_list[i].dst_vm_perm = dest_vm_list_p[i].dst_vm_perm;
		dest_vm_list[i].ctx = dest_vm_list_p[i].ctx;
		dest_vm_list[i].ctx_size = dest_vm_list_p[i].ctx_size;
	}

	uint32_t source_vm_list[QTI_VM_LAST];

	for (uint32_t i = 0; i < src_vm_list_cnt; i++) {
		source_vm_list[i] = source_vm_list_p[i];
	}
	/* Un-Map NS Buffers. */
	if (qti_mmap_remove_dynamic_region(dyn_map_start,
					   dyn_map_size) != 0) {
		ERROR("unmap failed for params NS Buffer %x %x\n",
		      (unsigned int)dyn_map_start, (unsigned int)dyn_map_size);
		goto unmap_return;
	}
	/*
	 * Region removed successfully - clear the bookkeeping so the
	 * error path below does not attempt a second, spurious removal
	 * of an already-removed region if qtiseclib_mem_assign() fails.
	 */
	dyn_map_start = 0;
	dyn_map_size = 0;
	/* Invoke API lib api. */
	ret = qtiseclib_mem_assign(mem_info, u_num_mappings,
				   source_vm_list, src_vm_list_cnt,
				   dest_vm_list, dst_vm_list_cnt);

	if (ret == 0) {
		SMC_RET2(handle, QTI_SIP_SUCCESS, ret);
	}
unmap_return:
	/* Un-Map NS Buffers if still mapped. */
	if (dyn_map_start && dyn_map_size) {
		qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size);
	}

	SMC_RET2(handle, QTI_SIP_INVALID_PARAM, ret);
}
278
279 /*
280 * This function handles QTI specific syscalls. Currently only SiP calls are present.
281 * Both FAST & YIELD type call land here.
282 */
/*
 * Top-level dispatcher for QTI-specific syscalls. Currently only SiP
 * calls are present; both FAST & YIELD type calls land here.
 */
static uintptr_t qti_sip_handler(uint32_t smc_fid,
				 u_register_t x1,
				 u_register_t x2,
				 u_register_t x3,
				 u_register_t x4,
				 void *cookie, void *handle, u_register_t flags)
{
	uint32_t l_smc_fid = smc_fid & FUNCID_OEN_NUM_MASK;

	/* For SMC32 calls only the lower 32 bits of x1-x4 are meaningful. */
	if (GET_SMC_CC(smc_fid) == SMC_32) {
		x1 = (uint32_t) x1;
		x2 = (uint32_t) x2;
		x3 = (uint32_t) x3;
		x4 = (uint32_t) x4;
	}

	switch (l_smc_fid) {
	case QTI_SIP_SVC_CALL_COUNT_ID:
		/* Number of calls implemented by this service. */
		SMC_RET1(handle, QTI_SIP_SVC_CALL_COUNT);
	case QTI_SIP_SVC_UID_ID:
		/* Return UID to the caller. */
		SMC_UUID_RET(handle, qti_sip_svc_uid);
	case QTI_SIP_SVC_VERSION_ID:
		/* Return the version of current implementation. */
		SMC_RET2(handle, QTI_SIP_SVC_VERSION_MAJOR,
			 QTI_SIP_SVC_VERSION_MINOR);
	case QTI_SIP_SVC_SECURE_IO_READ_ID:
		/* Allow the read only for allow-listed registers. */
		if ((x1 == QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID) &&
		    qti_is_secure_io_access_allowed(x2)) {
			SMC_RET2(handle, QTI_SIP_SUCCESS,
				 *((volatile uint32_t *)x2));
		}
		SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
	case QTI_SIP_SVC_SECURE_IO_WRITE_ID:
		/* Allow the write only for allow-listed registers. */
		if ((x1 == QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID) &&
		    qti_is_secure_io_access_allowed(x2)) {
			*((volatile uint32_t *)x2) = x3;
			SMC_RET1(handle, QTI_SIP_SUCCESS);
		}
		SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
	case QTI_SIP_SVC_MEM_ASSIGN_ID:
		return qti_sip_mem_assign(handle, GET_SMC_CC(smc_fid),
					  x1, x2, x3, x4);
	default:
		SMC_RET1(handle, QTI_SIP_NOT_SUPPORTED);
	}
	return (uintptr_t) handle;
}
351
/* Define a runtime service descriptor for both fast & yield SiP calls */
DECLARE_RT_SVC(qti_sip_fast_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_FAST, NULL, qti_sip_handler);

DECLARE_RT_SVC(qti_sip_yield_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_YIELD, NULL, qti_sip_handler);
358