/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context of the CPU identified by mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %llx, returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * Helper to get the SPM Core context of the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * Helper to get the SPM Core entry point information.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
	return spmc_ep_info;
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

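	/*
	 * The call below to spmd_spm_core_enter() (an assembly helper) saves
	 * the EL3 C runtime context (callee-saved registers and stack
	 * pointer, tracked through c_rt_ctx) and then exits EL3 into the
	 * SPMC. Execution resumes here, with the SPMC's return value in rc,
	 * once spmd_spm_core_sync_exit() is invoked.
	 */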
	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
		}
	}

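	/*
	 * Perform the first synchronous entry into the SPMC so it can run its
	 * own initialisation. The SPMC is expected to report success with
	 * FFA_MSG_WAIT (which triggers spmd_spm_core_sync_exit(0) in the SMC
	 * handler below) or report failure with FFA_ERROR.
	 */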
	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the secure ID bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure the manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * S-EL2 is required to run the SPM Core in this configuration, so
	 * check that the architecture supports it.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
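	/*
	 * SECURE marks the entry point as a secure image; EP_ST_ENABLE is
	 * understood to grant the secure world access to the secure physical
	 * timer (SCR_EL3.ST is set when the context is initialised).
	 */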
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Mark the SPMC context on this core as off until it is initialised */
	spm_ctx->state = SPMC_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
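	/*
	 * BL31 invokes spmd_init() after the standard runtime services have
	 * been initialised and before exiting to the normal world, performing
	 * the first synchronous entry into the SPMC.
	 */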
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

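	/*
	 * Copy the SMC arguments into the outgoing context's general purpose
	 * registers (x0-x7) and return its handle so the SMC framework
	 * restores that context and ERETs into the other world.
	 */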
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t)FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
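 * Returns true when 'address' lies within the SPMC binary image, based on the
 * load address and binary size read from the SPMC manifest. Used, for
 * instance, to validate the secondary entry point registered through
 * FFA_SECONDARY_EP_REGISTER.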
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
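 * Returns true when the direct message encoded in 'ep' (sender ID in
 * bits[31:16], destination ID in bits[15:0]) is sent by the SPMC to the
 * SPMD endpoint.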
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) &&
		(ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
		"0x%llx 0x%llx 0x%llx\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFFU & x1);
		/*
		 * Sanity check the input version first: the call is not
		 * supported if bit[31] of the requested version is set or if
		 * the SPMC has not been initialised yet.
		 * If the caller is secure and the SPMC has been initialised,
		 * return the SPMD's FF-A version.
		 * If the caller is non-secure and the SPMC has been
		 * initialised, return the SPMC's version.
		 */
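		/*
		 * Note: MAKE_FFA_VERSION() packs the major version in
		 * bits[30:16] and the minor version in bits[15:0], so e.g.
		 * version 1.0 is reported as 0x10000.
		 */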
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
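		 * A Normal world caller is reported as FFA_NS_ENDPOINT_ID;
		 * a secure caller is reported with the SPMC ID read from the
		 * SPMC manifest.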
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
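		/*
		 * Record the entry point (passed in x1) at which the SPMC
		 * expects to be entered when a secondary core is powered on.
		 * Only the SPMC, i.e. a secure caller, may register it.
		 */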
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
					SMC_GET_GP(handle, CTX_GPREG_X5),
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
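			/*
			 * This is the SPMC's direct response to a message the
			 * SPMD sent it through a synchronous entry; resume at
			 * the point where that entry was made.
			 */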
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
		/*
		 * Should not be allowed to forward FFA_PARTITION_INFO_GET
		 * from Secure world to Normal world
		 *
		 * Fall through to forward the call to the other world
		 */
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}