/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX] = { 0 };

static u32 domain_count = 0;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_FW_REGION	0
#define ROOT_ALL_REGION	1
#define ROOT_END_REGION	2
static struct sbi_domain_memregion root_memregs[ROOT_END_REGION + 1] = { 0 };

static struct sbi_domain root = {
	.name = "root",
	.possible_harts = &root_hmask,
	.regions = root_memregs,
	.system_reset_allowed = TRUE,
};

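/* Check whether the given HART is assigned to the specified domain */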
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
	if (dom)
		return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

	return FALSE;
}

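/*
 * Return one word of the assigned HART bitmap of a domain, starting
 * from HART id "hbase" (returns 0 when the domain pointer is NULL).
 */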
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
				       ulong hbase)
{
	ulong ret, bword, boff;

	if (!dom)
		return 0;

	bword = BIT_WORD(hbase);
	boff = BIT_WORD_OFFSET(hbase);

	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
	}

	return ret;
}

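/* Copy the root domain firmware region into the given memory region */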
void sbi_domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
	if (!reg)
		return;

	sbi_memcpy(reg, &root_memregs[ROOT_FW_REGION], sizeof(*reg));
}

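/*
 * Check whether an address is accessible by a domain for the given
 * privilege mode and access flags (read/write/execute and MMIO),
 * based on the first matching domain memory region.
 */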
bool sbi_domain_check_addr(const struct sbi_domain *dom,
			   unsigned long addr, unsigned long mode,
			   unsigned long access_flags)
{
	bool mmio = FALSE;
	struct sbi_domain_memregion *reg;
	unsigned long rstart, rend, rflags, rwx = 0;

	if (!dom)
		return FALSE;

	if (access_flags & SBI_DOMAIN_READ)
		rwx |= SBI_DOMAIN_MEMREGION_READABLE;
	if (access_flags & SBI_DOMAIN_WRITE)
		rwx |= SBI_DOMAIN_MEMREGION_WRITEABLE;
	if (access_flags & SBI_DOMAIN_EXECUTE)
		rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
	if (access_flags & SBI_DOMAIN_MMIO)
		mmio = TRUE;

	sbi_domain_for_each_memregion(dom, reg) {
		rflags = reg->flags;
		if (mode == PRV_M && !(rflags & SBI_DOMAIN_MEMREGION_MMODE))
			continue;

		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend) {
			if ((mmio && !(rflags & SBI_DOMAIN_MEMREGION_MMIO)) ||
			    (!mmio && (rflags & SBI_DOMAIN_MEMREGION_MMIO)))
				return FALSE;
			return ((rflags & rwx) == rwx) ? TRUE : FALSE;
		}
	}

	return (mode == PRV_M) ? TRUE : FALSE;
}

/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
	if (reg->order < 3 || __riscv_xlen < reg->order)
		return FALSE;

	if (reg->base & (BIT(reg->order) - 1))
		return FALSE;

	return TRUE;
}

/** Check if regionA is sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	ulong regA_start = regA->base;
	ulong regA_end = regA->base + (BIT(regA->order) - 1);
	ulong regB_start = regB->base;
	ulong regB_end = regB->base + (BIT(regB->order) - 1);

	if ((regB_start <= regA_start) &&
	    (regA_start < regB_end) &&
	    (regB_start < regA_end) &&
	    (regA_end <= regB_end))
		return TRUE;

	return FALSE;
}

/** Check if regionA conflicts regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
			       const struct sbi_domain_memregion *regB)
{
	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
	    regA->flags == regB->flags)
		return TRUE;

	return FALSE;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	if (regA->order < regB->order)
		return TRUE;

	if ((regA->order == regB->order) &&
	    (regA->base < regB->base))
		return TRUE;

	return FALSE;
}

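/*
 * Validate a domain before registration: check possible HARTs and
 * memory regions, ensure the firmware region is present, sort the
 * regions, and verify the next booting stage mode and address.
 */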
static int sanitize_domain(const struct sbi_platform *plat,
			   struct sbi_domain *dom)
{
	u32 i, j, count;
	bool have_fw_reg;
	struct sbi_domain_memregion treg, *reg, *reg1;

	/* Check possible HARTs */
	if (!dom->possible_harts) {
		sbi_printf("%s: %s possible HART mask is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
		if (sbi_platform_hart_invalid(plat, i)) {
			sbi_printf("%s: %s possible HART mask has invalid "
				   "hart %d\n", __func__, dom->name, i);
			return SBI_EINVAL;
		}
	}

	/* Check memory regions */
	if (!dom->regions) {
		sbi_printf("%s: %s regions is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_region_valid(reg)) {
			sbi_printf("%s: %s has invalid region base=0x%lx "
				   "order=%lu flags=0x%lx\n", __func__,
				   dom->name, reg->base, reg->order,
				   reg->flags);
			return SBI_EINVAL;
		}
	}

	/* Count memory regions and check presence of firmware region */
	count = 0;
	have_fw_reg = FALSE;
	sbi_domain_for_each_memregion(dom, reg) {
		if (reg->order == root_memregs[ROOT_FW_REGION].order &&
		    reg->base == root_memregs[ROOT_FW_REGION].base &&
		    reg->flags == root_memregs[ROOT_FW_REGION].flags)
			have_fw_reg = TRUE;
		count++;
	}
	if (!have_fw_reg) {
		sbi_printf("%s: %s does not have firmware region\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}

	/* Sort the memory regions */
	for (i = 0; i < (count - 1); i++) {
		reg = &dom->regions[i];
		for (j = i + 1; j < count; j++) {
			reg1 = &dom->regions[j];

			if (is_region_conflict(reg1, reg)) {
				sbi_printf("%s: %s conflict between regions "
					   "(base=0x%lx order=%lu flags=0x%lx) and "
					   "(base=0x%lx order=%lu flags=0x%lx)\n",
					   __func__, dom->name,
					   reg->base, reg->order, reg->flags,
					   reg1->base, reg1->order, reg1->flags);
				return SBI_EINVAL;
			}

			if (!is_region_before(reg1, reg))
				continue;

			sbi_memcpy(&treg, reg1, sizeof(treg));
			sbi_memcpy(reg1, reg, sizeof(treg));
			sbi_memcpy(reg, &treg, sizeof(treg));
		}
	}

	/*
	 * We don't need to check boot HART id of domain because if boot
	 * HART id is not possible/assigned to this domain then it won't
	 * be started at boot-time by sbi_domain_finalize().
	 */

	/*
	 * Check next mode
	 *
	 * We only allow next mode to be S-mode or U-mode so that we can
	 * protect M-mode context and enforce checks on memory accesses.
	 */
	if (dom->next_mode != PRV_S &&
	    dom->next_mode != PRV_U) {
		sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
			   __func__, dom->name, dom->next_mode);
		return SBI_EINVAL;
	}

	/* Check next address and next mode */
	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
				   SBI_DOMAIN_EXECUTE)) {
		sbi_printf("%s: %s next booting stage address 0x%lx can't "
			   "execute\n", __func__, dom->name, dom->next_addr);
		return SBI_EINVAL;
	}

	return 0;
}

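/* Print the configuration of a domain on the SBI console */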
void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
	u32 i, k;
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_printf("Domain%d Name %s: %s\n",
		   dom->index, suffix, dom->name);

	sbi_printf("Domain%d Boot HART %s: %d\n",
		   dom->index, suffix, dom->boot_hartid);

	k = 0;
	sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
	sbi_hartmask_for_each_hart(i, dom->possible_harts)
		sbi_printf("%s%d%s", (k++) ? "," : "",
			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
	sbi_printf("\n");

	i = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;

#if __riscv_xlen == 32
		sbi_printf("Domain%d Region%02d %s: 0x%08lx-0x%08lx ",
#else
		sbi_printf("Domain%d Region%02d %s: 0x%016lx-0x%016lx ",
#endif
			   dom->index, i, suffix, rstart, rend);

		k = 0;
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
			sbi_printf("%cM", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
			sbi_printf("%cI", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s\n", (k++) ? ")" : "()");

		i++;
	}

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Address %s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Address %s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_addr);

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Arg1 %s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Arg1 %s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_arg1);

	sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
	switch (dom->next_mode) {
	case PRV_M:
		sbi_printf("M-mode\n");
		break;
	case PRV_S:
		sbi_printf("S-mode\n");
		break;
	case PRV_U:
		sbi_printf("U-mode\n");
		break;
	default:
		sbi_printf("Unknown\n");
		break;
	}

	sbi_printf("Domain%d SysReset %s: %s\n",
		   dom->index, suffix, (dom->system_reset_allowed) ? "yes" : "no");
}

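/* Print the configuration of all registered domains */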
void sbi_domain_dump_all(const char *suffix)
{
	u32 i;
	const struct sbi_domain *dom;

	sbi_domain_for_each(i, dom) {
		sbi_domain_dump(dom, suffix);
		sbi_printf("\n");
	}
}

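/*
 * Register a domain and assign it the HARTs in "assign_mask" which are
 * also possible HARTs of the domain, moving those HARTs away from any
 * previously assigned domain.
 */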
int sbi_domain_register(struct sbi_domain *dom,
			const struct sbi_hartmask *assign_mask)
{
	u32 i;
	int rc;
	struct sbi_domain *tdom;
	u32 cold_hartid = current_hartid();
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	if (!dom || !assign_mask)
		return SBI_EINVAL;

	/* Check if domain already discovered */
	sbi_domain_for_each(i, tdom) {
		if (tdom == dom)
			return SBI_EALREADY;
	}

	/*
	 * Ensure that we have room for Domain Index to
	 * HART ID mapping
	 */
	if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
		sbi_printf("%s: No room for %s\n",
			   __func__, dom->name);
		return SBI_ENOSPC;
	}

	/* Sanitize discovered domain */
	rc = sanitize_domain(plat, dom);
	if (rc) {
		sbi_printf("%s: sanity checks failed for"
			   " %s (error %d)\n", __func__,
			   dom->name, rc);
		return rc;
	}

	/* Assign index to domain */
	dom->index = domain_count++;
	domidx_to_domain_table[dom->index] = dom;

	/* Clear assigned HARTs of domain */
	sbi_hartmask_clear_all(&dom->assigned_harts);

	/* Assign domain to HART if HART is a possible HART */
	sbi_hartmask_for_each_hart(i, assign_mask) {
		if (!sbi_hartmask_test_hart(i, dom->possible_harts))
			continue;

		tdom = hartid_to_domain_table[i];
		if (tdom)
			sbi_hartmask_clear_hart(i,
					&tdom->assigned_harts);
		hartid_to_domain_table[i] = dom;
		sbi_hartmask_set_hart(i, &dom->assigned_harts);

		/*
		 * If cold boot HART is assigned to this domain then
		 * override boot HART of this domain.
		 */
		if (i == cold_hartid &&
		    dom->boot_hartid != cold_hartid) {
			sbi_printf("Domain%d Boot HARTID forced to"
				   " %d\n", dom->index, cold_hartid);
			dom->boot_hartid = cold_hartid;
		}
	}

	return 0;
}

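/*
 * Finalize all registered domains: let the platform populate its
 * domains and then start the boot HART of each domain. The coldboot
 * HART's own domain is handled by updating the scratch next booting
 * stage details instead of an explicit HSM start.
 */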
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, dhart;
	struct sbi_domain *dom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Initialize and populate domains for the platform */
	rc = sbi_platform_domains_init(plat);
	if (rc) {
		sbi_printf("%s: platform domains_init() failed (error %d)\n",
			   __func__, rc);
		return rc;
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART */
		dhart = dom->boot_hartid;

		/* Ignore if boot HART is beyond the supported HART limit */
		if (SBI_HARTMASK_MAX_BITS <= dhart)
			continue;

		/* Ignore if boot HART not possible for this domain */
		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART assigned different domain */
		if (sbi_hartid_to_domain(dhart) != dom ||
		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
			continue;

		/* Startup boot HART of domain */
		if (dhart == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc) {
				sbi_printf("%s: failed to start boot HART %d"
					   " for %s (error %d)\n", __func__,
					   dhart, dom->name, rc);
				return rc;
			}
		}
	}

	return 0;
}

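/*
 * Initialize the root domain: set up its firmware and all-access memory
 * regions, next booting stage details, and possible HARTs, then register it.
 */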
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	struct sbi_domain_memregion *memregs;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Root domain firmware memory region */
	root_memregs[ROOT_FW_REGION].order = log2roundup(scratch->fw_size);
	root_memregs[ROOT_FW_REGION].base = scratch->fw_start &
				~((1UL << root_memregs[0].order) - 1UL);
	root_memregs[ROOT_FW_REGION].flags = 0;

	/* Root domain allow everything memory region */
	root_memregs[ROOT_ALL_REGION].order = __riscv_xlen;
	root_memregs[ROOT_ALL_REGION].base = 0;
	root_memregs[ROOT_ALL_REGION].flags = (SBI_DOMAIN_MEMREGION_READABLE |
					       SBI_DOMAIN_MEMREGION_WRITEABLE |
					       SBI_DOMAIN_MEMREGION_EXECUTABLE);

	/* Root domain memory region end */
	root_memregs[ROOT_END_REGION].order = 0;

	/* Use platform specific root memory regions when available */
	memregs = sbi_platform_domains_root_regions(plat);
	if (memregs)
		root.regions = memregs;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Root domain possible and assigned HARTs */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		if (sbi_platform_hart_invalid(plat, i))
			continue;
		sbi_hartmask_set_hart(i, &root_hmask);
	}

	return sbi_domain_register(&root, &root_hmask);
}