1 /* $OpenBSD: machdep.c,v 1.137 2023/10/24 13:20:10 claudio Exp $ */
2
3 /*
4 * Copyright (c) 2009, 2010 Miodrag Vallat.
5 * Copyright (c) 2019 Visa Hankala.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19 /*
20 * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
24 * are met:
25 * 1. Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * 2. Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in the
29 * documentation and/or other materials provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
32 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
33 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
35 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/proc.h>
48 #include <sys/buf.h>
49 #include <sys/reboot.h>
50 #include <sys/conf.h>
51 #include <sys/msgbuf.h>
52 #include <sys/tty.h>
53 #include <sys/user.h>
54 #include <sys/exec.h>
55 #include <sys/sysctl.h>
56 #include <sys/mount.h>
57 #include <sys/syscallargs.h>
58 #include <sys/exec_elf.h>
59 #include <sys/timetc.h>
60 #ifdef SYSVSHM
61 #include <sys/shm.h>
62 #endif
63 #ifdef SYSVSEM
64 #include <sys/sem.h>
65 #endif
66
67 #include <net/if.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <machine/db_machdep.h>
72 #include <ddb/db_interface.h>
73
74 #include <machine/autoconf.h>
75 #include <mips64/cache.h>
76 #include <machine/cpu.h>
77 #include <mips64/mips_cpu.h>
78 #include <machine/memconf.h>
79
80 #include <dev/cons.h>
81 #include <dev/ofw/fdt.h>
82
83 #include <octeon/dev/cn30xxcorereg.h>
84 #include <octeon/dev/cn30xxipdreg.h>
85 #include <octeon/dev/iobusvar.h>
86 #include <machine/octeonreg.h>
87 #include <machine/octeonvar.h>
88 #include <machine/octeon_model.h>
89
90 #include "octboot.h"
91
92 /* The following is used externally (sysctl_hw) */
93 char machine[] = MACHINE; /* Machine "architecture" */
94 char cpu_model[64];
95
96 struct uvm_constraint_range dma_constraint = { 0x0, 0xffffffffUL };
97 struct uvm_constraint_range *uvm_md_constraints[] = { NULL };
98
99 vm_map_t exec_map;
100 vm_map_t phys_map;
101
102 extern struct timecounter cp0_timecounter;
103 extern uint8_t dt_blob_start[];
104
105 enum octeon_board octeon_board;
106 struct boot_desc *octeon_boot_desc;
107 struct boot_info *octeon_boot_info;
108
109 void *octeon_fdt;
110 unsigned int octeon_ver;
111
112 /*
113 * safepri is a safe priority for sleep to set for a spin-wait
114 * during autoconfiguration or after a panic.
115 */
116 int safepri = 0;
117
118 caddr_t msgbufbase;
119
120 int physmem; /* Max supported memory, changes to actual. */
121 int ncpu = 1; /* At least one CPU in the system. */
122 struct user *proc0paddr;
123
124 struct cpu_hwinfo bootcpu_hwinfo;
125
126 /* Pointers to the start and end of the symbol table. */
127 caddr_t ssym;
128 caddr_t esym;
129 caddr_t ekern;
130
131 struct phys_mem_desc mem_layout[MAXMEMSEGS];
132
133 void dumpsys(void);
134 void dumpconf(void);
135 vaddr_t mips_init(register_t, register_t, register_t, register_t);
136 int is_memory_range(paddr_t, psize_t, psize_t);
137 void octeon_memory_init(struct boot_info *);
138 void octeon_sync_tc(vaddr_t, uint64_t, uint64_t);
139 int octeon_cpuspeed(int *);
140 void octeon_tlb_init(void);
141 static void process_bootargs(void);
142 static uint64_t get_ncpusfound(void);
143 static enum octeon_board get_octeon_board(void);
144
145 cons_decl(octuart);
146 struct consdev uartcons = cons_init(octuart);
147
148 u_int ioclock_get_timecount(struct timecounter *);
149
150 struct timecounter ioclock_timecounter = {
151 .tc_get_timecount = ioclock_get_timecount,
152 .tc_counter_mask = 0xffffffff, /* truncated to 32 bits */
153 .tc_frequency = 0, /* determined at runtime */
154 .tc_name = "ioclock",
155 .tc_quality = 0, /* ioclock can be overridden
156 * by cp0 counter */
157 .tc_priv = 0, /* clock register,
158 * determined at runtime */
159 .tc_user = 0, /* expose to user */
160 };
161
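/*
 * Minimal atoi(), used below to parse numeric boot arguments such as
 * "boothowto=".
 */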
162 static int
atoi(const char *s)
164 {
165 int n, neg;
166
167 n = 0;
168 neg = 0;
169
170 while (*s == '-') {
171 s++;
172 neg = !neg;
173 }
174
175 while (*s != '\0') {
176 if (*s < '0' || *s > '9')
177 break;
178
179 n = (10 * n) + (*s - '0');
180 s++;
181 }
182
183 return (neg ? -n : n);
184 }
185
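/*
 * The bootmem free list links its blocks by physical address; convert
 * such an address into a pointer through a cached XKPHYS mapping so the
 * block header can be read and written directly.
 */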
186 static struct octeon_bootmem_block *
pa_to_block(paddr_t addr)
188 {
189 return (struct octeon_bootmem_block *)PHYS_TO_XKPHYS(addr, CCA_CACHED);
190 }
191
192 void
octeon_memory_init(struct boot_info *boot_info)
194 {
195 struct octeon_bootmem_block *block;
196 struct octeon_bootmem_desc *memdesc;
197 paddr_t blockaddr;
198 uint64_t fp, lp;
199 int i;
200
201 physmem = atop((uint64_t)boot_info->dram_size << 20);
202
203 if (boot_info->phys_mem_desc_addr == 0)
204 panic("bootmem desc is missing");
205 memdesc = (struct octeon_bootmem_desc *)PHYS_TO_XKPHYS(
206 boot_info->phys_mem_desc_addr, CCA_CACHED);
207 printf("bootmem desc 0x%x version %d.%d\n",
208 boot_info->phys_mem_desc_addr, memdesc->major_version,
209 memdesc->minor_version);
210 if (memdesc->major_version > 3)
211 panic("unhandled bootmem desc version %d.%d",
212 memdesc->major_version, memdesc->minor_version);
213
214 blockaddr = memdesc->head_addr;
215 if (blockaddr == 0)
216 panic("bootmem list is empty");
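	/*
	 * Walk the bootmem free list and record up to MAXMEMSEGS usable
	 * segments; `i' only advances when a segment is accepted.
	 */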
217 for (i = 0; i < MAXMEMSEGS && blockaddr != 0; blockaddr = block->next) {
218 block = pa_to_block(blockaddr);
219 printf("avail phys mem 0x%016lx - 0x%016lx\n", blockaddr,
220 (paddr_t)(blockaddr + block->size));
221
222 #if NOCTBOOT > 0
223 /*
224 * Reserve the physical memory below the boot kernel
225 * for loading the actual kernel.
226 */
227 extern char start[];
228 if (blockaddr < CKSEG_SIZE &&
229 PHYS_TO_CKSEG0(blockaddr) < (vaddr_t)start) {
230 printf("skipped\n");
231 continue;
232 }
233 #endif
234
235 fp = atop(round_page(blockaddr));
236 lp = atop(trunc_page(blockaddr + block->size));
237
238 /* Clamp to the range of the pmap. */
239 if (fp > atop(pfn_to_pad(PG_FRAME)))
240 continue;
241 if (lp > atop(pfn_to_pad(PG_FRAME)) + 1)
242 lp = atop(pfn_to_pad(PG_FRAME)) + 1;
243 if (fp >= lp)
244 continue;
245
246 /* Skip small fragments. */
247 if (lp - fp < atop(1u << 20))
248 continue;
249
250 mem_layout[i].mem_first_page = fp;
251 mem_layout[i].mem_last_page = lp;
252 i++;
253 }
254
255 printf("Total DRAM Size 0x%016llX\n",
256 (uint64_t)boot_info->dram_size << 20);
257
258 for (i = 0; mem_layout[i].mem_last_page; i++) {
259 printf("mem_layout[%d] page 0x%016llX -> 0x%016llX\n", i,
260 mem_layout[i].mem_first_page, mem_layout[i].mem_last_page);
261
262 #if NOCTBOOT > 0
263 fp = mem_layout[i].mem_first_page;
264 lp = mem_layout[i].mem_last_page;
265 if (bootmem_alloc_region(ptoa(fp), ptoa(lp) - ptoa(fp)) != 0)
266 panic("%s: bootmem allocation failed", __func__);
267 #endif
268 }
269 }
270
/*
 * Do everything that locore normally does before calling main():
 * reset the mappings, set up the mapping to hardware and initialize
 * the "wired" register.
 */
275 vaddr_t
mips_init(register_t a0, register_t a1, register_t a2, register_t a3)
277 {
278 uint prid;
279 vaddr_t xtlb_handler;
280 size_t len;
281 int i;
282 struct boot_desc *boot_desc;
283 struct boot_info *boot_info;
284 int32_t *symptr;
285 uint32_t config4;
286
287 extern char start[], end[];
288 extern char exception[], e_exception[];
289 extern void xtlb_miss;
290
291 boot_desc = (struct boot_desc *)a3;
292 boot_info = (struct boot_info *)
293 PHYS_TO_XKPHYS(boot_desc->boot_info_addr, CCA_CACHED);
294
295 /*
296 * Save the pointers for future reference.
297 * The descriptors are located outside the free memory,
298 * and the kernel should preserve them.
299 */
300 octeon_boot_desc = boot_desc;
301 octeon_boot_info = boot_info;
302
303 #ifdef MULTIPROCESSOR
304 /*
305 * Set curcpu address on primary processor.
306 */
307 setcurcpu(&cpu_info_primary);
308 #endif
309
310 /*
311 * Set up early console output.
312 */
313 cn_tab = &uartcons;
314
315 /*
316 * Reserve space for the symbol table, if it exists.
317 */
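	/*
	 * Two 32-bit pointers (start and end of the symbol table) may have
	 * been stored right after the kernel image; accept them only if
	 * they lie just past the kernel and point at an ELF header.
	 */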
318 symptr = (int32_t *)roundup((vaddr_t)end, BOOTMEM_BLOCK_ALIGN);
319 ssym = (char *)(vaddr_t)symptr[0];
320 if (((long)ssym - (long)end) >= 0 &&
321 ((long)ssym - (long)end) <= 0x1000 &&
322 ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
323 ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3) {
324 /* Pointers exist directly after kernel. */
325 esym = (char *)(vaddr_t)symptr[1];
326 ekern = esym;
327 } else {
		/* Pointers aren't set up either... */
329 ssym = NULL;
330 esym = NULL;
331 ekern = end;
332 }
333
334 prid = cp0_get_prid();
335
336 bootcpu_hwinfo.clock = boot_desc->eclock;
337
338 switch (octeon_model_family(prid)) {
339 default:
340 octeon_ver = OCTEON_1;
341 break;
342 case OCTEON_MODEL_FAMILY_CN50XX:
343 octeon_ver = OCTEON_PLUS;
344 break;
345 case OCTEON_MODEL_FAMILY_CN61XX:
346 case OCTEON_MODEL_FAMILY_CN63XX:
347 case OCTEON_MODEL_FAMILY_CN66XX:
348 case OCTEON_MODEL_FAMILY_CN68XX:
349 octeon_ver = OCTEON_2;
350 break;
351 case OCTEON_MODEL_FAMILY_CN71XX:
352 case OCTEON_MODEL_FAMILY_CN73XX:
353 case OCTEON_MODEL_FAMILY_CN78XX:
354 octeon_ver = OCTEON_3;
355 break;
356 }
357
358 /*
359 * Look at arguments passed to us and compute boothowto.
360 */
361 boothowto = RB_AUTOBOOT;
362
363 octeon_memory_init(boot_info);
364
365 /*
366 * Set pagesize to enable use of page macros and functions.
367 * Commit available memory to UVM system.
368 */
369
370 uvmexp.pagesize = PAGE_SIZE;
371 uvm_setpagesize();
372
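	/*
	 * Hand each memory segment to UVM, carving out the page range
	 * occupied by the kernel image and its symbol table.
	 */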
373 for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_last_page != 0; i++) {
374 uint64_t fp, lp;
375 uint64_t firstkernpage, lastkernpage;
376 paddr_t firstkernpa, lastkernpa;
377
378 /* kernel is linked in CKSEG0 */
379 firstkernpa = CKSEG0_TO_PHYS((vaddr_t)start);
380 lastkernpa = CKSEG0_TO_PHYS((vaddr_t)ekern);
381
382 firstkernpage = atop(trunc_page(firstkernpa));
383 lastkernpage = atop(round_page(lastkernpa));
384
385 fp = mem_layout[i].mem_first_page;
386 lp = mem_layout[i].mem_last_page;
387
388 /* Account for kernel and kernel symbol table. */
389 if (fp >= firstkernpage && lp < lastkernpage)
390 continue; /* In kernel. */
391
392 if (lp < firstkernpage || fp > lastkernpage) {
393 uvm_page_physload(fp, lp, fp, lp, 0);
394 continue; /* Outside kernel. */
395 }
396
397 if (fp >= firstkernpage)
398 fp = lastkernpage;
399 else if (lp < lastkernpage)
400 lp = firstkernpage;
401 else { /* Need to split! */
402 uint64_t xp = firstkernpage;
403 uvm_page_physload(fp, xp, fp, xp, 0);
404 fp = lastkernpage;
405 }
406 if (lp > fp) {
407 uvm_page_physload(fp, lp, fp, lp, 0);
408 }
409 }
410
411 bootcpu_hwinfo.c0prid = prid;
412 bootcpu_hwinfo.type = (prid >> 8) & 0xff;
413 if (cp0_get_config_1() & CONFIG1_FP)
414 bootcpu_hwinfo.c1prid = cp1_get_prid();
415 else
416 bootcpu_hwinfo.c1prid = 0;
417
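	/*
	 * Config1.MMUSize1 holds the number of TLB entries minus one.
	 * When Config4 is present with MMUExtDef == 1, MMUSizeExt supplies
	 * additional high-order bits of that count.
	 */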
418 bootcpu_hwinfo.tlbsize = 1 + ((cp0_get_config_1() & CONFIG1_MMUSize1)
419 >> CONFIG1_MMUSize1_SHIFT);
420 if (cp0_get_config_3() & CONFIG3_M) {
421 config4 = cp0_get_config_4();
422 if (((config4 & CONFIG4_MMUExtDef) >>
423 CONFIG4_MMUExtDef_SHIFT) == 1)
424 bootcpu_hwinfo.tlbsize +=
425 (config4 & CONFIG4_MMUSizeExt) << 6;
426 }
427
428 bcopy(&bootcpu_hwinfo, &curcpu()->ci_hw, sizeof(struct cpu_hwinfo));
429
430 /*
431 * Configure cache.
432 */
433
434 Octeon_ConfigCache(curcpu());
435 Octeon_SyncCache(curcpu());
436
437 octeon_tlb_init();
438
439 snprintf(cpu_model, sizeof(cpu_model), "Cavium OCTEON (rev %d.%d) @ %d MHz",
440 (bootcpu_hwinfo.c0prid >> 4) & 0x0f,
441 bootcpu_hwinfo.c0prid & 0x0f,
442 bootcpu_hwinfo.clock / 1000000);
443
444 cpu_cpuspeed = octeon_cpuspeed;
445 ncpusfound = get_ncpusfound();
446 octeon_board = get_octeon_board();
447
448 process_bootargs();
449
450 /*
451 * Save the FDT and let the system use it.
452 */
453 if (octeon_boot_info->ver_minor >= 3 &&
454 octeon_boot_info->fdt_addr != 0) {
455 void *fdt;
456 size_t fdt_size;
457
458 fdt = (void *)PHYS_TO_XKPHYS(octeon_boot_info->fdt_addr,
459 CCA_CACHED);
460 if (fdt_init(fdt) != 0 && (fdt_size = fdt_get_size(fdt)) != 0) {
461 octeon_fdt = (void *)pmap_steal_memory(fdt_size, NULL,
462 NULL);
463 memcpy(octeon_fdt, fdt, fdt_size);
464 fdt_init(octeon_fdt);
465 }
466 } else
467 fdt_init(dt_blob_start);
468
469 /*
470 * Get a console, very early but after initial mapping setup.
471 */
472
473 consinit();
474 printf("Initial setup done, switching console.\n");
475
476 #define DUMP_BOOT_DESC(field, format) \
477 printf("boot_desc->" #field ":" #format "\n", boot_desc->field)
478 #define DUMP_BOOT_INFO(field, format) \
479 printf("boot_info->" #field ":" #format "\n", boot_info->field)
480
481 DUMP_BOOT_DESC(desc_ver, %d);
482 DUMP_BOOT_DESC(desc_size, %d);
483 DUMP_BOOT_DESC(stack_top, %llx);
484 DUMP_BOOT_DESC(heap_start, %llx);
485 DUMP_BOOT_DESC(heap_end, %llx);
486 DUMP_BOOT_DESC(argc, %d);
487 DUMP_BOOT_DESC(flags, %#x);
488 DUMP_BOOT_DESC(core_mask, %#x);
489 DUMP_BOOT_DESC(dram_size, %d);
490 DUMP_BOOT_DESC(phy_mem_desc_addr, %#x);
491 DUMP_BOOT_DESC(debugger_flag_addr, %#x);
492 DUMP_BOOT_DESC(eclock, %d);
493 DUMP_BOOT_DESC(boot_info_addr, %#llx);
494
495 DUMP_BOOT_INFO(ver_major, %d);
496 DUMP_BOOT_INFO(ver_minor, %d);
497 DUMP_BOOT_INFO(stack_top, %llx);
498 DUMP_BOOT_INFO(heap_start, %llx);
499 DUMP_BOOT_INFO(heap_end, %llx);
500 DUMP_BOOT_INFO(boot_desc_addr, %#llx);
501 DUMP_BOOT_INFO(exception_base_addr, %#x);
502 DUMP_BOOT_INFO(stack_size, %d);
503 DUMP_BOOT_INFO(flags, %#x);
504 DUMP_BOOT_INFO(core_mask, %#x);
505 DUMP_BOOT_INFO(dram_size, %d);
506 DUMP_BOOT_INFO(phys_mem_desc_addr, %#x);
507 DUMP_BOOT_INFO(debugger_flags_addr, %#x);
508 DUMP_BOOT_INFO(eclock, %d);
509 DUMP_BOOT_INFO(dclock, %d);
510 DUMP_BOOT_INFO(board_type, %d);
511 DUMP_BOOT_INFO(board_rev_major, %d);
512 DUMP_BOOT_INFO(board_rev_minor, %d);
513 DUMP_BOOT_INFO(mac_addr_count, %d);
514 DUMP_BOOT_INFO(cf_common_addr, %#llx);
515 DUMP_BOOT_INFO(cf_attr_addr, %#llx);
516 DUMP_BOOT_INFO(led_display_addr, %#llx);
517 DUMP_BOOT_INFO(dfaclock, %d);
518 DUMP_BOOT_INFO(config_flags, %#x);
519 if (octeon_boot_info->ver_minor >= 3)
520 DUMP_BOOT_INFO(fdt_addr, %#llx);
521
	/*
	 * It is possible to launch the kernel from the bootloader without
	 * physical CPU 0. That does not really work, however, because of
	 * the way the kernel assigns and uses cpuids. Moreover, cnmac(4)
	 * is hardcoded to use CPU 0 for packet reception.
	 */
528 if (!(octeon_boot_info->core_mask & 1))
529 panic("cannot run without physical CPU 0");
530
531 /*
532 * Use bits of board information to improve initial entropy.
533 */
534 enqueue_randomness((octeon_boot_info->board_type << 16) |
535 (octeon_boot_info->board_rev_major << 8) |
536 octeon_boot_info->board_rev_minor);
537 len = strnlen(octeon_boot_info->board_serial,
538 sizeof(octeon_boot_info->board_serial));
539 for (i = 0; i < len; i++)
540 enqueue_randomness(octeon_boot_info->board_serial[i]);
541
542 /*
543 * Init message buffer.
544 */
	msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
546 initmsgbuf(msgbufbase, MSGBUFSIZE);
547
548 /*
549 * Allocate U page(s) for proc[0], pm_tlbpid 1.
550 */
551
552 proc0.p_addr = proc0paddr = curcpu()->ci_curprocpaddr =
553 (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
554 proc0.p_md.md_regs = (struct trapframe *)&proc0paddr->u_pcb.pcb_regs;
555 tlb_set_pid(MIN_USER_ASID);
556
557 /*
558 * Bootstrap VM system.
559 */
560
561 pmap_bootstrap();
562
563 /*
564 * Copy down exception vector code.
565 */
566
567 bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
568 bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);
569
570 /*
571 * Build proper TLB refill handler trampolines.
572 */
573
574 xtlb_handler = (vaddr_t)&xtlb_miss;
575 build_trampoline(TLB_MISS_EXC_VEC, xtlb_handler);
576 build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);
577
578 /*
579 * Turn off bootstrap exception vectors.
	 * (the firmware already did this, but it doesn't hurt to be safe)
581 */
582
583 setsr(getsr() & ~SR_BOOT_EXC_VEC);
584 proc0.p_md.md_regs->sr = getsr();
585
586 #ifdef DDB
587 db_machine_init();
588 if (boothowto & RB_KDB)
589 db_enter();
590 #endif
591
592 switch (octeon_model_family(prid)) {
593 case OCTEON_MODEL_FAMILY_CN73XX:
594 case OCTEON_MODEL_FAMILY_CN78XX:
595 ioclock_timecounter.tc_priv = (void *)FPA3_CLK_COUNT;
596 break;
597 default:
598 ioclock_timecounter.tc_priv = (void *)IPD_CLK_COUNT;
599 break;
600 }
601 ioclock_timecounter.tc_frequency = octeon_ioclock_speed();
602 tc_init(&ioclock_timecounter);
603
604 cpu_has_synced_cp0_count = 1;
605 cp0_timecounter.tc_quality = 1000;
606 cp0_timecounter.tc_user = TC_CP0_COUNT;
607
608 /*
609 * Return the new kernel stack pointer.
610 */
611 return ((vaddr_t)proc0paddr + USPACE - 64);
612 }
613
614 /*
615 * Console initialization: called early on from main, before vm init or startup.
616 * Do enough configuration to choose and initialize a console.
617 */
618 void
consinit()
620 {
621 static int console_ok = 0;
622
623 if (console_ok == 0) {
624 com_fdt_init_cons();
625 cninit();
626 console_ok = 1;
627 }
628 }
629
630 /*
631 * cpu_startup: allocate memory for variable-sized tables, initialize CPU, and
632 * do auto-configuration.
633 */
634 void
cpu_startup()
636 {
637 vaddr_t minaddr, maxaddr;
638
639 /*
640 * Good {morning,afternoon,evening,night}.
641 */
642 printf("%s", version);
643 printf("real mem = %lu (%luMB)\n", ptoa((psize_t)physmem),
644 ptoa((psize_t)physmem)/1024/1024);
645
646 /*
647 * Allocate a submap for exec arguments. This map effectively
648 * limits the number of processes exec'ing at any time.
649 */
650 minaddr = vm_map_min(kernel_map);
651 exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
652 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
653 /* Allocate a submap for physio. */
654 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
655 VM_PHYS_SIZE, 0, FALSE, NULL);
656
657 printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
658 ptoa(uvmexp.free)/1024/1024);
659
660 /*
661 * Set up buffers, so they can be used to read disk labels.
662 */
663 bufinit();
664
665 /*
666 * Configure the system.
667 */
668 if (boothowto & RB_CONFIG) {
669 #ifdef BOOT_CONFIG
670 user_config();
671 #else
672 printf("kernel does not support -c; continuing..\n");
673 #endif
674 }
675 }
676
677 int
octeon_cpuspeed(int *freq)
679 {
680 *freq = octeon_boot_info->eclock / 1000000;
681 return (0);
682 }
683
684 int
octeon_ioclock_speed(void)
686 {
687 u_int64_t mio_rst_boot, rst_boot;
688
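	/*
	 * On OCTEON II/III the IO clock is the reference clock scaled by
	 * the PNR PLL multiplier; on older parts fall back to the core
	 * clock frequency reported by the bootloader.
	 */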
689 switch (octeon_ver) {
690 case OCTEON_2:
691 mio_rst_boot = octeon_xkphys_read_8(MIO_RST_BOOT);
692 return OCTEON_IO_REF_CLOCK * ((mio_rst_boot >>
693 MIO_RST_BOOT_PNR_MUL_SHIFT) & MIO_RST_BOOT_PNR_MUL_MASK);
694 case OCTEON_3:
695 rst_boot = octeon_xkphys_read_8(RST_BOOT);
696 return OCTEON_IO_REF_CLOCK * ((rst_boot >>
697 RST_BOOT_PNR_MUL_SHIFT) & RST_BOOT_PNR_MUL_MASK);
698 default:
699 return octeon_boot_info->eclock;
700 }
701 }
702
703 void
octeon_tlb_init(void)
705 {
706 uint64_t clk_reg, cvmmemctl, frac, cmul, imul, val;
707 uint32_t hwrena = 0;
708 uint32_t pgrain = 0;
709 int chipid;
710
711 chipid = octeon_get_chipid();
712 switch (octeon_model_family(chipid)) {
713 case OCTEON_MODEL_FAMILY_CN73XX:
714 /* Enable LMTDMA/LMTST transactions. */
715 cvmmemctl = octeon_get_cvmmemctl();
716 cvmmemctl |= COP_0_CVMMEMCTL_LMTENA;
717 cvmmemctl &= ~COP_0_CVMMEMCTL_LMTLINE_M;
718 cvmmemctl |= 2ull << COP_0_CVMMEMCTL_LMTLINE_S;
719 octeon_set_cvmmemctl(cvmmemctl);
720 break;
721 }
722
723 /*
724 * Make sure Coprocessor 2 is disabled.
725 */
726 setsr(getsr() & ~SR_COP_2_BIT);
727
728 /*
729 * Synchronize this core's cycle counter with the system-wide
730 * IO clock counter.
731 *
732 * The IO clock counter's value has to be scaled from the IO clock
733 * frequency domain to the core clock frequency domain:
734 *
735 * cclk / cmul = iclk / imul
736 * cclk = iclk * cmul / imul
737 *
738 * Division is very slow and possibly variable-time on the system,
739 * so the synchronization routine uses multiplication:
740 *
741 * cclk = iclk * cmul * frac / 2^64,
742 *
743 * where frac = 2^64 / imul is precomputed.
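	 *
	 * For example, with illustrative values cmul = 16 and imul = 4,
	 * frac = 2^64 / 4 and the scaled count becomes
	 * iclk * 16 * (2^64 / 4) / 2^64 = 4 * iclk, matching
	 * cclk = iclk * cmul / imul.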
744 */
745 switch (octeon_model_family(chipid)) {
746 case OCTEON_MODEL_FAMILY_CN73XX:
747 case OCTEON_MODEL_FAMILY_CN78XX:
748 clk_reg = FPA3_CLK_COUNT;
749 break;
750 default:
751 clk_reg = IPD_CLK_COUNT;
752 break;
753 }
754 switch (octeon_ver) {
755 case OCTEON_2:
756 val = octeon_xkphys_read_8(MIO_RST_BOOT);
757 cmul = (val >> MIO_RST_BOOT_C_MUL_SHIFT) &
758 MIO_RST_BOOT_C_MUL_MASK;
759 imul = (val >> MIO_RST_BOOT_PNR_MUL_SHIFT) &
760 MIO_RST_BOOT_PNR_MUL_MASK;
761 break;
762 case OCTEON_3:
763 val = octeon_xkphys_read_8(RST_BOOT);
764 cmul = (val >> RST_BOOT_C_MUL_SHIFT) &
765 RST_BOOT_C_MUL_MASK;
766 imul = (val >> RST_BOOT_PNR_MUL_SHIFT) &
767 RST_BOOT_PNR_MUL_MASK;
768 break;
769 default:
770 cmul = 1;
771 imul = 1;
772 break;
773 }
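	/*
	 * 2^64 does not fit in a 64-bit integer, so compute
	 * frac = 2^64 / imul as (2^63 / imul) * 2.
	 */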
774 frac = ((1ULL << 63) / imul) * 2;
775 octeon_sync_tc(PHYS_TO_XKPHYS(clk_reg, CCA_NC), cmul, frac);
776
777 /* Let userspace access the cycle counter. */
778 hwrena |= HWRENA_CC;
779
780 /*
781 * If the UserLocal register is available, let userspace
782 * access it using the RDHWR instruction.
783 */
784 if (cp0_get_config_3() & CONFIG3_ULRI) {
785 cp0_set_userlocal(NULL);
786 hwrena |= HWRENA_ULR;
787 cpu_has_userlocal = 1;
788 }
789 cp0_set_hwrena(hwrena);
790
791 #ifdef MIPS_PTE64
792 pgrain |= PGRAIN_ELPA;
793 #endif
794 if (cp0_get_config_3() & CONFIG3_RXI)
795 pgrain |= (PGRAIN_RIE | PGRAIN_XIE);
796 cp0_set_pagegrain(pgrain);
797
798 tlb_init(bootcpu_hwinfo.tlbsize);
799 }
800
801 static u_int64_t
get_ncpusfound(void)
803 {
804 uint64_t core_mask;
805 uint64_t i, ncpus = 0;
806 int chipid;
807
808 chipid = octeon_get_chipid();
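	/*
	 * The core fuse register indicates which cores are present; its
	 * location differs between CIU (older chips) and CIU3 (CN73xx,
	 * CN78xx) based parts.
	 */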
809 switch (octeon_model_family(chipid)) {
810 case OCTEON_MODEL_FAMILY_CN73XX:
811 case OCTEON_MODEL_FAMILY_CN78XX:
812 core_mask = octeon_xkphys_read_8(OCTEON_CIU3_BASE + CIU3_FUSE);
813 break;
814 default:
815 core_mask = octeon_xkphys_read_8(OCTEON_CIU_BASE + CIU_FUSE);
816 break;
817 }
818
	/* There has to be a 1-to-1 mapping between cpuids and coreids. */
820 for (i = 0; i < OCTEON_MAXCPUS && (core_mask & (1ul << i)) != 0; i++)
821 ncpus++;
822
823 return ncpus;
824 }
825
826 static enum octeon_board
get_octeon_board(void)
828 {
829 switch (octeon_boot_info->board_type) {
830 case 11:
831 return BOARD_CN3010_EVB_HS5;
832 case 20002:
833 return BOARD_UBIQUITI_E100;
834 case 20003:
835 return BOARD_UBIQUITI_E200;
836 case 20004:
837 /* E120 has two cores, whereas UTM25 has one core. */
838 if (ncpusfound == 1)
839 return BOARD_NETGEAR_UTM25;
840 return BOARD_UBIQUITI_E120;
841 case 20005:
842 return BOARD_UBIQUITI_E220;
843 case 20010:
844 return BOARD_UBIQUITI_E1000;
845 case 20011:
846 return BOARD_CHECKPOINT_N100;
847 case 20012:
848 return BOARD_RHINOLABS_UTM8;
849 case 20015:
850 return BOARD_DLINK_DSR_500;
851 case 20300:
852 return BOARD_UBIQUITI_E300;
853 default:
854 break;
855 }
856
857 return BOARD_UNKNOWN;
858 }
859
860 static void
process_bootargs(void)
862 {
863 const char *cp;
864 int i;
865
	/*
	 * U-Boot doesn't pass us anything by default; we need to pass
	 * the root device explicitly.
	 */
870 for (i = 0; i < octeon_boot_desc->argc; i++ ) {
871 const char *arg = (const char*)
872 PHYS_TO_XKPHYS(octeon_boot_desc->argv[i], CCA_CACHED);
873
874 if (octeon_boot_desc->argv[i] == 0)
875 continue;
876
877 #ifdef DEBUG
878 printf("boot_desc->argv[%d] = %s\n", i, arg);
879 #endif
880
881 if (strncmp(arg, "boothowto=", 10) == 0) {
882 boothowto = atoi(arg + 10);
883 continue;
884 }
885
886 if (strncmp(arg, "rootdev=", 8) == 0) {
887 parse_uboot_root(arg + 8);
888 continue;
889 }
890
891 if (*arg != '-')
892 continue;
893
894 for (cp = arg + 1; *cp != '\0'; cp++) {
895 switch (*cp) {
896 case '-':
897 break;
898 case 'a':
899 boothowto |= RB_ASKNAME;
900 break;
901 case 'c':
902 boothowto |= RB_CONFIG;
903 break;
904 case 'd':
905 boothowto |= RB_KDB;
906 break;
907 case 's':
908 boothowto |= RB_SINGLE;
909 break;
910 default:
911 printf("unrecognized option `%c'", *cp);
912 break;
913 }
914 }
915 }
916 }
917
918 /*
919 * Machine dependent system variables.
920 */
921 int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
924 {
925 /* All sysctl names at this level are terminal. */
926 if (namelen != 1)
927 return ENOTDIR; /* Overloaded */
928
929 switch (name[0]) {
930 default:
931 return EOPNOTSUPP;
932 }
933 }
934
935 int waittime = -1;
936
937 __dead void
boot(int howto)
939 {
940 if ((howto & RB_RESET) != 0)
941 goto doreset;
942
943 if (curproc)
944 savectx(curproc->p_addr, 0);
945
946 if (cold) {
947 if ((howto & RB_USERREQ) == 0)
948 howto |= RB_HALT;
949 goto haltsys;
950 }
951
952 boothowto = howto;
953 if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
954 waittime = 0;
955 vfs_shutdown(curproc);
956
957 if ((howto & RB_TIMEBAD) == 0) {
958 resettodr();
959 } else {
960 printf("WARNING: not updating battery clock\n");
961 }
962 }
963 if_downall();
964
965 uvm_shutdown();
966 splhigh();
967 cold = 1;
968
969 if ((howto & RB_DUMP) != 0)
970 dumpsys();
971
972 haltsys:
973 config_suspend_all(DVACT_POWERDOWN);
974
975 if ((howto & RB_HALT) != 0) {
976 if ((howto & RB_POWERDOWN) != 0)
977 printf("System Power Down not supported,"
978 " halting system.\n");
979 else
980 printf("System Halt.\n");
981 } else {
982 doreset:
983 printf("System restart.\n");
984 (void)disableintr();
985 tlb_set_wired(0);
986 tlb_flush(bootcpu_hwinfo.tlbsize);
987
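		/* Request a soft reset from the on-chip reset controller. */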
988 if (octeon_ver == OCTEON_3)
989 octeon_xkphys_write_8(RST_SOFT_RST, 1);
990 else
991 octeon_xkphys_write_8(OCTEON_CIU_BASE +
992 CIU_SOFT_RST, 1);
993 }
994
995 for (;;)
996 continue;
997 /* NOTREACHED */
998 }
999
1000 u_long dumpmag = 0x8fca0101; /* Magic number for savecore. */
1001 int dumpsize = 0; /* Also for savecore. */
1002 long dumplo = 0;
1003
1004 void
dumpconf(void)
1006 {
1007 int nblks;
1008
1009 if (dumpdev == NODEV ||
1010 (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
1011 return;
1012 if (nblks <= ctod(1))
1013 return;
1014
1015 dumpsize = ptoa(physmem);
1016 if (dumpsize > atop(round_page(dbtob(nblks - dumplo))))
1017 dumpsize = atop(round_page(dbtob(nblks - dumplo)));
1018 else if (dumplo == 0)
1019 dumplo = nblks - btodb(ptoa(physmem));
1020
1021 /*
1022 * Don't dump on the first page in case the dump device includes a
1023 * disk label.
1024 */
1025 if (dumplo < btodb(PAGE_SIZE))
1026 dumplo = btodb(PAGE_SIZE);
1027 }
1028
1029 void
dumpsys()
1031 {
1032 /* XXX TBD */
1033 }
1034
1035 int
is_memory_range(paddr_t pa, psize_t len, psize_t limit)
1037 {
1038 extern char start[];
1039 struct phys_mem_desc *seg;
1040 uint64_t fp, lp;
1041 int i;
1042
1043 fp = atop(pa);
1044 lp = atop(round_page(pa + len));
1045
1046 if (limit != 0 && lp > atop(limit))
1047 return 0;
1048
1049 /* The kernel is linked in CKSEG0. */
1050 if (fp >= atop(trunc_page(CKSEG0_TO_PHYS((vaddr_t)start))) &&
1051 lp <= atop(round_page(CKSEG0_TO_PHYS((vaddr_t)ekern))))
1052 return 1;
1053
1054 for (i = 0, seg = mem_layout; i < MAXMEMSEGS; i++, seg++)
1055 if (fp >= seg->mem_first_page && lp <= seg->mem_last_page)
1056 return 1;
1057
1058 return 0;
1059 }
1060
1061 u_int
ioclock_get_timecount(struct timecounter *tc)
1063 {
1064 uint64_t reg = (uint64_t)tc->tc_priv;
1065
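	/*
	 * Only the low 32 bits are used; tc_counter_mask truncates the
	 * count accordingly.
	 */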
1066 return octeon_xkphys_read_8(reg);
1067 }
1068
1069 #if NOCTBOOT > 0
1070 static uint64_t
size_trunc(uint64_t size)
1072 {
1073 return (size & ~BOOTMEM_BLOCK_MASK);
1074 }
1075
1076 void
bootmem_dump(void)
1078 {
1079 struct octeon_bootmem_desc *memdesc = (struct octeon_bootmem_desc *)
1080 PHYS_TO_XKPHYS(octeon_boot_info->phys_mem_desc_addr, CCA_CACHED);
1081 struct octeon_bootmem_block *block;
1082 paddr_t pa;
1083
1084 pa = memdesc->head_addr;
1085 while (pa != 0) {
1086 block = pa_to_block(pa);
1087 printf("free 0x%lx - 0x%lx\n", pa, pa + (size_t)block->size);
1088 pa = block->next;
1089 }
1090 }
1091
1092 /*
1093 * Allocate the given region from the free memory list.
1094 */
1095 int
bootmem_alloc_region(paddr_t pa, size_t size)
1097 {
1098 struct octeon_bootmem_desc *memdesc = (struct octeon_bootmem_desc *)
1099 PHYS_TO_XKPHYS(octeon_boot_info->phys_mem_desc_addr, CCA_CACHED);
1100 struct octeon_bootmem_block *block, *next, nblock;
1101 paddr_t bpa;
1102
1103 if (pa == 0 || size < BOOTMEM_BLOCK_MIN_SIZE ||
1104 (pa & BOOTMEM_BLOCK_MASK) != 0 ||
1105 (size & BOOTMEM_BLOCK_MASK) != 0)
1106 return EINVAL;
1107
1108 if (memdesc->head_addr == 0 || pa < memdesc->head_addr)
1109 return ENOMEM;
1110
1111 /* Check if the region is at the head of the free list. */
1112 if (pa == memdesc->head_addr) {
1113 block = pa_to_block(memdesc->head_addr);
1114 if (block->size < size)
1115 return ENOMEM;
1116 if (size_trunc(block->size) == size) {
1117 memdesc->head_addr = block->next;
1118 } else {
1119 KASSERT(block->size > size);
1120 nblock.next = block->next;
1121 nblock.size = block->size - size;
1122 KASSERT(nblock.size >= BOOTMEM_BLOCK_MIN_SIZE);
1123 memdesc->head_addr += size;
1124 *pa_to_block(memdesc->head_addr) = nblock;
1125 }
1126 return 0;
1127 }
1128
1129 /* Find the block that immediately precedes or is at `pa'. */
1130 bpa = memdesc->head_addr;
1131 block = pa_to_block(bpa);
1132 while (block->next != 0 && block->next < pa) {
1133 bpa = block->next;
1134 block = pa_to_block(bpa);
1135 }
1136
1137 /* Refuse to play if the block is not properly aligned. */
1138 if ((bpa & BOOTMEM_BLOCK_MASK) != 0)
1139 return ENOMEM;
1140
1141 if (block->next == pa) {
1142 next = pa_to_block(block->next);
1143 if (next->size < size)
1144 return ENOMEM;
1145 if (size_trunc(next->size) == size) {
1146 block->next = next->next;
1147 } else {
1148 KASSERT(next->size > size);
1149 nblock.next = next->next;
1150 nblock.size = next->size - size;
1151 KASSERT(nblock.size >= BOOTMEM_BLOCK_MIN_SIZE);
1152 block->next += size;
1153 *pa_to_block(block->next) = nblock;
1154 }
1155 } else {
1156 KASSERT(bpa < pa);
1157 KASSERT(block->next == 0 || block->next > pa);
1158
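		/*
		 * The requested region lies inside this block: either trim
		 * the block's tail or split it around the allocation.
		 */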
1159 if (bpa + block->size < pa + size)
1160 return ENOMEM;
1161 if (bpa + size_trunc(block->size) == pa + size) {
1162 block->size = pa - bpa;
1163 } else {
1164 KASSERT(bpa + block->size > pa + size);
1165 nblock.next = block->next;
1166 nblock.size = block->size - (pa - bpa) - size;
1167 KASSERT(nblock.size >= BOOTMEM_BLOCK_MIN_SIZE);
1168 block->next = pa + size;
1169 block->size = pa - bpa;
1170 *pa_to_block(block->next) = nblock;
1171 }
1172 }
1173
1174 return 0;
1175 }
1176
1177 /*
1178 * Release the given region to the free memory list.
1179 */
1180 void
bootmem_free(paddr_t pa, size_t size)
1182 {
1183 struct octeon_bootmem_desc *memdesc = (struct octeon_bootmem_desc *)
1184 PHYS_TO_XKPHYS(octeon_boot_info->phys_mem_desc_addr, CCA_CACHED);
1185 struct octeon_bootmem_block *block, *next, *prev;
1186 paddr_t prevpa;
1187
1188 if (pa == 0 || size < BOOTMEM_BLOCK_MIN_SIZE ||
1189 (pa & BOOTMEM_BLOCK_MASK) != 0 ||
1190 (size & BOOTMEM_BLOCK_MASK) != 0)
1191 panic("%s: invalid block 0x%lx @ 0x%lx", __func__, size, pa);
1192
1193 /* If the list is empty, insert at the head. */
1194 if (memdesc->head_addr == 0) {
1195 block = pa_to_block(pa);
1196 block->next = 0;
1197 block->size = size;
1198 memdesc->head_addr = pa;
1199 return;
1200 }
1201
1202 /* If the block precedes the current head, insert before, or merge. */
1203 if (pa <= memdesc->head_addr) {
1204 block = pa_to_block(pa);
1205 if (pa + size < memdesc->head_addr) {
1206 block->next = memdesc->head_addr;
1207 block->size = size;
1208 memdesc->head_addr = pa;
1209 } else if (pa + size == memdesc->head_addr) {
1210 next = pa_to_block(memdesc->head_addr);
1211 block->next = next->next;
1212 block->size = next->size + size;
1213 memdesc->head_addr = pa;
1214 } else {
1215 panic("%s: overlap 1: 0x%lx @ 0x%lx / 0x%llx @ 0x%llx",
1216 __func__, size, pa,
1217 pa_to_block(memdesc->head_addr)->size,
1218 memdesc->head_addr);
1219 }
1220 return;
1221 }
1222
1223 /* Find the immediate predecessor. */
1224 prevpa = memdesc->head_addr;
1225 prev = pa_to_block(prevpa);
1226 while (prev->next != 0 && prev->next < pa) {
1227 prevpa = prev->next;
1228 prev = pa_to_block(prevpa);
1229 }
1230 if (prevpa + prev->size > pa) {
1231 panic("%s: overlap 2: 0x%llx @ 0x%lx / 0x%lx @ 0x%lx",
1232 __func__, prev->size, prevpa, size, pa);
1233 }
1234
1235 /* Merge with or insert after the predecessor. */
1236 if (prevpa + prev->size == pa) {
1237 if (prev->next == 0) {
1238 prev->size += size;
1239 return;
1240 }
1241 next = pa_to_block(prev->next);
1242 if (prevpa + prev->size + size < prev->next) {
1243 prev->size += size;
1244 } else if (prevpa + prev->size + size == prev->next) {
1245 prev->next = next->next;
1246 prev->size += size + next->size;
1247 } else {
1248 panic("%s: overlap 3: 0x%llx @ 0x%lx / 0x%lx @ 0x%lx / "
1249 "0x%llx @ 0x%llx", __func__,
1250 prev->size, prevpa, size, pa,
1251 next->size, prev->next);
1252 }
1253 } else {
1254 /* The block is disjoint with prev. */
1255 KASSERT(prevpa + prev->size < pa);
1256
1257 block = pa_to_block(pa);
1258 if (pa + size < prev->next || prev->next == 0) {
1259 block->next = prev->next;
1260 block->size = size;
1261 prev->next = pa;
1262 } else if (pa + size == prev->next) {
1263 next = pa_to_block(prev->next);
1264 block->next = next->next;
1265 block->size = next->size + size;
1266 prev->next = pa;
1267 } else {
1268 next = pa_to_block(prev->next);
1269 panic("%s: overlap 4: 0x%llx @ 0x%lx / "
1270 "0x%lx @ 0x%lx / 0x%llx @ 0x%llx",
1271 __func__, prev->size, prevpa, size, pa,
1272 next->size, prev->next);
1273 }
1274 }
1275 }
1276 #endif /* NOCTBOOT > 0 */
1277
1278 #ifdef MULTIPROCESSOR
1279 uint32_t cpu_spinup_mask = 0;
1280 uint64_t cpu_spinup_a0, cpu_spinup_sp;
1281
1282 void
hw_cpu_boot_secondary(struct cpu_info *ci)
1284 {
1285 vaddr_t kstack;
1286
1287 kstack = alloc_contiguous_pages(USPACE);
1288 if (kstack == 0)
1289 panic("unable to allocate idle stack");
1290 ci->ci_curprocpaddr = (void *)kstack;
1291
1292 cpu_spinup_a0 = (uint64_t)ci;
1293 cpu_spinup_sp = (uint64_t)(kstack + USPACE);
1294 mips_sync();
1295
1296 cpu_spinup_mask = (uint32_t)ci->ci_cpuid;
1297
1298 while (!CPU_IS_RUNNING(ci))
1299 membar_sync();
1300 }
1301
1302 void
hw_cpu_hatch(struct cpu_info *ci)
1304 {
1305 /*
1306 * Set curcpu address on this processor.
1307 */
1308 setcurcpu(ci);
1309
1310 /*
1311 * Make sure we can access the extended address space.
1312 */
1313 setsr(getsr() | SR_KX | SR_UX);
1314
1315 octeon_tlb_init();
1316 tlb_set_pid(0);
1317
1318 /*
1319 * Turn off bootstrap exception vectors.
1320 */
1321 setsr(getsr() & ~SR_BOOT_EXC_VEC);
1322
1323 /*
1324 * Clear out the I and D caches.
1325 */
1326 Octeon_ConfigCache(ci);
1327 Mips_SyncCache(ci);
1328
1329 (*md_startclock)(ci);
1330
1331 octeon_intr_init();
1332 mips64_ipi_init();
1333
1334 ci->ci_flags |= CPUF_RUNNING;
1335 membar_sync();
1336
1337 ncpus++;
1338
1339 spl0();
1340 (void)updateimask(0);
1341
1342 sched_toidle();
1343 }
1344 #endif /* MULTIPROCESSOR */
1345