/*	$NetBSD: machdep.c,v 1.39 2002/05/13 06:17:36 matt Exp $	*/

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat_netbsd.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/map.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/user.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

#include <net/netisr.h>

#include <machine/autoconf.h>
#include <machine/bat.h>
#include <machine/bootinfo.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/pmap.h>
#include <machine/platform.h>
#include <machine/powerpc.h>
#include <machine/residual.h>
#include <machine/trap.h>

#include <dev/cons.h>

#include "com.h"
#if (NCOM > 0)
#include <sys/termios.h>
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
void comsoft(void);
#endif

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#endif

void initppc __P((u_long, u_long, u_int, void *));
void dumpsys __P((void));
void strayintr __P((int));
int lcsplx __P((int));

/*
 * Global variables used here and there
 */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

char bootinfo[BOOTINFO_MAXSIZE];

char machine[] = MACHINE;		/* machine */
char machine_arch[] = MACHINE_ARCH;	/* machine architecture */

struct pcb *curpcb;
struct pmap *curpm;
struct proc *fpuproc;

extern struct user *proc0paddr;

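/*
 * Initial BAT (Block Address Translation) table; initppc() loads the
 * entries set up below into the PowerPC IBAT/DBAT registers.
 */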
struct bat battable[16];

vaddr_t prep_intr_reg;			/* PReP interrupt vector register */

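/*
 * Physical and available memory regions, handed to the pmap module
 * through mem_regions() below.
 */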
#define	OFMEMREGIONS	32
struct mem_region physmemr[OFMEMREGIONS], availmemr[OFMEMREGIONS];

paddr_t msgbuf_paddr;
vaddr_t msgbuf_vaddr;

paddr_t avail_end;			/* XXX temporary */

void install_extint __P((void (*)(void)));

RESIDUAL *res;
RESIDUAL resdata;

void
initppc(startkernel, endkernel, args, btinfo)
	u_long startkernel, endkernel;
	u_int args;
	void *btinfo;
{
	extern int trapcode, trapsize;
	extern int alitrap, alisize;
	extern int dsitrap, dsisize;
	extern int isitrap, isisize;
	extern int decrint, decrsize;
	extern int tlbimiss, tlbimsize;
	extern int tlbdlmiss, tlbdlmsize;
	extern int tlbdsmiss, tlbdsmsize;
#ifdef DDB
	extern int ddblow, ddbsize;
	extern void *startsym, *endsym;
#endif
	int exc, scratch;

	/*
	 * copy bootinfo
	 */
	memcpy(bootinfo, btinfo, sizeof(bootinfo));

	/*
	 * copy residual data
	 */
	{
		struct btinfo_residual *resinfo;

		resinfo =
		    (struct btinfo_residual *)lookup_bootinfo(BTINFO_RESIDUAL);
		if (!resinfo)
			panic("residual information not found in bootinfo");

		if (((RESIDUAL *)resinfo->addr != 0) &&
		    ((RESIDUAL *)resinfo->addr)->ResidualLength != 0) {
			memcpy(&resdata, resinfo->addr, sizeof(resdata));
			res = &resdata;
		} else
			panic("No residual data.");
	}

	/*
	 * Set memory region
	 */
	{
		u_long memsize = res->TotalMemory;

		physmemr[0].start = 0;
		physmemr[0].size = memsize & ~PGOFSET;
		availmemr[0].start = (endkernel + PGOFSET) & ~PGOFSET;
		availmemr[0].size = memsize - availmemr[0].start;
	}
	avail_end = physmemr[0].start + physmemr[0].size;    /* XXX temporary */

	/*
	 * Set CPU clock
	 */
	{
		struct btinfo_clock *clockinfo;
		extern u_long ticks_per_sec, ns_per_tick;

		clockinfo =
		    (struct btinfo_clock *)lookup_bootinfo(BTINFO_CLOCK);
		if (!clockinfo)
			panic("clock information not found in bootinfo");

		ticks_per_sec = clockinfo->ticks_per_sec;
		ns_per_tick = 1000000000 / ticks_per_sec;
	}

	/* Initialize the CPU type */
	ident_platform();

	proc0.p_addr = proc0paddr;
	memset(proc0.p_addr, 0, sizeof *proc0.p_addr);

	curpcb = &proc0paddr->u_pcb;

	curpm = curpcb->pcb_pmreal = curpcb->pcb_pm = pmap_kernel();

	/*
	 * boothowto
	 */
	boothowto = args;

	/*
	 * Initialize bus_space.
	 */
	prep_bus_space_init();

	/*
	 * The i386 port says this shouldn't be here, but I really think
	 * the console should be initialized as early as possible.
	 */
	consinit();

	/*
	 * Initialize BAT registers to unmapped to not generate
	 * overlapping mappings below.
	 */
	asm volatile ("mtibatu 0,%0" :: "r"(0));
	asm volatile ("mtibatu 1,%0" :: "r"(0));
	asm volatile ("mtibatu 2,%0" :: "r"(0));
	asm volatile ("mtibatu 3,%0" :: "r"(0));
	asm volatile ("mtdbatu 0,%0" :: "r"(0));
	asm volatile ("mtdbatu 1,%0" :: "r"(0));
	asm volatile ("mtdbatu 2,%0" :: "r"(0));
	asm volatile ("mtdbatu 3,%0" :: "r"(0));

	/*
	 * Set up initial BAT table
	 */
	/* map the lowest 256 MB area */
	battable[0x00000000 >> 28].batl =
	    BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x00000000 >> 28].batu =
	    BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/* map the PCI/ISA I/O 256 MB area */
	battable[PREP_BUS_SPACE_IO >> 28].batl =
	    BATL(PREP_BUS_SPACE_IO, BAT_I | BAT_G, BAT_PP_RW);
	battable[PREP_BUS_SPACE_IO >> 28].batu =
	    BATU(PREP_BUS_SPACE_IO, BAT_BL_256M, BAT_Vs);

	/* map the PCI/ISA MEMORY 256 MB area */
	battable[PREP_BUS_SPACE_MEM >> 28].batl =
	    BATL(PREP_BUS_SPACE_MEM, BAT_I | BAT_G, BAT_PP_RW);
	battable[PREP_BUS_SPACE_MEM >> 28].batu =
	    BATU(PREP_BUS_SPACE_MEM, BAT_BL_256M, BAT_Vs);

	/*
	 * Now setup fixed bat registers
	 */
	asm volatile ("mtibatl 0,%0; mtibatu 0,%1"
		      :: "r"(battable[0x00000000 >> 28].batl),
			 "r"(battable[0x00000000 >> 28].batu));

	asm volatile ("mtdbatl 0,%0; mtdbatu 0,%1"
		      :: "r"(battable[0x00000000 >> 28].batl),
			 "r"(battable[0x00000000 >> 28].batu));
	asm volatile ("mtdbatl 1,%0; mtdbatu 1,%1"
		      :: "r"(battable[PREP_BUS_SPACE_IO >> 28].batl),
			 "r"(battable[PREP_BUS_SPACE_IO >> 28].batu));
	asm volatile ("mtdbatl 2,%0; mtdbatu 2,%1"
		      :: "r"(battable[PREP_BUS_SPACE_MEM >> 28].batl),
			 "r"(battable[PREP_BUS_SPACE_MEM >> 28].batu));

	asm volatile ("sync; isync");
	/*
	 * Set up trap vectors
	 */
	for (exc = EXC_RSVD; exc <= EXC_LAST; exc += 0x100)
		switch (exc) {
		default:
			memcpy((void *)exc, &trapcode, (size_t)&trapsize);
			break;
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
		case EXC_ALI:
			memcpy((void *)EXC_ALI, &alitrap, (size_t)&alisize);
			break;
		case EXC_DSI:
			memcpy((void *)EXC_DSI, &dsitrap, (size_t)&dsisize);
			break;
		case EXC_ISI:
			memcpy((void *)EXC_ISI, &isitrap, (size_t)&isisize);
			break;
		case EXC_DECR:
			memcpy((void *)EXC_DECR, &decrint, (size_t)&decrsize);
			break;
		case EXC_IMISS:
			memcpy((void *)EXC_IMISS, &tlbimiss,
			    (size_t)&tlbimsize);
			break;
		case EXC_DLMISS:
			memcpy((void *)EXC_DLMISS, &tlbdlmiss,
			    (size_t)&tlbdlmsize);
			break;
		case EXC_DSMISS:
			memcpy((void *)EXC_DSMISS, &tlbdsmiss,
			    (size_t)&tlbdsmsize);
			break;
#ifdef DDB
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
			memcpy((void *)exc, &ddblow, (size_t)&ddbsize);
			break;
#endif
		}

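	/*
	 * The handlers above were written via the data cache; flush them
	 * out and invalidate the instruction cache so the CPU fetches the
	 * newly installed vector code.
	 */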
	__syncicache((void *)EXC_RST, EXC_LAST - EXC_RST + 0x100);

	/*
	 * external interrupt handler install
	 */
	install_extint(*platform->ext_intr);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
	asm volatile ("eieio; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
		      : "=r"(scratch) : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));

	/*
	 * Set the page size.
	 */
	uvm_setpagesize();

	/*
	 * Initialize pmap module.
	 */
	pmap_bootstrap(startkernel, endkernel, NULL);

#ifdef DDB
	ddb_init((int)((u_long)endsym - (u_long)startsym), startsym, endsym);

	if (boothowto & RB_KDB)
		Debugger();
#endif
}

void
mem_regions(mem, avail)
	struct mem_region **mem, **avail;
{

	*mem = physmemr;
	*avail = availmemr;
}

void
install_extint(handler)
	void (*handler) __P((void));
{
	extern u_char extint[];
	extern u_long extsize;
	extern u_long extint_call;
	u_long offset = (u_long)handler - (u_long)&extint_call;
	int omsr, msr;

#ifdef DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: too far away");
#endif
	asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
		      : "=r"(omsr), "=r"(msr) : "K"((u_short)~PSL_EE));
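	/*
	 * extint_call is a branch instruction inside the extint template;
	 * patch its displacement field so it jumps to the real handler,
	 * then copy the template to the external interrupt vector.
	 */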
	extint_call = (extint_call & 0xfc000003) | offset;
	memcpy((void *)EXC_EXI, &extint, (size_t)&extsize);
	__syncicache((void *)&extint_call, sizeof extint_call);
	__syncicache((void *)EXC_EXI, (int)&extsize);
	asm volatile ("mtmsr %0" :: "r"(omsr));
}

/*
 * Machine dependent startup code.
 */
void
cpu_startup()
{
	int sz, i;
	caddr_t v;
	vaddr_t minaddr, maxaddr;
	int base, residual;
	char pbuf[9];

	proc0.p_addr = proc0paddr;
	v = (caddr_t)proc0paddr + USPACE;

	/*
	 * Map the PReP interrupt vector register.
	 */
	if (!(prep_intr_reg = uvm_km_valloc(kernel_map, round_page(NBPG))))
		panic("startup: no room for interrupt register");
	pmap_enter(pmap_kernel(), prep_intr_reg, PREP_INTR_REG,
	    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());

	/*
	 * Initialize error message buffer (at end of core).
	 */
	if (!(msgbuf_vaddr = uvm_km_alloc(kernel_map, round_page(MSGBUFSIZE))))
		panic("startup: no room for message buffer");
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), msgbuf_vaddr + i * NBPG,
		    msgbuf_paddr + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf("%s", version);

	printf("Model: %s\n", res->VitalProductData.PrintableModel);
	cpu_identify(NULL, 0);

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	sz = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(sz),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	if (base >= MAXBSIZE) {
		/* Don't want to alloc more physical mem than ever needed */
		base = MAXBSIZE;
		residual = 0;
	}
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("startup: not enough memory for "
					"buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(kernel_map->pmap);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, 0, FALSE, NULL);

#ifndef PMAP_MAP_POOLPAGE
	/*
	 * We need to allocate an mbuf cluster submap if the pool
	 * allocator isn't using direct-mapped pool pages.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);
#endif

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up the buffers.
	 */
	bufinit();

	/*
	 * Now allow hardware interrupts.
	 */
	{
		int msr;

		splraise(-1);
		asm volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0"
			      : "=r"(msr) : "K"(PSL_EE));
	}

	/*
	 * Now safe for bus space allocation to use malloc.
	 */
	prep_bus_space_mallocok();
}

/*
 * lookup_bootinfo:
 * Look up information in the boot loader's bootinfo.
 */
void *
lookup_bootinfo(type)
	int type;
{
	struct btinfo_common *bt;
	struct btinfo_common *help = (struct btinfo_common *)bootinfo;

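	/*
	 * Walk the chain of bootinfo records; each record's "next" field
	 * is the byte offset from that record to the following one.
	 */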
	do {
		bt = help;
		if (bt->type == type)
			return (help);
		help = (struct btinfo_common *)((char*)help + bt->next);
	} while (bt->next &&
		(size_t)help < (size_t)bootinfo + sizeof (bootinfo));

	return (NULL);
}

void
dumpsys()
{

	printf("dumpsys: TBD\n");
}

/*
 * Soft networking interrupts.
 */
void
softnet()
{
	extern volatile int netisr;
	int isr;

	isr = netisr;
	netisr = 0;

#define DONETISR(bit, fn) do {	\
	if (isr & (1 << bit))	\
		fn();		\
} while (0)

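	/*
	 * netisr_dispatch.h expands DONETISR() once per configured
	 * protocol, running its handler if the corresponding bit is set.
	 */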
#include <net/netisr_dispatch.h>

#undef DONETISR
}

/*
 * Soft tty interrupts.
 */
void
softserial()
{

#if (NCOM > 0)
	comsoft();
#endif
}

/*
 * Stray interrupts.
 */
void
strayintr(irq)
	int irq;
{

	log(LOG_ERR, "stray interrupt %d\n", irq);
}

/*
 * Halt or reboot the machine after syncing/dumping according to howto.
 */
void
cpu_reboot(howto, what)
	int howto;
	char *what;
{
	static int syncing;

	if (cold) {
		howto |= RB_HALT;
		goto halt_sys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && syncing == 0) {
		syncing = 1;
		vfs_shutdown();		/* sync */
		resettodr();		/* set wall clock */
	}

	/* Disable intr */
	splhigh();

	/* Do dump if requested */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

halt_sys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* for proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n\n");

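	/* Ask the platform code to reset the machine; spin forever if that fails. */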
	(*platform->reset)();

	for (;;)
		continue;
	/* NOTREACHED */
}

/*
 * lcsplx() is called from locore; it is an open-coded version of
 * splx() differing in that it returns the previous priority level.
 */
int
lcsplx(ipl)
	int ipl;
{
	int oldcpl;

	__asm__ volatile("sync; eieio\n");	/* reorder protect */
	oldcpl = cpl;
	cpl = ipl;
	if (ipending & ~ipl)
		do_pending_int();
	__asm__ volatile("sync; eieio\n");	/* reorder protect */

	return (oldcpl);
}

/*
 * Allocate vm space and mapin the I/O address
 */
void *
mapiodev(pa, len)
	paddr_t pa;
	psize_t len;
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_valloc(kernel_map, len);

	if (va == 0)
		return NULL;

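	/* Enter a wired kernel mapping for each page of the device range. */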
	for (; len > 0; len -= NBPG) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE);
		faddr += NBPG;
		taddr += NBPG;
	}
	pmap_update(pmap_kernel());

	return (void *)(va + off);
}
710