/*	$NetBSD: subr.S,v 1.4 2002/04/04 16:40:14 ragge Exp $	   */

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>

#include "assym.h"
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ibcs2.h"
#ifdef COMPAT_IBCS2
#include <compat/ibcs2/ibcs2_syscall.h>
#endif
#include "opt_compat_ultrix.h"
#ifdef COMPAT_ULTRIX
#include <compat/ultrix/ultrix_syscall.h>
#endif

#define JSBENTRY(x)	.globl x ; .align 2 ; x :

		.text

#ifdef	KERNEL_LOADABLE_BY_MOP
/*
 * This is a little tricky. The kernel is not loaded at the correct
 * address, so it must be moved: the copy routine below is first copied
 * out of the way, it then moves the kernel to the correct address and
 * jumps back into it.
 */
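
/*
 * The sequence implemented below, in rough outline:
 *  1. "1:" copies the copy routine (cps..cpe) to scratch space at 0x300000
 *     and sets up the source (0x80000200), destination (0x80000000) and
 *     end (_edata/_end) pointers for the move.
 *  2. The copied routine moves the image down to its linked address and
 *     clears memory up to _end (the bss).
 *  3. It then pushes a PSL/PC pair and REIs back into the relocated
 *     kernel at label "2:".
 */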
/* Copy routine */
cps:
2:	movb	(%r0)+,(%r1)+
	cmpl	%r0,%r7
	bneq	2b

3:	clrb	(%r1)+
	incl	%r0
	cmpl	%r0,%r6
	bneq	3b
	clrl	-(%sp)
	movl	%sp,%ap
	movl	$_cca,%r7
	movl	%r8,(%r7)
	movpsl	-(%sp)
	pushl	%r2
	rei
cpe:

/* Copy the copy routine */
1:	movab	cps,%r0
	movab	cpe,%r1
	movl	$0x300000,%sp
	movl	%sp,%r3
4:	movb	(%r0)+,(%r3)+
	cmpl	%r0,%r1
	bneq	4b
	movl	%r7,%r8
/* Ok, copy routine copied, set registers and rei */
	movab	_edata,%r7
	movab	_end,%r6
	movl	$0x80000000,%r1
	movl	$0x80000200,%r0
	subl3	$0x200,%r6,%r9
	movab	2f,%r2
	subl2	$0x200,%r2
	movpsl	-(%sp)
	pushab	4(%sp)
	rei

/*
 * First entry routine from boot. This should be in a file called locore.
 */
JSBENTRY(start)
	brb	1b				# A netbooted kernel starts here
#else
ASENTRY(start, 0)
#endif
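/*
 * Common startup path ("2:" below): record the end of the loaded image
 * (esym) and any symbol table information handed over by the boot program,
 * place proc0's u-area right after the symbol table and load PCBB/KSP/ESP
 * from it, give P0/P1 empty page tables, clear SCBB, and finally call
 * _start() with the address of the RPB (or 0 when booted by old bootblocks).
 */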
2:	bisl3	$0x80000000,%r9,_C_LABEL(esym)	# End of loaded code
	pushl	$0x1f0000			# Push a nice PSL
	pushl	$to				# Address to jump to
	rei					# change to kernel stack
to:	movw	$0xfff,_C_LABEL(panic)		# Make panic() save all registers
	cmpb	(%ap),$3			# symbols info present?
	blssu	3f				# nope, skip
	bisl3	$0x80000000,8(%ap),_C_LABEL(symtab_start)
						#   save start of symtab
	movl	12(%ap),_C_LABEL(symtab_nsyms)	#   save number of symbols
	bisl3	$0x80000000,%r9,_C_LABEL(symtab_end)
						#   save end of symtab
3:	addl3	_C_LABEL(esym),$0x3ff,%r0	# Round symbol table end
	bicl3	$0x3ff,%r0,_C_LABEL(proc0paddr)	# save proc0 uarea pointer
	bicl3	$0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
	mtpr	%r0,$PR_PCBB			# Save in IPR PCBB
	addl3	$USPACE,_C_LABEL(proc0paddr),%r0	# Get kernel stack top
	mtpr	%r0,$PR_KSP			# put in IPR KSP
	movl	%r0,_C_LABEL(Sysmap)		# SPT start addr after KSP
	movl	_C_LABEL(proc0paddr),%r0		# get PCB virtual address
	movab	IFTRAP(%r0),4(%r0)		# Save trap address in ESP
	mtpr	4(%r0),$PR_ESP			# Put it in ESP also

# Set some registers in known state
	movl	_C_LABEL(proc0paddr),%r0
	clrl	P0LR(%r0)
	clrl	P1LR(%r0)
	mtpr	$0,$PR_P0LR
	mtpr	$0,$PR_P1LR
	movl	$0x80000000,%r1
	movl	%r1,P0BR(%r0)
	movl	%r1,P1BR(%r0)
	mtpr	%r1,$PR_P0BR
	mtpr	%r1,$PR_P1BR
	clrl	IFTRAP(%r0)
	mtpr	$0,$PR_SCBB

# Copy the RPB to its new position
#if defined(COMPAT_14)
	tstl	(%ap)				# Any arguments?
	bneq	1f				# Yes, called from new boot
	movl	%r11,_C_LABEL(boothowto)		# Howto boot (single etc...)
#	movl	%r10,_C_LABEL(bootdev)		# uninteresting, will complain
	movl	%r8,_C_LABEL(avail_end)		# Usable memory (from VMB)
	clrl	-(%sp)				# Have no RPB
	brb	2f
#endif

1:	pushl	4(%ap)				# Address of old rpb
2:	calls	$1,_C_LABEL(_start)		# Jump away.
	/* NOTREACHED */


/*
 * Signal handler code.
 */
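/*
 * This trampoline (sigcode..esigcode) is copied out to user space by the
 * kernel.  It saves %r0-%r5, picks up the handler address the kernel left
 * on the stack, calls the handler with its three arguments, restores the
 * registers and issues sigreturn to restore the interrupted context; if
 * sigreturn somehow returns, the process exits.  The ibcs2 and Ultrix
 * variants below differ only in which sigreturn system call they invoke.
 */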

	.align	2
	.globl	_C_LABEL(sigcode),_C_LABEL(esigcode)
_C_LABEL(sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(esigcode):

#ifdef COMPAT_IBCS2
	.align	2
	.globl	_C_LABEL(ibcs2_sigcode),_C_LABEL(ibcs2_esigcode)
_C_LABEL(ibcs2_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$SYS___sigreturn14
	chmk	$SYS_exit
	halt
_C_LABEL(ibcs2_esigcode):
#endif /* COMPAT_IBCS2 */

#ifdef COMPAT_ULTRIX
	.align	2
	.globl	_C_LABEL(ultrix_sigcode),_C_LABEL(ultrix_esigcode)
_C_LABEL(ultrix_sigcode):
	pushr	$0x3f
	subl2	$0xc,%sp
	movl	0x24(%sp),%r0
	calls	$3,(%r0)
	popr	$0x3f
	chmk	$ULTRIX_SYS_sigreturn
	chmk	$SYS_exit
	halt
_C_LABEL(ultrix_esigcode):
#endif

	.align	2
	.globl	_C_LABEL(idsptch), _C_LABEL(eidsptch)
_C_LABEL(idsptch):	pushr	$0x3f
	.word	0x9f16		# jsb to absolute address
	.long	_C_LABEL(cmn_idsptch)	# the absolute address
	.long	0		# the callback interrupt routine
	.long	0		# its argument
	.long	0		# ptr to corresponding evcnt struct
_C_LABEL(eidsptch):

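/*
 * idsptch..eidsptch above is a template that is copied for each interrupt
 * vector; the copy's three zero longwords are filled in with the handler,
 * its argument and an optional evcnt pointer.  The jsb into cmn_idsptch
 * pushes the address of those longwords, which is what the movl (%sp)+
 * below picks up.
 */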
_C_LABEL(cmn_idsptch):
	movl	(%sp)+,%r0	# get pointer to idspvec
	movl	8(%r0),%r1	# get evcnt pointer
	beql	1f		# no ptr, skip increment
	incl	EV_COUNT(%r1)	# increment low longword
	adwc	$0,EV_COUNT+4(%r1) # add any carry to hi longword
1:	pushl	4(%r0)		# push argument
	calls	$1,*(%r0)	# call interrupt routine
	popr	$0x3f		# pop registers
	rei			# return from interrupt

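/*
 * badaddr(addr, size) probes an address with a byte, word or longword
 * access at IPL_HIGH.  memtest is pointed at the recovery label so that
 * the machine check handler can resume there; the saved IPL is restored
 * and 0 is returned if the access succeeded, non-zero if it caused a
 * machine check.
 */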
ENTRY(badaddr,0)			# Called with addr,b/w/l
	mfpr	$PR_IPL,%r0	# splhigh()
	mtpr	$IPL_HIGH,$PR_IPL
	movl	4(%ap),%r2	# First argument, the address
	movl	8(%ap),%r1	# Second argument, the size (b/w/l)
	pushl	%r0		# Save old IPL
	clrl	%r3
	movab	4f,_C_LABEL(memtest)	# Set the return address

	caseb	%r1,$1,$4	# What is the size
1:	.word	1f-1b
	.word	2f-1b
	.word	3f-1b		# This is unused
	.word	3f-1b

1:	movb	(%r2),%r1		# Test a byte
	brb	5f

2:	movw	(%r2),%r1		# Test a word
	brb	5f

3:	movl	(%r2),%r1		# Test a long
	brb	5f

4:	incl	%r3		# Got machine chk => addr bad
5:	mtpr	(%sp)+,$PR_IPL
	movl	%r3,%r0
	ret

#ifdef DDB
/*
 * DDB is the only code that uses setjmp/longjmp.
 */
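/*
 * The jmp_buf used here is four longwords: the caller's AP, FP and PC,
 * taken from the CALLS frame at 8/12/16(%fp), plus 28(%fp), the stack
 * pointer above the frame and setjmp's one-longword argument list.
 * longjmp() reloads them, returns its second argument in %r0 and jumps
 * to the saved PC.
 */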
	.globl	_C_LABEL(setjmp), _C_LABEL(longjmp)
_C_LABEL(setjmp):.word	0
	movl	4(%ap), %r0
	movl	8(%fp), (%r0)
	movl	12(%fp), 4(%r0)
	movl	16(%fp), 8(%r0)
	moval	28(%fp),12(%r0)
	clrl	%r0
	ret

_C_LABEL(longjmp):.word	0
	movl	4(%ap), %r1
	movl	8(%ap), %r0
	movl	(%r1), %ap
	movl	4(%r1), %fp
	movl	12(%r1), %sp
	jmp	*8(%r1)
#endif

#
# setrunqueue/remrunqueue fast variants.
#
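# Both are entered with "jsb" and take the proc pointer in %r0 (no CALLS
# frame).  Setrq inserts the process at the tail of sched_qs[priority/4]
# and sets the corresponding bit in sched_whichqs; Remrq removes it and
# clears the bit when that queue becomes empty.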

JSBENTRY(Setrq)
#ifdef DIAGNOSTIC
	tstl	4(%r0)	# Check that the process actually is off the queue
	beql	1f
	pushab	setrq
	calls	$1,_C_LABEL(panic)
setrq:	.asciz	"setrunqueue"
#endif
1:	extzv	$2,$6,P_PRIORITY(%r0),%r1		# get priority
	movaq	_C_LABEL(sched_qs)[%r1],%r2	# get address of queue
	insque	(%r0),*PH_RLINK(%r2)		# put proc last in queue
	bbss	%r1,_C_LABEL(sched_whichqs),1f	# set queue bit.
1:	rsb

JSBENTRY(Remrq)
	extzv	$2,$6,P_PRIORITY(%r0),%r1
#ifdef DIAGNOSTIC
	bbs	%r1,_C_LABEL(sched_whichqs),1f
	pushab	remrq
	calls	$1,_C_LABEL(panic)
remrq:	.asciz	"remrunqueue"
#endif
1:	remque	(%r0),%r2
	bneq	2f			# Not last process on queue
	bbsc	%r1,_C_LABEL(sched_whichqs),2f
2:	clrl	P_BACK(%r0)		# safety belt
	rsb

#
# Idle loop. Here we could do something fun, maybe, like calculating
# pi or something.
#
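# The idle loop spins at IPL 1 (we stay on the interrupt stack, so IPL 0
# is not allowed) until sched_whichqs shows something runnable, then raises
# IPL, re-takes the scheduler lock where needed and branches back to "lp"
# to pick the process up properly.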
idle:
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# release sched lock
#endif
	mtpr	$1,$PR_IPL 		# IPL cannot be 0 because we are
					# running on the interrupt stack
					# and may get interrupts

1:	tstl	_C_LABEL(sched_whichqs)	# Anything ready to run?
	beql	1b			# no, run the idle loop again.
/* Something may be runnable; block interrupts and recheck properly */
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
3:	bbssi	$0,_C_LABEL(sched_lock),3b	# acquire sched lock
#endif
	brb	lp			# check sched_whichqs again

#
# cpu_switch, cpu_exit and the idle loop are implemented in assembler
# for efficiency.  %r6 contains a pointer to the last process.  This is
# called at IPL_HIGH.
#
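# Swtch (entered with "jsb"): stop accounting for the old process, save its
# context with svpctx, find the highest-priority non-empty run queue with
# ffs, dequeue its head, mark it SONPROC and make it curproc, translate its
# PCB address to a physical address for PCBB, and resume it with ldpctx/rei.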

JSBENTRY(Swtch)
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	clrl	CI_CURPROC(%r1)		# Stop process accounting
	svpctx				# Save context if another CPU
					# gets control first (must be on
					# the interrupt stack when idling)


lp:	ffs	$0,$32,_C_LABEL(sched_whichqs),%r3 # Search for bit set
	beql	idle			# no bit set, go to idle loop

	movaq	_C_LABEL(sched_qs)[%r3],%r1	# get address of queue head
	remque	*(%r1),%r2		# remove proc pointed to by queue head
					# proc ptr is now in %r2
#ifdef DIAGNOSTIC
	bvc	1f			# check if something on queue
	pushab	noque
	calls	$1,_C_LABEL(panic)
#endif

1:	bneq	2f			# more processes on queue?
	bbsc	%r3,_C_LABEL(sched_whichqs),2f	# no, clear bit in whichqs
2:	clrl	P_BACK(%r2)		# clear proc backpointer
	mfpr	$PR_SSP,%r1		# Get ptr to this cpu_info struct
	/* p->p_cpu initialized in fork1() for single-processor */
#if defined(MULTIPROCESSOR)
	movl	%r1,P_CPU(%r2)		# p->p_cpu = curcpu();
#endif
	movb	$SONPROC,P_STAT(%r2)	# p->p_stat = SONPROC;
	movl	%r2,CI_CURPROC(%r1)	# set new process running
	clrl	CI_WANT_RESCHED(%r1)	# we are now changing process
	movl	P_ADDR(%r2),%r0		# Get pointer to new pcb.
	addl3	%r0,$IFTRAP,%r1		# Save for copy* functions.
	mtpr	%r1,$PR_ESP		# Use ESP as CPU-specific pointer
	movl	%r1,ESP(%r0)		# Must save in PCB also.
	mfpr	$PR_SSP,%r1		# New process must inherit cpu_info
	movl	%r1,SSP(%r0)		# Put it in new PCB

#
# Nice routine to get physical from virtual addresses.
#
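# The PCB address in %r0 is a system-space virtual address, but PCBB wants
# a physical one.  With 512-byte VAX pages, bits 9-29 give the virtual page
# number, so index Sysmap with it and shift the PTE's page frame number up
# by 9 to form the physical address.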
	extzv	$9,$21,%r0,%r1		# extract virtual page number
	ashl	$9,*_C_LABEL(Sysmap)[%r1],%r3

	mtpr	%r3,$PR_PCBB
	ldpctx
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_unlock_idle)
#elif defined(MULTIPROCESSOR)
	clrl	_C_LABEL(sched_lock)	# clear sched lock
#endif
	rei

#if defined(MULTIPROCESSOR)
	.align 2
	.globl	_C_LABEL(tramp)	# used to kick off multiprocessor systems.
_C_LABEL(tramp):
	ldpctx
	rei
#endif

#
# The last routine called by a process.
#
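# cpu_exit() deactivates the process's pmap, then moves onto a per-CPU
# scratch page (ci_exit), using it as both stack and PCB, so that exit2()
# can release the process's last resources (including its own u-area), and
# finally re-enters the scheduler through Swtch with no previous process
# (%r6 = 0).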

ENTRY(cpu_exit,0)
	movl	4(%ap),%r6	# Process pointer in %r6

	pushl	%r6
	calls	$1,_C_LABEL(pmap_deactivate)

	mtpr	$IPL_CLOCK,$PR_IPL # Block almost everything
	mfpr	$PR_SSP,%r7	# get cpu_info ptr
	movl	CI_EXIT(%r7),%r8	# scratch page address
	movab	512(%r8),%sp	# change stack
	bicl2	$0xc0000000,%r8	# get physical address
	mtpr	%r8,$PR_PCBB	# new PCB
	mtpr	%r7,$PR_SSP	# In case...
	pushl	%r6
	calls	$1,_C_LABEL(exit2)	# release last resources.
	mtpr	$IPL_HIGH,$PR_IPL	# block all types of interrupts
#if defined(LOCKDEBUG)
	calls	$0,_C_LABEL(sched_lock_idle)
#elif defined(MULTIPROCESSOR)
1:	bbssi	$0,_C_LABEL(sched_lock),1b	# acquire sched lock
#endif
	clrl	%r6
	brw	Swtch

#
# copy/fetch/store routines.
#
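# These all rely on the same fault-recovery convention: the ESP internal
# register points at the current pcb's fault-catch slot (set up at boot and
# in Swtch above).  Each routine stores the address of a local recovery
# label there before touching user memory, so a fault during the access
# continues at that label instead of bringing the kernel down.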

ENTRY(copyout, 0)
	movl	8(%ap),%r2
	blss	3f		# kernel space
	movl	4(%ap),%r1
	brb	2f

ENTRY(copyin, 0)
	movl	4(%ap),%r1
	blss	3f		# kernel space
	movl	8(%ap),%r2
2:	mfpr	$PR_ESP,%r3
	movab	1f,(%r3)
	movc3	12(%ap),(%r1),(%r2)
1:	mfpr	$PR_ESP,%r3
	clrl	(%r3)
	ret

3:	mnegl	$1,%r0
	ret

ENTRY(kcopy,0)
	mfpr	$PR_ESP,%r3
	movl	(%r3),-(%sp)
	movab	1f,(%r3)
	movl	4(%ap),%r1
	movl	8(%ap),%r2
	movc3	12(%ap),(%r1), (%r2)
	clrl	%r1
1:	mfpr	$PR_ESP,%r3
	movl	(%sp)+,(%r3)
	movl	%r1,%r0
	ret

/*
 * copy{in,out}str() copies a NUL-terminated string between user space
 * and kernel space.
 * Security checks:
 *	1) user space address must be < KERNBASE
 *	2) the VM system will do the checks while copying
 */
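/*
 * Roughly, in C terms (an illustrative sketch only; see the prototypes in
 * <sys/systm.h>):
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
 *
 * returns EFAULT for a kernel-space source address, ENAMETOOLONG if no NUL
 * was found within len bytes, and 0 on success; on success *done (if
 * non-NULL) is set to the number of bytes copied, including the NUL.
 */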
ENTRY(copyinstr, 0)
	tstl	4(%ap)		# kernel address?
	bgeq	8f		# no, continue
6:	movl	$EFAULT,%r0
	movl	16(%ap),%r2
	beql	7f
	clrl	(%r2)
7:	ret

ENTRY(copyoutstr, 0)
	tstl	8(%ap)		# kernel address?
	bgeq	8f		# no, continue
	brb	6b		# yes, return EFAULT

ENTRY(copystr,0)
8:	movl	4(%ap),%r5	# from
	movl	8(%ap),%r4	# to
	movl	12(%ap),%r3	# len
	movl	16(%ap),%r2	# copied
	clrl	%r0
	mfpr	$PR_ESP,%r1
	movab	3f,(%r1)

	tstl	%r3		# any chars to copy?
	bneq	1f		# yes, jump for more
0:	tstl	%r2		# save copied len?
	beql	2f		# no
	subl3	4(%ap),%r5,(%r2)	# save copied len
2:	ret

1:	movb	(%r5)+,(%r4)+	# copy one char
	beql	0b		# jmp if last char
	sobgtr	%r3,1b		# copy one more
	movl	$ENAMETOOLONG,%r0 # inform about too long string
	brb	0b		# out of chars

3:	mfpr	$PR_ESP,%r1
	clrl	(%r1)
	brb	0b

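/*
 * subyte/suword/suswintr store a byte/longword/word into user space and
 * fusword/fuswintr fetch a word.  All of them refuse kernel-space
 * addresses (returning -1) and catch faults through the same ESP
 * fault-catch slot as the copy routines above.
 */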
ENTRY(subyte,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movb	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suword,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movl	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

ENTRY(suswintr,0)
	movl	4(%ap),%r0
	blss	3f		# illegal space
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movw	8(%ap),(%r0)
	clrl	%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

3:	mnegl	$1,%r0
	ret

	.align	2
ALTENTRY(fusword)
ENTRY(fuswintr,0)
	movl	4(%ap),%r0
	blss	3b
	mfpr	$PR_ESP,%r1
	movab	1f,(%r1)
	movzwl	(%r0),%r1
1:	mfpr	$PR_ESP,%r2
	clrl	(%r2)
	movl	%r1,%r0
	ret

#if defined(MULTIPROCESSOR)

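# Simple spin lock primitives, entered with "jsb" with the lock address in
# %r1.  Slock spins with bbssi until it wins bit 0, Slocktry makes a single
# attempt and returns 1 in %r0 on success (0 otherwise), and Sunlock clears
# the bit with bbcci; the interlocked bit instructions provide the required
# atomicity between processors.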
JSBENTRY(Slock)
1:	bbssi	$0,(%r1),1b
	rsb

JSBENTRY(Slocktry)
	clrl	%r0
	bbssi	$0,(%r1),1f
	incl	%r0
1:	rsb

JSBENTRY(Sunlock)
	bbcci	$0,(%r1),1f
1:	rsb

#endif

#
# data department
#
	.data

	.globl _C_LABEL(memtest)
_C_LABEL(memtest):		# memory test in progress
	.long 0

#ifdef __ELF__
	.section	.rodata
#endif
noque:	.asciz	"swtch"
