/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bzero(ptr:%rdi, bytes:%rsi)
 *
 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
 * a %xmm0 loop on a modern Intel CPU.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(bzero)
	movq	%rsi,%rcx
	xorl	%eax,%eax
	shrq	$3,%rcx
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx
	rep
	stosb
	ret

/*
 * pagezero(ptr:%rdi)
 *
 * Using rep stosq is nearly as fast as using %xmm0 on a modern Intel CPU,
 * and about 70% faster than a %rax loop.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx
	xorl	%eax,%eax
	cld
	rep
	stosq
	ret

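/*
 * Illustrative note (not part of the original source): with the x86_64
 * PAGE_SIZE of 4096 bytes, PAGE_SIZE>>3 loads %rcx with 512, so the
 * rep stosq above clears the page as 512 eight-byte stores:
 *
 *	4096 bytes / 8 bytes per stosq = 512 iterations
 */
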
/*
 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	ret

/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	ALIGN_TEXT
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std
	rep
	movsb
	movq	%rdx,%rcx			/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld
	ret

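/*
 * Usage sketch (illustrative, not part of the original source).  At the C
 * level bcopy() is the overlap-safe copy; the check at the top of bcopy is
 * equivalent to
 *
 *	void bcopy(const void *src, void *dst, size_t len);
 *
 *	if ((uintptr_t)dst - (uintptr_t)src < len)
 *		... destination overlaps the source from above:
 *		    copy backwards, high addresses first ...
 *	else
 *		... copy forwards ...
 *
 * so shifting a buffer up in place, e.g. bcopy(buf, buf + 1, n), is
 * handled correctly by the backwards path.
 */
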
ENTRY(reset_dbregs)
	movq	$0x200,%rax   /* the manual says that bit 10 must be set to 1 */
	movq    %rax,%dr7     /* disable all breakpoints first */
	movq    $0,%rax
	movq    %rax,%dr0
	movq    %rax,%dr1
	movq    %rax,%dr2
	movq    %rax,%dr3
	movq    %rax,%dr6
	ret

/*
 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdi,%r8
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	movq	%r8,%rax
	ret

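/*
 * Usage sketch (illustrative, not part of the original source): the %r8
 * save/restore above implements the usual C contract of returning the
 * destination pointer, roughly
 *
 *	void *memcpy(void *dst, const void *src, size_t len);	(returns dst)
 *
 * Unlike bcopy() above, the regions must not overlap; callers that cannot
 * rule out overlap should use bcopy() instead.
 */
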
/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax
	movq	%rsi,%rdi
	movq	%rdx,%rcx
	cld
	rep
	stosw
	ret

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */

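/*
 * Usage sketch (illustrative, not part of the original source).  Callers
 * never see the onfault machinery; they only see the C-level contract that
 * these routines return 0 on success or EFAULT when a user address is bad:
 *
 *	struct some_args a;	(some_args and user_ptr are hypothetical names)
 *	int error;
 *
 *	error = copyin(user_ptr, &a, sizeof(a));
 *	if (error)
 *		return (error);		... EFAULT came from the fault path ...
 *	...
 *	error = copyout(&a, user_ptr, sizeof(a));
 */
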
/*
 * std_copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl
	rep
	movsb

done_copyout:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret

/*
 * std_copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret

/*
 * casu32 - Compare and set user integer.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * swapu32 - Swap int in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	xchgl	%eax,(%rdi)

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * casu64 - Compare and set user word.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * swapu64 - Swap long in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	xchgq	%rax,(%rdi)

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

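/*
 * Usage sketch (illustrative, not part of the original source).  Going by
 * the register comments above, the C-level view of casu32() is roughly
 *
 *	int casu32(volatile u_int *uaddr, u_int old, u_int new);
 *
 * returning the value found at uaddr (equal to old on success) or -1 on a
 * fault.  A typical compare-and-swap retry loop, ignoring the ambiguity
 * between a stored value of -1 and a fault, would look like:
 *
 *	u_int cur = expected;
 *	for (;;) {
 *		u_int prev = casu32(uaddr, cur, cur + 1);
 *		if (prev == cur)
 *			break;		... the CAS took effect ...
 *		cur = prev;		... retry with the observed value ...
 *	}
 */
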
/*
 * Fetch (load) a 64-bit word, a 32-bit word, or an 8-bit byte from user
 * memory.  All these functions are MPSAFE.
 * addr = %rdi
 */

ENTRY(std_fuword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(std_fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret

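/*
 * Usage sketch (illustrative, not part of the original source).  The fault
 * path above returns -1, so callers treat -1 as "could not read"; assuming
 * a prototype of int fubyte(const void *uaddr):
 *
 *	int c = fubyte(uaddr);
 *	if (c == -1)
 *		return (EFAULT);	... bad user address ...
 *	... use c (0..255) ...
 *
 * For fuword32()/fuword64() a legitimate all-ones value is indistinguishable
 * from the -1 error return, so they are only suitable where that ambiguity
 * is acceptable.
 */
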
/*
 * Store a 64-bit word, a 32-bit word, or an 8-bit byte to user memory.
 * All these functions are MPSAFE.
 *
 * addr = %rdi, value = %rsi
 *
 * Write a long
 */
ENTRY(std_suword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * Write an int
 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' to 'to', stopping when a NUL (0) character
 *	is reached.  Return ENAMETOOLONG if the string is longer than maxlen,
 *	and EFAULT on protection violations.  If lencopied is non-NULL,
 *	return the actual length in *lencopied.
 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8
	movq	%r8,(%r9)
1:
	ret


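/*
 * Usage sketch (illustrative, not part of the original source), using the
 * conventional kernel prototype
 *
 *	int copyinstr(const void *uaddr, void *kaddr, size_t len,
 *		      size_t *lencopied);
 *
 * for example to pull a path name in from user space (upath is a
 * hypothetical user pointer):
 *
 *	char path[MAXPATHLEN];
 *	size_t len;
 *	int error;
 *
 *	error = copyinstr(upath, path, sizeof(path), &len);
 *	if (error)
 *		return (error);		... ENAMETOOLONG or EFAULT ...
 *	... len counts the copied bytes including the terminating NUL ...
 */
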
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8
	movq	%r8,(%rcx)
7:
	ret

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch queue */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

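/*
 * Usage sketch (illustrative, not part of the original source).  As the
 * register saves above show, setjmp() records the callee-saved registers,
 * the stack pointer and the return address into an eight-quadword (64-byte)
 * buffer and returns 0; longjmp() on the same buffer unwinds back to that
 * point and makes setjmp() appear to return 1.  With jb pointing at such a
 * save area (the exact kernel jump-buffer type is not assumed here):
 *
 *	if (setjmp(jb) == 0) {
 *		... code that may bail out via longjmp(jb) ...
 *	} else {
 *		... resumed here after longjmp(); setjmp "returned" 1 ...
 *	}
 */
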
/*
 * Support for reading MSRs in a safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read the MSR selected by %ecx.  Returns the
				   high 32 bits in %edx, the low 32 in %eax */
	salq	$32,%rdx	/* shift %rdx left by 32 bits */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret

/*
 * Support for writing MSRs in a safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq    %rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax
	sarq	$32,%rsi
	movl	%esi,%edx
	wrmsr			/* Write the MSR selected by %ecx.  Takes the
				   high 32 bits in %edx, the low 32 in %eax. */
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret

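/*
 * Usage sketch (illustrative, not part of the original source), based on
 * the prototypes noted above.  Because a faulting rdmsr/wrmsr lands in
 * msr_onfault and yields EFAULT, callers can probe MSRs that may not exist
 * on the running CPU (msr_num is a placeholder for the MSR of interest):
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(msr_num, &val) == 0) {
 *		... the MSR exists; val holds its contents ...
 *	} else {
 *		... the access faulted (EFAULT); treat the MSR as absent ...
 *	}
 */
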
/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl bbhead
bbhead:
	.quad 0

	.text
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)
	movq	bbhead,%rax
	movq	%rax,32(%rdi)
	movq	%rdi,bbhead
	NON_GPROF_RET