/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include "opt_ddb.h"

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bcopy family
 * void bzero(void *buf, size_t len)
 */

/* done */
ENTRY(bzero)
	movq	%rsi,%rcx
	xorl	%eax,%eax
	shrq	$3,%rcx
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx
	rep
	stosb
	ret

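/*
 * For reference, the split above is: clear whole 64-bit words with
 * "rep stosq", then mop up the remaining 0-7 bytes with "rep stosb".
 * A rough C sketch of the same logic (illustration only, not the code
 * the kernel actually runs):
 *
 *	void
 *	bzero_sketch(void *buf, size_t len)
 *	{
 *		unsigned char *p = buf;
 *		size_t nq = len >> 3;		// whole quadwords
 *		size_t nb = len & 7;		// leftover bytes
 *
 *		while (nq--) {
 *			*(unsigned long *)p = 0;
 *			p += 8;
 *		}
 *		while (nb--)
 *			*p++ = 0;
 *	}
 */
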
/* Address: %rdi */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx
	subq	%rdx,%rdi
	xorl	%eax,%eax
1:
	movq	%rax,(%rdi,%rdx)	/* movnti */
	movq	%rax,8(%rdi,%rdx)	/* movnti */
	movq	%rax,16(%rdi,%rdx)	/* movnti */
	movq	%rax,24(%rdi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	1b
	/*sfence*/
	ret

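/*
 * pagezero() uses a common unrolled-loop idiom: %rdi is advanced to the
 * end of the page and %rdx runs from -PAGE_SIZE up toward 0, so the
 * "addq" that steps the index also sets the flags the loop tests.  A
 * hedged C sketch of the idiom (names are illustrative only):
 *
 *	void
 *	pagezero_sketch(void *page)
 *	{
 *		char *end = (char *)page + PAGE_SIZE;
 *		long off;
 *
 *		for (off = -PAGE_SIZE; off != 0; off += 32) {
 *			*(unsigned long *)(end + off) = 0;
 *			*(unsigned long *)(end + off + 8) = 0;
 *			*(unsigned long *)(end + off + 16) = 0;
 *			*(unsigned long *)(end + off + 24) = 0;
 *		}
 *	}
 */
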
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	ret

/*
 * bcopy(src, dst, cnt)
 *       rdi, rsi, rdx
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(generic_bcopy)	/* generic_bcopy is bcopy without FPU */
ENTRY(ovbcopy) /* our bcopy doesn't use the FPU, so ovbcopy is the same */
ENTRY(bcopy)
	xchgq	%rsi,%rdi
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std
	rep
	movsb
	movq	%rdx,%rcx			/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld
	ret
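
/*
 * The overlap test above relies on unsigned arithmetic: with
 * %rax = dst - src, the copy only needs to run backwards when
 * 0 <= dst - src < len, i.e. when the destination starts inside the
 * source region.  A hedged C sketch of that decision (illustrative only):
 *
 *	void
 *	bcopy_sketch(const void *src, void *dst, size_t len)
 *	{
 *		if ((uintptr_t)dst - (uintptr_t)src < (uintptr_t)len) {
 *			// dst overlaps the tail of src: copy backwards
 *			for (size_t i = len; i-- > 0; )
 *				((char *)dst)[i] = ((const char *)src)[i];
 *		} else {
 *			// safe to copy forwards (word-at-a-time in the
 *			// assembly above)
 *			for (size_t i = 0; i < len; i++)
 *				((char *)dst)[i] = ((const char *)src)[i];
 *		}
 *	}
 */
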
ENTRY(reset_dbregs)
	movq	$0x200,%rax   /* the manual says that bit 10 must be set to 1 */
	movq    %rax,%dr7     /* disable all breakpoints first */
	movq    $0,%rax
	movq    %rax,%dr0
	movq    %rax,%dr1
	movq    %rax,%dr2
	movq    %rax,%dr3
	movq    %rax,%dr6
	ret

/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

/*
 * pagecopy(%rdi=from, %rsi=to)
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax
	movq	%rax,%rdx
	subq	%rax,%rdi
	subq	%rax,%rsi
1:
	/*prefetchnta (%rdi,%rax)*/
	/*addq	$64,%rax*/
	/*jne	1b*/
2:
	movq	(%rdi,%rdx),%rax
	movq	%rax,(%rsi,%rdx)	/* movnti */
	movq	8(%rdi,%rdx),%rax
	movq	%rax,8(%rsi,%rdx)	/* movnti */
	movq	16(%rdi,%rdx),%rax
	movq	%rax,16(%rsi,%rdx)	/* movnti */
	movq	24(%rdi,%rdx),%rax
	movq	%rax,24(%rsi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	2b
	/*sfence*/
	ret

/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax
	movq	%rsi,%rdi
	movq	%rdx,%rcx
	cld
	rep
	stosw
	ret

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
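
/*
 * In C terms the onfault protocol used by every routine below looks
 * roughly like this (sketch only; the real recovery is done by the trap
 * handler, which resumes at the saved handler address instead of
 * retrying the faulting instruction):
 *
 *	int
 *	copy_template(...)
 *	{
 *		struct pcb *pcb = curthread->td_pcb;
 *
 *		pcb->pcb_onfault = copy_fault_handler;	// armed while we run
 *		// ... touch user memory ...
 *		pcb->pcb_onfault = NULL;		// disarm on success
 *		return (0);
 *		// on a fault the trap handler jumps to copy_fault_handler,
 *		// which clears pcb_onfault and returns EFAULT
 *	}
 */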

/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl
	rep
	movsb

done_copyout:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret

/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret

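/*
 * Typical callers treat copyin()/copyout() as the only sanctioned way to
 * move data across the user/kernel boundary, checking the return value
 * for EFAULT.  A hedged usage sketch (the syscall, argument structure and
 * helper names are made up for illustration):
 *
 *	int
 *	sys_example(struct example_args *uap)
 *	{
 *		struct example_req req;
 *		int error;
 *
 *		error = copyin(uap->ureq, &req, sizeof(req));
 *		if (error)
 *			return (error);		// EFAULT on a bad user pointer
 *		req.result = do_work(&req);
 *		return (copyout(&req, uap->ureq, sizeof(req)));
 *	}
 */
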
/*
 * casuword32.  Compare and set user integer.  Returns -1 or the current value.
 *        dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * casuword.  Compare and set user word.  Returns -1 or the current value.
 *        dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret

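/*
 * Both routines implement the usual compare-and-set contract on a user
 * address: the word is replaced with "new" only if it still equals "old",
 * and the value found at the address is returned either way (or -1 after
 * a fault, via fusufault).  Roughly, in C:
 *
 *	u_long
 *	casuword_sketch(volatile u_long *p, u_long old, u_long new)
 *	{
 *		u_long cur = *p;		// atomically, via cmpxchg
 *
 *		if (cur == old)
 *			*p = new;		// part of the same atomic op
 *		return (cur);			// caller compares against old
 *	}
 */
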
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * addr = %rdi
 */

ALTENTRY(fuword64)
ENTRY(fuword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax
	ret

ENTRY(fuword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

ENTRY(fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret

/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 *
 * addr = %rdi, value = %rsi
 *
 * Write a long
 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * Write an int
 */
ENTRY(suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(suword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

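/*
 * The fetch routines return -1 on a fault and the store routines return
 * -1 on a fault and 0 on success, so callers that must distinguish a
 * legitimate user value of -1 need another mechanism.  A hedged usage
 * sketch (WANTED_FLAG and the umtx-style scenario are illustrative only):
 *
 *	long v = fuword(uaddr);
 *	if (v == -1)
 *		return (EFAULT);	// or the user really stored -1
 *	if (suword(uaddr, v | WANTED_FLAG) != 0)
 *		return (EFAULT);
 */
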
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' to 'to', stopping when a NUL character is
 *	reached.  Return ENAMETOOLONG if the string is longer than maxlen,
 *	and EFAULT on protection violations.  If lencopied is non-NULL,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = to, %rsi = from */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8
	movq	%r8,(%r9)
1:
	ret

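/*
 * A hedged usage sketch for copyinstr(): pulling a pathname from
 * userland into a fixed-size kernel buffer (variable names are
 * illustrative):
 *
 *	char path[MAXPATHLEN];
 *	size_t len;
 *	int error;
 *
 *	error = copyinstr(upath, path, sizeof(path), &len);
 *	if (error)		// EFAULT or ENAMETOOLONG
 *		return (error);
 *	// "len" counts the bytes copied, including the terminating NUL
 */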

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8
	movq	%r8,(%rcx)
7:
	ret

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

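/*
 * These are the kernel's minimal setjmp()/longjmp(): only the callee-saved
 * registers, the stack pointer and the return address are preserved, and
 * longjmp() always makes the original setjmp() call return 1.  A hedged
 * usage sketch in the debugger-recovery style (names and exact prototypes
 * are illustrative):
 *
 *	static jmp_buf db_jmpbuf;
 *
 *	if (setjmp(db_jmpbuf) != 0) {
 *		// reached via longjmp() after a recoverable fault
 *		report_error();
 *	} else {
 *		risky_access();		// a fault handler may longjmp() back
 *	}
 */
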
/*
 * Support for reading MSRs in a safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed to by %ecx.  Returns
				   high 32 bits in %edx, low 32 bits in %eax */
	salq	$32,%rdx	/* shift %rdx left by 32 bits */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret

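/*
 * rdmsr_safe() stores the 64-bit MSR value rebuilt from the %edx:%eax
 * pair and returns 0, or returns EFAULT if the rdmsr trapped (e.g. a #GP
 * on an unimplemented MSR).  Hedged usage sketch (MSR_EXAMPLE is a
 * placeholder, not a real constant):
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(MSR_EXAMPLE, &val) != 0)
 *		return;			// MSR not readable on this CPU
 *	// the combining step above is equivalent to:
 *	// val = ((uint64_t)edx << 32) | eax;
 */
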
/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret

/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl bbhead
bbhead:
	.quad 0

	.text
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)
	movq	bbhead,%rax
	movq	%rax,32(%rdi)
	movq	%rdi,bbhead
	NON_GPROF_RET

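/*
 * __bb_init_func() marks a basic-block profile object as initialized and
 * pushes it onto the bbhead list so kernbb can walk the chain later.
 * Roughly, assuming a gcc "struct bb" whose first word is the init flag
 * and whose link field sits at offset 32 (layout implied by the offsets
 * used above; field names are assumptions):
 *
 *	void
 *	__bb_init_func_sketch(struct bb *bb)
 *	{
 *		bb->zero_word = 1;	// mark initialized
 *		bb->next = bbhead;	// link into the global chain
 *		bbhead = bb;
 *	}
 */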