/* xref: /dragonfly/sys/platform/pc64/x86_64/support.s (revision 0dbf214d) */
1/*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * Copyright (c) 2003 Peter Wemm.
4 * Copyright (c) 2008 The DragonFly Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
32 */
33
34#include <machine/asmacros.h>
35#include <machine/pmap.h>
36
37#include "assym.s"
38
39	ALIGN_DATA
40
41	.text
42
43/*
44 * bzero(ptr:%rdi, bytes:%rsi)
45 *
46 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
47 * a %xmm0 loop on a modern intel cpu.
48 *
 * Do not use non-temporal instructions here as we do not know the caller's
50 * intent.
51 */
ENTRY(bzero)
	movq	%rsi,%rcx		/* %rcx = byte count */
	xorl	%eax,%eax		/* fill pattern 0 (zeroes all of %rax) */
	shrq	$3,%rcx			/* count of whole 64-bit words */
	cld				/* store forwards */
	rep
	stosq				/* zero 8 bytes per iteration */
	movq	%rsi,%rcx
	andq	$7,%rcx			/* 0-7 trailing bytes remain? */
	jnz	1f
	ret
1:	rep
	stosb				/* zero the tail byte-wise */
	ret
END(bzero)
67
68/*
69 * pagezero(ptr:%rdi)
70 *
71 * Using rep stosq is nearly as fast as using %xmm0 on a modern intel cpu,
72 * and about 70% faster than a %rax loop.
73 *
 * Do not use non-temporal instructions here as we do not know the caller's
75 * intent.
76 */
#if 0

/*
 * Alternate rep-stosq implementation of pagezero(), currently compiled
 * out in favor of the explicit store loop below.
 */
ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx	/* PAGE_SIZE/8 qword stores */
	xorl	%eax,%eax		/* fill pattern 0 */
	cld				/* store forwards */
	rep
	stosq
	ret
END(pagezero)

#endif
89
ENTRY(pagezero)
	addq	$4096,%rdi		/* point %rdi past the end of the page */
	movq	$-4096,%rax		/* negative offset counts up to zero */
	ALIGN_TEXT
1:
	movq	$0,(%rdi,%rax,1)	/* zero one qword */
	addq	$8,%rax
	jne	1b			/* until the offset reaches 0 */
	ret
END(pagezero)
100
101/*
102 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
103 */
ENTRY(bcmp)
	movq	%rdx,%rcx		/* %rcx = byte count */
	shrq	$3,%rcx			/* compare whole 64-bit words first */
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f			/* mismatch in the qword part */

	movq	%rdx,%rcx
	andq	$7,%rcx			/* 0-7 trailing bytes to compare? */
	je	1f
	repe
	cmpsb
1:
	setne	%al			/* %al = 1 if the ranges differed */
	movsbl	%al,%eax		/* widen to the int return value */
	ret
END(bcmp)
122
123/*
124 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
125 *
126 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
127 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* movs wants %rsi = src, %rdi = dst */
	movq	%rdx,%rcx		/* %rcx = byte count */

	movq	%rdi,%rax
	subq	%rsi,%rax		/* %rax = dst - src (unsigned) */
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	2f

	cld					/* nope, copy forwards */
	shrq	$3,%rcx				/* copy by 64-bit words */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	jnz	1f
	ret
1:	rep
	movsb				/* copy 0-7 trailing bytes */
	ret

	ALIGN_TEXT
2:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	std					/* string ops decrement */
	decq	%rdi				/* start at the last byte */
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	jz	3f
	rep
	movsb
3:	movq	%rdx,%rcx			/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi				/* step back to qword start */
	subq	$7,%rdi
	rep
	movsq
	cld					/* restore direction flag */
	ret
END(bcopy)
169
/*
 * reset_dbregs(void) - clear all x86 debug registers.
 */
ENTRY(reset_dbregs)
	movq	$0x200,%rax	/* the manual says that bit 10 must be set to 1 */
	movq	%rax,%dr7	/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0	/* clear the four breakpoint address regs */
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6	/* and the debug status register */
	ret
END(reset_dbregs)
181
182/*
183 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
184 *
185 * Note: memcpy does not support overlapping copies
186 */
ENTRY(memcpy)
	movq	%rdi,%r8		/* memcpy returns the original dst */
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	jnz	1f
	movq	%r8,%rax		/* return dst */
	ret
1:	rep
	movsb				/* copy 0-7 trailing bytes */
	movq	%r8,%rax		/* return dst */
	ret
END(memcpy)
204
205/* fillw(pat, base, cnt) */
206/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax		/* %ax = 16-bit fill pattern */
	movq	%rsi,%rdi		/* %rdi = destination */
	movq	%rdx,%rcx		/* %rcx = count of 16-bit words */
	cld				/* store forwards */
	rep
	stosw
	ret
END(fillw)
216
217/*****************************************************************************/
218/* copyout and fubyte family                                                 */
219/*****************************************************************************/
220/*
221 * Access user memory from inside the kernel. These routines should be
222 * the only places that do this.
223 *
224 * These routines set curpcb->onfault for the time they execute. When a
225 * protection violation occurs inside the functions, the trap handler
226 * returns to *curpcb->onfault instead of the function.
227 */
228
229/*
230 * uint64_t:%rax kreadmem64(addr:%rdi)
231 *
232 * Read kernel or user memory with fault protection.
233 */
ENTRY(kreadmem64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$kreadmem64fault,PCB_ONFAULT(%rcx) /* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	(%rdi),%rax			/* the read that may fault */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret

kreadmem64fault:
	/* trap handler lands here on a fault; return (uint64_t)-1 */
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	decq	%rax				/* %rax = -1 */
	ret
END(kreadmem64)
252
253/*
254 * std_copyout(from_kernel, to_user, len)  - MP SAFE
255 *         %rdi,        %rsi,    %rdx
256 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax		/* %rax = current pcb */
	movq	$copyout_fault,PCB_ONFAULT(%rax) /* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault			/* to + len wrapped around */
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault			/* range extends past user space */

	xchgq	%rdi,%rsi			/* %rsi = from (kernel), %rdi = to (user) */
	cld
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* whole 64-bit words first */
	jz	1f
	rep
	movsq
1:	movq	%rdx,%rcx
	andq	$7,%rcx				/* then 0-7 trailing bytes */
	jz	done_copyout
	rep
	movsb

done_copyout:
	xorl	%eax,%eax			/* return 0 on success */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax			/* return EFAULT */
	ret
END(std_copyout)
318
319/*
320 * std_copyin(from_user, to_kernel, len) - MP SAFE
321 *        %rdi,      %rsi,      %rdx
322 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax		/* %rax = current pcb */
	movq	$copyin_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid (no wrap, entirely below the
	 * user/kernel boundary)
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault			/* from + len wrapped around */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault			/* range extends past user space */

	xchgq	%rdi,%rsi			/* %rsi = from (user), %rdi = to (kernel) */
	cld
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy longword-wise */
	jz	1f
	rep
	movsq
1:	movq	%rdx,%rcx
	andq	$7,%rcx				/* copy remaining bytes */
	jz	done_copyin
	rep
	movsb

done_copyin:
	xorl	%eax,%eax			/* return 0 on success */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax			/* return EFAULT */
	ret
END(std_copyin)
369
370/*
371 * casu32 - Compare and set user integer.  Returns -1 or the current value.
372 *          dst = %rdi, old = %rsi, new = %rdx
373 */
ENTRY(casu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax	/* last addressable u32 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(casu32)
399
400/*
401 * swapu32 - Swap int in user space.  ptr = %rdi, val = %rsi
402 */
ENTRY(std_swapu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax	/* last addressable u32 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* new value to store */
	xchgl	%eax,(%rdi)			/* implicitly locked */

	/*
	 * %eax now holds the previous value from user memory (xchg on
	 * a memory operand always succeeds and is implicitly atomic).
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_swapu32)
427
/*
 * fuwordadd32 - Atomically add to a 32-bit word in user space.
 *               ptr = %rdi, qty = %rsi
 */
ENTRY(std_fuwordadd32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax	/* last addressable u32 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* qty to add */
	lock xaddl	%eax,(%rdi)

	/*
	 * %eax now holds the previous value from user memory (xadd
	 * returns the old value; the add itself always succeeds).
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuwordadd32)
452
453/*
454 * casu64 - Compare and set user word.  Returns -1 or the current value.
455 *          dst = %rdi, old = %rsi, new = %rdx
456 */
ENTRY(casu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax	/* last addressable u64 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(casu64)
482
483/*
484 * swapu64 - Swap long in user space.  ptr = %rdi, val = %rsi
485 */
ENTRY(std_swapu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax	/* last addressable u64 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* new value to store */
	xchgq	%rax,(%rdi)			/* implicitly locked */

	/*
	 * %rax now holds the previous value from user memory (xchg on
	 * a memory operand always succeeds and is implicitly atomic).
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_swapu64)
510
/*
 * fuwordadd64 - Atomically add to a 64-bit word in user space.
 *               ptr = %rdi, qty = %rsi
 */
ENTRY(std_fuwordadd64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax	/* last addressable u64 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* value to add */
	lock xaddq	%rax,(%rdi)

	/*
	 * %rax now holds the previous value from user memory (xadd
	 * returns the old value; the add itself always succeeds).
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuwordadd64)
535
536/*
537 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
538 * byte from user memory.  All these functions are MPSAFE.
539 * addr = %rdi
540 */
541
ENTRY(std_fuword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax	/* last addressable u64 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax			/* the read that may fault */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuword64)
556
ENTRY(std_fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax	/* last addressable u32 start */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax			/* zero-extends into %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuword32)
571
ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax	/* last addressable byte */
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax			/* zero-extend the byte */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
fusufault:
	/*
	 * Shared fault target for all the fusu/casu/swapu functions
	 * above and below: disarm fault recovery and return -1.
	 */
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	decq	%rax				/* %rax = -1 */
	ret
END(std_fubyte)
595
596/*
597 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
598 * user memory.  All these functions are MPSAFE.
599 *
600 * addr = %rdi, value = %rsi
601 *
602 * Write a long
603 */
ENTRY(std_suword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax	/* last addressable u64 start */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)			/* the store that may fault */
	xorl	%eax,%eax			/* return 0 on success */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_suword64)
621
622/*
623 * Write an int
624 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax	/* last addressable u32 start */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)			/* the store that may fault */
	xorl	%eax,%eax			/* return 0 on success */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_suword32)
642
ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx		/* %rcx = current pcb */
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax	/* last addressable byte */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)			/* the store that may fault */
	xorl	%eax,%eax			/* return 0 on success */
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_subyte)
661
662/*
663 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
664 *           %rdi, %rsi, %rdx, %rcx
665 *
666 *	copy a string from from to to, stop when a 0 character is reached.
667 *	return ENAMETOOLONG if string is longer than maxlen, and
668 *	EFAULT on protection violations. If lencopied is non-zero,
669 *	return the actual length in *lencopied.
670 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rsi = from (user), %rdi = to (kernel) */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx				/* pre-decremented in the loop */
	cld

2:
	decq	%rdx
	jz	3f				/* ran out of room */

	lodsb					/* copy one byte: %al = *from++ */
	stosb					/*               *to++ = %al   */
	orb	%al,%al
	jnz	2b				/* loop until the NUL byte */

	/* Success -- 0 byte reached */
	decq	%rdx				/* %rdx = residual space */
	xorl	%eax,%eax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi			/* stopped at the user boundary? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */

	testq	%r9,%r9				/* lencopied pointer supplied? */
	jz	1f
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%r9)
1:
	ret
END(std_copyinstr)
733
734/*
735 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
736 *         %rdi, %rsi, %rdx, %rcx
737 */
/*
 * Kernel-to-kernel variant of copyinstr: no user-address bounds
 * checking and no fault recovery.
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	incq	%rdx				/* pre-decremented in the loop */
	cld
1:
	decq	%rdx
	jz	4f				/* ran out of room */
	lodsb					/* copy one byte: %al = *from++ */
	stosb					/*               *to++ = %al   */
	orb	%al,%al
	jnz	1b				/* loop until the NUL byte */

	/* Success -- 0 byte reached */
	decq	%rdx				/* %rdx = residual space */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx			/* lencopied pointer supplied? */
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%rcx)
7:
	ret
END(copystr)
770
771/*
772 * Handling of special x86_64 registers and descriptor tables etc
773 * %rdi
774 */
775/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload all data segment selectors from the new GDT */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
END(lgdt)
798
799/*****************************************************************************/
800/* setjmp, longjmp                                                           */
801/*****************************************************************************/
802
/*
 * setjmp(jmp_buf:%rdi) - save the callee-saved registers, stack
 * pointer and return address; returns 0 on the direct call.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
816
/*
 * longjmp(jmp_buf:%rdi) - restore the context saved by setjmp();
 * execution resumes after the setjmp() call site, which returns 1.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
831
832/*
833 * Support for reading MSRs in the safe manner.
834 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8		/* %r8 = current pcb */
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* recover from #GP on a bad MSR */
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx. Returns
				   high 32 bits in %edx, low in %eax */
	salq	$32,%rdx	/* shift high half into position */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax	/* %rax = full 64-bit MSR value */
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* return 0 on success */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	ret
END(rdmsr_safe)
852
853/*
854 * Support for writing MSRs in the safe manner.
855 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8		/* %r8 = current pcb */
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* recover from #GP on a bad MSR */
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of data */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of data */
	wrmsr			/* Write MSR pointed by %ecx. Accepts
				   high 32 bits in %edx, low in %eax. */
	xorq	%rax,%rax	/* return 0 on success */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	ret
END(wrmsr_safe)
872
873/*
874 * MSR operations fault handler
875 */
	ALIGN_TEXT
msr_onfault:
	/*
	 * Fault target for rdmsr_safe/wrmsr_safe: disarm fault
	 * recovery and return EFAULT.
	 */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret
883