/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <cpu/lwbuf.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <assert.h>

#include <sys/stat.h>
#include <sys/mman.h>

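/*
 * Atomically compare-and-swap the 64-bit word at the user address p:
 * if (*p == oldval), set *p = newval.  Returns the value previously
 * read from *p (equal to oldval on success), or (uint64_t)-1 if the
 * access would cross a page boundary or the page cannot be faulted in.
 *
 * The user page is resolved with vm_fault_page() and accessed through
 * its direct-map (DMAP) address rather than through the user mapping.
 */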
uint64_t
casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile uint64_t *dest;
	uint64_t res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

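/*
 * 32-bit version of casu64(): atomically compare-and-swap the 32-bit
 * word at the user address p.  Returns the value previously read
 * (equal to oldval on success), or (u_int)-1 on failure.
 */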
u_int
casu32(volatile u_int *p, u_int oldval, u_int newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile u_int *dest;
	u_int res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

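/*
 * Atomically exchange the 64-bit word at the user address p with val.
 * Returns the previous contents of *p, or (uint64_t)-1 if the access
 * would cross a page boundary or the page cannot be faulted in.
 */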
uint64_t
swapu64(volatile uint64_t *p, uint64_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	uint64_t res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

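/*
 * 32-bit version of swapu64(): atomically exchange the 32-bit word at
 * the user address p with val, returning the previous contents of *p,
 * or (uint32_t)-1 on failure.
 */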
uint32_t
swapu32(volatile uint32_t *p, uint32_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	u_int res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

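/*
 * Atomically add val to the 64-bit word at the user address p.
 * Returns the previous contents of *p, or (uint64_t)-1 if the access
 * would cross a page boundary or the page cannot be faulted in.
 */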
uint64_t
fuwordadd64(volatile uint64_t *p, uint64_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	uint64_t res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_fetchadd_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

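/*
 * 32-bit version of fuwordadd64(): atomically add val to the 32-bit
 * word at the user address p, returning the previous contents of *p,
 * or (uint32_t)-1 on failure.
 */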
uint32_t
fuwordadd32(volatile uint32_t *p, uint32_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	u_int res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_fetchadd_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

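/*
 * Copy a NUL-terminated string from kernel space to kernel space,
 * copying at most len bytes.  The number of bytes copied, including
 * the terminator, is returned in (*lencopied).
 *
 * Returns 0 on success, ENAMETOOLONG if no terminator was found
 * within len bytes.
 */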
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	size_t i;

	for (i = 0; i < len; ++i) {
		if ((((char *)kdaddr)[i] = ((const char *)kfaddr)[i]) == 0) {
			if (lencopied)
				*lencopied = i + 1;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}

/*
 * Copies a NUL-terminated string from user space to kernel space.
 * The number of bytes copied, including the terminator, is returned in
 * (*res).
 *
 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
{
	int error;
	size_t n;
	const char *uptr = udaddr;
	char *kptr = kaddr;

	if (res)
		*res = 0;
	while (len) {
		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
		if (n > 32)
			n = 32;
		if (n > len)
			n = len;
		if ((error = copyin(uptr, kptr, n)) != 0)
			return(error);
		while (n) {
			if (res)
				++*res;
			if (*kptr == 0)
				return(0);
			++kptr;
			++uptr;
			--n;
			--len;
		}
	}
	return(ENAMETOOLONG);
}

/*
 * Copy a binary buffer from user space to kernel space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ,
				  VM_FAULT_NORMAL,
				  &error, NULL);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
		      kaddr, n);
		len -= n;
		udaddr = (const char *)udaddr + n;
		kaddr = (char *)kaddr + n;
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	int busy;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL,
				  &error, &busy);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
			     ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		lwbuf_free(lwb);
		if (busy)
			vm_page_wakeup(m);
		else
			vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Fetch the byte at the specified user address.  Returns -1 on failure.
 */
int
fubyte(const uint8_t *base)
{
	uint8_t c;

	if (copyin(base, &c, 1) == 0)
		return((int)c);
	return(-1);
}

/*
 * Store a byte at the specified user address.  Returns -1 on failure.
 */
int
subyte(uint8_t *base, uint8_t byte)
{
	uint8_t c = byte;

	if (copyout(&c, base, 1) == 0)
		return(0);
	return(-1);
}

/*
 * Fetch a word (integer, 32 bits) from user space.
 */
int32_t
fuword32(const uint32_t *base)
{
	uint32_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Fetch a word (integer, 64 bits) from user space.
 */
int64_t
fuword64(const uint64_t *base)
{
	uint64_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Store a word (integer, 64 bits) to user space.
 */
int
suword64(uint64_t *base, uint64_t word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}

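/*
 * Store a word (integer, 32 bits) to user space.
 */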
int
suword32(uint32_t *base, int word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}