/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <cpu/lwbuf.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <assert.h>

#include <sys/stat.h>
#include <sys/mman.h>

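/*
 * Atomically compare-and-exchange the 64-bit word at the user address *p:
 * if it contains oldval it is replaced with newval.  Returns the value
 * previously found at the address, or (uint64_t)-1 if the address could
 * not be faulted in.
 */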
uint64_t
casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile uint64_t *dest;
	uint64_t res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

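/*
 * 32-bit version of casu64().  Returns the value previously found at the
 * user address *p, or (u_int)-1 if the address could not be faulted in.
 */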
u_int
casu32(volatile u_int *p, u_int oldval, u_int newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile u_int *dest;
	u_int res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

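/*
 * Atomically exchange the 64-bit word at the user address *p with val.
 * Returns the previous value, or (uint64_t)-1 if the address could not
 * be faulted in.
 */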
uint64_t
swapu64(volatile uint64_t *p, uint64_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	uint64_t res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	vm_page_dirty(m);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

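/*
 * 32-bit version of swapu64().
 */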
uint32_t
swapu32(volatile uint32_t *p, uint32_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	u_int res;
	int error;
	int busy;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			      val);
	vm_page_dirty(m);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

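/*
 * Copy a NUL-terminated string from kernel space to kernel space, copying
 * at most len bytes.  The number of bytes copied, including the
 * terminator, is returned in (*lencopied).
 *
 * Returns 0 on success, ENAMETOOLONG if no terminator was found within
 * len bytes.
 */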
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	size_t i;

	for (i = 0; i < len; ++i) {
		if ((((char *)kdaddr)[i] = ((const char *)kfaddr)[i]) == 0) {
			if (lencopied)
				*lencopied = i + 1;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}

/*
 * Copies a NUL-terminated string from user space to kernel space.
 * The number of bytes copied, including the terminator, is returned in
 * (*res).
 *
 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
{
	int error;
	size_t n;
	const char *uptr = udaddr;
	char *kptr = kaddr;

	if (res)
		*res = 0;
	while (len) {
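		/*
		 * Copy at most 32 bytes at a time and never cross a page
		 * boundary, scanning for the terminating NUL as we go.
		 */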
		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
		if (n > 32)
			n = 32;
		if (n > len)
			n = len;
		if ((error = copyin(uptr, kptr, n)) != 0)
			return(error);
		while (n) {
			if (res)
				++*res;
			if (*kptr == 0)
				return(0);
			++kptr;
			++uptr;
			--n;
			--len;
		}
	}
	return(ENAMETOOLONG);
}

/*
 * Copy a binary buffer from user space to kernel space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ,
				  VM_FAULT_NORMAL,
				  &error, NULL);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
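		/*
		 * Map the held page with a light weight buffer and copy
		 * from it into the kernel buffer.
		 */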
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
		      kaddr, n);
		len -= n;
		udaddr = (const char *)udaddr + n;
		kaddr = (char *)kaddr + n;
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	int busy;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL,
				  &error, &busy);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
			     ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		vm_page_dirty(m);
		lwbuf_free(lwb);
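		/*
		 * vm_fault_page() returned the page either busied or
		 * merely held depending on 'busy'; release it accordingly.
		 */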
		if (busy)
			vm_page_wakeup(m);
		else
			vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Fetch the byte at the specified user address.  Returns -1 on failure.
 */
int
fubyte(const uint8_t *base)
{
	uint8_t c;

	if (copyin(base, &c, 1) == 0)
		return((int)c);
	return(-1);
}

/*
 * Store a byte at the specified user address.  Returns -1 on failure.
 */
int
subyte(uint8_t *base, uint8_t byte)
{
	uint8_t c = byte;

	if (copyout(&c, base, 1) == 0)
		return(0);
	return(-1);
}

/*
 * Fetch a word (integer, 32 bits) from user space
 */
int32_t
fuword32(const uint32_t *base)
{
	uint32_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Fetch a word (integer, 64 bits) from user space
 */
int64_t
fuword64(const uint64_t *base)
{
	uint64_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Store a word (integer, 64 bits) to user space
 */
int
suword64(uint64_t *base, uint64_t word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}

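/*
 * Store a word (integer, 32 bits) to user space
 */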
int
suword32(uint32_t *base, int word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}