1da673940SJordan Gordeev /*
2da673940SJordan Gordeev  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3da673940SJordan Gordeev  *
4da673940SJordan Gordeev  * This code is derived from software contributed to The DragonFly Project
5da673940SJordan Gordeev  * by Matthew Dillon <dillon@backplane.com>
6da673940SJordan Gordeev  *
7da673940SJordan Gordeev  * Redistribution and use in source and binary forms, with or without
8da673940SJordan Gordeev  * modification, are permitted provided that the following conditions
9da673940SJordan Gordeev  * are met:
10da673940SJordan Gordeev  *
11da673940SJordan Gordeev  * 1. Redistributions of source code must retain the above copyright
12da673940SJordan Gordeev  *    notice, this list of conditions and the following disclaimer.
13da673940SJordan Gordeev  * 2. Redistributions in binary form must reproduce the above copyright
14da673940SJordan Gordeev  *    notice, this list of conditions and the following disclaimer in
15da673940SJordan Gordeev  *    the documentation and/or other materials provided with the
16da673940SJordan Gordeev  *    distribution.
17da673940SJordan Gordeev  * 3. Neither the name of The DragonFly Project nor the names of its
18da673940SJordan Gordeev  *    contributors may be used to endorse or promote products derived
19da673940SJordan Gordeev  *    from this software without specific, prior written permission.
20da673940SJordan Gordeev  *
21da673940SJordan Gordeev  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22da673940SJordan Gordeev  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23da673940SJordan Gordeev  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24da673940SJordan Gordeev  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25da673940SJordan Gordeev  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26da673940SJordan Gordeev  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27da673940SJordan Gordeev  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28da673940SJordan Gordeev  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29da673940SJordan Gordeev  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30da673940SJordan Gordeev  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31da673940SJordan Gordeev  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32da673940SJordan Gordeev  * SUCH DAMAGE.
33da673940SJordan Gordeev  */
34da673940SJordan Gordeev 
35da673940SJordan Gordeev #include <sys/types.h>
36da673940SJordan Gordeev #include <sys/systm.h>
370e6594a8SSascha Wildner #include <cpu/lwbuf.h>
38da673940SJordan Gordeev #include <vm/vm_page.h>
39da673940SJordan Gordeev #include <vm/vm_extern.h>
40da673940SJordan Gordeev #include <assert.h>
41da673940SJordan Gordeev 
42da673940SJordan Gordeev #include <sys/stat.h>
43da673940SJordan Gordeev #include <sys/mman.h>
44da673940SJordan Gordeev 
/*
 * A bcopy that works during low level boot, before FP is working.
 * Simply forwards to bcopy (which is safe for overlapping regions).
 */
void
ovbcopy(const void *src, void *dst, size_t len)
{
	bcopy(src, dst, len);
}
53da673940SJordan Gordeev 
/*
 * bcopyi - forwards to bcopy on the vkernel.
 *
 * NOTE(review): on real hardware a bcopyi variant traditionally copies
 * into instruction space; presumably no such distinction is needed in
 * the vkernel — confirm.
 */
void
bcopyi(const void *src, void *dst, size_t len)
{
	bcopy(src, dst, len);
}
59da673940SJordan Gordeev 
/*
 * Copy a NUL-terminated string from kernel space to kernel space,
 * copying at most len bytes (including the terminator).
 *
 * On success returns 0 and, if lencopied is non-NULL, stores the
 * number of bytes copied including the NUL in *lencopied.  If no NUL
 * is found within len bytes, returns ENAMETOOLONG (lencopied is left
 * untouched in that case).
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	const char *src = kfaddr;
	char *dst = kdaddr;
	size_t n = 0;

	while (n < len) {
		char c = src[n];

		dst[n] = c;
		++n;
		if (c == '\0') {
			if (lencopied)
				*lencopied = n;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}
74da673940SJordan Gordeev 
75da673940SJordan Gordeev /*
76da673940SJordan Gordeev  * Copies a NUL-terminated string from user space to kernel space.
77da673940SJordan Gordeev  * The number of bytes copied, including the terminator, is returned in
78da673940SJordan Gordeev  * (*res).
79da673940SJordan Gordeev  *
80da673940SJordan Gordeev  * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
81da673940SJordan Gordeev  */
82da673940SJordan Gordeev int
83da673940SJordan Gordeev copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
84da673940SJordan Gordeev {
85da673940SJordan Gordeev 	int error;
86da673940SJordan Gordeev 	size_t n;
87da673940SJordan Gordeev 	const char *uptr = udaddr;
88da673940SJordan Gordeev 	char *kptr = kaddr;
89da673940SJordan Gordeev 
90da673940SJordan Gordeev 	if (res)
91da673940SJordan Gordeev 		*res = 0;
92da673940SJordan Gordeev 	while (len) {
93da673940SJordan Gordeev 		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
94da673940SJordan Gordeev 		if (n > 32)
95da673940SJordan Gordeev 			n = 32;
96da673940SJordan Gordeev 		if (n > len)
97da673940SJordan Gordeev 			n = len;
98da673940SJordan Gordeev 		if ((error = copyin(uptr, kptr, n)) != 0)
99da673940SJordan Gordeev 			return(error);
100da673940SJordan Gordeev 		while (n) {
101da673940SJordan Gordeev 			if (res)
102da673940SJordan Gordeev 				++*res;
103da673940SJordan Gordeev 			if (*kptr == 0)
104da673940SJordan Gordeev 				return(0);
105da673940SJordan Gordeev 			++kptr;
106da673940SJordan Gordeev 			++uptr;
107da673940SJordan Gordeev 			--n;
108da673940SJordan Gordeev 			--len;
109da673940SJordan Gordeev 		}
110da673940SJordan Gordeev 
111da673940SJordan Gordeev 	}
112da673940SJordan Gordeev 	return(ENAMETOOLONG);
113da673940SJordan Gordeev }
114da673940SJordan Gordeev 
115da673940SJordan Gordeev /*
116da673940SJordan Gordeev  * Copy a binary buffer from user space to kernel space.
117da673940SJordan Gordeev  *
118da673940SJordan Gordeev  * NOTE: on a real system copyin/copyout are MP safe, but the current
119da673940SJordan Gordeev  * implementation on a vkernel is not so we get the mp lock.
120da673940SJordan Gordeev  *
121da673940SJordan Gordeev  * Returns 0 on success, EFAULT on failure.
122da673940SJordan Gordeev  */
123da673940SJordan Gordeev int
124da673940SJordan Gordeev copyin(const void *udaddr, void *kaddr, size_t len)
125da673940SJordan Gordeev {
126da673940SJordan Gordeev 	struct vmspace *vm = curproc->p_vmspace;
1270e6594a8SSascha Wildner 	struct lwbuf *lwb;
128*7c4633adSMatthew Dillon 	struct lwbuf lwb_cache;
129da673940SJordan Gordeev 	vm_page_t m;
130da673940SJordan Gordeev 	int error;
131da673940SJordan Gordeev 	size_t n;
132da673940SJordan Gordeev 
133da673940SJordan Gordeev 	error = 0;
134da673940SJordan Gordeev 	while (len) {
135da673940SJordan Gordeev 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
136da673940SJordan Gordeev 				  VM_PROT_READ,
137da673940SJordan Gordeev 				  VM_FAULT_NORMAL, &error);
138da673940SJordan Gordeev 		if (error)
139da673940SJordan Gordeev 			break;
140da673940SJordan Gordeev 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
141da673940SJordan Gordeev 		if (n > len)
142da673940SJordan Gordeev 			n = len;
1437a683a24SMatthew Dillon 		lwb = lwbuf_alloc(m, &lwb_cache);
1440e6594a8SSascha Wildner 		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
145da673940SJordan Gordeev 		      kaddr, n);
146da673940SJordan Gordeev 		len -= n;
147da673940SJordan Gordeev 		udaddr = (const char *)udaddr + n;
148da673940SJordan Gordeev 		kaddr = (char *)kaddr + n;
1490e6594a8SSascha Wildner 		lwbuf_free(lwb);
150573fb415SMatthew Dillon 		vm_page_unhold(m);
151da673940SJordan Gordeev 	}
152da673940SJordan Gordeev 	return (error);
153da673940SJordan Gordeev }
154da673940SJordan Gordeev 
155da673940SJordan Gordeev /*
156da673940SJordan Gordeev  * Copy a binary buffer from kernel space to user space.
157da673940SJordan Gordeev  *
158da673940SJordan Gordeev  * Returns 0 on success, EFAULT on failure.
159da673940SJordan Gordeev  */
160da673940SJordan Gordeev int
161da673940SJordan Gordeev copyout(const void *kaddr, void *udaddr, size_t len)
162da673940SJordan Gordeev {
163da673940SJordan Gordeev 	struct vmspace *vm = curproc->p_vmspace;
1640e6594a8SSascha Wildner 	struct lwbuf *lwb;
1657a683a24SMatthew Dillon 	struct lwbuf lwb_cache;
166da673940SJordan Gordeev 	vm_page_t m;
167da673940SJordan Gordeev 	int error;
168da673940SJordan Gordeev 	size_t n;
169da673940SJordan Gordeev 
170da673940SJordan Gordeev 	error = 0;
171da673940SJordan Gordeev 	while (len) {
172da673940SJordan Gordeev 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
173da673940SJordan Gordeev 				  VM_PROT_READ|VM_PROT_WRITE,
174da673940SJordan Gordeev 				  VM_FAULT_NORMAL, &error);
175da673940SJordan Gordeev 		if (error)
176da673940SJordan Gordeev 			break;
177da673940SJordan Gordeev 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
178da673940SJordan Gordeev 		if (n > len)
179da673940SJordan Gordeev 			n = len;
1807a683a24SMatthew Dillon 		lwb = lwbuf_alloc(m, &lwb_cache);
1810e6594a8SSascha Wildner 		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
182da673940SJordan Gordeev 			     ((vm_offset_t)udaddr & PAGE_MASK), n);
183da673940SJordan Gordeev 		len -= n;
184da673940SJordan Gordeev 		udaddr = (char *)udaddr + n;
185da673940SJordan Gordeev 		kaddr = (const char *)kaddr + n;
186da673940SJordan Gordeev 		vm_page_dirty(m);
1870e6594a8SSascha Wildner 		lwbuf_free(lwb);
188573fb415SMatthew Dillon 		vm_page_unhold(m);
189da673940SJordan Gordeev 	}
190da673940SJordan Gordeev 	return (error);
191da673940SJordan Gordeev }
192da673940SJordan Gordeev 
/*
 * Fetch the byte at the specified user address.  Returns -1 on failure.
 * (Note: -1 is ambiguous since 0xff also fetches as a valid byte value
 * 255, not -1; only a copyin fault produces -1.)
 */
int
fubyte(const void *base)
{
	unsigned char c;

	if (copyin(base, &c, 1) != 0)
		return(-1);
	return((int)c);
}
205da673940SJordan Gordeev 
/*
 * Store a byte at the specified user address.
 * Returns 0 on success, -1 on failure.
 */
int
subyte (void *base, int byte)
{
	unsigned char c = byte;

	return(copyout(&c, base, 1) == 0 ? 0 : -1);
}
218da673940SJordan Gordeev 
/*
 * Fetch a word (a long) from user space.  Returns -1 on failure,
 * which is indistinguishable from a stored value of -1.
 */
long
fuword(const void *base)
{
	long v;

	if (copyin(base, &v, sizeof(v)) != 0)
		return(-1);
	return(v);
}
231da673940SJordan Gordeev 
/*
 * Store a word (a long) to user space.
 * Returns 0 on success, -1 on failure.
 */
int
suword(void *base, long word)
{
	return(copyout(&word, base, sizeof(word)) == 0 ? 0 : -1);
}
242da673940SJordan Gordeev 
/*
 * Fetch a short word (16 bits) from user space.
 * Returns the zero-extended value, or -1 on failure.
 */
int
fusword(void *base)
{
	unsigned short sw;

	if (copyin(base, &sw, sizeof(sw)) != 0)
		return(-1);
	return((int)sw);
}
255da673940SJordan Gordeev 
/*
 * Store a short word (16 bits) to user space.
 * Returns 0 on success, -1 on failure.
 */
int
susword (void *base, int word)
{
	unsigned short sw = word;

	return(copyout(&sw, base, sizeof(sw)) == 0 ? 0 : -1);
}
268