1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <cpu/lwbuf.h>
38 #include <vm/vm_page.h>
39 #include <vm/vm_extern.h>
40 #include <assert.h>
41 
42 #include <sys/stat.h>
43 #include <sys/mman.h>
44 
45 /*
46  * A bcopy that works dring low level boot, before FP is working
47  */
48 void
49 ovbcopy(const void *src, void *dst, size_t len)
50 {
51 	bcopy(src, dst, len);
52 }
53 
/*
 * Integer-register flavor of bcopy, usable before the FP unit is up.
 * Copies len bytes; overlapping regions are tolerated like bcopy.
 */
void
bcopyi(const void *src, void *dst, size_t len)
{
	const char *s = src;
	char *d = dst;
	size_t i;

	if (d <= s) {
		for (i = 0; i < len; ++i)
			d[i] = s[i];
	} else {
		for (i = len; i > 0; --i)
			d[i - 1] = s[i - 1];
	}
}
59 
/*
 * Atomic compare-and-swap of the u_long at user address p: if *p equals
 * oldval it is replaced by newval.  Returns the value previously found
 * at *p on success; returns -1 if the page cannot be faulted in or if
 * the word straddles a page boundary.  NOTE(review): the -1 failure
 * return is ambiguous with a legitimate stored value of -1.
 */
u_long
casuword(volatile u_long *p, u_long oldval, u_long newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile u_long *dest;
	u_long res;
	int error;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_long))
		return -1;

	/* Fault in and busy/hold the target page with write permission */
	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL, &error);
	if (error)
		return -1;

	/* Access the held page through the kernel's direct map */
	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (u_long *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	/*
	 * Locked cmpxchgq: on return, res (in %rax) holds the value that
	 * was actually found at *dest; equal to oldval iff the swap took.
	 */
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	/* Only a successful exchange modified the page */
	if (res == oldval)
		vm_page_dirty(m);
	vm_page_unhold(m);

	return res;
}
94 
/*
 * Copy a NUL-terminated string within kernel space, examining at most
 * len bytes.  On success returns 0 and, if lencopied is non-NULL,
 * stores the number of bytes copied (including the terminator) in
 * *lencopied.  Returns ENAMETOOLONG if no terminator was found within
 * len bytes.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	const char *src = kfaddr;
	char *dst = kdaddr;
	size_t n = 0;

	while (n < len) {
		char c = src[n];

		dst[n++] = c;
		if (c == 0) {
			if (lencopied)
				*lencopied = n;
			return (0);
		}
	}
	return (ENAMETOOLONG);
}
109 
110 /*
111  * Copies a NUL-terminated string from user space to kernel space.
112  * The number of bytes copied, including the terminator, is returned in
113  * (*res).
114  *
115  * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
116  */
117 int
118 copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
119 {
120 	int error;
121 	size_t n;
122 	const char *uptr = udaddr;
123 	char *kptr = kaddr;
124 
125 	if (res)
126 		*res = 0;
127 	while (len) {
128 		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
129 		if (n > 32)
130 			n = 32;
131 		if (n > len)
132 			n = len;
133 		if ((error = copyin(uptr, kptr, n)) != 0)
134 			return(error);
135 		while (n) {
136 			if (res)
137 				++*res;
138 			if (*kptr == 0)
139 				return(0);
140 			++kptr;
141 			++uptr;
142 			--n;
143 			--len;
144 		}
145 
146 	}
147 	return(ENAMETOOLONG);
148 }
149 
150 /*
151  * Copy a binary buffer from user space to kernel space.
152  *
153  * NOTE: on a real system copyin/copyout are MP safe, but the current
154  * implementation on a vkernel is not so we get the mp lock.
155  *
156  * Returns 0 on success, EFAULT on failure.
157  */
158 int
159 copyin(const void *udaddr, void *kaddr, size_t len)
160 {
161 	struct vmspace *vm = curproc->p_vmspace;
162 	struct lwbuf *lwb;
163 	struct lwbuf lwb_cache;
164 	vm_page_t m;
165 	int error;
166 	size_t n;
167 
168 	error = 0;
169 	while (len) {
170 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
171 				  VM_PROT_READ,
172 				  VM_FAULT_NORMAL, &error);
173 		if (error)
174 			break;
175 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
176 		if (n > len)
177 			n = len;
178 		lwb = lwbuf_alloc(m, &lwb_cache);
179 		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
180 		      kaddr, n);
181 		len -= n;
182 		udaddr = (const char *)udaddr + n;
183 		kaddr = (char *)kaddr + n;
184 		lwbuf_free(lwb);
185 		vm_page_unhold(m);
186 	}
187 	if (error)
188 		error = EFAULT;
189 	return (error);
190 }
191 
192 /*
193  * Copy a binary buffer from kernel space to user space.
194  *
195  * Returns 0 on success, EFAULT on failure.
196  */
197 int
198 copyout(const void *kaddr, void *udaddr, size_t len)
199 {
200 	struct vmspace *vm = curproc->p_vmspace;
201 	struct lwbuf *lwb;
202 	struct lwbuf lwb_cache;
203 	vm_page_t m;
204 	int error;
205 	size_t n;
206 
207 	error = 0;
208 	while (len) {
209 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
210 				  VM_PROT_READ|VM_PROT_WRITE,
211 				  VM_FAULT_NORMAL, &error);
212 		if (error)
213 			break;
214 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
215 		if (n > len)
216 			n = len;
217 		lwb = lwbuf_alloc(m, &lwb_cache);
218 		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
219 			     ((vm_offset_t)udaddr & PAGE_MASK), n);
220 		len -= n;
221 		udaddr = (char *)udaddr + n;
222 		kaddr = (const char *)kaddr + n;
223 		vm_page_dirty(m);
224 		lwbuf_free(lwb);
225 		vm_page_unhold(m);
226 	}
227 	if (error)
228 		error = EFAULT;
229 	return (error);
230 }
231 
232 /*
233  * Fetch the byte at the specified user address.  Returns -1 on failure.
234  */
235 int
236 fubyte(const void *base)
237 {
238 	unsigned char c;
239 
240 	if (copyin(base, &c, 1) == 0)
241 		return((int)c);
242 	return(-1);
243 }
244 
245 /*
246  * Store a byte at the specified user address.  Returns -1 on failure.
247  */
248 int
249 subyte (void *base, int byte)
250 {
251 	unsigned char c = byte;
252 
253 	if (copyout(&c, base, 1) == 0)
254 		return(0);
255 	return(-1);
256 }
257 
258 /*
259  * Fetch a word (integer, 32 bits) from user space
260  */
261 long
262 fuword(const void *base)
263 {
264 	long v;
265 
266 	if (copyin(base, &v, sizeof(v)) == 0)
267 		return(v);
268 	return(-1);
269 }
270 
271 /*
272  * Store a word (integer, 32 bits) to user space
273  */
274 int
275 suword(void *base, long word)
276 {
277 	if (copyout(&word, base, sizeof(word)) == 0)
278 		return(0);
279 	return(-1);
280 }
281 
/*
 * Store a 32-bit integer to user space.  Returns 0 on success, -1 on
 * failure.
 */
int
suword32(void *base, int word)
{
	return (copyout(&word, base, sizeof(word)) == 0 ? 0 : -1);
}
289 
290 /*
291  * Fetch an short word (16 bits) from user space
292  */
293 int
294 fusword(void *base)
295 {
296 	unsigned short sword;
297 
298 	if (copyin(base, &sword, sizeof(sword)) == 0)
299 		return((int)sword);
300 	return(-1);
301 }
302 
303 /*
304  * Store a short word (16 bits) to user space
305  */
306 int
307 susword (void *base, int word)
308 {
309 	unsigned short sword = word;
310 
311 	if (copyout(&sword, base, sizeof(sword)) == 0)
312 		return(0);
313 	return(-1);
314 }
315