1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <cpu/lwbuf.h>
38 #include <vm/vm_page.h>
39 #include <vm/vm_extern.h>
40 #include <assert.h>
41 
42 #include <sys/stat.h>
43 #include <sys/mman.h>
44 
45 /*
46  * A bcopy that works dring low level boot, before FP is working
47  */
u_long
casuword(volatile u_long *p, u_long oldval, u_long newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile u_long *dest;
	u_long res;
	int error;

	/* XXX No idea how to handle this case in a simple way, just abort */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_long))
		return -1;

	/*
	 * Fault the user page in with write permission and hold it so it
	 * cannot be freed while we operate on it.
	 */
	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL, &error);
	if (error)
		return -1;

	/* Address the word through the direct-map alias of the held page. */
	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (u_long *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	/*
	 * Locked cmpxchgq: if *dest == %rax (res/oldval), store newval into
	 * *dest; otherwise load the current *dest into %rax (res).
	 */
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*dest) \
			 : "r" (newval), "m" (*dest) \
			 : "memory");

	/* The page was modified only if the exchange actually happened. */
	if (res == oldval)
		vm_page_dirty(m);
	vm_page_unhold(m);

	return res;
}
82 
/*
 * Copy a NUL-terminated string from one kernel address to another.
 * At most 'len' bytes are copied.  On success 0 is returned and the
 * number of bytes copied (including the terminator) is stored in
 * *lencopied when it is non-NULL.  If no terminator is found within
 * 'len' bytes, ENAMETOOLONG is returned.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	const char *src = kfaddr;
	char *dst = kdaddr;
	size_t left = len;

	while (left) {
		char c = *src++;

		*dst++ = c;
		--left;
		if (c == 0) {
			if (lencopied)
				*lencopied = len - left;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}
97 
98 /*
99  * Copies a NUL-terminated string from user space to kernel space.
100  * The number of bytes copied, including the terminator, is returned in
101  * (*res).
102  *
103  * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
104  */
105 int
106 copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
107 {
108 	int error;
109 	size_t n;
110 	const char *uptr = udaddr;
111 	char *kptr = kaddr;
112 
113 	if (res)
114 		*res = 0;
115 	while (len) {
116 		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
117 		if (n > 32)
118 			n = 32;
119 		if (n > len)
120 			n = len;
121 		if ((error = copyin(uptr, kptr, n)) != 0)
122 			return(error);
123 		while (n) {
124 			if (res)
125 				++*res;
126 			if (*kptr == 0)
127 				return(0);
128 			++kptr;
129 			++uptr;
130 			--n;
131 			--len;
132 		}
133 
134 	}
135 	return(ENAMETOOLONG);
136 }
137 
138 /*
139  * Copy a binary buffer from user space to kernel space.
140  *
141  * NOTE: on a real system copyin/copyout are MP safe, but the current
142  * implementation on a vkernel is not so we get the mp lock.
143  *
144  * Returns 0 on success, EFAULT on failure.
145  */
146 int
147 copyin(const void *udaddr, void *kaddr, size_t len)
148 {
149 	struct vmspace *vm = curproc->p_vmspace;
150 	struct lwbuf *lwb;
151 	struct lwbuf lwb_cache;
152 	vm_page_t m;
153 	int error;
154 	size_t n;
155 
156 	error = 0;
157 	while (len) {
158 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
159 				  VM_PROT_READ,
160 				  VM_FAULT_NORMAL, &error);
161 		if (error)
162 			break;
163 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
164 		if (n > len)
165 			n = len;
166 		lwb = lwbuf_alloc(m, &lwb_cache);
167 		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
168 		      kaddr, n);
169 		len -= n;
170 		udaddr = (const char *)udaddr + n;
171 		kaddr = (char *)kaddr + n;
172 		lwbuf_free(lwb);
173 		vm_page_unhold(m);
174 	}
175 	if (error)
176 		error = EFAULT;
177 	return (error);
178 }
179 
180 /*
181  * Copy a binary buffer from kernel space to user space.
182  *
183  * Returns 0 on success, EFAULT on failure.
184  */
185 int
186 copyout(const void *kaddr, void *udaddr, size_t len)
187 {
188 	struct vmspace *vm = curproc->p_vmspace;
189 	struct lwbuf *lwb;
190 	struct lwbuf lwb_cache;
191 	vm_page_t m;
192 	int error;
193 	size_t n;
194 
195 	error = 0;
196 	while (len) {
197 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
198 				  VM_PROT_READ|VM_PROT_WRITE,
199 				  VM_FAULT_NORMAL, &error);
200 		if (error)
201 			break;
202 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
203 		if (n > len)
204 			n = len;
205 		lwb = lwbuf_alloc(m, &lwb_cache);
206 		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
207 			     ((vm_offset_t)udaddr & PAGE_MASK), n);
208 		len -= n;
209 		udaddr = (char *)udaddr + n;
210 		kaddr = (const char *)kaddr + n;
211 		vm_page_dirty(m);
212 		lwbuf_free(lwb);
213 		vm_page_unhold(m);
214 	}
215 	if (error)
216 		error = EFAULT;
217 	return (error);
218 }
219 
220 /*
221  * Fetch the byte at the specified user address.  Returns -1 on failure.
222  */
int
fubyte(const void *base)
{
	unsigned char v;

	if (copyin(base, &v, sizeof(v)) != 0)
		return(-1);
	return((int)v);
}
232 
233 /*
234  * Store a byte at the specified user address.  Returns -1 on failure.
235  */
int
subyte (void *base, int byte)
{
	unsigned char v = (unsigned char)byte;

	return (copyout(&v, base, sizeof(v)) == 0 ? 0 : -1);
}
245 
246 /*
247  * Fetch a word (integer, 32 bits) from user space
248  */
long
fuword(const void *base)
{
	long val;

	if (copyin(base, &val, sizeof(val)) != 0)
		return(-1);
	return(val);
}
258 
259 /*
260  * Store a word (integer, 32 bits) to user space
261  */
int
suword(void *base, long word)
{
	return (copyout(&word, base, sizeof(word)) ? -1 : 0);
}
269 
/*
 * Store a 32 bit integer to user space
 */
int
suword32(void *base, int word)
{
	return (copyout(&word, base, sizeof(word)) ? -1 : 0);
}
277 
278 /*
279  * Fetch an short word (16 bits) from user space
280  */
int
fusword(void *base)
{
	unsigned short v;

	if (copyin(base, &v, sizeof(v)) != 0)
		return(-1);
	return((int)v);
}
290 
291 /*
292  * Store a short word (16 bits) to user space
293  */
int
susword (void *base, int word)
{
	unsigned short v = (unsigned short)word;

	return (copyout(&v, base, sizeof(v)) ? -1 : 0);
}
303