/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <cpu/lwbuf.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <assert.h>

#include <sys/stat.h>
#include <sys/mman.h>

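/*
 * Compare-and-set the 64-bit word at user address p: if it matches
 * oldval it is atomically replaced with newval.  Returns the value
 * originally read, or (uint64_t)-1 if the word could not be accessed.
 */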
uint64_t
casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile uint64_t *dest;
	uint64_t res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1"
			 : "+a" (res), "=m" (*dest)
			 : "r" (newval), "m" (*dest)
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

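/*
 * Compare-and-set the 32-bit word at user address p: if it matches
 * oldval it is atomically replaced with newval.  Returns the value
 * originally read, or (u_int)-1 if the word could not be accessed.
 */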
u_int
casu32(volatile u_int *p, u_int oldval, u_int newval)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	volatile u_int *dest;
	u_int res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK));
	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgl %2,%1"
			 : "+a" (res), "=m" (*dest)
			 : "r" (newval), "m" (*dest)
			 : "memory");

	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

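/*
 * Atomically exchange the 64-bit word at user address p with val.
 * Returns the previous value, or (uint64_t)-1 if the word could not
 * be accessed.
 */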
uint64_t
swapu64(volatile uint64_t *p, uint64_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	uint64_t res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			       val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

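/*
 * Atomically exchange the 32-bit word at user address p with val.
 * Returns the previous value, or (uint32_t)-1 if the word could not
 * be accessed.
 */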
uint32_t
swapu32(volatile uint32_t *p, uint32_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	u_int res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
			      val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

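/*
 * Atomically add val to the 64-bit word at user address p.  Returns
 * the previous value, or (uint64_t)-1 if the word could not be
 * accessed.
 */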
uint64_t
fuwordadd64(volatile uint64_t *p, uint64_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	uint64_t res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_fetchadd_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
				   val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

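/*
 * Atomically add val to the 32-bit word at user address p.  Returns
 * the previous value, or (uint32_t)-1 if the word could not be
 * accessed.
 */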
uint32_t
fuwordadd32(volatile uint32_t *p, uint32_t val)
{
	struct vmspace *vm = curproc->p_vmspace;
	vm_offset_t kva;
	vm_page_t m;
	u_int res;
	int error;
	int busy;

	/* XXX punt if the word straddles a page boundary */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
		return -1;

	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
			  VM_PROT_READ|VM_PROT_WRITE,
			  VM_FAULT_NORMAL,
			  &error, &busy);
	if (error)
		return -1;

	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	res = atomic_fetchadd_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
				  val);
	if (busy)
		vm_page_wakeup(m);
	else
		vm_page_unhold(m);

	return res;
}

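/*
 * Copy a NUL-terminated string from one kernel address to another.
 * The number of bytes copied, including the terminator, is returned
 * in (*lencopied) on success.
 *
 * Returns 0 on success, ENAMETOOLONG if no terminator was found
 * within len bytes.
 */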
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	size_t i;

	for (i = 0; i < len; ++i) {
		if ((((char *)kdaddr)[i] = ((const char *)kfaddr)[i]) == 0) {
			if (lencopied)
				*lencopied = i + 1;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}

/*
 * Copies a NUL-terminated string from user space to kernel space.
 * The number of bytes copied, including the terminator, is returned in
 * (*res).
 *
 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
{
	int error;
	size_t n;
	const char *uptr = udaddr;
	char *kptr = kaddr;

	if (res)
		*res = 0;
	while (len) {
		/*
		 * Copy in chunks of at most 32 bytes, never crossing a
		 * page boundary, so we do not drag in much user data
		 * beyond the terminator.
		 */
		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
		if (n > 32)
			n = 32;
		if (n > len)
			n = len;
		if ((error = copyin(uptr, kptr, n)) != 0)
			return(error);
		while (n) {
			if (res)
				++*res;
			if (*kptr == 0)
				return(0);
			++kptr;
			++uptr;
			--n;
			--len;
		}
	}
	return(ENAMETOOLONG);
}

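/*
 * Typical copyinstr() use from a syscall path (hypothetical example,
 * names are illustrative only):
 *
 *	char path[MAXPATHLEN];
 *	size_t len;
 *
 *	error = copyinstr(uap->path, path, sizeof(path), &len);
 *	if (error)
 *		return (error);
 */
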
/*
 * Copy a binary buffer from user space to kernel space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ,
				  VM_FAULT_NORMAL,
				  &error, NULL);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
		      kaddr, n);
		len -= n;
		udaddr = (const char *)udaddr + n;
		kaddr = (char *)kaddr + n;
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	int busy;
	size_t n;

	error = 0;
	while (len) {
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL,
				  &error, &busy);
		if (error)
			break;
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
			     ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		lwbuf_free(lwb);
		if (busy)
			vm_page_wakeup(m);
		else
			vm_page_unhold(m);
	}
	if (error)
		error = EFAULT;
	return (error);
}

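/*
 * Typical copyin()/copyout() round trip from a syscall path
 * (hypothetical example, names are illustrative only):
 *
 *	struct timespec ts;
 *
 *	error = copyin(uap->ts, &ts, sizeof(ts));
 *	if (error == 0)
 *		error = copyout(&ts, uap->ts, sizeof(ts));
 */
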
/*
 * Fetch the byte at the specified user address.  Returns -1 on failure.
 */
int
fubyte(const uint8_t *base)
{
	uint8_t c;

	if (copyin(base, &c, 1) == 0)
		return((int)c);
	return(-1);
}

/*
 * Store a byte at the specified user address.  Returns -1 on failure.
 */
int
subyte(uint8_t *base, uint8_t byte)
{
	uint8_t c = byte;

	if (copyout(&c, base, 1) == 0)
		return(0);
	return(-1);
}

/*
 * Fetch a word (integer, 32 bits) from user space
 */
int32_t
fuword32(const uint32_t *base)
{
	uint32_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Fetch a word (integer, 64 bits) from user space
 */
int64_t
fuword64(const uint64_t *base)
{
	uint64_t v;

	if (copyin(base, &v, sizeof(v)) == 0)
		return(v);
	return(-1);
}

/*
 * Store a word (integer, 64 bits) to user space
 */
int
suword64(uint64_t *base, uint64_t word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}

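/*
 * Store a word (integer, 32 bits) to user space
 */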
int
suword32(uint32_t *base, int word)
{
	if (copyout(&word, base, sizeof(word)) == 0)
		return(0);
	return(-1);
}