1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <cpu/lwbuf.h>
38 #include <vm/vm_page.h>
39 #include <vm/vm_extern.h>
40 #include <assert.h>
41 
42 #include <sys/stat.h>
43 #include <sys/mman.h>
44 
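/*
 * Atomically compare and exchange the 64-bit value at the user address
 * (*p): if it contains oldval, replace it with newval.  The backing
 * page is faulted in and the exchange is performed on the page's
 * kernel DMAP mapping.
 *
 * Returns the value previously read from (*p), or (uint64_t)-1 if the
 * address could not be faulted in or the operand spans a page boundary.
 */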
45 uint64_t
46 casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
47 {
48 	struct vmspace *vm = curproc->p_vmspace;
49 	vm_offset_t kva;
50 	vm_page_t m;
51 	volatile uint64_t *dest;
52 	uint64_t res;
53 	int error;
54 	int busy;
55 
	/*
	 * XXX The operand would span a page boundary, which the
	 * single-page fault-and-map approach below cannot handle; fail.
	 */
57 	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
58 		return -1;
59 
60 	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
61 			  VM_PROT_READ|VM_PROT_WRITE,
62 			  VM_FAULT_NORMAL,
63 			  &error, &busy);
64 	if (error)
65 		return -1;
66 
67 	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
68 	dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK));
69 	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgq %2,%1"
			 : "+a" (res), "=m" (*dest)
			 : "r" (newval), "m" (*dest)
			 : "memory");
74 
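	/*
	 * vm_fault_page() returned the page either busied or merely
	 * held; release it the same way.
	 */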
75 	if (busy)
76 		vm_page_wakeup(m);
77 	else
78 		vm_page_unhold(m);
79 
80 	return res;
81 }
82 
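/*
 * Atomically compare and exchange the 32-bit value at the user address
 * (*p): if it contains oldval, replace it with newval.
 *
 * Returns the value previously read from (*p), or (u_int)-1 if the
 * address could not be faulted in or the operand spans a page boundary.
 */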
83 u_int
84 casu32(volatile u_int *p, u_int oldval, u_int newval)
85 {
86 	struct vmspace *vm = curproc->p_vmspace;
87 	vm_offset_t kva;
88 	vm_page_t m;
89 	volatile u_int *dest;
90 	u_int res;
91 	int error;
92 	int busy;
93 
	/*
	 * XXX The operand would span a page boundary, which the
	 * single-page fault-and-map approach below cannot handle; fail.
	 */
95 	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int))
96 		return -1;
97 
98 	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
99 			  VM_PROT_READ|VM_PROT_WRITE,
100 			  VM_FAULT_NORMAL,
101 			  &error, &busy);
102 	if (error)
103 		return -1;
104 
105 	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
106 	dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK));
107 	res = oldval;
	__asm __volatile(MPLOCKED "cmpxchgl %2,%1"
			 : "+a" (res), "=m" (*dest)
			 : "r" (newval), "m" (*dest)
			 : "memory");
112 
113 	if (busy)
114 		vm_page_wakeup(m);
115 	else
116 		vm_page_unhold(m);
117 
118 	return res;
119 }
120 
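/*
 * Atomically exchange the 64-bit value at the user address (*p) with
 * val, returning the previous contents.
 *
 * Returns (uint64_t)-1 if the address could not be faulted in or the
 * operand spans a page boundary.
 */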
121 uint64_t
122 swapu64(volatile uint64_t *p, uint64_t val)
123 {
124 	struct vmspace *vm = curproc->p_vmspace;
125 	vm_offset_t kva;
126 	vm_page_t m;
127 	uint64_t res;
128 	int error;
129 	int busy;
130 
	/*
	 * XXX The operand would span a page boundary, which the
	 * single-page fault-and-map approach below cannot handle; fail.
	 */
132 	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
133 		return -1;
134 
135 	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
136 			  VM_PROT_READ|VM_PROT_WRITE,
137 			  VM_FAULT_NORMAL,
138 			  &error, &busy);
139 	if (error)
140 		return -1;
141 
142 	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
143 	res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
144 			       val);
145 	if (busy)
146 		vm_page_wakeup(m);
147 	else
148 		vm_page_unhold(m);
149 
150 	return res;
151 }
152 
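/*
 * Atomically exchange the 32-bit value at the user address (*p) with
 * val, returning the previous contents.
 *
 * Returns (uint32_t)-1 if the address could not be faulted in or the
 * operand spans a page boundary.
 */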
153 uint32_t
154 swapu32(volatile uint32_t *p, uint32_t val)
155 {
156 	struct vmspace *vm = curproc->p_vmspace;
157 	vm_offset_t kva;
158 	vm_page_t m;
	uint32_t res;
160 	int error;
161 	int busy;
162 
	/*
	 * XXX The operand would span a page boundary, which the
	 * single-page fault-and-map approach below cannot handle; fail.
	 */
	if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint32_t))
165 		return -1;
166 
167 	m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
168 			  VM_PROT_READ|VM_PROT_WRITE,
169 			  VM_FAULT_NORMAL,
170 			  &error, &busy);
171 	if (error)
172 		return -1;
173 
174 	kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
175 	res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
176 			       val);
177 	if (busy)
178 		vm_page_wakeup(m);
179 	else
180 		vm_page_unhold(m);
181 
182 	return res;
183 }
184 
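/*
 * Copy a NUL-terminated string from kernel space to kernel space,
 * copying at most len bytes.  The number of bytes copied, including
 * the terminator, is returned in (*lencopied) if it is non-NULL.
 *
 * Returns 0 on success, ENAMETOOLONG if no terminator was found
 * within len bytes.
 */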
185 int
186 copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
187 {
188 	size_t i;
189 
190 	for (i = 0; i < len; ++i) {
191 		if ((((char *)kdaddr)[i] = ((const char *)kfaddr)[i]) == 0) {
192 			if (lencopied)
193 				*lencopied = i + 1;
194 			return(0);
195 		}
196 	}
197 	return (ENAMETOOLONG);
198 }
199 
200 /*
201  * Copies a NUL-terminated string from user space to kernel space.
202  * The number of bytes copied, including the terminator, is returned in
203  * (*res).
204  *
205  * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
206  */
207 int
208 copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
209 {
210 	int error;
211 	size_t n;
212 	const char *uptr = udaddr;
213 	char *kptr = kaddr;
214 
215 	if (res)
216 		*res = 0;
217 	while (len) {
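		/*
		 * Copy in small (at most 32 byte) chunks that never
		 * cross a page boundary, so we do not read far past
		 * the string's terminating NUL.
		 */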
218 		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
219 		if (n > 32)
220 			n = 32;
221 		if (n > len)
222 			n = len;
223 		if ((error = copyin(uptr, kptr, n)) != 0)
224 			return(error);
225 		while (n) {
226 			if (res)
227 				++*res;
228 			if (*kptr == 0)
229 				return(0);
230 			++kptr;
231 			++uptr;
232 			--n;
233 			--len;
234 		}
235 
236 	}
237 	return(ENAMETOOLONG);
238 }
239 
240 /*
241  * Copy a binary buffer from user space to kernel space.
242  *
243  * Returns 0 on success, EFAULT on failure.
244  */
245 int
246 copyin(const void *udaddr, void *kaddr, size_t len)
247 {
248 	struct vmspace *vm = curproc->p_vmspace;
249 	struct lwbuf *lwb;
250 	struct lwbuf lwb_cache;
251 	vm_page_t m;
252 	int error;
253 	size_t n;
254 
255 	error = 0;
256 	while (len) {
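		/*
		 * Fault the page in for read access; with no busy
		 * pointer supplied the page is returned held.
		 */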
257 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
258 				  VM_PROT_READ,
259 				  VM_FAULT_NORMAL,
260 				  &error, NULL);
261 		if (error)
262 			break;
263 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
264 		if (n > len)
265 			n = len;
266 		lwb = lwbuf_alloc(m, &lwb_cache);
267 		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
268 		      kaddr, n);
269 		len -= n;
270 		udaddr = (const char *)udaddr + n;
271 		kaddr = (char *)kaddr + n;
272 		lwbuf_free(lwb);
273 		vm_page_unhold(m);
274 	}
275 	if (error)
276 		error = EFAULT;
277 	return (error);
278 }
279 
280 /*
281  * Copy a binary buffer from kernel space to user space.
282  *
283  * Returns 0 on success, EFAULT on failure.
284  */
285 int
286 copyout(const void *kaddr, void *udaddr, size_t len)
287 {
288 	struct vmspace *vm = curproc->p_vmspace;
289 	struct lwbuf *lwb;
290 	struct lwbuf lwb_cache;
291 	vm_page_t m;
292 	int error;
293 	int busy;
294 	size_t n;
295 
296 	error = 0;
297 	while (len) {
298 		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
299 				  VM_PROT_READ|VM_PROT_WRITE,
300 				  VM_FAULT_NORMAL,
301 				  &error, &busy);
302 		if (error)
303 			break;
304 		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
305 		if (n > len)
306 			n = len;
307 		lwb = lwbuf_alloc(m, &lwb_cache);
308 		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
309 			     ((vm_offset_t)udaddr & PAGE_MASK), n);
310 		len -= n;
311 		udaddr = (char *)udaddr + n;
312 		kaddr = (const char *)kaddr + n;
313 		lwbuf_free(lwb);
314 		if (busy)
315 			vm_page_wakeup(m);
316 		else
317 			vm_page_unhold(m);
318 	}
319 	if (error)
320 		error = EFAULT;
321 	return (error);
322 }
323 
324 /*
325  * Fetch the byte at the specified user address.  Returns -1 on failure.
326  */
327 int
328 fubyte(const uint8_t *base)
329 {
330 	uint8_t c;
331 
332 	if (copyin(base, &c, 1) == 0)
333 		return((int)c);
334 	return(-1);
335 }
336 
337 /*
338  * Store a byte at the specified user address.  Returns -1 on failure.
339  */
340 int
341 subyte(uint8_t *base, uint8_t byte)
342 {
343 	uint8_t c = byte;
344 
345 	if (copyout(&c, base, 1) == 0)
346 		return(0);
347 	return(-1);
348 }
349 
350 /*
351  * Fetch a word (integer, 32 bits) from user space
352  */
353 int32_t
354 fuword32(const uint32_t *base)
355 {
356 	uint32_t v;
357 
358 	if (copyin(base, &v, sizeof(v)) == 0)
359 		return(v);
360 	return(-1);
361 }
362 
363 /*
 * Fetch a word (integer, 64 bits) from user space
365  */
366 int64_t
367 fuword64(const uint64_t *base)
368 {
369 	uint64_t v;
370 
371 	if (copyin(base, &v, sizeof(v)) == 0)
372 		return(v);
373 	return(-1);
374 }
375 
376 /*
 * Store a word (integer, 64 bits) to user space
378  */
379 int
380 suword64(uint64_t *base, uint64_t word)
381 {
382 	if (copyout(&word, base, sizeof(word)) == 0)
383 		return(0);
384 	return(-1);
385 }
386 
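/*
 * Store a word (integer, 32 bits) to user space
 */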
387 int
suword32(uint32_t *base, uint32_t word)
389 {
390 	if (copyout(&word, base, sizeof(word)) == 0)
391 		return(0);
392 	return(-1);
393 }
394