1 /* 2 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 */ 34 35 #include <sys/types.h> 36 #include <sys/systm.h> 37 #include <cpu/lwbuf.h> 38 #include <vm/vm_page.h> 39 #include <vm/vm_extern.h> 40 #include <assert.h> 41 42 #include <sys/stat.h> 43 #include <sys/mman.h> 44 45 uint64_t 46 casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval) 47 { 48 struct vmspace *vm = curproc->p_vmspace; 49 vm_offset_t kva; 50 vm_page_t m; 51 volatile uint64_t *dest; 52 uint64_t res; 53 int error; 54 int busy; 55 56 /* XXX No idea how to handle this case in a simple way, just abort */ 57 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t)) 58 return -1; 59 60 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p), 61 VM_PROT_READ|VM_PROT_WRITE, 62 VM_FAULT_NORMAL, 63 &error, &busy); 64 if (error) 65 return -1; 66 KKASSERT(m->busy == 0); 67 68 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 69 dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)); 70 res = oldval; 71 __asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \ 72 : "+a" (res), "=m" (*dest) \ 73 : "r" (newval), "m" (*dest) \ 74 : "memory"); 75 76 if (busy) 77 vm_page_wakeup(m); 78 else 79 vm_page_unhold(m); 80 81 return res; 82 } 83 84 u_int 85 casu32(volatile u_int *p, u_int oldval, u_int newval) 86 { 87 struct vmspace *vm = curproc->p_vmspace; 88 vm_offset_t kva; 89 vm_page_t m; 90 volatile u_int *dest; 91 u_int res; 92 int error; 93 int busy; 94 95 /* XXX No idea how to handle this case in a simple way, just abort */ 96 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int)) 97 return -1; 98 99 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p), 100 VM_PROT_READ|VM_PROT_WRITE, 101 VM_FAULT_NORMAL, 102 &error, &busy); 103 if (error) 104 return -1; 105 KKASSERT(m->busy == 0); 106 107 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 108 dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)); 109 res = oldval; 110 __asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \ 111 : "+a" (res), "=m" (*dest) \ 112 : "r" (newval), "m" (*dest) \ 113 : "memory"); 114 115 if (busy) 
116 vm_page_wakeup(m); 117 else 118 vm_page_unhold(m); 119 120 return res; 121 } 122 123 uint64_t 124 swapu64(volatile uint64_t *p, uint64_t val) 125 { 126 struct vmspace *vm = curproc->p_vmspace; 127 vm_offset_t kva; 128 vm_page_t m; 129 uint64_t res; 130 int error; 131 int busy; 132 133 /* XXX No idea how to handle this case in a simple way, just abort */ 134 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t)) 135 return -1; 136 137 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p), 138 VM_PROT_READ|VM_PROT_WRITE, 139 VM_FAULT_NORMAL, 140 &error, &busy); 141 if (error) 142 return -1; 143 KKASSERT(m->busy == 0); 144 145 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 146 res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)), 147 val); 148 if (busy) 149 vm_page_wakeup(m); 150 else 151 vm_page_unhold(m); 152 153 return res; 154 } 155 156 uint32_t 157 swapu32(volatile uint32_t *p, uint32_t val) 158 { 159 struct vmspace *vm = curproc->p_vmspace; 160 vm_offset_t kva; 161 vm_page_t m; 162 u_int res; 163 int error; 164 int busy; 165 166 /* XXX No idea how to handle this case in a simple way, just abort */ 167 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t)) 168 return -1; 169 170 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p), 171 VM_PROT_READ|VM_PROT_WRITE, 172 VM_FAULT_NORMAL, 173 &error, &busy); 174 if (error) 175 return -1; 176 KKASSERT(m->busy == 0); 177 178 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 179 res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)), 180 val); 181 if (busy) 182 vm_page_wakeup(m); 183 else 184 vm_page_unhold(m); 185 186 return res; 187 } 188 189 int 190 copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied) 191 { 192 size_t i; 193 194 for (i = 0; i < len; ++i) { 195 if ((((char *)kdaddr)[i] = ((const char *)kfaddr)[i]) == 0) { 196 if (lencopied) 197 *lencopied = i + 1; 198 return(0); 199 } 200 } 201 return (ENAMETOOLONG); 202 } 203 204 /* 205 * Copies a 
 * NUL-terminated string from user space to kernel space.
 * The number of bytes copied, including the terminator, is returned in
 * (*res).
 *
 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
{
	int error;
	size_t n;
	const char *uptr = udaddr;
	char *kptr = kaddr;

	if (res)
		*res = 0;
	while (len) {
		/*
		 * Copy in chunks of at most 32 bytes, additionally clipped
		 * to the end of the current user page and to the remaining
		 * buffer space.  Small chunks avoid faulting far past the
		 * terminator (which could produce a spurious EFAULT for a
		 * string that actually ends before the bad region).
		 */
		n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
		if (n > 32)
			n = 32;
		if (n > len)
			n = len;
		if ((error = copyin(uptr, kptr, n)) != 0)
			return(error);
		/*
		 * Scan the chunk just copied for the NUL; *res counts every
		 * byte examined, so on success it includes the terminator.
		 */
		while (n) {
			if (res)
				++*res;
			if (*kptr == 0)
				return(0);
			++kptr;
			++uptr;
			--n;
			--len;
		}

	}
	/* Buffer exhausted without finding a NUL */
	return(ENAMETOOLONG);
}

/*
 * Copy a binary buffer from user space to kernel space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	while (len) {
		/*
		 * Fault in the source user page read-only (NULL busy ptr:
		 * the page is returned held, not busied).
		 */
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ,
				  VM_FAULT_NORMAL,
				  &error, NULL);
		if (error)
			break;
		/* Clip the copy to the end of this user page */
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		/* Map the page into KVA via a lwbuf and copy out of it */
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
		      kaddr, n);
		len -= n;
		udaddr = (const char *)udaddr + n;
		kaddr = (char *)kaddr + n;
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	/* Any fault error is reported to the caller as EFAULT */
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	int busy;
	size_t n;

	error = 0;
	while (len) {
		/*
		 * Fault in the destination user page with write permission.
		 * Unlike copyin(), a busy indicator is requested so the page
		 * can be released in the matching state below.
		 */
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL,
				  &error, &busy);
		if (error)
			break;
		KKASSERT(m->busy == 0);
		/* Clip the copy to the end of this user page */
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		/* Map the page into KVA via a lwbuf and copy into it */
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
			     ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		lwbuf_free(lwb);
		/* Release in whichever state vm_fault_page() returned it */
		if (busy)
			vm_page_wakeup(m);
		else
			vm_page_unhold(m);
	}
	/* Any fault error is reported to the caller as EFAULT */
	if (error)
		error = EFAULT;
	return (error);
}

/*
 * Fetch the byte at the specified user address.  Returns the byte
 * zero-extended to int on success, -1 on failure.
 */
int
fubyte(const uint8_t *base)
{
	uint8_t c;

	if (copyin(base, &c, 1) == 0)
		return((int)c);
	return(-1);
}

/*
 * Store a byte at the specified user address.  Returns -1 on failure.
344 */ 345 int 346 subyte(uint8_t *base, uint8_t byte) 347 { 348 uint8_t c = byte; 349 350 if (copyout(&c, base, 1) == 0) 351 return(0); 352 return(-1); 353 } 354 355 /* 356 * Fetch a word (integer, 32 bits) from user space 357 */ 358 int32_t 359 fuword32(const uint32_t *base) 360 { 361 uint32_t v; 362 363 if (copyin(base, &v, sizeof(v)) == 0) 364 return(v); 365 return(-1); 366 } 367 368 /* 369 * Fetch a word (integer, 32 bits) from user space 370 */ 371 int64_t 372 fuword64(const uint64_t *base) 373 { 374 uint64_t v; 375 376 if (copyin(base, &v, sizeof(v)) == 0) 377 return(v); 378 return(-1); 379 } 380 381 /* 382 * Store a word (integer, 32 bits) to user space 383 */ 384 int 385 suword64(uint64_t *base, uint64_t word) 386 { 387 if (copyout(&word, base, sizeof(word)) == 0) 388 return(0); 389 return(-1); 390 } 391 392 int 393 suword32(uint32_t *base, int word) 394 { 395 if (copyout(&word, base, sizeof(word)) == 0) 396 return(0); 397 return(-1); 398 } 399