/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.14 2007/06/29 05:09:15 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
	xio->xio_flags = 0;
	xio->xio_bytes = 0;
	xio->xio_error = 0;
	xio->xio_offset = 0;
	xio->xio_npages = 0;
	xio->xio_pages = xio->xio_internal_pages;
}
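/*
 * Example usage (illustrative sketch only, not part of this file): wrap
 * a kernel buffer in an XIO, drain it into another kernel buffer, and
 * release the page holds.  Only functions defined in this file are used;
 * the buffer names and sizes are hypothetical.
 *
 *	struct xio xio;
 *	char src[1024];		// hypothetical source buffer
 *	char dst[1024];		// hypothetical destination buffer
 *	int error;
 *
 *	error = xio_init_kbuf(&xio, src, sizeof(src));
 *	if (error == 0) {
 *		error = xio_copy_xtok(&xio, 0, dst, sizeof(dst));
 *		xio_release(&xio);
 *	}
 */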
/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
	vm_offset_t addr;
	vm_page_t m;
	vm_page_t m0;
	int error;
	int i;
	int n;
	int vmprot;

	addr = trunc_page((vm_offset_t)ubase);
	xio->xio_flags = flags;
	xio->xio_bytes = 0;
	xio->xio_error = 0;
	if (ubytes == 0) {
		xio->xio_offset = 0;
		xio->xio_npages = 0;
	} else {
		vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
		xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
		xio->xio_pages = xio->xio_internal_pages;
		if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
			n = ubytes;
		m0 = NULL;
		for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
			m = vm_fault_page_quick(addr, vmprot, &error);
			if (m == NULL)
				break;
			xio->xio_pages[i] = m;
			ubytes -= n;
			xio->xio_bytes += n;
			if ((n = ubytes) > PAGE_SIZE)
				n = PAGE_SIZE;
			addr += PAGE_SIZE;

			/*
			 * Check linearity, used by syslink to memory map
			 * DMA buffers.
			 */
			if (flags & XIOF_VMLINEAR) {
				if (i == 0) {
					m0 = m;
				} else
				if (m->object != m0->object ||
				    m->pindex != m0->pindex + i) {
					/*
					 * Drop our hold on the out-of-line
					 * page.  It is not counted in
					 * xio_npages (i is not incremented
					 * on the break) and would otherwise
					 * be leaked.
					 */
					vm_page_unhold(m);
					error = EINVAL;
					break;
				}
			}
		}
		xio->xio_npages = i;

		/*
		 * If a failure occurred clean out what we loaded and
		 * return EFAULT.  Return 0 on success.
		 */
		if (i < XIO_INTERNAL_PAGES && n) {
			xio_release(xio);
			xio->xio_error = EFAULT;
		}
	}
	return(xio->xio_error);
}

/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
	vm_offset_t addr;
	vm_paddr_t paddr;
	vm_page_t m;
	int i;
	int n;

	addr = trunc_page((vm_offset_t)kbase);
	xio->xio_flags = 0;
	xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
	xio->xio_bytes = 0;
	xio->xio_pages = xio->xio_internal_pages;
	xio->xio_error = 0;
	if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
		n = kbytes;
	for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
		if ((paddr = pmap_kextract(addr)) == 0)
			break;
		crit_enter();
		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_hold(m);
		crit_exit();
		xio->xio_pages[i] = m;
		kbytes -= n;
		xio->xio_bytes += n;
		if ((n = kbytes) > PAGE_SIZE)
			n = PAGE_SIZE;
		addr += PAGE_SIZE;
	}
	xio->xio_npages = i;

	/*
	 * If a failure occurred clean out what we loaded and return EFAULT.
	 * Return 0 on success.
	 */
	if (i < XIO_INTERNAL_PAGES && n) {
		xio_release(xio);
		xio->xio_error = EFAULT;
	}
	return(xio->xio_error);
}

/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
	int i;
	vm_page_t m;

	crit_enter();
	for (i = 0; i < xio->xio_npages; ++i) {
		m = xio->xio_pages[i];
		vm_page_unhold(m);
	}
	crit_exit();
	xio->xio_offset = 0;
	xio->xio_npages = 0;
	xio->xio_bytes = 0;
	xio->xio_error = ENOBUFS;
}
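/*
 * Example usage (illustrative sketch only, not part of this file):
 * snapshotting a user buffer for a read-style operation so its pages can
 * be handed to another thread.  The uap->buf/uap->nbytes arguments are
 * hypothetical syscall parameters, and XIOF_READ is assumed to be the
 * read-direction flag from <sys/xio.h> (this file itself only references
 * XIOF_WRITE and XIOF_VMLINEAR).
 *
 *	struct xio xio;
 *	int error;
 *
 *	error = xio_init_ubuf(&xio, uap->buf, uap->nbytes, XIOF_READ);
 *	if (error == 0) {
 *		// xio now holds the backing pages; safe to pass to a
 *		// foreign thread outside the originating vmspace.
 *		xio_release(&xio);
 *	}
 */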
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 *	UIO_READ	xio -> uio
 *	UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, int *sizep)
{
	int error;
	int bytes;

	bytes = xio->xio_bytes - uoffset;
	if (bytes > uio->uio_resid)
		bytes = uio->uio_resid;
	KKASSERT(bytes >= 0);
	error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
				 bytes, uio);
	if (error == 0)
		*sizep = bytes;
	else
		*sizep = 0;
	return(error);
}

/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
	int i;
	int n;
	int error;
	int offset;
	vm_page_t m;
	struct sf_buf *sf;

	if (uoffset + bytes > xio->xio_bytes)
		return(EFAULT);

	offset = (xio->xio_offset + uoffset) & PAGE_MASK;
	if ((n = PAGE_SIZE - offset) > bytes)
		n = bytes;

	error = 0;
	for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	     i < xio->xio_npages;
	     ++i
	) {
		m = xio->xio_pages[i];
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
		sf_buf_free(sf);
		if (error)
			break;
		bytes -= n;
		uptr = (char *)uptr + n;
		if (bytes == 0)
			break;
		if ((n = bytes) > PAGE_SIZE)
			n = PAGE_SIZE;
		offset = 0;
	}
	return(error);
}

/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
	int i;
	int n;
	int error;
	int offset;
	vm_page_t m;
	struct sf_buf *sf;

	if (bytes + uoffset > xio->xio_bytes)
		return(EFAULT);

	offset = (xio->xio_offset + uoffset) & PAGE_MASK;
	if ((n = PAGE_SIZE - offset) > bytes)
		n = bytes;

	error = 0;
	for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	     i < xio->xio_npages;
	     ++i
	) {
		m = xio->xio_pages[i];
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
		sf_buf_free(sf);
		bytes -= n;
		kptr = (char *)kptr + n;
		if (bytes == 0)
			break;
		if ((n = bytes) > PAGE_SIZE)
			n = PAGE_SIZE;
		offset = 0;
	}
	return(error);
}
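/*
 * Example usage (illustrative sketch only, not part of this file): a
 * read path copying XIO data out through a caller-supplied uio with
 * UIO_READ semantics (xio -> uio).  The xio and uio pointers are assumed
 * to come from the caller's context.
 *
 *	int bytes;
 *	int error;
 *
 *	error = xio_uio_copy(xio, 0, uio, &bytes);
 *	if (error == 0 && bytes < xio->xio_bytes) {
 *		// partial copy: uio_resid was the limiting factor
 *	}
 */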
/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
	int i;
	int n;
	int error;
	int offset;
	vm_page_t m;
	struct sf_buf *sf;

	if (uoffset + bytes > xio->xio_bytes)
		return(EFAULT);

	offset = (xio->xio_offset + uoffset) & PAGE_MASK;
	if ((n = PAGE_SIZE - offset) > bytes)
		n = bytes;

	error = 0;
	for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	     i < xio->xio_npages;
	     ++i
	) {
		m = xio->xio_pages[i];
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		error = copyin(uptr, (char *)sf_buf_kva(sf) + offset, n);
		sf_buf_free(sf);
		if (error)
			break;
		bytes -= n;
		uptr = (const char *)uptr + n;
		if (bytes == 0)
			break;
		if ((n = bytes) > PAGE_SIZE)
			n = PAGE_SIZE;
		offset = 0;
	}
	return(error);
}

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
	int i;
	int n;
	int error;
	int offset;
	vm_page_t m;
	struct sf_buf *sf;

	if (uoffset + bytes > xio->xio_bytes)
		return(EFAULT);

	offset = (xio->xio_offset + uoffset) & PAGE_MASK;
	if ((n = PAGE_SIZE - offset) > bytes)
		n = bytes;

	error = 0;
	for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	     i < xio->xio_npages;
	     ++i
	) {
		m = xio->xio_pages[i];
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		bcopy(kptr, (char *)sf_buf_kva(sf) + offset, n);
		sf_buf_free(sf);
		bytes -= n;
		kptr = (const char *)kptr + n;
		if (bytes == 0)
			break;
		if ((n = bytes) > PAGE_SIZE)
			n = PAGE_SIZE;
		offset = 0;
	}
	return(error);
}
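/*
 * Example usage (illustrative sketch only, not part of this file):
 * filling the pages behind an XIO from a kernel buffer, e.g. on the
 * completion side of an I/O.  fill_reply(), reply, and replylen are
 * hypothetical names; the XIO is assumed to have been initialized with
 * write access (XIOF_WRITE) to its backing pages.
 *
 *	static int
 *	fill_reply(xio_t xio, const void *reply, int replylen)
 *	{
 *		// xio_copy_ktox() itself returns EFAULT if replylen
 *		// exceeds the bytes accommodated by the XIO.
 *		return(xio_copy_ktox(xio, 0, reply, replylen));
 *	}
 */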