/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.16 2008/05/09 07:24:45 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

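/*
 * Typical lifecycle (illustrative sketch, not code from this file; the
 * names kbuf/len are hypothetical caller-supplied values):
 *
 *	struct xio xio;
 *	int error;
 *
 *	error = xio_init_kbuf(&xio, kbuf, len);
 *	if (error == 0) {
 *		...hand &xio to another thread; move data with the
 *		...xio_copy_*() or xio_uio_copy() functions below...
 *		xio_release(&xio);
 *	}
 */
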
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

/*
 * Just do basic initialization of an empty XIO
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}

/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_page_t m;
    vm_page_t m0;
    int error;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
	xio->xio_offset = 0;
	xio->xio_npages = 0;
    } else {
	vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
	xio->xio_pages = xio->xio_internal_pages;
	if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
	    n = ubytes;
	m0 = NULL;
	for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	    m = vm_fault_page_quick(addr, vmprot, &error);
	    if (m == NULL)
		break;
	    xio->xio_pages[i] = m;
	    ubytes -= n;
	    xio->xio_bytes += n;
	    if ((n = ubytes) > PAGE_SIZE)
		n = PAGE_SIZE;
	    addr += PAGE_SIZE;

	    /*
	     * Check linearity, used by syslink to memory map DMA buffers.
	     */
	    if (flags & XIOF_VMLINEAR) {
		if (i == 0) {
		    m0 = m;
		} else
		if (m->object != m0->object || m->pindex != m0->pindex + i) {
		    error = EINVAL;
		    break;
		}
	    }
	}
	xio->xio_npages = i;

	/*
	 * If a failure occurred, clean out what we loaded and return EFAULT.
	 * Return 0 on success.  Do not dirty the pages.
	 */
	if (i < XIO_INTERNAL_PAGES && n) {
	    xio->xio_flags &= ~XIOF_WRITE;
	    xio_release(xio);
	    xio->xio_error = EFAULT;
	}
    }
    return(xio->xio_error);
}

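/*
 * Illustrative usage sketch (not part of the original file): pinning a
 * user buffer from the owning process so that another thread can later
 * fill it.  The names uptr/ulen are hypothetical.
 *
 *	struct xio xio;
 *
 *	if (xio_init_ubuf(&xio, uptr, ulen, XIOF_WRITE) == 0) {
 *		...pass &xio to a worker, which can use xio_copy_ktox()
 *		...to fill it...
 *		xio_release(&xio);
 *	}
 */
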
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}

/*
 * Initialize an XIO given an array of vm_page pointers.  The caller is
 * responsible for any modified state changes for the pages.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = npages * PAGE_SIZE;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    for (i = 0; i < npages; ++i) {
	vm_page_hold(mbase[i]);
	xio->xio_pages[i] = mbase[i];
    }
    return(0);
}

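/*
 * Illustrative sketch (not part of the original file): wrapping pages the
 * caller already manages.  The names pages/npages are hypothetical; a
 * flags value of 0 leaves the pages undirtied when the XIO is released.
 *
 *	struct xio xio;
 *
 *	xio_init_pages(&xio, pages, npages, 0);
 *	...use the xio_copy_*() or xio_uio_copy() functions on it...
 *	xio_release(&xio);
 */
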
/*
 * Clean up an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	if (xio->xio_flags & XIOF_WRITE)
		vm_page_dirty(m);
	vm_page_unhold(m);
    }
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}

/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace, it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
    size_t bytes;
    int error;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
				bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}

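/*
 * Illustrative sketch (not part of the original file): exposing an
 * already-initialized XIO through a struct uio, e.g. in a read-style
 * path.  The names xio/uio/done are hypothetical locals of the caller.
 *
 *	size_t done;
 *	int error;
 *
 *	error = xio_uio_copy(xio, 0, uio, &done);
 *
 * With UIO_READ data moves xio -> uio, with UIO_WRITE uio -> xio, and
 * 'done' receives the number of bytes actually copied.
 */
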
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyout((char *)lwbuf_kva(lwb) + offset, uptr, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

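/*
 * Illustrative sketch (not part of the original file): draining the first
 * 'len' bytes of an XIO into a user buffer from the consuming thread.
 * The names xio/ubuf/len are hypothetical.
 *
 *	error = xio_copy_xtou(xio, 0, ubuf, len);
 *
 * xio_copy_xtok(), xio_copy_utox() and xio_copy_ktox() below follow the
 * same pattern with the source and destination swapped as their names
 * indicate (x = xio, u = userland, k = kernel).
 */
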
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (bytes + uoffset > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy((char *)lwbuf_kva(lwb) + offset, kptr, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyin(uptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (const char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy(kptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (const char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}