/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/kern_physio.c,v 1.46.2.4 2003/11/14 09:51:47 simokawa Exp $
 * $DragonFly: src/sys/kern/kern_physio.c,v 1.27 2008/08/22 08:47:56 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

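/*
 * physio:
 *
 *	Perform raw device I/O directly between the device and the caller's
 *	buffers, bypassing the buffer cache.  Each iovec is broken into
 *	chunks no larger than the pbuf's KVA reservation or the device's
 *	si_iosize_max, and each chunk is issued synchronously.
 */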
static int
physio(cdev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int saflags;
	int iolen;
	int bcount;
	int bounceit;
	caddr_t ubase;
	struct buf *bp;

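	/*
	 * Acquire a pbuf with reserved KVA and save its initial flags so
	 * each chunk below can start from a clean flag state.
	 */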
	bp = getpbuf_kva(NULL);
	saflags = bp->b_flags;
	error = 0;

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		kprintf("WARNING: %s si_iosize_max=%d, using MAXPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = MAXPHYS;
	}

	/* Must be a real uio */
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

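	/*
	 * Iterate the iovecs, splitting each one into chunks sized within
	 * the pbuf KVA and the device's transfer limit.
	 */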
	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_cmd = BUF_CMD_READ;
			else
				bp->b_cmd = BUF_CMD_WRITE;
			bp->b_flags = saflags;
			bcount = uio->uio_iov[i].iov_len;

			reinitbufbio(bp);	/* clear translation cache */
			bp->b_bio1.bio_offset = uio->uio_offset;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;

			/*
			 * Set up mapping of the request into kernel memory.
			 *
			 * We can only write as much as fits in a pbuf,
			 * which is MAXPHYS, and no larger than the device's
			 * ability.
			 *
			 * If not using bounce pages the base address of the
			 * user mapping into the pbuf may be offset, further
			 * reducing how much will actually fit in the pbuf.
			 */
			if (bcount > dev->si_iosize_max)
				bcount = dev->si_iosize_max;

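			/*
			 * Bounce if the user base address is not 16-byte
			 * aligned; otherwise compute the intra-page offset,
			 * which reduces the usable pbuf KVA.
			 */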
			ubase = uio->uio_iov[i].iov_base;
			bounceit = (int)(((vm_offset_t)ubase) & 15);
			iolen = ((vm_offset_t)ubase) & PAGE_MASK;
			if (bounceit) {
				if (bcount > bp->b_kvasize)
					bcount = bp->b_kvasize;
			} else {
				if ((bcount + iolen) > bp->b_kvasize) {
					bcount = bp->b_kvasize;
					if (iolen != 0)
						bcount -= PAGE_SIZE;
				}
			}

			/*
			 * If we have to use a bounce buffer, allocate kernel
			 * memory and copyin/copyout.  Otherwise map the
			 * user buffer directly into kernel memory without
			 * copying.
			 */
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					bp->b_data = bp->b_kvabase;
					bp->b_bcount = bcount;
					vm_hold_load_pages(bp,
					    (vm_offset_t)bp->b_data,
					    (vm_offset_t)bp->b_data + bcount);
					if (uio->uio_rw == UIO_WRITE) {
						error = copyin(ubase,
						    bp->b_data, bcount);
						if (error) {
							vm_hold_free_pages(bp,
							    (vm_offset_t)bp->b_data,
							    (vm_offset_t)bp->b_data + bcount);
							goto doerror;
						}
					}
				} else if (vmapbuf(bp, ubase, bcount) < 0) {
					error = EFAULT;
					goto doerror;
				}
			} else {
				bp->b_data = uio->uio_iov[i].iov_base;
				bp->b_bcount = bcount;
			}
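			/*
			 * Issue the chunk to the device and wait for the
			 * synchronous BIO to complete.
			 */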
			dev_dstrategy(dev, &bp->b_bio1);
			biowait(&bp->b_bio1, "physstr");

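			/*
			 * b_resid is the portion not transferred, so iolen
			 * is the number of bytes actually moved.
			 */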
			iolen = bp->b_bcount - bp->b_resid;
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					if (uio->uio_rw == UIO_READ && iolen) {
						error = copyout(bp->b_data,
						    ubase, iolen);
						if (error) {
							bp->b_flags |= B_ERROR;
							bp->b_error = error;
						}
					}
					vm_hold_free_pages(bp,
					    (vm_offset_t)bp->b_data,
					    (vm_offset_t)bp->b_data + bcount);
				} else {
					vunmapbuf(bp);
				}
			}
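			/*
			 * Advance the uio by the bytes transferred.  A
			 * zero-length transfer without error indicates EOF.
			 */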
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	return (error);
}

int
physread(struct dev_read_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

int
physwrite(struct dev_write_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
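
/*
 * Illustrative sketch (not part of this file): a character-device driver
 * that wants raw-I/O read/write semantics can point its d_read and d_write
 * entry points at physread/physwrite in its dev_ops.  The "xxx" names
 * below are hypothetical placeholders, not a real driver.
 *
 *	static struct dev_ops xxx_ops = {
 *		{ "xxx", 0, D_DISK },
 *		.d_open = xxxopen,
 *		.d_close = xxxclose,
 *		.d_read = physread,
 *		.d_write = physwrite,
 *		.d_strategy = xxxstrategy,
 *	};
 */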