xref: /freebsd/sys/kern/kern_physio.c (revision 473c90ac)
/*-
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/rwlock.h>
#include <sys/uio.h>
#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

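/*
 * Perform raw ("physical") I/O for a character device: build struct bio
 * requests directly from the caller's uio and hand them to the driver's
 * d_strategy routine, bypassing the buffer cache.  Requests larger than the
 * device or system transfer limits are split into multiple bios unless the
 * driver sets SI_NOSPLIT.  Returns 0 on success or an errno on failure.
 */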
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;
	struct bio *bp;
	struct vm_page **pages;
	char *base, *sa;
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
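	/*
	 * Decide how the data buffer will be presented to the driver:
	 * kernel-space buffers are passed through unchanged; user buffers
	 * are either handed over as unmapped, held pages (when the driver
	 * advertises SI_UNMAPPED) or temporarily mapped into a pbuf's KVA.
	 */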
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = PBUF_PAGES;
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* A device read writes to user pages. */
	error = 0;
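	/*
	 * Walk the iovecs, issuing as many bios per iovec as needed to
	 * respect the device and system transfer-size limits.
	 */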
	for (i = 0; i < uio->uio_iovcnt; i++) {
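		/*
		 * Charge the transfer against racct resource limits when
		 * resource accounting is compiled in and enabled.
		 */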
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			switch (uio->uio_rw) {
			case UIO_READ:
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
				break;
			case UIO_WRITE:
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
				break;
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			switch (uio->uio_rw) {
			case UIO_READ:
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
				break;
			case UIO_WRITE:
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
				break;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;
			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

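			/*
			 * For user buffers, fault in and hold the pages
			 * backing this chunk so they stay resident for the
			 * duration of the transfer, then expose them to the
			 * driver either mapped through the pbuf KVA or as an
			 * unmapped page array.
			 */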
			if (pages) {
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				poff = (vm_offset_t)base & PAGE_MASK;
				if (pbuf && sa) {
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				bp->bio_data = base;

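			/*
			 * Dispatch the bio to the driver and sleep until it
			 * completes, then undo any temporary mapping and
			 * release the held pages.
			 */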
			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

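			/*
			 * Account for how much was actually transferred and
			 * advance the uio accordingly.
			 */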
			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
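	/*
	 * Common exit path: release the pbuf or page array, free the bio,
	 * and drop the hold on the process.
	 */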
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}