/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked as doing physical I/O (SPHYSIO) and the pages involved
 * in the I/O are faulted in and locked for the duration of the
 * transfer.  After the I/O completes, the pages are unlocked.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	if (bp == NULL) {	/* no buffer passed in; borrow one */
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		/*
		 * Verify that the user may access the region: a device
		 * read writes user memory, and a device write reads it.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);		/* clamp to transfer limit */
			requested = bp->b_bcount;
			p->p_flag |= SPHYSIO;
			/* wire the user pages and map them into the kernel */
			vslock(a = bp->b_un.b_addr, requested);
			vmapbuf(bp);
			(*strat)(bp);
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		/* the I/O is already done; biowait() just collects status */
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}
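
/*
 * Example usage (hypothetical; not part of this file): a character
 * device driver would typically call physio() from its read entry
 * point, passing its own strategy and mincnt routines.  The "xx"
 * names below are assumed for illustration only.
 */
#ifdef notdef
xxread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(xxstrategy, (struct buf *)NULL, dev, B_READ,
	    xxminphys, uio));
}
#endif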

/*
 * Clamp an I/O request to the maximum size that can be performed
 * in a single operation.  This limit is necessary to prevent a
 * single process from locking more than a fixed amount of memory
 * in the kernel.
 */
u_int
minphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	return (bp->b_bcount);	/* the (possibly clamped) count */
}
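
/*
 * Example (hypothetical; not part of this file): a driver whose
 * controller cannot handle MAXPHYS-sized transfers would supply
 * its own mincnt routine to physio() and defer to minphys() for
 * the system-wide limit.  XX_MAXXFER and xxminphys are assumed
 * names for illustration only.
 */
#ifdef notdef
u_int
xxminphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > XX_MAXXFER)
		bp->b_bcount = XX_MAXXFER;
	return (minphys(bp));
}
#endif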

/*
 * Get a swap buffer header from the free list, sleeping at the
 * given priority until one becomes available.
 */
static struct buf *
getswbuf(prio)
	int prio;
{
	int s;
	struct buf *bp;

	s = splbio();
	while (bswlist.b_actf == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, prio);
	}
	bp = bswlist.b_actf;
	bswlist.b_actf = bp->b_actf;
	splx(s);
	return (bp);
}

/*
 * Return a buffer header to the swap buffer free list.  Anyone
 * sleeping in getswbuf() is awakened; the pageout daemon is also
 * awakened, as it may be waiting for a buffer.
 */
static void
freeswbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	bp->b_actf = bswlist.b_actf;
	bswlist.b_actf = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)pageproc);
	}
	splx(s);
}

/*
 * Do a read on a device for a user process.
 */
rawread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_READ, minphys, uio));
}

/*
 * Do a write on a device for a user process.
 */
rawwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_WRITE, minphys, uio));
}
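
/*
 * Example (hypothetical; not part of this file): a driver with no
 * special transfer constraints can simply point its character-device
 * read and write entries at these routines; the "yy" names are
 * assumed for illustration only.
 */
#ifdef notdef
yyread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (rawread(dev, uio));
}
#endif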