xref: /original-bsd/sys/kern/kern_physio.c (revision 46bf0326)
/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	7.20 (Berkeley) 05/11/91
 */

#include "param.h"
#include "systm.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "trace.h"
#include "map.h"
#include "vnode.h"
#include "specdev.h"

#ifdef HPUXCOMPAT
#include "user.h"
#endif

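/*
 * Buffer headers for raw transfers are borrowed from the pool of swap
 * I/O buffers (bswlist) when the caller does not supply one.
 */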
static	struct buf *getswbuf();
static	freeswbuf();

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the pages
 * are unlocked.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested, done;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

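	/*
	 * No buffer supplied by the caller; borrow a buffer header
	 * from the pool of swap I/O buffers (returned by freeswbuf()
	 * once the transfer is finished).
	 */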
	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
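	/*
	 * Process each iovec in turn: verify that the user may access
	 * the region, then carve the transfer into chunks no larger
	 * than (*mincnt)() allows.
	 */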
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
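		/*
		 * A buffer shared with the caller (typically a driver's
		 * private raw buffer) may still be busy with an earlier
		 * transfer; a borrowed swap buffer is exclusively ours.
		 */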
		if (!allocbuf) {	/* only if sharing caller's buffer */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
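		/*
		 * Move this iovec one chunk at a time; each pass issues
		 * a single transfer and then advances the uio bookkeeping
		 * by the amount actually moved.
		 */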
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);
			requested = bp->b_bcount;
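			/*
			 * Mark the process as doing physical I/O and wire
			 * the pages down so they stay resident while the
			 * device is using them; hp300 and i386 also need
			 * the pages mapped into kernel virtual space.
			 */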
			p->p_flag |= SPHYSIO;
			vslock(a = bp->b_un.b_addr, requested);
#if defined(hp300) || defined(i386)
			vmapbuf(bp);
#endif
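			/*
			 * Hand the buffer to the driver and sleep until
			 * its biodone() call marks the buffer B_DONE.
			 */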
			(*strat)(bp);
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
#if defined(hp300) || defined(i386)
			vunmapbuf(bp);
#endif
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
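			/*
			 * b_resid is what the driver could not transfer;
			 * advance the base address, iovec, and uio by the
			 * bytes that actually moved.
			 */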
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
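	/*
	 * The hp300 user data cache may hold stale copies of pages the
	 * device wrote directly; invalidate it (DCIU) before returning.
	 */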
#if defined(hp300)
	DCIU();
#endif
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Clip the size of an I/O request to the largest that can be performed
 * in a single operation. This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
 */
u_int
minphys(bp)
	struct buf *bp;
{
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
}

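/*
 * Take a buffer header from the swap I/O free list (bswlist), sleeping
 * at the given priority until one becomes available.
 */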
static
struct buf *
getswbuf(prio)
	int prio;
{
	int s;
	struct buf *bp;

	s = splbio();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, prio);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);
	return (bp);
}

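/*
 * Return a borrowed buffer header to the free list and wake up any
 * waiters; the pageout daemon, which shares this pool, is prodded as
 * well.
 */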
static
freeswbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)pageproc);
	}
	splx(s);
}

/*
 * Do a read on a device for a user process.
 */
rawread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_READ, minphys, uio));
}

/*
 * Do a write on a device for a user process.
 */
rawwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL,
	    dev, B_WRITE, minphys, uio));
}
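
/*
 * Illustrative sketch only (never compiled): a character device driver
 * would normally provide read/write entry points that funnel into
 * physio() just as rawread()/rawwrite() above do, substituting its own
 * strategy routine and, if its hardware cannot handle MAXPHYS-sized
 * transfers, its own mincnt routine.  The names xxread, xxstrategy, and
 * xxminphys are hypothetical and appear nowhere else in this kernel.
 */
#ifdef notdef
int	xxstrategy();			/* hypothetical driver strategy */

u_int
xxminphys(bp)				/* mirrors minphys() above */
	struct buf *bp;
{
	if (bp->b_bcount > 32 * 1024)	/* assumed controller limit */
		bp->b_bcount = 32 * 1024;
}

xxread(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	return (physio(xxstrategy, (struct buf *)NULL, dev, B_READ,
	    xxminphys, uio));
}
#endif /* notdef */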