/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	8.4 (Berkeley) 01/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked.  After the completion of the I/O, the pages
 * are unlocked.
 *
 * strat  - device strategy routine that starts each transfer
 * bp     - buffer header to use; NULL means borrow one from the
 *          swap-buffer free list for the duration of the call
 * dev    - device to transfer to/from
 * rw     - transfer direction, B_READ or B_WRITE
 * mincnt - routine that clamps b_bcount to the device's maximum
 *          single-transfer size (e.g. minphys)
 * uio    - describes the user address ranges and starting offset
 *
 * Returns 0 on success or an errno (e.g. EFAULT) on failure.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	/* No caller-supplied buffer header: borrow one from the swap list. */
	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	/* One pass per iovec; each iovec may take several device transfers. */
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		/*
		 * Verify user access in the opposite direction from the
		 * transfer: a device read (B_READ) writes into user memory.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			/*
			 * Wait for the shared buffer to go idle; B_WANTED
			 * asks the current owner to wake us on release.
			 * splbio() keeps interrupt-level code off the flags
			 * while we test and set them.
			 */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		/* Translate HP-UX mapped addresses to their real base. */
		if (ISHPMMADDR(iov->iov_base))
			bp->b_data = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_data = iov->iov_base;
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			/* Let the device clamp the transfer size. */
			(*mincnt)(bp);
			requested = bp->b_bcount;
			p->p_flag |= P_PHYSIO;
			/* Wire the user pages and map them into the kernel. */
			vslock(a = bp->b_data, requested);
			vmapbuf(bp);
			(*strat)(bp);
			/* Block I/O interrupts while testing/awaiting B_DONE. */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~P_PHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			/* Advance by the amount actually transferred. */
			done = bp->b_bcount - bp->b_resid;
			/* NOTE(review): cast-as-lvalue is an old-compiler
			 * extension, not standard C. */
			(char *)bp->b_data += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		/* Collect any final error (also clears B_DONE state). */
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation.  This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
121 */ 122 u_int 123 minphys(bp) 124 struct buf *bp; 125 { 126 if (bp->b_bcount > MAXPHYS) 127 bp->b_bcount = MAXPHYS; 128 } 129 130 static struct buf * 131 getswbuf(prio) 132 int prio; 133 { 134 int s; 135 struct buf *bp; 136 137 s = splbio(); 138 while (bswlist.b_actf == NULL) { 139 bswlist.b_flags |= B_WANTED; 140 sleep((caddr_t)&bswlist, prio); 141 } 142 bp = bswlist.b_actf; 143 bswlist.b_actf = bp->b_actf; 144 splx(s); 145 return (bp); 146 } 147 148 static void 149 freeswbuf(bp) 150 struct buf *bp; 151 { 152 int s; 153 154 s = splbio(); 155 bp->b_actf = bswlist.b_actf; 156 bswlist.b_actf = bp; 157 if (bp->b_vp) 158 brelvp(bp); 159 if (bswlist.b_flags & B_WANTED) { 160 bswlist.b_flags &= ~B_WANTED; 161 wakeup((caddr_t)&bswlist); 162 wakeup((caddr_t)pageproc); 163 } 164 splx(s); 165 } 166 167 /* 168 * Do a read on a device for a user process. 169 */ 170 rawread(dev, uio) 171 dev_t dev; 172 struct uio *uio; 173 { 174 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 175 dev, B_READ, minphys, uio)); 176 } 177 178 /* 179 * Do a write on a device for a user process. 180 */ 181 rawwrite(dev, uio) 182 dev_t dev; 183 struct uio *uio; 184 { 185 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 186 dev, B_WRITE, minphys, uio)); 187 } 188