/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	7.21 (Berkeley) 09/06/91
 */

#include "param.h"
#include "systm.h"
#include "buf.h"
#include "conf.h"
#include "proc.h"
#include "seg.h"
#include "trace.h"
#include "map.h"
#include "vnode.h"
#include "specdev.h"

#ifdef HPUXCOMPAT
#include "user.h"
#endif

static struct buf *getswbuf();
static freeswbuf();

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the pages
 * are unlocked.
 *
 * strat:  device strategy routine that starts the transfer.
 * bp:     caller-supplied buffer header, or NULL to borrow one
 *         from the swap buffer pool for the duration of the call.
 * dev:    device to transfer to/from.
 * rw:     B_READ or B_WRITE.
 * mincnt: routine that clamps b_bcount to the device's per-request
 *         maximum (e.g. minphys below).
 * uio:    user I/O descriptor; consumed as the transfer proceeds.
 *
 * Returns 0 on success or an errno (EFAULT on bad user address,
 * otherwise whatever biowait() reports).
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested, done;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	/* No caller buffer: borrow one from the swap buffer pool. */
	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	/* One pass per iovec, advancing the uio as data moves. */
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		/*
		 * A device read writes user memory and a device write
		 * reads it, hence the inverted access check.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			/* Wait for the caller's buffer to go idle. */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		/* HP-UX compat: translate shared-memory addresses. */
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
		/*
		 * Transfer the iovec in mincnt()-limited chunks.
		 * NOTE(review): if iov_len is 0 on entry this loop never
		 * runs and `done'/`requested' are read uninitialized by
		 * the test after the loop — confirm callers never pass
		 * zero-length iovecs.
		 */
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);	/* clamp b_bcount to device max */
			requested = bp->b_bcount;
			p->p_flag |= SPHYSIO;
			/* Wire the user pages and map them for the device. */
			vslock(a = bp->b_un.b_addr, requested);
			vmapbuf(bp);
			(*strat)(bp);
			/* Sleep until biodone() marks the transfer done. */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			/* Advance past whatever actually transferred. */
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
#if defined(hp300)
	/* NOTE(review): hp300 cache flush, presumably needed after
	   DMA into user pages — confirm against machine-dep code. */
	DCIU();
#endif
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation. This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
125 */ 126 u_int 127 minphys(bp) 128 struct buf *bp; 129 { 130 if (bp->b_bcount > MAXPHYS) 131 bp->b_bcount = MAXPHYS; 132 } 133 134 static 135 struct buf * 136 getswbuf(prio) 137 int prio; 138 { 139 int s; 140 struct buf *bp; 141 142 s = splbio(); 143 while (bswlist.av_forw == NULL) { 144 bswlist.b_flags |= B_WANTED; 145 sleep((caddr_t)&bswlist, prio); 146 } 147 bp = bswlist.av_forw; 148 bswlist.av_forw = bp->av_forw; 149 splx(s); 150 return (bp); 151 } 152 153 static 154 freeswbuf(bp) 155 struct buf *bp; 156 { 157 int s; 158 159 s = splbio(); 160 bp->av_forw = bswlist.av_forw; 161 bswlist.av_forw = bp; 162 if (bp->b_vp) 163 brelvp(bp); 164 if (bswlist.b_flags & B_WANTED) { 165 bswlist.b_flags &= ~B_WANTED; 166 wakeup((caddr_t)&bswlist); 167 wakeup((caddr_t)pageproc); 168 } 169 splx(s); 170 } 171 172 /* 173 * Do a read on a device for a user process. 174 */ 175 rawread(dev, uio) 176 dev_t dev; 177 struct uio *uio; 178 { 179 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 180 dev, B_READ, minphys, uio)); 181 } 182 183 /* 184 * Do a write on a device for a user process. 185 */ 186 rawwrite(dev, uio) 187 dev_t dev; 188 struct uio *uio; 189 { 190 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 191 dev, B_WRITE, minphys, uio)); 192 } 193