/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	7.24 (Berkeley) 02/05/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/trace.h>
#include <sys/map.h>
#include <sys/vnode.h>
#include <sys/specdev.h>

#ifdef HPUXCOMPAT
#include <sys/user.h>
#endif

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the pages
 * are unlocked.
 *
 * strat:  device strategy routine that starts the transfer
 * bp:     buffer header to use, or NULL to borrow one from the swap pool
 * dev:    device to transfer to/from
 * rw:     B_READ or B_WRITE
 * mincnt: clamps bp->b_bcount to the device/system maximum per request
 * uio:    describes the user address ranges and device offset
 *
 * Returns 0 on success or an errno (e.g. EFAULT, or the error reported
 * by biowait()).  Each iovec is transferred in mincnt-sized chunks.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	/* No caller-supplied buffer: borrow a header from the swap pool. */
	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		/*
		 * Verify user access in the direction of the transfer:
		 * a device read writes user memory and vice versa.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			/* Wait for exclusive use of the caller's buffer. */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		/* HP-UX compatibility: translate mapped-memory addresses. */
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
		/* Transfer this iovec in chunks no larger than mincnt allows. */
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);	/* may clamp b_bcount */
			requested = bp->b_bcount;
			/*
			 * Mark the process as doing physical I/O, wire the
			 * user pages, and map them into kernel space so the
			 * device can transfer directly to/from them.
			 */
			p->p_flag |= SPHYSIO;
			vslock(a = bp->b_un.b_addr, requested);
			vmapbuf(bp);
			(*strat)(bp);
			/* Block interrupts while testing/awaiting B_DONE. */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			/* Advance user address and uio state by what moved. */
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);	/* collect final completion status */
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
#if defined(hp300)
	/* hp300: flush the user-side instruction/data cache. */
	DCIU();
#endif
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation.  This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
126 */ 127 u_int 128 minphys(bp) 129 struct buf *bp; 130 { 131 if (bp->b_bcount > MAXPHYS) 132 bp->b_bcount = MAXPHYS; 133 } 134 135 static struct buf * 136 getswbuf(prio) 137 int prio; 138 { 139 int s; 140 struct buf *bp; 141 142 s = splbio(); 143 while (bswlist.av_forw == NULL) { 144 bswlist.b_flags |= B_WANTED; 145 sleep((caddr_t)&bswlist, prio); 146 } 147 bp = bswlist.av_forw; 148 bswlist.av_forw = bp->av_forw; 149 splx(s); 150 return (bp); 151 } 152 153 static void 154 freeswbuf(bp) 155 struct buf *bp; 156 { 157 int s; 158 159 s = splbio(); 160 bp->av_forw = bswlist.av_forw; 161 bswlist.av_forw = bp; 162 if (bp->b_vp) 163 brelvp(bp); 164 if (bswlist.b_flags & B_WANTED) { 165 bswlist.b_flags &= ~B_WANTED; 166 wakeup((caddr_t)&bswlist); 167 wakeup((caddr_t)pageproc); 168 } 169 splx(s); 170 } 171 172 /* 173 * Do a read on a device for a user process. 174 */ 175 rawread(dev, uio) 176 dev_t dev; 177 struct uio *uio; 178 { 179 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 180 dev, B_READ, minphys, uio)); 181 } 182 183 /* 184 * Do a write on a device for a user process. 185 */ 186 rawwrite(dev, uio) 187 dev_t dev; 188 struct uio *uio; 189 { 190 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 191 dev, B_WRITE, minphys, uio)); 192 } 193