/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	7.25 (Berkeley) 07/12/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/trace.h>
#include <sys/map.h>
#include <sys/vnode.h>

#ifdef HPUXCOMPAT
#include <sys/user.h>
#endif

/* Helpers for borrowing/returning buffer headers from the swap free list. */
static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the pages
 * are unlocked.
 *
 * strat  - driver strategy routine used to start each transfer
 * bp     - buffer header to use; NULL means borrow one from the
 *          swap buffer free list for the duration of the call
 * dev    - device to transfer to/from
 * rw     - B_READ or B_WRITE
 * mincnt - routine that clamps bp->b_bcount to the driver maximum
 * uio    - user I/O vector describing the addresses and lengths
 *
 * Returns 0 on success, EFAULT if the user lacks access to some
 * iovec, or the error reported by biowait() for the last transfer.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		/*
		 * Verify user access to the region.  The direction is
		 * intentionally inverted: a device read (B_READ) writes
		 * into user memory, and vice versa.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		/* HP-UX compat: translate mapped-memory addresses. */
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
		/* Transfer this iovec in (*mincnt)()-sized chunks. */
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);	/* clamp b_bcount to driver max */
			requested = bp->b_bcount;
			p->p_flag |= SPHYSIO;
			/* Fault in and wire the user pages, then map them. */
			vslock(a = bp->b_un.b_addr, requested);
			vmapbuf(bp);
			(*strat)(bp);
			/* Sleep at splbio until the driver marks B_DONE. */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			/* Advance by the amount actually transferred. */
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
#if defined(hp300)
	/* NOTE(review): presumably invalidates the user data cache on
	 * hp300 after raw I/O — confirm against the machine cpu macros. */
	DCIU();
#endif
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation. This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
125 */ 126 u_int 127 minphys(bp) 128 struct buf *bp; 129 { 130 if (bp->b_bcount > MAXPHYS) 131 bp->b_bcount = MAXPHYS; 132 } 133 134 static struct buf * 135 getswbuf(prio) 136 int prio; 137 { 138 int s; 139 struct buf *bp; 140 141 s = splbio(); 142 while (bswlist.av_forw == NULL) { 143 bswlist.b_flags |= B_WANTED; 144 sleep((caddr_t)&bswlist, prio); 145 } 146 bp = bswlist.av_forw; 147 bswlist.av_forw = bp->av_forw; 148 splx(s); 149 return (bp); 150 } 151 152 static void 153 freeswbuf(bp) 154 struct buf *bp; 155 { 156 int s; 157 158 s = splbio(); 159 bp->av_forw = bswlist.av_forw; 160 bswlist.av_forw = bp; 161 if (bp->b_vp) 162 brelvp(bp); 163 if (bswlist.b_flags & B_WANTED) { 164 bswlist.b_flags &= ~B_WANTED; 165 wakeup((caddr_t)&bswlist); 166 wakeup((caddr_t)pageproc); 167 } 168 splx(s); 169 } 170 171 /* 172 * Do a read on a device for a user process. 173 */ 174 rawread(dev, uio) 175 dev_t dev; 176 struct uio *uio; 177 { 178 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 179 dev, B_READ, minphys, uio)); 180 } 181 182 /* 183 * Do a write on a device for a user process. 184 */ 185 rawwrite(dev, uio) 186 dev_t dev; 187 struct uio *uio; 188 { 189 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 190 dev, B_WRITE, minphys, uio)); 191 } 192