/*-
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.proprietary.c%
 *
 *	@(#)kern_physio.c	7.27 (Berkeley) 10/02/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#ifdef HPUXCOMPAT
#include <sys/user.h>
#endif

static void freeswbuf __P((struct buf *));
static struct buf *getswbuf __P((int));

/*
 * This routine does device I/O for a user process.
 *
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the pages
 * are unlocked.
 *
 * strat	driver strategy routine that starts the transfer
 * bp		buffer header to use, or NULL to borrow one from bswlist
 * dev		device to transfer to/from
 * rw		B_READ or B_WRITE
 * mincnt	routine that trims bp->b_bcount to the device's maximum
 * uio		user I/O description (iovecs, offset, resid)
 *
 * Returns 0 on success or an errno (EFAULT on bad user address,
 * otherwise whatever biowait() reports from the driver).
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	u_int (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov;
	register int requested = 0, done = 0;
	register struct proc *p = curproc;
	char *a;
	int s, allocbuf = 0, error = 0;

	/* No caller-supplied buffer: borrow a header from the swap list. */
	if (bp == NULL) {
		allocbuf = 1;
		bp = getswbuf(PRIBIO+1);
	}
	/* One outer pass per iovec; empty iovecs are skipped. */
	for (; uio->uio_iovcnt; uio->uio_iov++, uio->uio_iovcnt--) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0)
			continue;
		/*
		 * Check user access in the direction opposite the transfer:
		 * a device read writes into user memory, and vice versa.
		 */
		if (!useracc(iov->iov_base, (u_int)iov->iov_len,
		    rw == B_READ ? B_WRITE : B_READ)) {
			error = EFAULT;
			break;
		}
		if (!allocbuf) {	/* only if sharing caller's buffer */
			/* Wait (at splbio) for the shared buffer to go idle. */
			s = splbio();
			while (bp->b_flags&B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
			}
			splx(s);
		}
		bp->b_error = 0;
		bp->b_proc = p;
#ifdef HPUXCOMPAT
		/* HP-UX shared-memory addresses are remapped before use. */
		if (ISHPMMADDR(iov->iov_base))
			bp->b_un.b_addr = (caddr_t)HPMMBASEADDR(iov->iov_base);
		else
#endif
		bp->b_un.b_addr = iov->iov_base;
		/*
		 * Carve this iovec into chunks no larger than (*mincnt)()
		 * allows, issuing one transfer per chunk.
		 */
		while (iov->iov_len > 0) {
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | rw;
			bp->b_dev = dev;
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iov->iov_len;
			(*mincnt)(bp);		/* may trim b_bcount in place */
			requested = bp->b_bcount;
			p->p_flag |= SPHYSIO;	/* mark 'doing physical I/O' */
			/* Wire the user pages, then map them for the driver. */
			vslock(a = bp->b_un.b_addr, requested);
			vmapbuf(bp);
			(*strat)(bp);
			/* Sleep at splbio until the driver marks it done. */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				sleep((caddr_t)bp, PRIBIO);
			vunmapbuf(bp);
			vsunlock(a, requested, rw);
			p->p_flag &= ~SPHYSIO;
			if (bp->b_flags&B_WANTED)	/* rare */
				wakeup((caddr_t)bp);
			splx(s);
			/* Advance all cursors by what actually transferred. */
			done = bp->b_bcount - bp->b_resid;
			bp->b_un.b_addr += done;
			iov->iov_len -= done;
			uio->uio_resid -= done;
			uio->uio_offset += done;
			/* temp kludge for disk drives */
			if (done < requested || bp->b_flags & B_ERROR)
				break;
		}
		/* Release the buffer and collect the driver's error, if any. */
		bp->b_flags &= ~(B_BUSY | B_WANTED | B_PHYS | B_RAW);
		error = biowait(bp);
		/* temp kludge for disk drives */
		if (done < requested || bp->b_flags & B_ERROR)
			break;
	}
#if defined(hp300)
	/* NOTE(review): presumably a hp300 cache-invalidate after the
	 * physical transfer -- confirm DCIU() semantics in machine code. */
	DCIU();
#endif
	if (allocbuf)
		freeswbuf(bp);
	return (error);
}

/*
 * Calculate the maximum size of I/O request that can be requested
 * in a single operation. This limit is necessary to prevent a single
 * process from being able to lock more than a fixed amount of memory
 * in the kernel.
123 */ 124 u_int 125 minphys(bp) 126 struct buf *bp; 127 { 128 if (bp->b_bcount > MAXPHYS) 129 bp->b_bcount = MAXPHYS; 130 } 131 132 static struct buf * 133 getswbuf(prio) 134 int prio; 135 { 136 int s; 137 struct buf *bp; 138 139 s = splbio(); 140 while (bswlist.b_actf == NULL) { 141 bswlist.b_flags |= B_WANTED; 142 sleep((caddr_t)&bswlist, prio); 143 } 144 bp = bswlist.b_actf; 145 bswlist.b_actf = bp->b_actf; 146 splx(s); 147 return (bp); 148 } 149 150 static void 151 freeswbuf(bp) 152 struct buf *bp; 153 { 154 int s; 155 156 s = splbio(); 157 bp->b_actf = bswlist.b_actf; 158 bswlist.b_actf = bp; 159 if (bp->b_vp) 160 brelvp(bp); 161 if (bswlist.b_flags & B_WANTED) { 162 bswlist.b_flags &= ~B_WANTED; 163 wakeup((caddr_t)&bswlist); 164 wakeup((caddr_t)pageproc); 165 } 166 splx(s); 167 } 168 169 /* 170 * Do a read on a device for a user process. 171 */ 172 rawread(dev, uio) 173 dev_t dev; 174 struct uio *uio; 175 { 176 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 177 dev, B_READ, minphys, uio)); 178 } 179 180 /* 181 * Do a write on a device for a user process. 182 */ 183 rawwrite(dev, uio) 184 dev_t dev; 185 struct uio *uio; 186 { 187 return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, 188 dev, B_WRITE, minphys, uio)); 189 } 190