/*	kern_physio.c	4.37	82/12/17	*/

#include "../machine/pte.h"

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/buf.h"
#include "../h/conf.h"
#include "../h/proc.h"
#include "../h/seg.h"
#include "../h/vm.h"
#include "../h/trace.h"
#include "../h/uio.h"

/*
 * Swap IO headers -
 * They contain the necessary information for the swap I/O.
 * At any given time, a swap header can be in three
 * different lists. When free it is in the free list,
 * when allocated and the I/O queued, it is on the swap
 * device list, and finally, if the operation was a dirty
 * page push, when the I/O completes, it is inserted
 * in a list of cleaned pages to be processed by the pageout daemon.
 */
struct	buf *swbuf;
short	*swsize;	/* CAN WE JUST USE B_BCOUNT? */
int	*swpf;		/* per-header page frame info for dirty pushes */

/*
 * swap I/O -
 *
 * Perform a swap transfer of nbytes between the disk area starting
 * at dblkno on dev and the virtual address addr in process p.
 * rdflg is B_READ or B_WRITE; flag carries B_DIRTY/B_PGIN/B_UAREA/
 * B_PAGET modifiers; pfcent is page-frame bookkeeping recorded for
 * dirty pushes (stored in swpf[], consumed elsewhere when the I/O
 * completes).
 *
 * If the flag indicates a dirty page push initiated
 * by the pageout daemon, we map the page into the i th
 * virtual page of process 2 (the daemon itself) where i is
 * the index of the swap header that has been allocated.
 * We simply initialize the header and queue the I/O but
 * do not wait for completion. When the I/O completes,
 * iodone() will link the header to a list of cleaned
 * pages to be processed by the pageout daemon.
 */
swap(p, dblkno, addr, nbytes, rdflg, flag, dev, pfcent)
	struct proc *p;
	swblk_t dblkno;
	caddr_t addr;
	int nbytes, rdflg, flag;
	dev_t dev;
	u_int pfcent;
{
	register struct buf *bp;
	register u_int c;
	int p2dp;
	register struct pte *dpte, *vpte;
	int s;

	/*
	 * Allocate a swap I/O header from the free list headed by
	 * bswlist, sleeping (at raised ipl so the test and the sleep
	 * are atomic with respect to interrupt-level releases) until
	 * one becomes available.
	 */
	s = spl6();
	while (bswlist.av_forw == NULL) {
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);

	bp->b_flags = B_BUSY | B_PHYS | rdflg | flag;
	/* Count plain swap traffic only; page pushes/pulls are counted elsewhere. */
	if ((bp->b_flags & (B_DIRTY|B_PGIN)) == 0)
		if (rdflg == B_READ)
			sum.v_pswpin += btoc(nbytes);
		else
			sum.v_pswpout += btoc(nbytes);
	bp->b_proc = p;
	if (flag & B_DIRTY) {
		/*
		 * Dirty page push: double-map the page(s) into the
		 * data space of process 2 (the pageout daemon), at the
		 * virtual page slot corresponding to this header's
		 * index, by copying the source PTEs.  The source pages
		 * must be resident and valid, else the push is bogus.
		 */
		p2dp = ((bp - swbuf) * CLSIZE) * KLMAX;
		dpte = dptopte(&proc[2], p2dp);
		vpte = vtopte(p, btop(addr));
		for (c = 0; c < nbytes; c += NBPG) {
			if (vpte->pg_pfnum == 0 || vpte->pg_fod)
				panic("swap bad pte");
			*dpte++ = *vpte++;
		}
		bp->b_un.b_addr = (caddr_t)ctob(p2dp);
	} else
		bp->b_un.b_addr = addr;
	/*
	 * Carve the transfer into chunks the driver will accept
	 * (minphys clamps b_bcount) and issue each synchronously via
	 * physstrat -- except for dirty pushes, which must fit in one
	 * chunk and are queued without waiting (see header comment).
	 */
	while (nbytes > 0) {
		bp->b_bcount = nbytes;
		minphys(bp);
		c = bp->b_bcount;
		bp->b_blkno = dblkno;
		bp->b_dev = dev;
		if (flag & B_DIRTY) {
			swpf[bp - swbuf] = pfcent;
			swsize[bp - swbuf] = nbytes;
		}
#ifdef TRACE
		trace(TR_SWAPIO, dev, bp->b_blkno);
#endif
		physstrat(bp, bdevsw[major(dev)].d_strategy, PSWP);
		if (flag & B_DIRTY) {
			/* Push did not wait; header is freed by iodone(). */
			if (c < nbytes)
				panic("big push");
			return;
		}
		bp->b_un.b_addr += c;
		bp->b_flags &= ~B_DONE;
		if (bp->b_flags & B_ERROR) {
			/*
			 * Errors on u-area/page-table transfers or on
			 * swap-out are unrecoverable; a failed swap-in
			 * of ordinary pages just kills the process.
			 */
			if ((flag & (B_UAREA|B_PAGET)) || rdflg == B_WRITE)
				panic("hard IO err in swap");
			swkill(p, (char *)0);
		}
		nbytes -= c;
		dblkno += c / DEV_BSIZE;
	}
	/*
	 * Return the header to the free list and wake anyone waiting
	 * for one.  Process 2 is also awakened: it may be waiting for
	 * swap headers to drain (NOTE(review): grounded only in the
	 * wakeup call here -- the matching sleep is outside this file).
	 */
	s = spl6();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		wakeup((caddr_t)&bswlist);
		wakeup((caddr_t)&proc[2]);
	}
	splx(s);
}

/*
 * Kill process p because of a swap failure.
 *
 * If rout == 0 then killed on swap error, else
 * rout is the name of the routine where we ran out of
 * swap space.
 */
swkill(p, rout)
	struct proc *p;
	char *rout;
{
	char *mesg;

	printf("pid %d: ", p->p_pid);
	if (rout)
		printf(mesg = "killed due to no swap space\n");
	else
		printf(mesg = "killed on swap error\n");
	uprintf("sorry, pid %d was %s", p->p_pid, mesg);
	/*
	 * To be sure no looping (e.g. in vmsched trying to
	 * swap out) mark process locked in core (as though
	 * done by user) after killing it so no one will try
	 * to swap it out.
	 */
	psignal(p, SIGKILL);
	p->p_flag |= SULOCK;
}

/*
 * Raw I/O. The arguments are
 *	The strategy routine for the device
 *	A buffer, which will always be a special buffer
 *	  header owned exclusively by the device for this purpose
 *	The device number
 *	Read/write flag
 * Essentially all the work is computing physical addresses and
 * validating them.
 * If the user has the proper access privileges, the process is
 * marked 'delayed unlock' and the pages involved in the I/O are
 * faulted and locked. After the completion of the I/O, the above pages
 * are unlocked.
 */
physio(strat, bp, dev, rw, mincnt, uio)
	int (*strat)();
	register struct buf *bp;
	dev_t dev;
	int rw;
	unsigned (*mincnt)();
	struct uio *uio;
{
	register struct iovec *iov = uio->uio_iov;
	register int c;
	char *a;
	int s, error = 0;

	/* One pass per iovec; loop via goto until uio_iovcnt is exhausted. */
nextiov:
	if (uio->uio_iovcnt == 0)
		return (0);
	/*
	 * Verify the user may access the whole iovec in the needed
	 * direction (a device read writes user memory, and vice versa).
	 */
	if (useracc(iov->iov_base,(u_int)iov->iov_len,rw==B_READ?B_WRITE:B_READ) == NULL)
		return (EFAULT);
	/* Gain exclusive use of the device's private buffer header. */
	s = spl6();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO+1);
	}
	splx(s);
	bp->b_error = 0;
	bp->b_proc = u.u_procp;
	bp->b_un.b_addr = iov->iov_base;
	while (iov->iov_len > 0) {
		bp->b_flags = B_BUSY | B_PHYS | rw;
		bp->b_dev = dev;
		bp->b_blkno = uio->uio_offset / DEV_BSIZE;
		bp->b_bcount = iov->iov_len;
		/* Let the driver clamp the transfer to what it can handle. */
		(*mincnt)(bp);
		c = bp->b_bcount;
		/*
		 * Wire the user pages for the duration of the transfer;
		 * SPHYSIO marks the process as doing physical I/O.
		 * physstrat queues the I/O and sleeps until it is done.
		 */
		u.u_procp->p_flag |= SPHYSIO;
		vslock(a = bp->b_un.b_addr, c);
		physstrat(bp, strat, PRIBIO);
		(void) spl6();	/* raise ipl for the cleanup/wakeup below */
		vsunlock(a, c, rw);
		u.u_procp->p_flag &= ~SPHYSIO;
		if (bp->b_flags&B_WANTED)
			wakeup((caddr_t)bp);
		splx(s);
		/* Advance by the bytes actually transferred (b_resid is the shortfall). */
		c -= bp->b_resid;
		bp->b_un.b_addr += c;
		iov->iov_len -= c;
		uio->uio_resid -= c;
		uio->uio_offset += c;
		/* temp kludge for tape drives */
		if (bp->b_resid || bp->b_flags&B_ERROR)
			break;
	}
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS);
	error = geterror(bp);
	/* temp kludge for tape drives */
	if (bp->b_resid || error)
		return (error);
	uio->uio_iov++;
	uio->uio_iovcnt--;
	goto nextiov;
}

/*
 * Default transfer-size clamp for physio(): limit a raw transfer to
 * 63K bytes.  (Declared unsigned but returns no value -- historical
 * K&R usage; callers ignore the result and only rely on the b_bcount
 * side effect.)
 */
unsigned
minphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > 63 * 1024)
		bp->b_bcount = 63 * 1024;
}