1 /* 2 * Copyright (c) 1982, 1986, 1989 Regents of the University of California. 3 * All rights reserved. 4 * 5 * %sccs.include.redist.c% 6 * 7 * @(#)vm_swap.c 7.22 (Berkeley) 05/14/92 8 */ 9 10 #include <sys/param.h> 11 #include <sys/systm.h> 12 #include <sys/buf.h> 13 #include <sys/conf.h> 14 #include <sys/proc.h> 15 #include <sys/namei.h> 16 #include <sys/dmap.h> /* XXX */ 17 #include <sys/vnode.h> 18 #include <sys/specdev.h> 19 #include <sys/map.h> 20 #include <sys/file.h> 21 22 /* 23 * Indirect driver for multi-controller paging. 24 */ 25 26 int nswap, nswdev; 27 28 /* 29 * Set up swap devices. 30 * Initialize linked list of free swap 31 * headers. These do not actually point 32 * to buffers, but rather to pages that 33 * are being swapped in and out. 34 */ 35 void 36 swapinit() 37 { 38 register int i; 39 register struct buf *sp = swbuf; 40 register struct proc *p = &proc0; /* XXX */ 41 struct swdevt *swp; 42 int error; 43 44 /* 45 * Count swap devices, and adjust total swap space available. 46 * Some of this space will not be available until a swapon() 47 * system is issued, usually when the system goes multi-user. 48 */ 49 nswdev = 0; 50 nswap = 0; 51 for (swp = swdevt; swp->sw_dev; swp++) { 52 nswdev++; 53 if (swp->sw_nblks > nswap) 54 nswap = swp->sw_nblks; 55 } 56 if (nswdev == 0) 57 panic("swapinit"); 58 if (nswdev > 1) 59 nswap = ((nswap + dmmax - 1) / dmmax) * dmmax; 60 nswap *= nswdev; 61 if (bdevvp(swdevt[0].sw_dev, &swdevt[0].sw_vp)) 62 panic("swapvp"); 63 if (error = swfree(p, 0)) { 64 printf("swfree errno %d\n", error); /* XXX */ 65 panic("swapinit swfree 0"); 66 } 67 68 /* 69 * Now set up swap buffer headers. 
70 */ 71 bswlist.av_forw = sp; 72 for (i = 0; i < nswbuf - 1; i++, sp++) { 73 sp->av_forw = sp + 1; 74 sp->b_rcred = sp->b_wcred = p->p_ucred; 75 } 76 sp->b_rcred = sp->b_wcred = p->p_ucred; 77 sp->av_forw = NULL; 78 } 79 80 void 81 swstrategy(bp) 82 register struct buf *bp; 83 { 84 USES_VOP_STRATEGY; 85 int sz, off, seg, index; 86 register struct swdevt *sp; 87 struct vnode *vp; 88 89 #ifdef GENERIC 90 /* 91 * A mini-root gets copied into the front of the swap 92 * and we run over top of the swap area just long 93 * enough for us to do a mkfs and restor of the real 94 * root (sure beats rewriting standalone restor). 95 */ 96 #define MINIROOTSIZE 4096 97 if (rootdev == dumpdev) 98 bp->b_blkno += MINIROOTSIZE; 99 #endif 100 sz = howmany(bp->b_bcount, DEV_BSIZE); 101 if (bp->b_blkno + sz > nswap) { 102 bp->b_flags |= B_ERROR; 103 biodone(bp); 104 return; 105 } 106 if (nswdev > 1) { 107 off = bp->b_blkno % dmmax; 108 if (off+sz > dmmax) { 109 bp->b_flags |= B_ERROR; 110 biodone(bp); 111 return; 112 } 113 seg = bp->b_blkno / dmmax; 114 index = seg % nswdev; 115 seg /= nswdev; 116 bp->b_blkno = seg*dmmax + off; 117 } else 118 index = 0; 119 sp = &swdevt[index]; 120 if ((bp->b_dev = sp->sw_dev) == 0) 121 panic("swstrategy"); 122 if (sp->sw_vp == NULL) { 123 bp->b_error |= B_ERROR; 124 biodone(bp); 125 return; 126 } 127 VHOLD(sp->sw_vp); 128 if ((bp->b_flags & B_READ) == 0) { 129 if (vp = bp->b_vp) { 130 vp->v_numoutput--; 131 if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) { 132 vp->v_flag &= ~VBWAIT; 133 wakeup((caddr_t)&vp->v_numoutput); 134 } 135 } 136 sp->sw_vp->v_numoutput++; 137 } 138 if (bp->b_vp != NULL) 139 brelvp(bp); 140 bp->b_vp = sp->sw_vp; 141 VOP_STRATEGY(bp); 142 } 143 144 /* 145 * System call swapon(name) enables swapping on device name, 146 * which must be in the swdevsw. Return EBUSY 147 * if already swapping on this device. 
 */
/* ARGSUSED */
int
swapon(p, uap, retval)
	struct proc *p;
	struct args {
		char	*name;
	} *uap;
	int *retval;
{
	register struct vnode *vp;
	register struct swdevt *sp;
	dev_t dev;
	int error;
	struct nameidata nd;

	/* Only the superuser may enable swapping. */
	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);
	/* Look up the user-supplied pathname; it must name a block device. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		vrele(vp);
		return (ENOTBLK);
	}
	dev = (dev_t)vp->v_rdev;
	if (major(dev) >= nblkdev) {
		vrele(vp);
		return (ENXIO);
	}
	/*
	 * The device must be one of those configured in swdevt.
	 * EBUSY if its swap space was already released to the map,
	 * EINVAL if it is not a configured swap device at all.
	 */
	for (sp = &swdevt[0]; sp->sw_dev; sp++)
		if (sp->sw_dev == dev) {
			if (sp->sw_freed) {
				vrele(vp);
				return (EBUSY);
			}
			sp->sw_vp = vp;
			if (error = swfree(p, sp - swdevt)) {
				vrele(vp);
				return (error);
			}
			return (0);
		}
	vrele(vp);
	return (EINVAL);
}

/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out with blocks of dmmax pages circularly
 * among the devices.
 *
 * Returns 0 on success or the error from opening the device.
 */
int
swfree(p, index)
	struct proc *p;
	int index;
{
	USES_VOP_OPEN;
	register struct swdevt *sp;
	register swblk_t vsbase;	/* virtual (interleaved) base of chunk */
	register long blk;		/* size of the current chunk */
	struct vnode *vp;
	register swblk_t dvbase;	/* device-relative base of chunk */
	register int nblks;
	int error;

	sp = &swdevt[index];
	vp = sp->sw_vp;
	if (error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p))
		return (error);
	sp->sw_freed = 1;
	nblks = sp->sw_nblks;
	/*
	 * Walk the device in dmmax-sized chunks, releasing each chunk
	 * into the swap map at its interleaved virtual address.
	 */
	for (dvbase = 0; dvbase < nblks; dvbase += dmmax) {
		blk = nblks - dvbase;	/* last chunk may be short */
		if ((vsbase = index*dmmax + dvbase*nswdev) >= nswap)
			panic("swfree");
		if (blk > dmmax)
			blk = dmmax;
		if (vsbase == 0) {
			/*
			 * First of all chunks... initialize the swapmap
			 * the second half of the hunk.
			 * (rminit enters only blk/2 blocks starting at
			 * offset blk/2, i.e. the second half of this
			 * first hunk; the first half stays out of the
			 * map -- block 0 cannot be a resource-map
			 * address.)
			 */
			rminit(swapmap, (long)(blk/2), (long)(blk/2),
			    "swap", nswapmap);
		} else if (dvbase == 0) {
			/*
			 * Don't use the first cluster of the device
			 * in case it starts with a label or boot block.
			 */
			rmfree(swapmap, blk - ctod(CLSIZE),
			    vsbase + ctod(CLSIZE));
		} else
			rmfree(swapmap, blk, vsbase);
	}
	return (0);
}