1 /*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)vm_swap.c 8.5 (Berkeley) 02/17/94
8 */
9
10 #include <sys/param.h>
11 #include <sys/systm.h>
12 #include <sys/buf.h>
13 #include <sys/conf.h>
14 #include <sys/proc.h>
15 #include <sys/namei.h>
16 #include <sys/dmap.h> /* XXX */
17 #include <sys/vnode.h>
18 #include <sys/map.h>
19 #include <sys/file.h>
20
21 #include <miscfs/specfs/specdev.h>
22
23 /*
24 * Indirect driver for multi-controller paging.
25 */
26
int nswap, nswdev;	/* total swap size (DEV_BSIZE blocks); number of swap devices */
#ifdef SEQSWAP
int niswdev;		/* number of interleaved swap devices */
int niswap;		/* size of interleaved swap area */
#endif
32
33 /*
34 * Set up swap devices.
35 * Initialize linked list of free swap
36 * headers. These do not actually point
37 * to buffers, but rather to pages that
38 * are being swapped in and out.
39 */
void
swapinit()
{
	register int i;
	register struct buf *sp = swbuf;	/* pool of nswbuf swap buffer headers */
	register struct proc *p = &proc0;	/* XXX borrow proc0's credentials */
	struct swdevt *swp;
	int error;

	/*
	 * Count swap devices, and adjust total swap space available.
	 * Some of the space will not be countable until later (dynamically
	 * configurable devices) and some of the counted space will not be
	 * available until a swapon() system call is issued, both usually
	 * happen when the system goes multi-user.
	 *
	 * If using NFS for swap, swdevt[0] will already be bdevvp'd.  XXX
	 */
#ifdef SEQSWAP
	nswdev = niswdev = 0;
	nswap = niswap = 0;
	/*
	 * All interleaved devices must come first
	 */
	for (swp = swdevt; swp->sw_dev != NODEV || swp->sw_vp != NULL; swp++) {
		if (swp->sw_flags & SW_SEQUENTIAL)
			break;
		niswdev++;
		/* Interleaved area is sized by the largest member device. */
		if (swp->sw_nblks > niswap)
			niswap = swp->sw_nblks;
	}
	/* Round per-device size up to dmmax so interleave chunks line up. */
	niswap = roundup(niswap, dmmax);
	niswap *= niswdev;
	if (swdevt[0].sw_vp == NULL &&
	    bdevvp(swdevt[0].sw_dev, &swdevt[0].sw_vp))
		panic("swapvp");
	/*
	 * The remainder must be sequential
	 */
	for ( ; swp->sw_dev != NODEV; swp++) {
		if ((swp->sw_flags & SW_SEQUENTIAL) == 0)
			panic("binit: mis-ordered swap devices");
		nswdev++;
		if (swp->sw_nblks > 0) {
			/* Trim sequential devices down to a dmmax multiple. */
			if (swp->sw_nblks % dmmax)
				swp->sw_nblks -= (swp->sw_nblks % dmmax);
			nswap += swp->sw_nblks;
		}
	}
	nswdev += niswdev;
	if (nswdev == 0)
		panic("swapinit");
	nswap += niswap;
#else
	nswdev = 0;
	nswap = 0;
	for (swp = swdevt; swp->sw_dev != NODEV || swp->sw_vp != NULL; swp++) {
		nswdev++;
		/* nswap tracks the largest single device for now. */
		if (swp->sw_nblks > nswap)
			nswap = swp->sw_nblks;
	}
	if (nswdev == 0)
		panic("swapinit");
	if (nswdev > 1)
		/* Round up to dmmax so interleaved chunks line up. */
		nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
	nswap *= nswdev;
	if (swdevt[0].sw_vp == NULL &&
	    bdevvp(swdevt[0].sw_dev, &swdevt[0].sw_vp))
		panic("swapvp");
#endif
	if (nswap == 0)
		printf("WARNING: no swap space found\n");
	/* Release the root swap device's space into the swap map now. */
	else if (error = swfree(p, 0)) {
		printf("swfree errno %d\n", error);	/* XXX */
		panic("swapinit swfree 0");
	}

	/*
	 * Now set up swap buffer headers: link all nswbuf headers into
	 * the bswlist free list and give them proc0's credentials.
	 */
	bswlist.b_actf = sp;
	for (i = 0; i < nswbuf - 1; i++, sp++) {
		sp->b_actf = sp + 1;
		sp->b_rcred = sp->b_wcred = p->p_ucred;
		sp->b_vnbufs.le_next = NOLIST;
	}
	/* Last header terminates the free list. */
	sp->b_rcred = sp->b_wcred = p->p_ucred;
	sp->b_vnbufs.le_next = NOLIST;
	sp->b_actf = NULL;
}
130
/*
 * Translate the logical swap-area block number in bp->b_blkno into a
 * (device, physical block) pair and pass the buffer to that device's
 * strategy routine.  Invalid requests are failed with B_ERROR and
 * completed immediately via biodone().
 */
void
swstrategy(bp)
	register struct buf *bp;
{
	int sz, off, seg, index;
	register struct swdevt *sp;
	struct vnode *vp;

#ifdef GENERIC
	/*
	 * A mini-root gets copied into the front of the swap
	 * and we run over top of the swap area just long
	 * enough for us to do a mkfs and restor of the real
	 * root (sure beats rewriting standalone restor).
	 */
#define MINIROOTSIZE 4096
	if (rootdev == dumpdev)
		bp->b_blkno += MINIROOTSIZE;
#endif
	/* Transfer size in DEV_BSIZE blocks; reject out-of-range requests. */
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	if (bp->b_blkno + sz > nswap) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	if (nswdev > 1) {
#ifdef SEQSWAP
		if (bp->b_blkno < niswap) {
			if (niswdev > 1) {
				/*
				 * Interleaved region: dmmax-sized chunks
				 * rotate among the niswdev devices; a
				 * transfer may not cross a chunk boundary.
				 */
				off = bp->b_blkno % dmmax;
				if (off+sz > dmmax) {
					bp->b_error = EINVAL;
					bp->b_flags |= B_ERROR;
					biodone(bp);
					return;
				}
				seg = bp->b_blkno / dmmax;
				index = seg % niswdev;
				seg /= niswdev;
				bp->b_blkno = seg*dmmax + off;
			} else
				index = 0;
		} else {
			register struct swdevt *swp;

			/*
			 * Sequential region: walk the devices past the
			 * interleaved area until the (rebased) block
			 * number falls within one of them.
			 */
			bp->b_blkno -= niswap;
			for (index = niswdev, swp = &swdevt[niswdev];
			     swp->sw_dev != NODEV;
			     swp++, index++) {
				if (bp->b_blkno < swp->sw_nblks)
					break;
				bp->b_blkno -= swp->sw_nblks;
			}
			if (swp->sw_dev == NODEV ||
			    bp->b_blkno+sz > swp->sw_nblks) {
				bp->b_error = swp->sw_dev == NODEV ?
					ENODEV : EINVAL;
				bp->b_flags |= B_ERROR;
				biodone(bp);
				return;
			}
		}
#else
		/*
		 * dmmax-sized chunks rotate among the nswdev devices;
		 * a transfer may not cross a chunk boundary.
		 */
		off = bp->b_blkno % dmmax;
		if (off+sz > dmmax) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			return;
		}
		seg = bp->b_blkno / dmmax;
		index = seg % nswdev;
		seg /= nswdev;
		bp->b_blkno = seg*dmmax + off;
#endif
	} else
		index = 0;
	sp = &swdevt[index];
	if ((bp->b_dev = sp->sw_dev) == NODEV)
		panic("swstrategy");
	if (sp->sw_vp == NULL) {
		/* Device is configured but has not been swapon'd yet. */
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	/* Hold the swap vnode for the duration of the transfer. */
	VHOLD(sp->sw_vp);
	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * For a write, move the pending-output count from the
		 * originating vnode to the swap device's vnode, waking
		 * any thread waiting for the former to drain.
		 */
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
		sp->sw_vp->v_numoutput++;
	}
	/* Re-point the buffer at the swap device's vnode and send it down. */
	if (bp->b_vp != NULL)
		brelvp(bp);
	bp->b_vp = sp->sw_vp;
	VOP_STRATEGY(bp);
}
234
235 /*
236 * System call swapon(name) enables swapping on device name,
237 * which must be in the swdevsw. Return EBUSY
238 * if already swapping on this device.
239 */
struct swapon_args {
	char *name;		/* pathname of the block device to enable */
};
/* ARGSUSED */
int
swapon(p, uap, retval)
	struct proc *p;
	struct swapon_args *uap;
	int *retval;
{
	register struct vnode *vp;
	register struct swdevt *sp;
	dev_t dev;
	int error;
	struct nameidata nd;

	/* Only the super-user may add swap space. */
	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);
	/* Look up the pathname; it must resolve to a block device. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		vrele(vp);
		return (ENOTBLK);
	}
	dev = (dev_t)vp->v_rdev;
	if (major(dev) >= nblkdev) {
		vrele(vp);
		return (ENXIO);
	}
	/* The device must appear in the configured swap device table. */
	for (sp = &swdevt[0]; sp->sw_dev != NODEV; sp++) {
		if (sp->sw_dev == dev) {
			if (sp->sw_flags & SW_FREED) {
				/* Already swapping on this device. */
				vrele(vp);
				return (EBUSY);
			}
			/* Record the vnode and release its space for use. */
			sp->sw_vp = vp;
			if (error = swfree(p, sp - swdevt)) {
				vrele(vp);
				return (error);
			}
			return (0);
		}
#ifdef SEQSWAP
		/*
		 * If we have reached a non-freed sequential device without
		 * finding what we are looking for, it is an error.
		 * That is because all interleaved devices must come first
		 * and sequential devices must be freed in order.
		 */
		if ((sp->sw_flags & (SW_SEQUENTIAL|SW_FREED)) == SW_SEQUENTIAL)
			break;
#endif
	}
	vrele(vp);
	return (EINVAL);
}
298
299 /*
300 * Swfree(index) frees the index'th portion of the swap map.
301 * Each of the nswdev devices provides 1/nswdev'th of the swap
302 * space, which is laid out with blocks of dmmax pages circularly
303 * among the devices.
304 */
int
swfree(p, index)
	struct proc *p;
	int index;
{
	register struct swdevt *sp;
	register swblk_t vsbase;	/* virtual swap address of current chunk */
	register long blk;		/* size of current chunk */
	struct vnode *vp;
	register swblk_t dvbase;	/* device-relative offset of current chunk */
	register int nblks;
	int error;

	sp = &swdevt[index];
	vp = sp->sw_vp;
	/* Open the device; on failure the entry is left untouched. */
	if (error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p))
		return (error);
	sp->sw_flags |= SW_FREED;
	nblks = sp->sw_nblks;
	/*
	 * Some devices may not exist til after boot time.
	 * If so, their nblk count will be 0.
	 */
	if (nblks <= 0) {
		int perdev;
		dev_t dev = sp->sw_dev;

		/* Ask the driver for the partition size now. */
		if (bdevsw[major(dev)].d_psize == 0 ||
		    (nblks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
			(void) VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
			sp->sw_flags &= ~SW_FREED;
			return (ENXIO);
		}
#ifdef SEQSWAP
		if (index < niswdev) {
			/* Interleaved device: clamp to its per-device share. */
			perdev = niswap / niswdev;
			if (nblks > perdev)
				nblks = perdev;
		} else {
			/* Sequential device: trim to a dmmax multiple and grow nswap. */
			if (nblks % dmmax)
				nblks -= (nblks % dmmax);
			nswap += nblks;
		}
#else
		/* Clamp to this device's share of the interleaved area. */
		perdev = nswap / nswdev;
		if (nblks > perdev)
			nblks = perdev;
#endif
		sp->sw_nblks = nblks;
	}
	if (nblks == 0) {
		/* Nothing to contribute; back out of the open. */
		(void) VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
		sp->sw_flags &= ~SW_FREED;
		return (0);		/* XXX error? */
	}
#ifdef SEQSWAP
	if (sp->sw_flags & SW_SEQUENTIAL) {
		register struct swdevt *swp;

		/*
		 * Sequential devices occupy the space beyond the
		 * interleaved area; this one's space starts where the
		 * preceding sequential devices end.
		 */
		blk = niswap;
		for (swp = &swdevt[niswdev]; swp != sp; swp++)
			blk += swp->sw_nblks;
		rmfree(swapmap, nblks, blk);
		return (0);
	}
#endif
	/*
	 * Release the device's space one dmmax-sized chunk at a time,
	 * at the chunk's interleaved position in the virtual swap area.
	 */
	for (dvbase = 0; dvbase < nblks; dvbase += dmmax) {
		blk = nblks - dvbase;
#ifdef SEQSWAP
		if ((vsbase = index*dmmax + dvbase*niswdev) >= niswap)
			panic("swfree");
#else
		if ((vsbase = index*dmmax + dvbase*nswdev) >= nswap)
			panic("swfree");
#endif
		if (blk > dmmax)
			blk = dmmax;
		if (vsbase == 0) {
			/*
			 * First of all chunks... initialize the swapmap.
			 * Don't use the first cluster of the device
			 * in case it starts with a label or boot block.
			 */
			rminit(swapmap, blk - ctod(CLSIZE),
			    vsbase + ctod(CLSIZE), "swap", nswapmap);
		} else if (dvbase == 0) {
			/*
			 * Don't use the first cluster of the device
			 * in case it starts with a label or boot block.
			 */
			rmfree(swapmap, blk - ctod(CLSIZE),
			    vsbase + ctod(CLSIZE));
		} else
			rmfree(swapmap, blk, vsbase);
	}
	return (0);
}
402