/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vnode_pager.c	8.10 (Berkeley) 05/14/95
 */

/*
 * Page to/from files (vnodes).
 *
 * TODO:
 *	pageouts
 *	fix credential use (uses current process credentials now)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>

struct pagerlst	vnode_pager_list;	/* list of managed vnodes */

#ifdef DEBUG
int	vpagerdebug = 0x00;
#define	VDB_FOLLOW	0x01
#define VDB_INIT	0x02
#define VDB_IO		0x04
#define VDB_FAIL	0x08
#define VDB_ALLOC	0x10
#define VDB_SIZE	0x20
#endif

static vm_pager_t	 vnode_pager_alloc
			    __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void		 vnode_pager_cluster
			    __P((vm_pager_t, vm_offset_t,
				 vm_offset_t *, vm_offset_t *));
static void		 vnode_pager_dealloc __P((vm_pager_t));
static int		 vnode_pager_getpage
			    __P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t	 vnode_pager_haspage __P((vm_pager_t, vm_offset_t));
static void		 vnode_pager_init __P((void));
static int		 vnode_pager_io
			    __P((vn_pager_t, vm_page_t *, int,
				 boolean_t, enum uio_rw));
static boolean_t	 vnode_pager_putpage
			    __P((vm_pager_t, vm_page_t *, int, boolean_t));

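/*
 * Pager operations vector through which the VM system invokes this
 * pager; the entries appear in struct pagerops order (init, alloc,
 * dealloc, getpage, putpage, haspage, cluster).
 */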
struct pagerops vnodepagerops = {
	vnode_pager_init,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpage,
	vnode_pager_putpage,
	vnode_pager_haspage,
	vnode_pager_cluster
};

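/*
 * Initialize the vnode pager: set up the (initially empty) list of
 * managed vnode pagers.
 */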
static void
vnode_pager_init()
{
#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_init()\n");
#endif
	TAILQ_INIT(&vnode_pager_list);
}

/*
 * Allocate (or look up) a pager for a vnode.
 * The handle is a vnode pointer.
 */
static vm_pager_t
vnode_pager_alloc(handle, size, prot, foff)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t foff;
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
		printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to a vnode is not yet supported, so refuse an
	 * anonymous (NULL handle) object.
	 */
	if (handle == NULL)
		return(NULL);

	/*
	 * Vnodes keep a pointer to any associated pager, so there is
	 * no need to look it up with vm_pager_lookup().
	 */
	vp = (struct vnode *)handle;
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL) {
		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * And an object of the appropriate size
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			free((caddr_t)vnp, M_VMPGDATA);
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * Hold a reference to the vnode and initialize pager data.
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;
		TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_flags = 0;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = vnp;
		vp->v_vmdata = (caddr_t)pager;
	} else {
		/*
		 * vm_object_lookup() will remove the object from the
		 * cache if found and also gain a reference to the object.
		 */
		object = vm_object_lookup(pager);
#ifdef DEBUG
		vnp = (vn_pager_t)pager->pg_data;
#endif
	}
#ifdef DEBUG
	if (vpagerdebug & VDB_ALLOC)
		printf("vnode_pager_alloc: vp %x sz %x pager %x object %x\n",
		       vp, vnp->vnp_size, pager, object);
#endif
	return(pager);
}

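/*
 * Release a pager: detach it from its vnode, drop the vnode reference,
 * remove the pager from the managed list, and free its structures.
 */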
static void
vnode_pager_dealloc(pager)
	vm_pager_t pager;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	register struct vnode *vp;
#ifdef NOTDEF
	struct proc *p = curproc;		/* XXX */
#endif

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_dealloc(%x)\n", pager);
#endif
	if (vp = vnp->vnp_vp) {
		vp->v_vmdata = NULL;
		vp->v_flag &= ~VTEXT;
#ifdef NOTDEF
		/* can hang if done at reboot on an NFS filesystem */
		(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
		vrele(vp);
	}
	TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
	free((caddr_t)vnp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}

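/*
 * Read the given pages in from the pager's vnode.
 */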
static int
vnode_pager_getpage(pager, mlist, npages, sync)
	vm_pager_t pager;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
{

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_getpage(%x, %x, %x, %x)\n",
		       pager, mlist, npages, sync);
#endif
	return(vnode_pager_io((vn_pager_t)pager->pg_data,
			      mlist, npages, sync, UIO_READ));
}

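/*
 * Write the given pages out to the pager's vnode, marking them clean
 * on success.
 */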
static boolean_t
vnode_pager_putpage(pager, mlist, npages, sync)
	vm_pager_t pager;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
{
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_putpage(%x, %x, %x, %x)\n",
		       pager, mlist, npages, sync);
#endif
	if (pager == NULL)
		return (FALSE);			/* ??? */
	err = vnode_pager_io((vn_pager_t)pager->pg_data,
			     mlist, npages, sync, UIO_WRITE);
	/*
	 * If the operation was successful, mark the pages clean.
	 */
	if (err == VM_PAGER_OK) {
		while (npages--) {
			(*mlist)->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(*mlist));
			mlist++;
		}
	}
	return(err);
}

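/*
 * Determine whether the pager's vnode has backing store (an allocated
 * disk block) for the page at the given offset.
 */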
static boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	struct proc *p = curproc;	/* XXX */
	vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	daddr_t bn;
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
#endif

	/*
	 * If the offset is beyond the end of file, we do not have the
	 * page.  Lock the vnode first to make sure we have the most
	 * recent version of the size.
	 */
	vn_lock(vnp->vnp_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (offset >= vnp->vnp_size) {
		VOP_UNLOCK(vnp->vnp_vp, 0, p);
#ifdef DEBUG
		if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
			printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
			       pager, offset, vnp->vnp_size);
#endif
		return(FALSE);
	}

	/*
	 * Read the index to find the disk block to read
	 * from.  If there is no block, report that we don't
	 * have this data.
	 *
	 * Assumes that the vnode has the whole page or nothing.
	 */
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
		       (struct vnode **)0, &bn, NULL);
	VOP_UNLOCK(vnp->vnp_vp, 0, p);
	if (err) {
#ifdef DEBUG
		if (vpagerdebug & VDB_FAIL)
			printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
			       err, pager, offset);
#endif
		return(TRUE);
	}
	return((long)bn < 0 ? FALSE : TRUE);
}

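/*
 * Compute the byte range, starting at "offset", over which paging
 * operations on this pager may reasonably be clustered.
 */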
static void
vnode_pager_cluster(pager, offset, loffset, hoffset)
	vm_pager_t	pager;
	vm_offset_t	offset;
	vm_offset_t	*loffset;
	vm_offset_t	*hoffset;
{
	vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	vm_offset_t loff, hoff;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_cluster(%x, %x) ", pager, offset);
#endif
	loff = offset;
	if (loff >= vnp->vnp_size)
		panic("vnode_pager_cluster: bad offset");
	/*
	 * XXX could use VOP_BMAP to get maxcontig value
	 */
	hoff = loff + MAXBSIZE;
	if (hoff > round_page(vnp->vnp_size))
		hoff = round_page(vnp->vnp_size);

	*loffset = loff;
	*hoffset = hoff;
#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("returns [%x-%x]\n", loff, hoff);
#endif
}

/*
 * (XXX)
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e., we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
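/*
 * For example, a filesystem truncate routine might call
 *
 *	vnode_pager_setsize(vp, (u_long)newsize);
 *
 * after changing the file's size.
 */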
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;
	/*
	 * Hasn't changed size
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	vnp = (vn_pager_t)pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;
	/*
	 * No object.
	 * This can happen during object termination since
	 * vm_object_page_clean is called after the object
	 * has been removed from the hash table, and clean
	 * may cause vnode write operations which can wind
	 * up back here.
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
		printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
		       vp, object, vnp->vnp_size, nsize);
#endif
	/*
	 * File has shrunk.
	 * Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
				      (vm_offset_t)nsize, vnp->vnp_size);
		vm_object_unlock(object);
	}
	vnp->vnp_size = (vm_offset_t)nsize;
	vm_object_deallocate(object);
}

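/*
 * Unmount-time hook: uncache the objects of all pager-backed vnodes
 * on the given mount point, or on all mount points if mp is NULL.
 */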
void
vnode_pager_umount(mp)
	register struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	vm_pager_t pager, npager;
	struct vnode *vp;

	for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager) {
		/*
		 * Save the next pointer now since uncaching may
		 * terminate the object and render the pager invalid.
		 */
		npager = pager->pg_list.tqe_next;
		vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
		if (mp == (struct mount *)0 || vp->v_mount == mp) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			(void) vnode_pager_uncache(vp);
			VOP_UNLOCK(vp, 0, p);
		}
	}
}

/*
 * Remove a vnode-associated object from the object cache.
 *
 * XXX unlock the vnode if it is currently locked.
 * We must do this since uncaching the object may result in its
 * destruction, which may initiate paging activity, which may necessitate
 * re-locking the vnode.
 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */
	vm_object_t object;
	boolean_t uncached;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp->v_type != VREG || (pager = (vm_pager_t)vp->v_vmdata) == NULL)
		return (TRUE);
#ifdef DEBUG
	if (!VOP_ISLOCKED(vp)) {
		extern int (**nfsv2_vnodeop_p)();

		if (vp->v_op != nfsv2_vnodeop_p)
			panic("vnode_pager_uncache: vnode not locked!");
	}
#endif
	/*
	 * Must use vm_object_lookup() as it actually removes
	 * the object from the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
		uncached = (object->ref_count <= 1);
		VOP_UNLOCK(vp, 0, p);
		pager_cache(object, FALSE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	} else
		uncached = TRUE;
	return(uncached);
}

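/*
 * Common I/O routine for getpage and putpage: map the page into the
 * kernel address space and transfer it with VOP_READ or VOP_WRITE,
 * bounding the transfer by the current vnode size.  Only a single
 * page per call is supported.
 */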
static int
vnode_pager_io(vnp, mlist, npages, sync, rw)
	register vn_pager_t vnp;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
	enum uio_rw rw;
{
	struct uio auio;
	struct iovec aiov;
	vm_offset_t kva, foff;
	int error, size;
	struct proc *p = curproc;		/* XXX */

	/* XXX */
	vm_page_t m;
	if (npages != 1)
		panic("vnode_pager_io: cannot handle multiple pages");
	m = *mlist;
	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
		       vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
#endif
	foff = m->offset + m->object->paging_offset;
	/*
	 * Map the page into a kernel virtual address and initialize
	 * a uio so that we can use the VOP_READ/WRITE routines.
	 */
	kva = vm_pager_map_pages(mlist, npages, sync);
	if (kva == NULL)
		return(VM_PAGER_AGAIN);
	/*
	 * After all of the potentially blocking operations have been
	 * performed, we can do the size checks:
	 *	read beyond EOF (returns error)
	 *	short read
	 */
	vn_lock(vnp->vnp_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (foff >= vnp->vnp_size) {
		VOP_UNLOCK(vnp->vnp_vp, 0, p);
		vm_pager_unmap_pages(kva, npages);
#ifdef DEBUG
		if (vpagerdebug & VDB_SIZE)
			printf("vnode_pager_io: vp %x, off %d size %d\n",
			       vnp->vnp_vp, foff, vnp->vnp_size);
#endif
		return(VM_PAGER_BAD);
	}
	if (foff + PAGE_SIZE > vnp->vnp_size)
		size = vnp->vnp_size - foff;
	else
		size = PAGE_SIZE;
	aiov.iov_base = (caddr_t)kva;
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = rw;
	auio.uio_resid = size;
	auio.uio_procp = (struct proc *)0;
#ifdef DEBUG
	if (vpagerdebug & VDB_IO)
		printf("vnode_pager_io: vp %x kva %x foff %x size %x",
		       vnp->vnp_vp, kva, foff, size);
#endif
	if (rw == UIO_READ)
		error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
	else
		error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
	VOP_UNLOCK(vnp->vnp_vp, 0, p);
#ifdef DEBUG
	if (vpagerdebug & VDB_IO) {
		if (error || auio.uio_resid)
			printf(" returns error %x, resid %x",
			       error, auio.uio_resid);
		printf("\n");
	}
#endif
	if (!error) {
		register int count = size - auio.uio_resid;

		if (count == 0)
			error = EINVAL;
		else if (count != PAGE_SIZE && rw == UIO_READ)
			bzero((void *)(kva + count), PAGE_SIZE - count);
	}
	vm_pager_unmap_pages(kva, npages);
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}