xref: /original-bsd/sys/vm/vnode_pager.c (revision f737e041)
1 /*
2  * Copyright (c) 1990 University of Utah.
3  * Copyright (c) 1991, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  *	@(#)vnode_pager.c	8.8 (Berkeley) 02/13/94
13  */
14 
15 /*
16  * Page to/from files (vnodes).
17  *
18  * TODO:
19  *	pageouts
20  *	fix credential use (uses current process credentials now)
21  */
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/proc.h>
26 #include <sys/malloc.h>
27 #include <sys/vnode.h>
28 #include <sys/uio.h>
29 #include <sys/mount.h>
30 
31 #include <vm/vm.h>
32 #include <vm/vm_page.h>
33 #include <vm/vnode_pager.h>
34 
35 struct pagerlst	vnode_pager_list;	/* list of managed vnodes */
36 
37 #ifdef DEBUG
38 int	vpagerdebug = 0x00;
39 #define	VDB_FOLLOW	0x01
40 #define VDB_INIT	0x02
41 #define VDB_IO		0x04
42 #define VDB_FAIL	0x08
43 #define VDB_ALLOC	0x10
44 #define VDB_SIZE	0x20
45 #endif
46 
47 static vm_pager_t	 vnode_pager_alloc
48 			    __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
49 static void		 vnode_pager_cluster
50 			    __P((vm_pager_t, vm_offset_t,
51 				 vm_offset_t *, vm_offset_t *));
52 static void		 vnode_pager_dealloc __P((vm_pager_t));
53 static int		 vnode_pager_getpage
54 			    __P((vm_pager_t, vm_page_t *, int, boolean_t));
55 static boolean_t	 vnode_pager_haspage __P((vm_pager_t, vm_offset_t));
56 static void		 vnode_pager_init __P((void));
57 static int		 vnode_pager_io
58 			    __P((vn_pager_t, vm_page_t *, int,
59 				 boolean_t, enum uio_rw));
60 static boolean_t	 vnode_pager_putpage
61 			    __P((vm_pager_t, vm_page_t *, int, boolean_t));
62 
/*
 * Pager operations vector for vnode-backed VM objects; the machine-
 * independent VM code dispatches pager requests through this table.
 */
struct pagerops vnodepagerops = {
	vnode_pager_init,	/* one-time module initialization */
	vnode_pager_alloc,	/* create or look up pager for a vnode */
	vnode_pager_dealloc,	/* tear down a pager */
	vnode_pager_getpage,	/* pagein from the backing file */
	vnode_pager_putpage,	/* pageout to the backing file */
	vnode_pager_haspage,	/* does the file back this offset? */
	vnode_pager_cluster	/* compute a clustering window */
};
72 
73 static void
74 vnode_pager_init()
75 {
76 #ifdef DEBUG
77 	if (vpagerdebug & VDB_FOLLOW)
78 		printf("vnode_pager_init()\n");
79 #endif
80 	TAILQ_INIT(&vnode_pager_list);
81 }
82 
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * `size', `prot' and `foff' are unused here except by the debug
 * printf; the VM object is sized from the vnode's attributes.
 * Returns NULL if no handle was given or the file's attributes
 * could not be fetched.
 */
static vm_pager_t
vnode_pager_alloc(handle, size, prot, foff)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t foff;
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
		printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return(NULL);

	/*
	 * Vnodes keep a pointer to any associated pager so no need to
	 * lookup with vm_pager_lookup.
	 */
	vp = (struct vnode *)handle;
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL) {
		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * And an object of the appropriate size, from the file's
		 * current attributes.  Failure to get the attributes
		 * aborts the whole allocation.
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			free((caddr_t)vnp, M_VMPGDATA);
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * Hold a reference to the vnode and initialize pager data.
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;
		TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_flags = 0;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = vnp;
		/* link the pager off the vnode for future lookups */
		vp->v_vmdata = (caddr_t)pager;
	} else {
		/*
		 * vm_object_lookup() will remove the object from the
		 * cache if found and also gain a reference to the object.
		 */
		object = vm_object_lookup(pager);
#ifdef DEBUG
		/* vnp is only needed for the debug printf below */
		vnp = (vn_pager_t)pager->pg_data;
#endif
	}
#ifdef DEBUG
	if (vpagerdebug & VDB_ALLOC)
		printf("vnode_pager_setup: vp %x sz %x pager %x object %x\n",
		       vp, vnp->vnp_size, pager, object);
#endif
	return(pager);
}
172 
173 static void
174 vnode_pager_dealloc(pager)
175 	vm_pager_t pager;
176 {
177 	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
178 	register struct vnode *vp;
179 #ifdef NOTDEF
180 	struct proc *p = curproc;		/* XXX */
181 #endif
182 
183 #ifdef DEBUG
184 	if (vpagerdebug & VDB_FOLLOW)
185 		printf("vnode_pager_dealloc(%x)\n", pager);
186 #endif
187 	if (vp = vnp->vnp_vp) {
188 		vp->v_vmdata = NULL;
189 		vp->v_flag &= ~VTEXT;
190 #if NOTDEF
191 		/* can hang if done at reboot on NFS FS */
192 		(void) VOP_FSYNC(vp, p->p_ucred, p);
193 #endif
194 		vrele(vp);
195 	}
196 	TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
197 	free((caddr_t)vnp, M_VMPGDATA);
198 	free((caddr_t)pager, M_VMPAGER);
199 }
200 
201 static int
202 vnode_pager_getpage(pager, mlist, npages, sync)
203 	vm_pager_t pager;
204 	vm_page_t *mlist;
205 	int npages;
206 	boolean_t sync;
207 {
208 
209 #ifdef DEBUG
210 	if (vpagerdebug & VDB_FOLLOW)
211 		printf("vnode_pager_getpage(%x, %x, %x, %x)\n",
212 		       pager, mlist, npages, sync);
213 #endif
214 	return(vnode_pager_io((vn_pager_t)pager->pg_data,
215 			      mlist, npages, sync, UIO_READ));
216 }
217 
218 static boolean_t
219 vnode_pager_putpage(pager, mlist, npages, sync)
220 	vm_pager_t pager;
221 	vm_page_t *mlist;
222 	int npages;
223 	boolean_t sync;
224 {
225 	int err;
226 
227 #ifdef DEBUG
228 	if (vpagerdebug & VDB_FOLLOW)
229 		printf("vnode_pager_putpage(%x, %x, %x, %x)\n",
230 		       pager, mlist, npages, sync);
231 #endif
232 	if (pager == NULL)
233 		return (FALSE);			/* ??? */
234 	err = vnode_pager_io((vn_pager_t)pager->pg_data,
235 			     mlist, npages, sync, UIO_WRITE);
236 	/*
237 	 * If the operation was successful, mark the pages clean.
238 	 */
239 	if (err == VM_PAGER_OK) {
240 		while (npages--) {
241 			(*mlist)->flags |= PG_CLEAN;
242 			pmap_clear_modify(VM_PAGE_TO_PHYS(*mlist));
243 			mlist++;
244 		}
245 	}
246 	return(err);
247 }
248 
/*
 * Return whether the backing file can supply the page at `offset'.
 * FALSE when the offset is at or beyond EOF, or when BMAP reports a
 * hole (negative block number) at that offset; TRUE otherwise,
 * including on BMAP failure.
 */
static boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	daddr_t bn;
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
#endif

	/*
	 * Offset beyond end of file, do not have the page
	 * Lock the vnode first to make sure we have the most recent
	 * version of the size.
	 */
	VOP_LOCK(vnp->vnp_vp);
	if (offset >= vnp->vnp_size) {
		VOP_UNLOCK(vnp->vnp_vp);
#ifdef DEBUG
		if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
			printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
			       pager, offset, vnp->vnp_size);
#endif
		return(FALSE);
	}

	/*
	 * Read the index to find the disk block to read
	 * from.  If there is no block, report that we don't
	 * have this data.
	 *
	 * Assumes that the vnode has whole page or nothing.
	 */
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
		       (struct vnode **)0, &bn, NULL);
	VOP_UNLOCK(vnp->vnp_vp);
	if (err) {
#ifdef DEBUG
		if (vpagerdebug & VDB_FAIL)
			printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
			       err, pager, offset);
#endif
		/*
		 * BMAP failed: claim the page exists -- presumably so
		 * the error surfaces from the actual I/O instead.
		 */
		return(TRUE);
	}
	/* a negative block number from BMAP marks a hole in the file */
	return((long)bn < 0 ? FALSE : TRUE);
}
300 
301 static void
302 vnode_pager_cluster(pager, offset, loffset, hoffset)
303 	vm_pager_t	pager;
304 	vm_offset_t	offset;
305 	vm_offset_t	*loffset;
306 	vm_offset_t	*hoffset;
307 {
308 	vn_pager_t vnp = (vn_pager_t)pager->pg_data;
309 	vm_offset_t loff, hoff;
310 
311 #ifdef DEBUG
312 	if (vpagerdebug & VDB_FOLLOW)
313 		printf("vnode_pager_cluster(%x, %x) ", pager, offset);
314 #endif
315 	loff = offset;
316 	if (loff >= vnp->vnp_size)
317 		panic("vnode_pager_cluster: bad offset");
318 	/*
319 	 * XXX could use VOP_BMAP to get maxcontig value
320 	 */
321 	hoff = loff + MAXBSIZE;
322 	if (hoff > round_page(vnp->vnp_size))
323 		hoff = round_page(vnp->vnp_size);
324 
325 	*loffset = loff;
326 	*hoffset = hoff;
327 #ifdef DEBUG
328 	if (vpagerdebug & VDB_FOLLOW)
329 		printf("returns [%x-%x]\n", loff, hoff);
330 #endif
331 }
332 
/*
 * (XXX)
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e. we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 *
 * vp:    vnode whose size changed; only regular (VREG) vnodes with
 *        an attached pager are of interest.
 * nsize: the file's new size in bytes.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;
	/*
	 * Hasn't changed size
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	vnp = (vn_pager_t)pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;
	/*
	 * No object.
	 * This can happen during object termination since
	 * vm_object_page_clean is called after the object
	 * has been removed from the hash table, and clean
	 * may cause vnode write operations which can wind
	 * up back here.
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
		printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
		       vp, object, vnp->vnp_size, nsize);
#endif
	/*
	 * File has shrunk.
	 * Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
				      (vm_offset_t)nsize, vnp->vnp_size);
		vm_object_unlock(object);
	}
	vnp->vnp_size = (vm_offset_t)nsize;
	/* drop the reference gained by vm_object_lookup() above */
	vm_object_deallocate(object);
}
394 
395 void
396 vnode_pager_umount(mp)
397 	register struct mount *mp;
398 {
399 	register vm_pager_t pager, npager;
400 	struct vnode *vp;
401 
402 	for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager){
403 		/*
404 		 * Save the next pointer now since uncaching may
405 		 * terminate the object and render pager invalid
406 		 */
407 		npager = pager->pg_list.tqe_next;
408 		vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
409 		if (mp == (struct mount *)0 || vp->v_mount == mp) {
410 			VOP_LOCK(vp);
411 			(void) vnode_pager_uncache(vp);
412 			VOP_UNLOCK(vp);
413 		}
414 	}
415 }
416 
/*
 * Remove vnode associated object from the object cache.
 * Returns TRUE when the object is gone or held only by our own
 * lookup reference (the uncache "took"), FALSE otherwise.
 *
 * XXX unlock the vnode if it is currently locked.
 * We must do this since uncaching the object may result in its
 * destruction which may initiate paging activity which may necessitate
 * re-locking the vnode.
 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	register vm_object_t object;
	boolean_t uncached;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL)
		return (TRUE);
#ifdef DEBUG
	/*
	 * Sanity check: the caller should hold the vnode locked.
	 * NFS vnodes are exempted -- presumably because NFS follows
	 * a different locking discipline; confirm against the NFS
	 * vnode operations.
	 */
	if (!VOP_ISLOCKED(vp)) {
		extern int (**nfsv2_vnodeop_p)();

		if (vp->v_op != nfsv2_vnodeop_p)
			panic("vnode_pager_uncache: vnode not locked!");
	}
#endif
	/*
	 * Must use vm_object_lookup() as it actually removes
	 * the object from the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
		/*
		 * With ref_count <= 1 only our lookup reference
		 * remains, so pager_cache() will take effect.  Drop
		 * the vnode lock around the call -- see the XXX
		 * comment above.
		 */
		uncached = (object->ref_count <= 1);
		VOP_UNLOCK(vp);
		pager_cache(object, FALSE);
		VOP_LOCK(vp);
	} else
		uncached = TRUE;
	return(uncached);
}
461 
/*
 * Perform the actual pagein/pageout by mapping the pages into kernel
 * virtual memory and issuing VOP_READ/VOP_WRITE on the vnode.
 * `rw' selects the direction (UIO_READ = pagein, UIO_WRITE = pageout).
 * Returns a VM_PAGER_* status code.
 *
 * Uses the current process's credentials for the file I/O -- see the
 * TODO at the top of this file.  Currently limited to a single page
 * per call (panics otherwise).
 */
static int
vnode_pager_io(vnp, mlist, npages, sync, rw)
	register vn_pager_t vnp;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
	enum uio_rw rw;
{
	struct uio auio;
	struct iovec aiov;
	vm_offset_t kva, foff;
	int error, size;
	struct proc *p = curproc;		/* XXX */

	/* XXX single-page limitation; see "TODO: pageouts" above */
	vm_page_t m;
	if (npages != 1)
		panic("vnode_pager_io: cannot handle multiple pages");
	m = *mlist;
	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
		       vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
#endif
	/* byte offset of the page within the backing file */
	foff = m->offset + m->object->paging_offset;
	/*
	 * Allocate a kernel virtual address and initialize so that
	 * we can use VOP_READ/WRITE routines.
	 * NOTE(review): `sync' is passed as the third argument here;
	 * confirm it matches vm_pager_map_pages' can-wait parameter.
	 */
	kva = vm_pager_map_pages(mlist, npages, sync);
	if (kva == NULL)
		return(VM_PAGER_AGAIN);
	/*
	 * After all of the potentially blocking operations have been
	 * performed, we can do the size checks:
	 *	read beyond EOF (returns error)
	 *	short read
	 */
	VOP_LOCK(vnp->vnp_vp);
	if (foff >= vnp->vnp_size) {
		VOP_UNLOCK(vnp->vnp_vp);
		vm_pager_unmap_pages(kva, npages);
#ifdef DEBUG
		if (vpagerdebug & VDB_SIZE)
			printf("vnode_pager_io: vp %x, off %d size %d\n",
			       vnp->vnp_vp, foff, vnp->vnp_size);
#endif
		return(VM_PAGER_BAD);
	}
	/* clamp the transfer so it does not extend past EOF */
	if (foff + PAGE_SIZE > vnp->vnp_size)
		size = vnp->vnp_size - foff;
	else
		size = PAGE_SIZE;
	/* build a single-segment, kernel-space uio for the transfer */
	aiov.iov_base = (caddr_t)kva;
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = rw;
	auio.uio_resid = size;
	auio.uio_procp = (struct proc *)0;
#ifdef DEBUG
	if (vpagerdebug & VDB_IO)
		printf("vnode_pager_io: vp %x kva %x foff %x size %x",
		       vnp->vnp_vp, kva, foff, size);
#endif
	if (rw == UIO_READ)
		error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
	else
		error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
	VOP_UNLOCK(vnp->vnp_vp);
#ifdef DEBUG
	if (vpagerdebug & VDB_IO) {
		if (error || auio.uio_resid)
			printf(" returns error %x, resid %x",
			       error, auio.uio_resid);
		printf("\n");
	}
#endif
	if (!error) {
		register int count = size - auio.uio_resid;

		/*
		 * A zero-byte transfer is treated as an error; a short
		 * read leaves the tail of the page stale, so zero-fill
		 * it up to the page boundary.
		 */
		if (count == 0)
			error = EINVAL;
		else if (count != PAGE_SIZE && rw == UIO_READ)
			bzero((void *)(kva + count), PAGE_SIZE - count);
	}
	vm_pager_unmap_pages(kva, npages);
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
555