/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vnode_pager.c	7.7 (Berkeley) 12/14/91
 */

/*
 * Page to/from files (vnodes).
 *
 * TODO:
 *	pageouts
 *	fix credential use (uses current process credentials now)
 */
#include "vnodepager.h"
#if NVNODEPAGER > 0

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "vnode.h"
#include "uio.h"
#include "mount.h"

#include "vm.h"
#include "vm_page.h"
#include "vnode_pager.h"

queue_head_t	vnode_pager_list;	/* list of managed vnodes */

#ifdef DEBUG
int	vpagerdebug = 0x00;
#define	VDB_FOLLOW	0x01
#define VDB_INIT	0x02
#define VDB_IO		0x04
#define VDB_FAIL	0x08
#define VDB_ALLOC	0x10
#define VDB_SIZE	0x20
#endif

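/*
 * One-time setup; invoked through the pager operations table when
 * the VM system initializes its pagers.
 */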
void
vnode_pager_init()
{
#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_init()\n");
#endif
	queue_init(&vnode_pager_list);
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
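/*
 * Illustrative call from a mapping path (the caller-side names are
 * hypothetical):
 *
 *	pager = vnode_pager_alloc((caddr_t)vp, (vm_size_t)size, prot);
 */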
vm_pager_t
vnode_pager_alloc(handle, size, prot)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
		printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return(NULL);

	/*
	 * Vnodes keep a pointer to any associated pager so there is no
	 * need to look it up with vm_pager_lookup.
	 */
	vp = (struct vnode *)handle;
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL) {
		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * And an object of the appropriate size
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			free((caddr_t)vnp, M_VMPGDATA);
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * Hold a reference to the vnode and initialize pager data.
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;
		queue_enter(&vnode_pager_list, pager, vm_pager_t, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = (caddr_t)vnp;
		vp->v_vmdata = (caddr_t)pager;
	} else {
		/*
		 * vm_object_lookup() will remove the object from the
		 * cache if found and also gain a reference to the object.
		 */
		object = vm_object_lookup(pager);
#ifdef DEBUG
		vnp = (vn_pager_t)pager->pg_data;
#endif
	}
#ifdef DEBUG
	if (vpagerdebug & VDB_ALLOC)
		printf("vnode_pager_alloc: vp %x sz %x pager %x object %x\n",
		       vp, vnp->vnp_size, pager, object);
#endif
	return(pager);
}

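/*
 * Release pager resources; reached through the pager operations
 * table when the last reference to the paging object is dropped.
 */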
void
vnode_pager_dealloc(pager)
	vm_pager_t pager;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	register struct vnode *vp;
	struct proc *p = curproc;		/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_dealloc(%x)\n", pager);
#endif
	if (vp = vnp->vnp_vp) {
		vp->v_vmdata = NULL;
		vp->v_flag &= ~VTEXT;
#if 0
		/* can hang if done at reboot on NFS FS */
		(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
		vrele(vp);
	}
	queue_remove(&vnode_pager_list, pager, vm_pager_t, pg_list);
	free((caddr_t)vnp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}

vnode_pager_getpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_getpage(%x, %x)\n", pager, m);
#endif
	return(vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_READ));
}

boolean_t
vnode_pager_putpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_putpage(%x, %x)\n", pager, m);
#endif
	if (pager == NULL)
		return(VM_PAGER_BAD);
	err = vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_WRITE);
	if (err == VM_PAGER_OK) {
		m->clean = TRUE;			/* XXX - wrong place */
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));	/* XXX - wrong place */
	}
	return(err);
}

boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	daddr_t bn;
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
#endif

	/*
	 * Offset beyond end of file, do not have the page
	 */
	if (offset >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
			printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
			       pager, offset, vnp->vnp_size);
#endif
		return(FALSE);
	}

	/*
	 * Consult the block map to find the disk block to read
	 * from.  If there is no block, report that we don't
	 * have this data.
	 *
	 * Assumes that the vnode has the whole page or nothing.
	 */
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
		       (struct vnode **)0, &bn);
	if (err) {
#ifdef DEBUG
		if (vpagerdebug & VDB_FAIL)
			printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
			       err, pager, offset);
#endif
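		/*
		 * Bmap failed; be conservative and claim the page is
		 * there so that the error surfaces on the actual I/O.
		 */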
		return(TRUE);
	}
	return((long)bn < 0 ? FALSE : TRUE);
}

/*
 * (XXX)
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e. we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
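/*
 * Illustrative call from a filesystem truncate path (the variable
 * names are hypothetical):
 *
 *	vnode_pager_setsize(vp, (u_long)length);
 */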
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;
	/*
	 * Hasn't changed size
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	vnp = (vn_pager_t)pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;
	/*
	 * No object.
	 * This can happen during object termination since
	 * vm_object_page_clean is called after the object
	 * has been removed from the hash table, and clean
	 * may cause vnode write operations which can wind
	 * up back here.
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
		printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
		       vp, object, vnp->vnp_size, nsize);
#endif
	/*
	 * File has shrunk.
	 * Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
				      (vm_offset_t)nsize, vnp->vnp_size);
		vm_object_unlock(object);
	}
	vnp->vnp_size = (vm_offset_t)nsize;
	vm_object_deallocate(object);
}

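/*
 * Unmount-time sweep: uncache the object of every vnode on the
 * given mount point; a NULL mount pointer means all mounts.
 */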
vnode_pager_umount(mp)
	register struct mount *mp;
{
	register vm_pager_t pager, npager;
	struct vnode *vp;

	pager = (vm_pager_t) queue_first(&vnode_pager_list);
	while (!queue_end(&vnode_pager_list, (queue_entry_t)pager)) {
		/*
		 * Save the next pointer now since uncaching may
		 * terminate the object and render the pager invalid.
		 */
		vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
		npager = (vm_pager_t) queue_next(&pager->pg_list);
		if (mp == (struct mount *)0 || vp->v_mount == mp)
			(void) vnode_pager_uncache(vp);
		pager = npager;
	}
}

/*
 * Remove a vnode's associated object from the object cache.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	register vm_object_t object;
	boolean_t uncached, locked;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL)
		return (TRUE);
	/*
	 * Unlock the vnode if it is currently locked.
	 * We do this since uncaching the object may result
	 * in its destruction which may initiate paging
	 * activity which may necessitate locking the vnode.
	 */
	locked = VOP_ISLOCKED(vp);
	if (locked)
		VOP_UNLOCK(vp);
	/*
	 * Must use vm_object_lookup() as it actually removes
	 * the object from the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
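		/*
		 * ref_count includes the reference vm_object_lookup()
		 * just gave us, so a count of one means no one else is
		 * using the object and the uncache will really free it.
		 */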
		uncached = (object->ref_count <= 1);
		pager_cache(object, FALSE);
	} else
		uncached = TRUE;
	if (locked)
		VOP_LOCK(vp);
	return(uncached);
}

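/*
 * Common I/O routine: map the page into kernel virtual memory and
 * move the data with VOP_READ/VOP_WRITE using a temporary uio.
 */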
vnode_pager_io(vnp, m, rw)
	register vn_pager_t vnp;
	vm_page_t m;
	enum uio_rw rw;
{
	struct uio auio;
	struct iovec aiov;
	vm_offset_t kva, foff;
	int error, size;
	struct proc *p = curproc;		/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
		       vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
#endif
	foff = m->offset + m->object->paging_offset;
	/*
	 * Return failure if beyond current EOF
	 */
	if (foff >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & VDB_SIZE)
			printf("vnode_pager_io: vp %x, off %d size %d\n",
			       vnp->vnp_vp, foff, vnp->vnp_size);
#endif
		return(VM_PAGER_BAD);
	}
	if (foff + PAGE_SIZE > vnp->vnp_size)
		size = vnp->vnp_size - foff;
	else
		size = PAGE_SIZE;
	/*
	 * Allocate a kernel virtual address and initialize so that
	 * we can use VOP_READ/WRITE routines.
	 */
	kva = vm_pager_map_page(m);
	aiov.iov_base = (caddr_t)kva;
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = rw;
	auio.uio_resid = size;
	auio.uio_procp = (struct proc *)0;
#ifdef DEBUG
	if (vpagerdebug & VDB_IO)
		printf("vnode_pager_io: vp %x kva %x foff %x size %x",
		       vnp->vnp_vp, kva, foff, size);
#endif
	if (rw == UIO_READ)
		error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
	else
		error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
#ifdef DEBUG
	if (vpagerdebug & VDB_IO) {
		if (error || auio.uio_resid)
			printf(" returns error %x, resid %x",
			       error, auio.uio_resid);
		printf("\n");
	}
#endif
	if (!error) {
		register int count = size - auio.uio_resid;

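		/*
		 * A transfer of zero bytes is an error; on a short
		 * read, zero the untouched rest of the page so stale
		 * data is never exposed.
		 */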
		if (count == 0)
			error = EINVAL;
		else if (count != PAGE_SIZE && rw == UIO_READ)
			bzero((caddr_t)(kva + count), PAGE_SIZE - count);
	}
	vm_pager_unmap_page(kva);
	return (error ? VM_PAGER_FAIL : VM_PAGER_OK);
}
#endif /* NVNODEPAGER > 0 */