xref: /original-bsd/sys/vm/vm_pager.c (revision 4e1ffb20)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pager.c	8.7 (Berkeley) 07/07/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

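/*
 * For orientation: each configured pager exports an operations vector
 * along the following lines.  This is a sketch only; the authoritative
 * declaration lives in vm/vm_pager.h.
 *
 *	struct pagerops {
 *		void	   (*pgo_init)();	initialize pager
 *		vm_pager_t (*pgo_alloc)();	allocate pager instance
 *		void	   (*pgo_dealloc)();	disassociate/free pager
 *		int	   (*pgo_getpages)();	read pages from backing store
 *		int	   (*pgo_putpages)();	write pages to backing store
 *		boolean_t  (*pgo_haspage)();	is page in backing store?
 *		void	   (*pgo_cluster)();	compute clusterable range
 *	};
 */
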
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#ifdef SWAPPAGER
extern struct pagerops swappagerops;
#endif

#ifdef VNODEPAGER
extern struct pagerops vnodepagerops;
#endif

#ifdef DEVPAGER
extern struct pagerops devicepagerops;
#endif

struct pagerops *pagertab[] = {
#ifdef SWAPPAGER
	&swappagerops,		/* PG_SWAP */
#else
	NULL,
#endif
#ifdef VNODEPAGER
	&vnodepagerops,		/* PG_VNODE */
#else
	NULL,
#endif
#ifdef DEVPAGER
	&devicepagerops,	/* PG_DEV */
#else
	NULL,
#endif
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) times the maximum swap cluster
 * size (MAXPHYS == 64k) to get the most efficiency; 64 * 64k = 4mb,
 * which is exactly the size reserved below.
 */
#define PAGER_MAP_SIZE	(4 * 1024 * 1024)

vm_map_t pager_map;
boolean_t pager_map_wanted;
vm_offset_t pager_sva, pager_eva;

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Allocate a kernel submap for tracking get/put page mappings
	 */
	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
				  PAGER_MAP_SIZE, FALSE);
	/*
	 * Initialize known pagers, skipping table slots left NULL
	 * by unconfigured pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_init)();
	if (dfltpagerops == NULL)
		panic("no default pager");
}

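/*
 * One of the pgo_init routines is expected to nominate itself as the
 * default by setting dfltpagerops, or the panic above fires.  A sketch
 * of the convention (illustrative, not copied from swap_pager.c):
 *
 *	void
 *	swap_pager_init()
 *	{
 *		dfltpagerops = &swappagerops;
 *		...
 *	}
 */
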
/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_pager_t
vm_pager_allocate(type, handle, size, prot, off)
	int type;
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t off;
{
	struct pagerops *ops;

	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc)(handle, size, prot, off));
	return (NULL);
}

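/*
 * Typical call (illustrative, not taken from a specific caller):
 * allocate the default pager for an anonymous object, with no handle
 * and no offset:
 *
 *	pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, object->size,
 *	    VM_PROT_ALL, (vm_offset_t)0);
 */
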
void
vm_pager_deallocate(pager)
	vm_pager_t	pager;
{
	if (pager == NULL)
		panic("vm_pager_deallocate: null pager");

	(*pager->pg_ops->pgo_dealloc)(pager);
}

int
vm_pager_get_pages(pager, mlist, npages, sync)
	vm_pager_t	pager;
	vm_page_t	*mlist;
	int		npages;
	boolean_t	sync;
{
	int rv;

	if (pager == NULL) {
		/*
		 * A pagerless object has no backing store;
		 * satisfy the request with zero-filled pages.
		 */
		rv = VM_PAGER_OK;
		while (npages--)
			if (!vm_page_zero_fill(*mlist)) {
				rv = VM_PAGER_FAIL;
				break;
			} else
				mlist++;
		return (rv);
	}
	return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync));
}

int
vm_pager_put_pages(pager, mlist, npages, sync)
	vm_pager_t	pager;
	vm_page_t	*mlist;
	int		npages;
	boolean_t	sync;
{
	if (pager == NULL)
		panic("vm_pager_put_pages: null pager");
	return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
}

/* XXX compatibility */
int
vm_pager_get(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	return vm_pager_get_pages(pager, &m, 1, sync);
}

/* XXX compatibility */
int
vm_pager_put(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	return vm_pager_put_pages(pager, &m, 1, sync);
}

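/*
 * Example of the single-page path (illustrative): a fault handler
 * pages in one page synchronously and checks the result code:
 *
 *	rv = vm_pager_get(object->pager, m, TRUE);
 *	if (rv != VM_PAGER_OK)
 *		...back out and fail the fault...
 */
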
boolean_t
vm_pager_has_page(pager, offset)
	vm_pager_t	pager;
	vm_offset_t	offset;
{
	if (pager == NULL)
		panic("vm_pager_has_page: null pager");
	return ((*pager->pg_ops->pgo_haspage)(pager, offset));
}

/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging
 * operations; by convention, a put with a null pager and no pages
 * is the sync request.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE);
}

void
vm_pager_cluster(pager, offset, loff, hoff)
	vm_pager_t	pager;
	vm_offset_t	offset;
	vm_offset_t	*loff;
	vm_offset_t	*hoff;
{
	if (pager == NULL)
		panic("vm_pager_cluster: null pager");
	(*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff);
}

/*
 * Stub cluster routine for pagers that do not support clustering;
 * it should never actually be called.
 */
void
vm_pager_clusternull(pager, offset, loff, hoff)
	vm_pager_t	pager;
	vm_offset_t	offset;
	vm_offset_t	*loff;
	vm_offset_t	*hoff;
{
	panic("vm_pager_clusternull called");
}

vm_offset_t
vm_pager_map_pages(mlist, npages, canwait)
	vm_page_t	*mlist;
	int		npages;
	boolean_t	canwait;
{
	vm_offset_t kva, va;
	vm_size_t size;
	vm_page_t m;

	/*
	 * Allocate space in the pager map; if none is available, return 0.
	 * This is basically an expansion of kmem_alloc_wait with optional
	 * blocking on no space.
	 */
	size = npages * PAGE_SIZE;
	vm_map_lock(pager_map);
	while (vm_map_findspace(pager_map, 0, size, &kva)) {
		if (!canwait) {
			vm_map_unlock(pager_map);
			return (0);
		}
		pager_map_wanted = TRUE;
		vm_map_unlock(pager_map);
		(void) tsleep(pager_map, PVM, "pager_map", 0);
		vm_map_lock(pager_map);
	}
	vm_map_insert(pager_map, NULL, 0, kva, kva + size);
	vm_map_unlock(pager_map);

	for (va = kva; npages--; va += PAGE_SIZE) {
		m = *mlist++;
#ifdef DEBUG
		if ((m->flags & PG_BUSY) == 0)
			panic("vm_pager_map_pages: page not busy");
		if (m->flags & PG_PAGEROWNED)
			panic("vm_pager_map_pages: page already in pager");
		m->flags |= PG_PAGEROWNED;
#endif
		pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m),
			   VM_PROT_DEFAULT, TRUE);
	}
	return (kva);
}

void
vm_pager_unmap_pages(kva, npages)
	vm_offset_t	kva;
	int		npages;
{
	vm_size_t size = npages * PAGE_SIZE;

#ifdef DEBUG
	vm_offset_t va;
	vm_page_t m;
	int np = npages;

	for (va = kva; np--; va += PAGE_SIZE) {
		m = vm_pager_atop(va);
		if (m->flags & PG_PAGEROWNED)
			m->flags &= ~PG_PAGEROWNED;
		else
			printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n",
			       m, va, VM_PAGE_TO_PHYS(m));
	}
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + size);
	vm_map_lock(pager_map);
	(void) vm_map_delete(pager_map, kva, kva + size);
	if (pager_map_wanted)
		wakeup(pager_map);
	vm_map_unlock(pager_map);
}

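/*
 * Typical use by a pager doing I/O (illustrative, loosely modeled on
 * the swap pager): map the pages to get contiguous kernel VA, do the
 * transfer, then release the mapping:
 *
 *	kva = vm_pager_map_pages(mlist, npages, canwait);
 *	if (kva == 0)
 *		return (VM_PAGER_AGAIN);
 *	... perform device/vnode I/O on [kva, kva + npages * PAGE_SIZE) ...
 *	vm_pager_unmap_pages(kva, npages);
 */
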
vm_page_t
vm_pager_atop(kva)
	vm_offset_t	kva;
{
	vm_offset_t pa;

	pa = pmap_extract(vm_map_pmap(pager_map), kva);
	if (pa == 0)
		panic("vm_pager_atop");
	return (PHYS_TO_VM_PAGE(pa));
}

vm_pager_t
vm_pager_lookup(pglist, handle)
	register struct pagerlst *pglist;
	caddr_t handle;
{
	register vm_pager_t pager;

	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
		if (pager->pg_handle == handle)
			return (pager);
	return (NULL);
}

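/*
 * Pagers keep their instances on per-type pager lists keyed by handle
 * so an allocator can find and reuse an existing pager.  A sketch of
 * the pattern (the list name is illustrative):
 *
 *	pager = vm_pager_lookup(&vnode_pager_list, (caddr_t)vp);
 *	if (pager != NULL)
 *		return (pager);    (already allocated; reuse it)
 */
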
/*
 * Set whether an object's pages may persist in the object cache once
 * the last reference to it goes away.  The object arrives holding the
 * reference gained by the caller's lookup; that reference is
 * explicitly released here.
 */
int
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	if (should_cache)
		object->flags |= OBJ_CANPERSIST;
	else
		object->flags &= ~OBJ_CANPERSIST;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return (KERN_SUCCESS);
}
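
/*
 * Example (illustrative): a pager that wants its object's pages kept
 * around after the last map reference goes away looks the object up
 * (gaining the reference consumed above) and marks it cacheable:
 *
 *	(void) pager_cache(vm_object_lookup(pager), TRUE);
 */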