xref: /original-bsd/sys/vm/vm_pager.c (revision 8f7f80b6)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pager.c	7.9 (Berkeley) 10/01/92
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Paging space routine stubs.  Emulates a matchmaker-like interface
41  *	for builtin pagers.
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_kern.h>
51 
52 #include "swappager.h"
53 #if NSWAPPAGER > 0
54 extern struct pagerops swappagerops;
55 #define	swappagerops_p	&swappagerops
56 #endif
57 
58 #include "vnodepager.h"
59 #if NVNODEPAGER > 0
60 extern struct pagerops vnodepagerops;
61 #define	vnodepagerops_p	&vnodepagerops
62 #endif
63 
64 #include "devpager.h"
65 #if NDEVPAGER > 0
66 extern struct pagerops devicepagerops;
67 #define	devicepagerops_p &devicepagerops
68 #endif
69 
/*
 * Table of configured pager operations, indexed by pager type.
 * NOTE(review): vm_pager_allocate indexes this table directly with
 * PG_SWAP/PG_VNODE/PG_DEV, so the indices only line up when every
 * earlier pager is configured in (e.g. NSWAPPAGER == 0 would shift
 * the vnode pager into slot 0) — confirm config always includes all.
 */
struct pagerops *pagertab[] = {
#if NSWAPPAGER > 0
	swappagerops_p,		/* PG_SWAP */
#endif
#if NVNODEPAGER > 0
	vnodepagerops_p,	/* PG_VNODE */
#endif
#if NDEVPAGER > 0
	devicepagerops_p,	/* PG_DEV */
#endif
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);	/* # configured pagers */

struct pagerops *dfltpagerops = NULL;	/* default pager; set by a pager's init routine */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;			/* submap carved from kernel_map in vm_pager_init */
vm_offset_t pager_sva, pager_eva;	/* start/end addresses of pager_map */
92 
/*
 * Initialize the pager subsystem: carve the pager_map submap out of
 * kernel_map (for transient get/put page mappings) and run each
 * configured pager's init routine.  Panics if, after all init routines
 * have run, no pager has established itself as the default
 * (dfltpagerops still NULL).
 */
void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Allocate a kernel submap for tracking get/put page mappings
	 */
	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
				  PAGER_MAP_SIZE, FALSE);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_init)();
	if (dfltpagerops == NULL)
		panic("no default pager");
}
111 
112 /*
113  * Allocate an instance of a pager of the given type.
114  */
115 vm_pager_t
116 vm_pager_allocate(type, handle, size, prot)
117 	int type;
118 	caddr_t handle;
119 	vm_size_t size;
120 	vm_prot_t prot;
121 {
122 	vm_pager_t pager;
123 	struct pagerops *ops;
124 
125 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
126 	return((*ops->pgo_alloc)(handle, size, prot));
127 }
128 
129 void
130 vm_pager_deallocate(pager)
131 	vm_pager_t	pager;
132 {
133 	if (pager == NULL)
134 		panic("vm_pager_deallocate: null pager");
135 
136 	VM_PAGER_DEALLOC(pager);
137 }
138 
139 int
140 vm_pager_get(pager, m, sync)
141 	vm_pager_t	pager;
142 	vm_page_t	m;
143 	boolean_t	sync;
144 {
145 	extern boolean_t vm_page_zero_fill();
146 
147 	if (pager == NULL)
148 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
149 	return(VM_PAGER_GET(pager, m, sync));
150 }
151 
152 int
153 vm_pager_put(pager, m, sync)
154 	vm_pager_t	pager;
155 	vm_page_t	m;
156 	boolean_t	sync;
157 {
158 	if (pager == NULL)
159 		panic("vm_pager_put: null pager");
160 	return(VM_PAGER_PUT(pager, m, sync));
161 }
162 
163 boolean_t
164 vm_pager_has_page(pager, offset)
165 	vm_pager_t	pager;
166 	vm_offset_t	offset;
167 {
168 	if (pager == NULL)
169 		panic("vm_pager_has_page");
170 	return(VM_PAGER_HASPAGE(pager, offset));
171 }
172 
173 /*
174  * Called by pageout daemon before going back to sleep.
175  * Gives pagers a chance to clean up any completed async pageing operations.
176  */
177 void
178 vm_pager_sync()
179 {
180 	struct pagerops **pgops;
181 
182 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
183 		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
184 }
185 
/*
 * Map a single page into the pager_map submap and return the kernel
 * virtual address; used by pagers that need a KVA to perform IO.
 * May block in kmem_alloc_wait until submap space is available.
 * The caller is expected to hold the page busy and off the active
 * queue (enforced under DEBUG).
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

#ifdef DEBUG
	/* sanity: page must be busy, inactive, and not already pager-owned */
	if (!(m->flags & PG_BUSY) || (m->flags & PG_ACTIVE))
		panic("vm_pager_map_page: page active or not busy");
	if (m->flags & PG_PAGEROWNED)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	/* grab a page worth of KVA from the submap (sleeps if none free) */
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	m->flags |= PG_PAGEROWNED;	/* checked in vm_pager_unmap_page */
#endif
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}
206 
/*
 * Tear down a mapping established by vm_pager_map_page and return
 * the KVA to pager_map, waking any thread sleeping for submap space.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef DEBUG
	vm_page_t m;

	/* recover the page from the mapping before it is destroyed */
	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	/* clear the ownership mark set by vm_pager_map_page */
	if (m->flags & PG_PAGEROWNED)
		m->flags &= ~PG_PAGEROWNED;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}
226 
227 vm_pager_t
228 vm_pager_lookup(list, handle)
229 	register queue_head_t *list;
230 	caddr_t handle;
231 {
232 	register vm_pager_t pager;
233 
234 	pager = (vm_pager_t) queue_first(list);
235 	while (!queue_end(list, (queue_entry_t)pager)) {
236 		if (pager->pg_handle == handle)
237 			return(pager);
238 		pager = (vm_pager_t) queue_next(&pager->pg_list);
239 	}
240 	return(NULL);
241 }
242 
243 /*
244  * This routine gains a reference to the object.
245  * Explicit deallocation is necessary.
246  */
247 int
248 pager_cache(object, should_cache)
249 	vm_object_t	object;
250 	boolean_t	should_cache;
251 {
252 	if (object == NULL)
253 		return(KERN_INVALID_ARGUMENT);
254 
255 	vm_object_cache_lock();
256 	vm_object_lock(object);
257 	if (should_cache)
258 		object->flags |= OBJ_CANPERSIST;
259 	else
260 		object->flags &= ~OBJ_CANPERSIST;
261 	vm_object_unlock(object);
262 	vm_object_cache_unlock();
263 
264 	vm_object_deallocate(object);
265 
266 	return(KERN_SUCCESS);
267 }
268