xref: /original-bsd/sys/vm/vm_pager.c (revision de3f5c4e)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pager.c	7.4 (Berkeley) 05/07/91
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Paging space routine stubs.  Emulates a matchmaker-like interface
41  *	for builtin pagers.
42  */
43 
44 #include "param.h"
45 #include "malloc.h"
46 
47 #include "vm.h"
48 #include "vm_page.h"
49 #include "vm_kern.h"
50 
51 #include "swappager.h"
52 
53 #if NSWAPPAGER > 0
54 extern struct pagerops swappagerops;
55 #else
56 #define	swappagerops	NULL
57 #endif
58 #include "vnodepager.h"
59 #if NVNODEPAGER > 0
60 extern struct pagerops vnodepagerops;
61 #else
62 #define	vnodepagerops	NULL
63 #endif
64 #include "devpager.h"
65 #if NDEVPAGER > 0
66 extern struct pagerops devicepagerops;
67 #else
68 #define	devicepagerops	NULL
69 #endif
70 
/*
 * Table of configured pager types, indexed by the PG_* pager type
 * code noted beside each entry.  Unconfigured entries are NULL
 * (see the #if/#define blocks above).
 */
struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Pager used for PG_DFLT requests; expected to be installed by one
 * of the pgo_init routines (vm_pager_init panics if still NULL).
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;			/* submap of kernel_map for pager KVA */
vm_offset_t pager_sva, pager_eva;	/* start/end addresses of pager_map */
87 
88 void
89 vm_pager_init()
90 {
91 	struct pagerops **pgops;
92 
93 	/*
94 	 * Allocate a kernel submap for tracking get/put page mappings
95 	 */
96 	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
97 				  PAGER_MAP_SIZE, FALSE);
98 	/*
99 	 * Initialize known pagers
100 	 */
101 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
102 		(*(*pgops)->pgo_init)();
103 	if (dfltpagerops == NULL)
104 		panic("no default pager");
105 }
106 
107 /*
108  * Allocate an instance of a pager of the given type.
109  */
110 vm_pager_t
111 vm_pager_allocate(type, handle, size, prot)
112 	int type;
113 	caddr_t handle;
114 	vm_size_t size;
115 	vm_prot_t prot;
116 {
117 	vm_pager_t pager;
118 	struct pagerops *ops;
119 
120 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
121 	return((*ops->pgo_alloc)(handle, size, prot));
122 }
123 
124 void
125 vm_pager_deallocate(pager)
126 	vm_pager_t	pager;
127 {
128 	if (pager == NULL)
129 		panic("vm_pager_deallocate: null pager");
130 
131 	VM_PAGER_DEALLOC(pager);
132 }
133 
134 vm_pager_get(pager, m, sync)
135 	vm_pager_t	pager;
136 	vm_page_t	m;
137 	boolean_t	sync;
138 {
139 	extern boolean_t vm_page_zero_fill();
140 
141 	if (pager == NULL)
142 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
143 	return(VM_PAGER_GET(pager, m, sync));
144 }
145 
146 vm_pager_put(pager, m, sync)
147 	vm_pager_t	pager;
148 	vm_page_t	m;
149 	boolean_t	sync;
150 {
151 	if (pager == NULL)
152 		panic("vm_pager_put: null pager");
153 	return(VM_PAGER_PUT(pager, m, sync));
154 }
155 
156 boolean_t
157 vm_pager_has_page(pager, offset)
158 	vm_pager_t	pager;
159 	vm_offset_t	offset;
160 {
161 	if (pager == NULL)
162 		panic("vm_pager_has_page");
163 	return(VM_PAGER_HASPAGE(pager, offset));
164 }
165 
166 /*
167  * Called by pageout daemon before going back to sleep.
168  * Gives pagers a chance to clean up any completed async pageing operations.
169  */
170 void
171 vm_pager_sync()
172 {
173 	struct pagerops **pgops;
174 
175 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
176 		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
177 }
178 
/*
 * Map a single page into the pager submap and return the kernel
 * virtual address it was mapped at.  May sleep waiting for space
 * in pager_map.  Caller is expected to hand in a busy, non-active
 * page (enforced under DEBUG).
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

#ifdef DEBUG
	/* sanity checks on the page, plus a double-map warning */
	if (!m->busy || m->active)
		panic("vm_pager_map_page: page active or not busy");
	if (m->pagerowned)
		printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
	/* grab a page worth of KVA; sleeps until some is free */
	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
	m->pagerowned = 1;
#endif
	/* enter the physical page at the new KVA (wired = TRUE) */
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
	return(kva);
}
199 
/*
 * Tear down a mapping made by vm_pager_map_page and release the KVA
 * back to pager_map.  Note the ordering: under DEBUG the page must
 * be looked up via pmap_extract while the mapping still exists,
 * i.e. before pmap_remove runs.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef DEBUG
	vm_page_t m;

	/* resolve the backing page before the mapping is destroyed */
	m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
	pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
	/* free the KVA and wake sleepers in kmem_alloc_wait */
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
#ifdef DEBUG
	/* clear ownership set by vm_pager_map_page; warn if unset */
	if (m->pagerowned)
		m->pagerowned = 0;
	else
		printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
		       m, kva, VM_PAGE_TO_PHYS(m));
#endif
}
219 
220 vm_pager_t
221 vm_pager_lookup(list, handle)
222 	register queue_head_t *list;
223 	caddr_t handle;
224 {
225 	register vm_pager_t pager;
226 
227 	pager = (vm_pager_t) queue_first(list);
228 	while (!queue_end(list, (queue_entry_t)pager)) {
229 		if (pager->pg_handle == handle)
230 			return(pager);
231 		pager = (vm_pager_t) queue_next(&pager->pg_list);
232 	}
233 	return(NULL);
234 }
235 
/*
 * Set whether the object backing a pager may persist in the object
 * cache (can_persist) once all references are gone.
 *
 * The caller passes in a reference to the object; this routine
 * consumes that reference via vm_object_deallocate before
 * returning.  Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for a
 * null object.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == NULL)
		return(KERN_INVALID_ARGUMENT);

	/* lock order here: cache lock first, then the object lock */
	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	/* drop the reference handed in by the caller */
	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}
257