xref: /original-bsd/sys/vm/vm_pager.c (revision e0399a72)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pager.c	7.3 (Berkeley) 04/21/91
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Paging space routine stubs.  Emulates a matchmaker-like interface
41  *	for builtin pagers.
42  */
43 
44 #include "param.h"
45 #include "malloc.h"
46 
47 #include "vm.h"
48 #include "vm_page.h"
49 #include "vm_kern.h"
50 
51 #ifdef hp300
52 #include "../hp300/hp300/pte.h"			/* XXX XXX XXX */
53 #endif
54 
55 #include "swappager.h"
56 
57 #if NSWAPPAGER > 0
58 extern struct pagerops swappagerops;
59 #else
60 #define	swappagerops	NULL
61 #endif
62 #include "vnodepager.h"
63 #if NVNODEPAGER > 0
64 extern struct pagerops vnodepagerops;
65 #else
66 #define	vnodepagerops	NULL
67 #endif
68 #include "devpager.h"
69 #if NDEVPAGER > 0
70 extern struct pagerops devicepagerops;
71 #else
72 #define	devicepagerops	NULL
73 #endif
74 
/*
 * Table of operation vectors for all known pagers, indexed by pager
 * type code (PG_SWAP, PG_VNODE, PG_DEV -- see comments on each entry).
 * Pagers configured out of the kernel have their ops symbol #defined
 * to NULL above.
 */
struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
/* Number of entries in pagertab; used as the loop/bounds limit. */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Ops vector used for PG_DFLT requests.  Left NULL here; one of the
 * pgo_init routines is expected to install itself (vm_pager_init
 * panics if none does).
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;
90 
91 void
92 vm_pager_init()
93 {
94 	vm_offset_t whocares1, whocares2;
95 	struct pagerops **pgops;
96 
97 	/*
98 	 * Allocate a kernel submap for tracking get/put page mappings
99 	 */
100 	pager_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
101 				  PAGER_MAP_SIZE, FALSE);
102 	/*
103 	 * Initialize known pagers
104 	 */
105 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
106 		(*(*pgops)->pgo_init)();
107 	if (dfltpagerops == NULL)
108 		panic("no default pager");
109 }
110 
111 /*
112  * Allocate an instance of a pager of the given type.
113  */
114 vm_pager_t
115 vm_pager_allocate(type, handle, size, prot)
116 	int type;
117 	caddr_t handle;
118 	vm_size_t size;
119 	vm_prot_t prot;
120 {
121 	vm_pager_t pager;
122 	struct pagerops *ops;
123 
124 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
125 	return((*ops->pgo_alloc)(handle, size, prot));
126 }
127 
128 void
129 vm_pager_deallocate(pager)
130 	vm_pager_t	pager;
131 {
132 	if (pager == NULL)
133 		panic("vm_pager_deallocate: null pager");
134 
135 	VM_PAGER_DEALLOC(pager);
136 }
137 
138 vm_pager_get(pager, m, sync)
139 	vm_pager_t	pager;
140 	vm_page_t	m;
141 	boolean_t	sync;
142 {
143 	extern boolean_t vm_page_zero_fill();
144 
145 	if (pager == NULL)
146 		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
147 	return(VM_PAGER_GET(pager, m, sync));
148 }
149 
150 vm_pager_put(pager, m, sync)
151 	vm_pager_t	pager;
152 	vm_page_t	m;
153 	boolean_t	sync;
154 {
155 	if (pager == NULL)
156 		panic("vm_pager_put: null pager");
157 	return(VM_PAGER_PUT(pager, m, sync));
158 }
159 
160 boolean_t
161 vm_pager_has_page(pager, offset)
162 	vm_pager_t	pager;
163 	vm_offset_t	offset;
164 {
165 	if (pager == NULL)
166 		panic("vm_pager_has_page");
167 	return(VM_PAGER_HASPAGE(pager, offset));
168 }
169 
170 /*
171  * Called by pageout daemon before going back to sleep.
172  * Gives pagers a chance to clean up any completed async pageing operations.
173  */
174 void
175 vm_pager_sync()
176 {
177 	struct pagerops **pgops;
178 
179 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
180 		(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
181 }
182 
/*
 * Map a single page into the pager_map submap so a pager can do IO
 * on it through a kernel virtual address.  Space is obtained with
 * kmem_alloc_wait (which, by its name, presumably blocks until
 * submap space is free -- confirm against kmem implementation).
 * Returns the KVA; undo with vm_pager_unmap_page().
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef hp300
	/*
	 * XXX: cannot use pmap_enter as the mapping would be
	 * removed by a pmap_remove_all().
	 * Hand-build the PTE instead: phys addr OR'd with PG_CI | PG_V
	 * (presumably cache-inhibit + valid on hp300 -- see pte.h),
	 * then flush the stale translation for this VA (TBIS).
	 */
	*(int *)kvtopte(kva) = VM_PAGE_TO_PHYS(m) | PG_CI | PG_V;
	TBIS(kva);
#else
	/* Normal case: wired (TRUE) mapping at default kernel protection. */
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
#endif
	return(kva);
}
203 
/*
 * Release a KVA obtained from vm_pager_map_page: tear down the
 * mapping (hp300 builds its PTEs by hand, so it must clear them by
 * hand too) and return the space to pager_map, waking any thread
 * sleeping in kmem_alloc_wait.
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#ifdef hp300
	/* Invalidate the hand-built PTE and flush its TLB entry. */
	*(int *)kvtopte(kva) = PG_NV;
	TBIS(kva);
#endif
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
214 
215 vm_pager_t
216 vm_pager_lookup(list, handle)
217 	register queue_head_t *list;
218 	caddr_t handle;
219 {
220 	register vm_pager_t pager;
221 
222 	pager = (vm_pager_t) queue_first(list);
223 	while (!queue_end(list, (queue_entry_t)pager)) {
224 		if (pager->pg_handle == handle)
225 			return(pager);
226 		pager = (vm_pager_t) queue_next(&pager->pg_list);
227 	}
228 	return(NULL);
229 }
230 
/*
 * Set an object's persistence attribute: should_cache == TRUE lets
 * the object linger in the object cache after its last reference
 * goes away.  The caller is expected to hold a reference on the
 * object (e.g. from a lookup); this routine CONSUMES that reference
 * via vm_object_deallocate, so no explicit deallocation is needed
 * afterwards.  Returns KERN_SUCCESS, or KERN_INVALID_ARGUMENT for a
 * NULL object.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == NULL)
		return(KERN_INVALID_ARGUMENT);

	/* Lock order: cache lock before object lock -- do not reorder. */
	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	/* Drop the caller's reference. */
	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}
252