xref: /original-bsd/sys/vm/vm_pager.c (revision b7cc7b86)
/*
 * Copyright (c) 1985, 1986 Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_pager.c	7.1 (Berkeley) 12/05/90
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

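/*
 * ("Matchmaker" was CMU's RPC interface/stub generator for Mach; the
 * point of the comment above is that these routines stand in for
 * generated stubs and simply dispatch through a table of pager
 * operations instead.)
 */
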
#include "param.h"
#include "queue.h"
#include "malloc.h"

#include "../vm/vm_param.h"
#include "../vm/vm_pager.h"
#include "../vm/vm_page.h"
#include "../vm/vm_prot.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"

#include "../vm/pmap.h"

#include "swappager.h"
#if NSWAPPAGER > 0
extern struct pagerops swappagerops;
#else
#define	swappagerops	PAGER_OPS_NULL
#endif
#include "vnodepager.h"
#if NVNODEPAGER > 0
extern struct pagerops vnodepagerops;
#else
#define	vnodepagerops	PAGER_OPS_NULL
#endif
#include "devpager.h"
#if NDEVPAGER > 0
extern struct pagerops devicepagerops;
#else
#define	devicepagerops	PAGER_OPS_NULL
#endif

struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

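/*
 * The default pager is not chosen here; presumably one of the
 * pgo_init routines below (the swap pager, in a typical
 * configuration) is expected to point dfltpagerops at its own ops --
 * vm_pager_init() panics if none of them has done so.
 */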
struct pagerops *dfltpagerops = PAGER_OPS_NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 */
#define PAGER_MAP_SIZE	(256 * PAGE_SIZE)
vm_map_t pager_map;

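/*
 * Initialize the pager system: carve a kernel submap out of
 * kernel_map for transient page mappings, then run each configured
 * pager's init routine.  (Presumably called once from VM startup
 * code before any pager is used.)
 */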
void
vm_pager_init()
{
	vm_offset_t whocares1, whocares2;
	struct pagerops **pgops;

	/*
	 * Allocate a kernel submap for tracking get/put page mappings
	 */
	pager_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				  PAGER_MAP_SIZE, FALSE);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_init)();
	if (dfltpagerops == PAGER_OPS_NULL)
		panic("no default pager");
}

/*
 * Allocate an instance of a pager of the given type.
 */
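/*
 * For illustration only (this call is hypothetical, not taken from
 * this file): an anonymous object of `size' bytes could be given a
 * default-pager instance with
 *
 *	pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, size,
 *	    VM_PROT_DEFAULT);
 *
 * Note that `type' indexes pagertab unchecked, so callers must pass
 * PG_DFLT or a valid PG_* index.
 */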
vm_pager_t
vm_pager_allocate(type, handle, size, prot)
	int type;
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
{
	vm_pager_t pager;
	struct pagerops *ops;

	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
	return((*ops->pgo_alloc)(handle, size, prot));
}

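/*
 * Release a pager instance through its deallocate op; a null pager
 * here is a caller bug, hence the panic.
 */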
void
vm_pager_deallocate(pager)
	vm_pager_t	pager;
{
	if (pager == vm_pager_null)
		panic("vm_pager_deallocate: null pager");

	VM_PAGER_DEALLOC(pager);
}

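/*
 * Pull a page in from its pager.  With no pager to consult (a null
 * pager here), the page can never have been paged out, so it is
 * simply zero-filled instead.
 */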
vm_pager_get(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	extern boolean_t vm_page_zero_fill();

	if (pager == vm_pager_null)
		return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
	return(VM_PAGER_GET(pager, m, sync));
}

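/*
 * Push a page out through its pager, waiting for completion if
 * `sync' is set; returns a VM_PAGER_* status from the pager.
 */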
vm_pager_put(pager, m, sync)
	vm_pager_t	pager;
	vm_page_t	m;
	boolean_t	sync;
{
	if (pager == vm_pager_null)
		panic("vm_pager_put: null pager");
	return(VM_PAGER_PUT(pager, m, sync));
}

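/*
 * Ask a pager whether it currently holds data for the given offset.
 */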
boolean_t
vm_pager_has_page(pager, offset)
	vm_pager_t	pager;
	vm_offset_t	offset;
{
	if (pager == vm_pager_null)
		panic("vm_pager_has_page");
	return(VM_PAGER_HASPAGE(pager, offset));
}

/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging
 * operations.
 */
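/*
 * (The "sync" request is signalled below by handing each pager's
 * putpage entry a null pager and a null page; the pagers evidently
 * treat that combination as "reap completed asynchronous writes"
 * rather than as a real pageout.)
 */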
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		(*(*pgops)->pgo_putpage)(VM_PAGER_NULL, VM_PAGE_NULL, FALSE);
}

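/*
 * Map a page into the pager_map submap so a pager can do IO on it,
 * returning the chosen kernel virtual address.  The hand-rolled PTE
 * store below (kvtopte, PG_CI, PG_V, TBIS) appears to be hp300/68k
 * specific: it installs a cache-inhibited, valid mapping directly so
 * that a later pmap_remove_all() on the page cannot tear it down,
 * which the XXX comment says would happen with pmap_enter().
 */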
vm_offset_t
vm_pager_map_page(m)
	vm_page_t	m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#if 1
	/*
	 * XXX: cannot use pmap_enter as the mapping would be
	 * removed by a pmap_remove_all().
	 */
	*(int *)kvtopte(kva) = VM_PAGE_TO_PHYS(m) | PG_CI | PG_V;
	TBIS(kva);
#else
	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
		   VM_PROT_DEFAULT, TRUE);
#endif
	return(kva);
}

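/*
 * Undo vm_pager_map_page: invalidate the PTE and return the kernel
 * virtual address to pager_map, waking anyone waiting for space.
 */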
void
vm_pager_unmap_page(kva)
	vm_offset_t	kva;
{
#if 1
	*(int *)kvtopte(kva) = PG_NV;
	TBIS(kva);
#endif
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

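/*
 * Walk a list of pagers (the caller supplies the list, typically one
 * kept per pager type) looking for the instance allocated for the
 * given handle; returns VM_PAGER_NULL if none matches.
 */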
vm_pager_t
vm_pager_lookup(list, handle)
	register queue_head_t *list;
	caddr_t handle;
{
	register vm_pager_t pager;

	pager = (vm_pager_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t)pager)) {
		if (pager->pg_handle == handle)
			return(pager);
		pager = (vm_pager_t) queue_next(&pager->pg_list);
	}
	return(VM_PAGER_NULL);
}

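/*
 * (A pager would typically call this with should_cache == TRUE when
 * its backing store -- a vnode, say -- can repopulate the object
 * cheaply, so the object lingers in the object cache rather than
 * being destroyed when its last reference is released.)
 */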
/*
 * Set an object's notion of whether it may persist in the object
 * cache.  The caller must hold a reference to the object; that
 * reference is consumed (explicitly deallocated) here.
 */
pager_cache(object, should_cache)
	vm_object_t	object;
	boolean_t	should_cache;
{
	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = should_cache;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}