/* minix/servers/vm/mem_cache.c (revision e3b78ef1) */

/* This file implements the disk cache.
 *
 * Cached pages, if they exist anywhere, are always held in a private
 * VM data structure.
 *
 * In addition, they may be any combination of:
 *    - mapped in by a filesystem for reading/writing by it
 *    - mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the data structure holding all cache blocks, as well
 * as the mapping of those blocks into and out of filesystems.
 */
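
/* Filesystems drive the cache through three requests, handled below:
 * do_mapcache() maps existing cache pages into the caller, do_setcache()
 * enters caller-supplied pages into the cache, and do_clearcache() drops
 * all cached pages of a device.
 */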

#include <assert.h>
#include <string.h>

#include <minix/hash.h>

#include <machine/vmparam.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"

static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb, void *state,
	int len, int *io);
static int cache_pt_flags(struct vir_region *vr);

struct mem_type mem_type_cache = {
	.name = "cache memory",
	.ev_reference = cache_reference,
	.ev_unreference = cache_unreference,
	.ev_resize = cache_resize,
	.ev_sanitycheck = cache_sanitycheck,
	.ev_pagefault = cache_pagefault,
	.writable = cache_writable,
	.pt_flags = cache_pt_flags,
};

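/* Page table flags for cache pages. On ARM, cache pages are mapped with
 * the cached attribute (ARM_VM_PTE_CACHED); other architectures need no
 * extra flags here.
 */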
static int cache_pt_flags(struct vir_region *vr)
{
#if defined(__arm__)
	return ARM_VM_PTE_CACHED;
#else
	return 0;
#endif
}

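/* Creating an extra reference to a cache page requires no bookkeeping
 * beyond what the physical-block code already does.
 */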
static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
{
	return OK;
}

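/* Dropping a reference works exactly as for anonymous memory, so defer to
 * the anonymous memory type's handler.
 */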
static int cache_unreference(struct phys_region *pr)
{
	return mem_type_anon.ev_unreference(pr);
}

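/* Sanity check: account this page's physical memory in the used-pages
 * tally as part of VM's global consistency check.
 */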
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
{
	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
	return OK;
}

static int cache_writable(struct phys_region *pr)
{
	/* Cache blocks are currently used only by filesystems, so they are
	 * always writable.
	 */
	assert(pr->ph->refcount > 0);
	return pr->ph->phys != MAP_NONE;
}

static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
{
	printf("VM: cannot resize cache blocks.\n");
	return ENOMEM;
}

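/* Handle a request from a filesystem to map cached pages into its address
 * space: create a new cache-type region of the requested size and fault in
 * the cached page for every device offset in the range. If any page is
 * missing from the cache, the whole mapping is undone and ENOENT returned.
 * On success, the reply carries the virtual address of the new mapping.
 */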
int
do_mapcache(message *msg)
{
	dev_t dev = msg->m_vmmcp.dev;
	off_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int n;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	if(!(vr = map_page_region(caller, VM_PAGE_SIZE, VM_DATATOP, bytes,
		VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}

	assert(vr->length == bytes);

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1))) {
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

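		/* Hand the cached page to cache_pagefault() through the
		 * region's pb_cache field; the map_pf() call below triggers
		 * that handler, which links the page into the region.
		 */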
		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		assert(!vr->param.pb_cache);
	}

	assert(vr);

	memset(msg, 0, sizeof(*msg));

	msg->m_vmmcp_reply.addr = (void *) vr->vaddr;

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

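/* The page "fault" on a cache region involves no I/O: do_mapcache() has
 * already stashed the wanted cache page in the region's pb_cache field, so
 * all that remains is to link that page in at the faulting offset.
 */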
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int len, int *io)
{
	vir_bytes offset = ph->offset;
	assert(ph->ph->phys == MAP_NONE);
	assert(region->param.pb_cache);
	pb_unreferenced(region, ph, 0);
	pb_link(ph, region->param.pb_cache, offset, region);
	region->param.pb_cache = NULL;

	return OK;
}

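/* Handle a request from a filesystem to enter pages into the cache: the
 * caller has filled the pages at 'block' with the contents of the given
 * device blocks, and VM takes them over as cache pages. The pages must be
 * plain anonymous memory with a reference count of one; any stale cache
 * entry for the same device position is evicted first.
 */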
int
do_setcache(message *msg)
{
	int r;
	dev_t dev = msg->m_vmmcp.dev;
	off_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int flags = msg->m_vmmcp.flags;
	int n;
	struct vmproc *caller;
	phys_bytes offset;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
		struct cached_page *hb;

		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset, v);
			return EFAULT;
		}

		if(!phys_region) {
			printf("VM: error: no available memory region given\n");
			return EFAULT;
		}

		if((hb=find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1))) {
			/* A page for this block is already cached; the
			 * lookup has updated its inode info.
			 */
			if(hb->page != phys_region->ph ||
			    (hb->flags & VMSF_ONCE)) {
				/* The previous cache entry has become
				 * obsolete; make a new one. rmcache()
				 * removes it from the cache and frees the
				 * page if it isn't mapped in anywhere else.
				 */
				rmcache(hb);
			} else {
				/* The same block was already there; only its
				 * inode info may have changed, which is fine.
				 */
				continue;
			}
		}

		if(phys_region->memtype != &mem_type_anon &&
			phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
			return EFAULT;
		}

		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
			return EFAULT;
		}

		phys_region->memtype = &mem_type_cache;

		if((r=addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
		    ino_off + offset, flags, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
			return r;
		}
	}

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

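/* For illustration, a set-cache request as a filesystem might build it,
 * going by the m_vmmcp fields consumed above. This is a sketch only: the
 * request name VM_SETCACHE and the use of _taskcall() as the transport
 * are assumptions about the caller, not something this file defines.
 *
 *	message m;
 *	memset(&m, 0, sizeof(m));
 *	m.m_vmmcp.dev = dev;			// device holding the block
 *	m.m_vmmcp.dev_offset = dev_off;		// page-aligned device offset
 *	m.m_vmmcp.ino = ino;			// inode owning the block
 *	m.m_vmmcp.ino_offset = ino_off;		// page-aligned inode offset
 *	m.m_vmmcp.pages = 1;			// number of pages donated
 *	m.m_vmmcp.block = blockdata;		// page-aligned data address
 *	m.m_vmmcp.flags = 0;			// e.g. VMSF_ONCE for one use
 *	r = _taskcall(VM_PROC_NR, VM_SETCACHE, &m);
 */
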
/*
 * A file system wants to invalidate all pages belonging to a certain device.
 */
int
do_clearcache(message *msg)
{
	dev_t dev;

	dev = msg->m_vmmcp.dev;

	clear_cache_bydev(dev);

	return OK;
}
269