xref: /original-bsd/sys/vm/vm_page.c (revision babae2df)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	8.3 (Berkeley) 03/21/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

struct pglist	vm_page_queue_free;
struct pglist	vm_page_queue_active;
struct pglist	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
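
/*
 *	Illustrative example: with the common value cnt.v_page_size == 4096,
 *	the loop above leaves page_shift == 12 and page_mask == 0xfff, so
 *	page-size arithmetic elsewhere can be done with shifts and masks,
 *	e.g.
 *
 *		page number of pa:	pa >> page_shift
 *		page-align va:		va & ~page_mask
 *
 *	(the kind of expressions the vm_param.h macros are typically
 *	built from when the page size is not a compile-time constant).
 */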


/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
void vm_page_startup(start, end)
	vm_offset_t	*start;
	vm_offset_t	*end;
{
	register vm_page_t	m;
	register struct pglist	*bucket;
	vm_size_t		npages;
	int			i;
	vm_offset_t		pa;
	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);

	/*
	 *	Calculate the number of hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(*end - *start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
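
	/*
	 *	Worked example of the sizing above: if atop(*end - *start)
	 *	is 5000 physical pages, the loop doubles vm_page_bucket_count
	 *	through 1, 2, 4, ... and stops at 8192, the first power of 2
	 *	that is not less than 5000, leaving vm_page_hash_mask == 8191
	 *	(0x1fff).
	 */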

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 */
	vm_page_buckets = (struct pglist *)
	    pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
	bucket = vm_page_buckets;

	for (i = vm_page_bucket_count; i--;) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Truncate the remainder of physical memory to our page size.
	 */

	*end = trunc_page(*end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
				      MAX_KMAPENT*sizeof(struct vm_map_entry));
	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page))
		/ (PAGE_SIZE + sizeof(struct vm_page));
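
	/*
	 *	The division above charges each managed page for both its
	 *	PAGE_SIZE bytes of usable memory and the sizeof(struct
	 *	vm_page) of bookkeeping carved out of the same region.
	 *	As a rough, purely hypothetical example: with 4096-byte
	 *	pages and a 64-byte vm_page structure, 8MB of remaining
	 *	memory yields about 8388608 / 4160 = 2016 managed pages.
	 */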

	/*
	 *	Record the extent of physical memory that the
	 *	virtual memory system manages.
	 */

	first_page = *start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;


	/*
	 *	Allocate and clear the mem entry structures.
	 */

	m = vm_page_array = (vm_page_t)
		pmap_bootstrap_alloc(npages * sizeof(struct vm_page));

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	pa = first_phys_addr;
	while (npages--) {
		m->flags = 0;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->flags |= PG_FICTITIOUS | PG_BUSY;
			cnt.v_free_count--;
		}
#else /* i386 */
		TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	/* from now on, pmap_bootstrap_alloc can't be used */
	vm_page_startup_initialized = TRUE;
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
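
/*
 *	For example, with vm_page_bucket_count == 1024 the mask is 0x3ff,
 *	so vm_page_hash(object, offset) is equivalent to
 *
 *		((unsigned)object + (unsigned)atop(offset)) % 1024
 *
 *	The AND acts as a modulus only because the bucket count is a
 *	power of 2, which is why the restriction noted above exists.
 */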

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register struct pglist	*bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	VM_PAGE_INIT(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
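	/*
	 *	For instance (purely illustrative numbers): with v_free_min
	 *	== 64, v_free_target == 256 and v_inactive_target == 512,
	 *	a free count of 50 always wakes the daemon, a free count of
	 *	100 wakes it only while the inactive count is below 512, and
	 *	a free count of 300 never triggers a wakeup here.
	 */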

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->flags & PG_ACTIVE) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		cnt.v_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	m->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	dest_m->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}