/* xref: /original-bsd/sys/vm/vm_page.h (revision 9b5efc43) */
1 /*
2  * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
3  * Copyright (c) 1987 Carnegie-Mellon University
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * The CMU software License Agreement specifies the terms and conditions
11  * for use and redistribution.
12  *
13  *	@(#)vm_page.h	7.1 (Berkeley) 12/05/90
14  */
15 
16 /*
17  *	Resident memory system definitions.
18  */
19 
20 #ifndef	_VM_PAGE_
21 #define	_VM_PAGE_
22 
23 #include "../vm/vm_param.h"
24 #include "../vm/vm_object.h"
25 #include "../vm/vm_prot.h"
26 #include "lock.h"
27 
28 /*
29  *	Management of resident (logical) pages.
30  *
31  *	A small structure is kept for each resident
32  *	page, indexed by page number.  Each structure
33  *	is an element of several lists:
34  *
35  *		A hash table bucket used to quickly
36  *		perform object/offset lookups
37  *
38  *		A list of all pages for a given object,
39  *		so they can be quickly deactivated at
40  *		time of deallocation.
41  *
42  *		An ordered list of pages due for pageout.
43  *
44  *	In addition, the structure contains the object
45  *	and offset to which this page belongs (for pageout),
46  *	and sundry status bits.
47  *
48  *	Fields in this structure are locked either by the lock on the
49  *	object that the page belongs to (O) or by the lock on the page
50  *	queues (P).
51  */
52 
53 struct vm_page {
54 	queue_chain_t	pageq;		/* queue info for FIFO
55 					 * queue or free list (P) */
56 	queue_chain_t	hashq;		/* hash table links (O)*/
57 	queue_chain_t	listq;		/* all pages in same object (O)*/
58 
59 	vm_object_t	object;		/* which object am I in (O,P)*/
60 	vm_offset_t	offset;		/* offset into that object (O,P) */
61 
62 	unsigned int	wire_count:16,	/* how many wired down maps use me?
63 					   (P) */
64 	/* boolean_t */	inactive:1,	/* page is in inactive list (P) */
65 			active:1,	/* page is in active list (P) */
66 			laundry:1,	/* page is being cleaned now (P)*/
67 #ifdef DEBUG
68 			pagerowned:1,	/* async paging op in progress */
69 			ptpage:1,	/* is a user page table page */
70 #endif
71 			:0;		/* (force to 'long' boundary) */
72 #ifdef	ns32000
73 	int		pad;		/* extra space for ns32000 bit ops */
74 #endif	ns32000
75 	boolean_t	clean;		/* page has not been modified */
76 	unsigned int
77 	/* boolean_t */	busy:1,		/* page is in transit (O) */
78 			wanted:1,	/* someone is waiting for page (O) */
79 			tabled:1,	/* page is in VP table (O) */
80 			copy_on_write:1,/* page must be copied before being
81 					   changed (O) */
82 			fictitious:1,	/* physical page doesn't exist (O) */
83 			absent:1,	/* virtual page doesn't exist (O) */
84 			fake:1,		/* page is a placeholder for page-in
85 					   (O) */
86 			:0;
87 
88 	vm_offset_t	phys_addr;	/* physical address of page */
89 	vm_prot_t	page_lock;	/* Uses prohibited by data manager */
90 	vm_prot_t	unlock_request;	/* Outstanding unlock request */
91 };
92 
typedef struct vm_page	*vm_page_t;

#define	VM_PAGE_NULL		((vm_page_t) 0)

#if	VM_PAGE_DEBUG
/*
 *	Sanity-check a page pointer: it must lie within vm_page_array and
 *	the page must not be on both the active and inactive queues.
 *	NOTE(review): the unsigned int casts truncate pointers on LP64
 *	targets; fine for the 32-bit machines this code targets.
 */
#define	VM_PAGE_CHECK(mem) { \
		if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
		     (((unsigned int) mem) > ((unsigned int) &vm_page_array[last_page-first_page])) || \
		     (mem->active && mem->inactive) \
		    ) panic("vm_page_check: not valid!"); \
		}
#else	/* VM_PAGE_DEBUG */
#define	VM_PAGE_CHECK(mem)
#endif	/* VM_PAGE_DEBUG */
107 
108 #ifdef	KERNEL
109 /*
110  *	Each pageable resident page falls into one of three lists:
111  *
112  *	free
113  *		Available for allocation now.
114  *	inactive
115  *		Not referenced in any map, but still has an
116  *		object/offset-page mapping, and may be dirty.
117  *		This is the list of pages that should be
118  *		paged out next.
119  *	active
120  *		A list of pages which have been placed in
121  *		at least one physical map.  This list is
122  *		ordered, in LRU-like fashion.
123  */
124 
/*
 *	Page queues (presumably defined in the VM system's vm_page.c —
 *	only declared here).  Membership is protected by the queue locks
 *	declared further below.
 */
extern
queue_head_t	vm_page_queue_free;	/* memory free queue */
extern
queue_head_t	vm_page_queue_active;	/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;	/* inactive memory queue */

extern
vm_page_t	vm_page_array;		/* First resident page in table */
extern
long		first_page;		/* first physical page number */
					/* ... represented in vm_page_array */
extern
long		last_page;		/* last physical page number */
					/* ... represented in vm_page_array */
					/* [INCLUSIVE] */
extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

/*
 *	Page counts and pageout-daemon thresholds.
 */
extern
int	vm_page_free_count;	/* How many pages are free? */
extern
int	vm_page_active_count;	/* How many pages are active? */
extern
int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
int	vm_page_wire_count;	/* How many pages are wired? */
extern
int	vm_page_free_target;	/* How many do we want free? */
extern
int	vm_page_free_min;	/* When to wakeup pageout */
extern
int	vm_page_inactive_target;/* How many do we want inactive? */
extern
int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
int	vm_page_laundry_count;	/* How many pages being laundered? */
164 
/* Physical address backing a resident page. */
#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/* TRUE iff physical address pa falls in the range managed by vm_page_array. */
#define IS_VM_PHYSADDR(pa) \
		((pa) >= first_phys_addr && (pa) <= last_phys_addr)

/* Map a managed physical address to its vm_page structure.
 * No bounds check: callers must first guarantee IS_VM_PHYSADDR(pa). */
#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page ])
172 
extern
simple_lock_data_t	vm_page_queue_lock;	/* lock on active and inactive
						   page queues */
extern
simple_lock_data_t	vm_page_queue_free_lock;
						/* lock on free page queue */
/*
 *	Exported vm_page operations.  These are pre-ANSI (K&R)
 *	declarations, so parameter types are not stated here;
 *	see the definitions for the actual argument lists.
 */
vm_page_t	vm_page_lookup();
vm_page_t	vm_page_alloc();
void		vm_page_init();
void		vm_page_free();
void		vm_page_activate();
void		vm_page_deactivate();
void		vm_page_rename();
void		vm_page_replace();

boolean_t	vm_page_zero_fill();
void		vm_page_copy();

void		vm_page_wire();
void		vm_page_unwire();

void		vm_set_page_size();
196 
197 /*
198  *	Functions implemented as macros
199  */
200 
/*
 *	Mark page m as wanted and register the calling thread to sleep on
 *	it (the page's address is the wakeup event).  Wrapped in
 *	do { } while (0) so the macro acts as a single statement; the old
 *	bare { } form was a syntax error when used as the body of an
 *	if/else ("if (x) PAGE_WAKEUP(m); else ...").
 */
#define PAGE_ASSERT_WAIT(m, interruptible)	do { \
				(m)->wanted = TRUE; \
				assert_wait((int) (m), (interruptible)); \
			} while (0)

/*
 *	Clear the busy bit on page m and wake up any thread sleeping on it.
 */
#define PAGE_WAKEUP(m)	do { \
				(m)->busy = FALSE; \
				if ((m)->wanted) { \
					(m)->wanted = FALSE; \
					thread_wakeup((int) (m)); \
				} \
			} while (0)
213 
214 #define	vm_page_lock_queues()	simple_lock(&vm_page_queue_lock)
215 #define	vm_page_unlock_queues()	simple_unlock(&vm_page_queue_lock)
216 
217 #define vm_page_set_modified(m)	{ (m)->clean = FALSE; }
218 #endif	KERNEL
219 #endif	_VM_PAGE_
220