xref: /original-bsd/sys/vm/vm_page.h (revision b193be73)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_page.h	7.6 (Berkeley) 02/19/92
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Resident memory system definitions.
41  */
42 
43 #ifndef	_VM_PAGE_
44 #define	_VM_PAGE_
45 
46 /*
47  *	Management of resident (logical) pages.
48  *
49  *	A small structure is kept for each resident
50  *	page, indexed by page number.  Each structure
51  *	is an element of several lists:
52  *
53  *		A hash table bucket used to quickly
54  *		perform object/offset lookups
55  *
56  *		A list of all pages for a given object,
57  *		so they can be quickly deactivated at
58  *		time of deallocation.
59  *
60  *		An ordered list of pages due for pageout.
61  *
62  *	In addition, the structure contains the object
63  *	and offset to which this page belongs (for pageout),
64  *	and sundry status bits.
65  *
66  *	Fields in this structure are locked either by the lock on the
67  *	object that the page belongs to (O) or by the lock on the page
68  *	queues (P).
69  */
70 
71 struct vm_page {
72 	queue_chain_t	pageq;		/* queue info for FIFO
73 					 * queue or free list (P) */
74 	queue_chain_t	hashq;		/* hash table links (O) */
75 	queue_chain_t	listq;		/* all pages in same object (O) */
76 
77 	vm_object_t	object;		/* which object am I in (O,P) */
78 	vm_offset_t	offset;		/* offset into that object (O,P) */
79 
80 	unsigned int	wire_count:16,	/* how many wired-down maps use me?
81 					   (P) */
82 	/* boolean_t */	inactive:1,	/* page is in inactive list (P) */
83 			active:1,	/* page is in active list (P) */
84 			laundry:1,	/* page is being cleaned now (P) */
85 #ifdef DEBUG
86 			pagerowned:1,	/* async paging op in progress */
87 			ptpage:1,	/* is a user page table page */
88 #endif
89 			:0;		/* (force to 'long' boundary) */
90 #ifdef	ns32000
91 	int		pad;		/* extra space for ns32000 bit ops */
92 #endif	/* ns32000 */
93 	boolean_t	clean;		/* page has not been modified */
94 	unsigned int
95 	/* boolean_t */	busy:1,		/* page is in transit (O) */
96 			wanted:1,	/* someone is waiting for page (O) */
97 			tabled:1,	/* page is in VP table (O) */
98 			copy_on_write:1,/* page must be copied before being
99 					   changed (O) */
100 			fictitious:1,	/* physical page doesn't exist (O) */
101 			absent:1,	/* virtual page doesn't exist (O) */
102 			fake:1,		/* page is a placeholder for page-in
103 					   (O) */
104 			:0;
105 
106 	vm_offset_t	phys_addr;	/* physical address of page */
107 	vm_prot_t	page_lock;	/* Uses prohibited by data manager */
108 	vm_prot_t	unlock_request;	/* Outstanding unlock request */
109 };
110 
111 typedef struct vm_page	*vm_page_t;
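
/*
 * A usage sketch (not part of the original header) of the (O)/(P) locking
 * convention noted above: fields marked (O) are touched with the owning
 * object's lock held, fields marked (P) with the page queue lock held.
 * vm_object_lock()/vm_object_unlock() are assumed to be the object lock
 * macros from vm_object.h; vm_page_lock_queues() is defined below.
 *
 *	vm_object_lock(object);			object lock covers (O) fields
 *	m = vm_page_lookup(object, offset);
 *	if (m != NULL && !m->busy)
 *		m->busy = TRUE;			claim the page for I/O
 *	vm_object_unlock(object);
 *
 *	vm_page_lock_queues();			queue lock covers (P) fields
 *	vm_page_activate(m);			move the page to the active list
 *	vm_page_unlock_queues();
 */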
112 
113 #if	VM_PAGE_DEBUG
114 #define	VM_PAGE_CHECK(mem) { \
115 		if ( (((unsigned int) (mem)) < ((unsigned int) &vm_page_array[0])) || \
116 		     (((unsigned int) (mem)) > ((unsigned int) &vm_page_array[last_page-first_page])) || \
117 		     ((mem)->active && (mem)->inactive) \
118 		    ) panic("vm_page_check: not valid!"); \
119 		}
120 #else	/* VM_PAGE_DEBUG */
121 #define	VM_PAGE_CHECK(mem)
122 #endif	/* VM_PAGE_DEBUG */
123 
124 #ifdef	KERNEL
125 /*
126  *	Each pageable resident page falls into one of three lists:
127  *
128  *	free
129  *		Available for allocation now.
130  *	inactive
131  *		Not referenced in any map, but still has an
132  *		object/offset-page mapping, and may be dirty.
133  *		This is the list of pages that should be
134  *		paged out next.
135  *	active
136  *		A list of pages which have been placed in
137  *		at least one physical map.  This list is
138  *		ordered, in LRU-like fashion.
139  */
140 
141 extern
142 queue_head_t	vm_page_queue_free;	/* memory free queue */
143 extern
144 queue_head_t	vm_page_queue_active;	/* active memory queue */
145 extern
146 queue_head_t	vm_page_queue_inactive;	/* inactive memory queue */
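
/*
 * Illustrative sketch only: walking one of the paging queues the way the
 * pageout daemon does, using the Mach-derived queue macros (queue_first,
 * queue_end, queue_next) that go with queue_chain_t.  The page queue lock
 * must be held throughout.
 *
 *	vm_page_lock_queues();
 *	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
 *	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
 *		next = (vm_page_t) queue_next(&m->pageq);
 *		... examine or launder m ...
 *		m = next;
 *	}
 *	vm_page_unlock_queues();
 */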
147 
148 extern
149 vm_page_t	vm_page_array;		/* First resident page in table */
150 extern
151 long		first_page;		/* first physical page number */
152 					/* ... represented in vm_page_array */
153 extern
154 long		last_page;		/* last physical page number */
155 					/* ... represented in vm_page_array */
156 					/* [INCLUSIVE] */
157 extern
158 vm_offset_t	first_phys_addr;	/* physical address for first_page */
159 extern
160 vm_offset_t	last_phys_addr;		/* physical address for last_page */
161 
162 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
163 
164 #define IS_VM_PHYSADDR(pa) \
165 		((pa) >= first_phys_addr && (pa) <= last_phys_addr)
166 
167 #define PHYS_TO_VM_PAGE(pa) \
168 		(&vm_page_array[atop(pa) - first_page ])
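
/*
 * Example of the mapping these macros implement (a sketch; atop() and
 * trunc_page() are the usual page arithmetic macros from vm_param.h and
 * the machine parameter headers): PHYS_TO_VM_PAGE() indexes vm_page_array
 * by page number relative to first_page, and VM_PAGE_TO_PHYS() gives back
 * the page-aligned physical address.
 *
 *	if (IS_VM_PHYSADDR(pa)) {
 *		vm_page_t m = PHYS_TO_VM_PAGE(pa);
 *		... VM_PAGE_TO_PHYS(m) == trunc_page(pa) ...
 *	}
 */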
169 
170 extern
171 simple_lock_data_t	vm_page_queue_lock;	/* lock on active and inactive
172 						   page queues */
173 extern
174 simple_lock_data_t	vm_page_queue_free_lock;
175 						/* lock on free page queue */
176 void		vm_page_startup __P((vm_offset_t *start, vm_offset_t *end));
177 vm_page_t	vm_page_lookup __P((vm_object_t object, vm_offset_t offset));
178 vm_page_t	vm_page_alloc __P((vm_object_t object, vm_offset_t offset));
179 void		vm_page_free __P((vm_page_t mem));
180 void		vm_page_activate __P((vm_page_t mem));
181 void		vm_page_deactivate __P((vm_page_t mem));
182 void		vm_page_rename __P((vm_page_t mem, vm_object_t new_object,
183 			vm_offset_t new_offset));
184 
185 boolean_t	vm_page_zero_fill __P((vm_page_t mem));
186 void		vm_page_copy __P((vm_page_t src_mem, vm_page_t dst_mem));
187 
188 void		vm_page_wire __P((vm_page_t mem));
189 void		vm_page_unwire __P((vm_page_t mem));
190 
191 void		vm_set_page_size __P((void));
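
/*
 * Rough usage sketch for the allocation interface above (not from the
 * original header): a fault-style caller looks the page up first and only
 * allocates on a miss.  vm_page_alloc() is expected to hand back a page
 * that is busy and fake (see VM_PAGE_INIT below), or NULL when the free
 * list is empty, in which case the caller must unlock, wait for the
 * pageout daemon to free memory, and retry.
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, offset);
 *	if (m == NULL) {
 *		m = vm_page_alloc(object, offset);
 *		if (m == NULL) {
 *			vm_object_unlock(object);
 *			... block until pages are freed, then retry ...
 *		}
 *	}
 */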
192 
193 /*
194  *	Functions implemented as macros
195  */
196 
197 #define PAGE_ASSERT_WAIT(m, interruptible)	{ \
198 				(m)->wanted = TRUE; \
199 				assert_wait((int) (m), (interruptible)); \
200 			}
201 
202 #define PAGE_WAKEUP(m)	{ \
203 				(m)->busy = FALSE; \
204 				if ((m)->wanted) { \
205 					(m)->wanted = FALSE; \
206 					thread_wakeup((int) (m)); \
207 				} \
208 			}
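
/*
 * Typical use of the two macros above (a sketch): a thread that finds a
 * page in transit records its interest and sleeps, re-checking "busy"
 * after it is woken; the thread that owns the I/O clears "busy" and wakes
 * waiters with PAGE_WAKEUP().  thread_block() is the scheduler primitive
 * assumed here; the object lock protects "busy" and "wanted".
 *
 *	while (m->busy) {			waiter, object locked
 *		PAGE_ASSERT_WAIT(m, FALSE);
 *		vm_object_unlock(object);
 *		thread_block();
 *		vm_object_lock(object);
 *	}
 *
 *	PAGE_WAKEUP(m);				I/O initiator, when done
 */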
209 
210 #define	vm_page_lock_queues()	simple_lock(&vm_page_queue_lock)
211 #define	vm_page_unlock_queues()	simple_unlock(&vm_page_queue_lock)
212 
213 #define vm_page_set_modified(m)	{ (m)->clean = FALSE; }
214 
215 #ifdef DEBUG
216 #define	VM_PAGE_DEBUG_INIT(m) ((m)->pagerowned = 0, (m)->ptpage = 0)
217 #else
218 #define	VM_PAGE_DEBUG_INIT(m)
219 #endif
220 
221 #define	VM_PAGE_INIT(mem, object, offset) { \
222 	(mem)->busy = TRUE; \
223 	(mem)->tabled = FALSE; \
224 	vm_page_insert((mem), (object), (offset)); \
225 	(mem)->absent = FALSE; \
226 	(mem)->fictitious = FALSE; \
227 	(mem)->page_lock = VM_PROT_NONE; \
228 	(mem)->unlock_request = VM_PROT_NONE; \
229 	(mem)->laundry = FALSE; \
230 	(mem)->active = FALSE; \
231 	(mem)->inactive = FALSE; \
232 	(mem)->wire_count = 0; \
233 	(mem)->clean = TRUE; \
234 	(mem)->copy_on_write = FALSE; \
235 	(mem)->fake = TRUE; \
236 	VM_PAGE_DEBUG_INIT(mem); \
237 }
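
/*
 * VM_PAGE_INIT() is the common (re)initialization applied to a page taken
 * off the free list; vm_page_alloc() in vm_page.c is its expected caller.
 * A sketch under that assumption (empty-queue check omitted):
 *
 *	simple_lock(&vm_page_queue_free_lock);
 *	queue_remove_first(&vm_page_queue_free, m, vm_page_t, pageq);
 *	simple_unlock(&vm_page_queue_free_lock);
 *	VM_PAGE_INIT(m, object, offset);	object must be locked; page
 *						comes out busy, fake, clean,
 *						unwired, and tabled in object
 */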
238 
239 #endif	/* KERNEL */
240 #endif	/* _VM_PAGE_ */
241