/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.h	8.3 (Berkeley) 01/09/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 */

TAILQ_HEAD(pglist, vm_page);
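
/*
 * (Editor's note, not in the original header) TAILQ_HEAD above
 * declares the queue-head type used for all page lists; per
 * <sys/queue.h> it expands to a pair of links:
 *
 *	struct pglist {
 *		struct vm_page *tqh_first;
 *		struct vm_page **tqh_last;
 *	};
 */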

struct vm_page {
	TAILQ_ENTRY(vm_page)	pageq;		/* queue info for FIFO
						 * queue or free list (P) */
	TAILQ_ENTRY(vm_page)	hashq;		/* hash table links (O) */
	TAILQ_ENTRY(vm_page)	listq;		/* pages in same object (O) */

	vm_object_t		object;		/* which object am I in (O,P) */
	vm_offset_t		offset;		/* offset into object (O,P) */

	u_short			wire_count;	/* wired down maps refs (P) */
	u_short			flags;		/* see below */

	vm_offset_t		phys_addr;	/* physical address of page */
};

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 */
#define	PG_INACTIVE	0x0001		/* page is in inactive list (P) */
#define	PG_ACTIVE	0x0002		/* page is in active list (P) */
#define	PG_LAUNDRY	0x0004		/* page is being cleaned now (P) */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_BUSY		0x0010		/* page is in transit (O) */
#define	PG_WANTED	0x0020		/* someone is waiting for page (O) */
#define	PG_TABLED	0x0040		/* page is in VP table (O) */
#define	PG_COPYONWRITE	0x0080		/* must copy page before changing (O) */
#define	PG_FICTITIOUS	0x0100		/* physical page doesn't exist (O) */
#define	PG_FAKE		0x0200		/* page is placeholder for pagein (O) */
#define	PG_FILLED	0x0400		/* client flag to set when filled */
#define	PG_DIRTY	0x0800		/* client flag to set when dirty */
#define	PG_PAGEROWNED	0x4000		/* DEBUG: async paging op in progress */
#define	PG_PTPAGE	0x8000		/* DEBUG: is a user page table page */
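
/*
 * Example (editor's sketch, not part of the original header): page
 * flags are read and written directly, under the lock noted in each
 * comment.  A pager completing a write-back might update a page as
 * below; the helper name is hypothetical, and PAGE_WAKEUP (defined
 * later in this file) bundles the PG_BUSY/PG_WANTED handshake.
 */
#ifdef notdef
static void
example_pageout_done(m)
	register vm_page_t m;
{
	m->flags &= ~PG_LAUNDRY;	/* no longer queued for cleaning */
	m->flags |= PG_CLEAN;		/* contents now match backing store */
	PAGE_WAKEUP(m);			/* clear PG_BUSY, wake any sleepers */
}
#endif /* notdef */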

#if	VM_PAGE_DEBUG
#define	VM_PAGE_CHECK(mem) { \
	if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
	    (((unsigned int) mem) > \
		((unsigned int) &vm_page_array[last_page-first_page])) || \
	    ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
		(PG_ACTIVE | PG_INACTIVE))) \
		panic("vm_page_check: not valid!"); \
}
#else /* VM_PAGE_DEBUG */
#define	VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
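
/*
 * Usage note (editor's addition): VM_PAGE_CHECK is a sanity check
 * placed at the entry of routines that take a vm_page_t; it panics
 * if the pointer lies outside vm_page_array or the page claims to be
 * on both the active and inactive queues at once.  For example:
 *
 *	void
 *	vm_page_wire(mem)
 *		register vm_page_t mem;
 *	{
 *		VM_PAGE_CHECK(mem);
 *		...
 *	}
 *
 * When VM_PAGE_DEBUG is off it expands to nothing.
 */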

#ifdef KERNEL
/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */

extern
struct pglist	vm_page_queue_free;	/* memory free queue */
extern
struct pglist	vm_page_queue_active;	/* active memory queue */
extern
struct pglist	vm_page_queue_inactive;	/* inactive memory queue */
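
/*
 * Example (editor's sketch, not in the original header): the queues
 * are ordinary tail queues, so a scan such as the pageout daemon's
 * walks the raw tqh_first/tqe_next links while holding the
 * page-queue lock (vm_page_lock_queues(), defined below).  The
 * helper name is hypothetical.
 */
#ifdef notdef
static void
example_scan_inactive()
{
	register vm_page_t m;

	vm_page_lock_queues();
	for (m = vm_page_queue_inactive.tqh_first; m != NULL;
	    m = m->pageq.tqe_next) {
		/* consider page m as a pageout candidate */
	}
	vm_page_unlock_queues();
}
#endif /* notdef */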

extern
vm_page_t	vm_page_array;		/* First resident page in table */
extern
long		first_page;		/* first physical page number */
					/* ... represented in vm_page_array */
extern
long		last_page;		/* last physical page number */
					/* ... represented in vm_page_array */
					/* [INCLUSIVE] */
extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define IS_VM_PHYSADDR(pa) \
		((pa) >= first_phys_addr && (pa) <= last_phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])
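
/*
 * Example (editor's note, not in the original header): vm_page_array
 * is indexed by physical page number relative to first_page, so the
 * two conversion macros invert each other for any managed physical
 * address.  atop() converts a byte address to a page number and
 * trunc_page() rounds one down; both come from the machine-dependent
 * headers.
 */
#ifdef notdef
	if (IS_VM_PHYSADDR(pa)) {
		m = PHYS_TO_VM_PAGE(pa);
		/* here VM_PAGE_TO_PHYS(m) == trunc_page(pa) */
	}
#endif /* notdef */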

extern
simple_lock_data_t	vm_page_queue_lock;	/* lock on active and inactive
						   page queues */
extern						/* lock on free page queue */
simple_lock_data_t	vm_page_queue_free_lock;

/*
 *	Functions implemented as macros
 */

#define PAGE_ASSERT_WAIT(m, interruptible)	{ \
				(m)->flags |= PG_WANTED; \
				assert_wait((m), (interruptible)); \
			}

#define PAGE_WAKEUP(m)	{ \
				(m)->flags &= ~PG_BUSY; \
				if ((m)->flags & PG_WANTED) { \
					(m)->flags &= ~PG_WANTED; \
					thread_wakeup((m)); \
				} \
			}
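
/*
 * Example (editor's sketch, not in the original header): a thread
 * that finds a page busy asserts a wait, drops its locks, and blocks
 * until whoever holds the page issues PAGE_WAKEUP.  The helper name
 * is hypothetical.
 */
#ifdef notdef
static void
example_wait_for_page(object, m)
	vm_object_t object;
	register vm_page_t m;
{
	while (m->flags & PG_BUSY) {
		PAGE_ASSERT_WAIT(m, FALSE);	/* sets PG_WANTED */
		vm_object_unlock(object);	/* never sleep holding locks */
		thread_block();			/* PAGE_WAKEUP ends the sleep */
		vm_object_lock(object);
		/* the page may have been freed while we slept;
		 * callers normally re-lookup it before continuing */
	}
}
#endif /* notdef */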

#define	vm_page_lock_queues()	simple_lock(&vm_page_queue_lock)
#define	vm_page_unlock_queues()	simple_unlock(&vm_page_queue_lock)

#define vm_page_set_modified(m)	{ (m)->flags &= ~PG_CLEAN; }

#define	VM_PAGE_INIT(mem, object, offset) { \
	(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
	vm_page_insert((mem), (object), (offset)); \
	(mem)->wire_count = 0; \
}
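
/*
 * Example (editor's sketch, not in the original header): an
 * allocator stamps a page taken off the free list with VM_PAGE_INIT
 * before handing it out, so fresh pages come back busy, clean and
 * fake with a zero wire count.  The helper name is hypothetical and
 * empty-queue handling is omitted.
 */
#ifdef notdef
static vm_page_t
example_grab_page(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	register vm_page_t mem;

	simple_lock(&vm_page_queue_free_lock);
	mem = vm_page_queue_free.tqh_first;	/* empty-queue check omitted */
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
	simple_unlock(&vm_page_queue_free_lock);
	VM_PAGE_INIT(mem, object, offset);
	return (mem);
}
#endif /* notdef */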

void		 vm_page_activate __P((vm_page_t));
vm_page_t	 vm_page_alloc __P((vm_object_t, vm_offset_t));
void		 vm_page_copy __P((vm_page_t, vm_page_t));
void		 vm_page_deactivate __P((vm_page_t));
void		 vm_page_free __P((vm_page_t));
void		 vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t	 vm_page_lookup __P((vm_object_t, vm_offset_t));
void		 vm_page_remove __P((vm_page_t));
void		 vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
void		 vm_page_startup __P((vm_offset_t *, vm_offset_t *));
void		 vm_page_unwire __P((vm_page_t));
void		 vm_page_wire __P((vm_page_t));
boolean_t	 vm_page_zero_fill __P((vm_page_t));
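
/*
 * Example (editor's sketch, not in the original header): the fault
 * path combines these primitives roughly as follows, looking a page
 * up by (object, offset) and allocating a zero-filled one on a miss.
 * The helper name is hypothetical.
 */
#ifdef notdef
static vm_page_t
example_find_or_alloc(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	register vm_page_t m;

	m = vm_page_lookup(object, offset);
	if (m == NULL) {
		m = vm_page_alloc(object, offset);	/* also inserts it */
		if (m == NULL)
			return (NULL);	/* free list exhausted; the caller
					 * waits for pageout and retries */
		vm_page_zero_fill(m);	/* new pages arrive PG_FAKE */
	}
	return (m);
}
#endif /* notdef */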

#endif /* KERNEL */
#endif /* !_VM_PAGE_ */