/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: vm_page.h,v 1.2 93/02/04 20:16:08 bill Exp $
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/* Logical memory page abstraction used to allocate primary store. */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

/*
 * An array of logical memory page abstraction structures, indexed by
 * the page frame number of the corresponding managed page of RAM. Thus,
 * the reverse lookup from a physical address to its logical memory
 * structure is possible (via the macro PHYS_TO_VM_PAGE()).
 *
 * Pages can only be allocated to objects at specific page offsets, each
 * representing a portion of the object's resident memory. Allocated pages
 * can be located by name (object/offset) with vm_page_lookup(), which
 * searches the hash table of queues that every allocated page is linked
 * onto. Objects hold pages by incorporating them into the object's queue
 * of resident pages.
 *
 * Allocated pages can be made eligible for reclamation by placing them
 * on the active queue when they are added to the address translation maps.
 * The pageout daemon monitors them, deactivating candidates for
 * reclamation by removing them from the address translation maps and placing
 * them on the inactive queue. Inactive pages may eventually be reclaimed;
 * their associated object is used to locate the pager that saves any changed
 * contents on secondary backing store prior to reuse. Wired pages suspend
 * reclamation until they become unwired.
 */

struct vm_page {
	/* queues */
	queue_chain_t	pageq;		/* page queue (active, inactive, free) */
	queue_chain_t	hashq;		/* allocated page lookup hash table */
	queue_chain_t	listq;		/* allocated object (O) */
	/* allocated page "name" */
	vm_object_t	object;		/* page holder */
	vm_offset_t	offset;		/* page position within object */
	/* page status */
	unsigned int	wire_count:16,	/* wired map entry reference count */
			inactive:1,	/* queued inactive */
			active:1,	/* queued active */
			laundry:1,	/* must be written back before reclaim */
			clean:1,	/* has not been modified */
			busy:1,		/* is in exclusive use */
			wanted:1,	/* another waits for exclusive use */
			tabled:1,	/* allocated to object */
			copy_on_write:1,/* clone before write access allowed */
			fictitious:1,	/* entry not part of managed array */
			fake:1,		/* contents are invalid */
			io:1;		/* can be used for I/O */

	vm_offset_t	phys_addr;	/* physical address of page */
	vm_prot_t	page_lock;	/* restricted access implied */
	vm_prot_t	unlock_request;	/* unlock request on restriction */
};

typedef struct vm_page	*vm_page_t;

#ifdef	KERNEL
extern queue_head_t	vm_page_queue_free;	/* memory free queue */
extern queue_head_t	vm_page_queue_active;	/* active memory queue */
extern queue_head_t	vm_page_queue_inactive;	/* inactive memory queue */

extern vm_page_t	vm_page_array;
extern long		first_page;	/* first physical page number */
					/* ... represented in vm_page_array */
extern long		last_page;	/* last physical page number */
					/* ... represented in vm_page_array */
					/* [INCLUSIVE] */
extern vm_offset_t	first_phys_addr; /* physical address for first_page */
extern vm_offset_t	last_phys_addr;	/* physical address for last_page */

extern int	vm_page_free_count;	/* How many pages are free? */
extern int	vm_page_active_count;	/* How many pages are active? */
extern int	vm_page_inactive_count;	/* How many pages are inactive? */
extern int	vm_page_wire_count;	/* How many pages are wired? */
extern int	vm_page_free_target;	/* How many do we want free? */
extern int	vm_page_free_min;	/* When to wake up the pageout daemon */
extern int	vm_page_inactive_target;/* How many do we want inactive? */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define IS_VM_PHYSADDR(pa) \
		((pa) >= first_phys_addr && (pa) <= last_phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])
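
/*
 * Illustrative sketch only, not part of the original interface: it shows how
 * the conversion macros above compose to map a physical address to the
 * vm_page structure that manages it.  The helper name
 * vm_page_from_phys_example is hypothetical; like PHYS_TO_VM_PAGE() itself,
 * the sketch assumes atop() is visible to the includer.
 */
extern inline vm_page_t
vm_page_from_phys_example(vm_offset_t pa)
{
	/* Only managed RAM, between first_phys_addr and last_phys_addr,
	 * has an entry in vm_page_array. */
	if (!IS_VM_PHYSADDR(pa))
		return ((vm_page_t) 0);
	/* atop() converts the byte address to a page frame number, which,
	 * biased by first_page, indexes vm_page_array; VM_PAGE_TO_PHYS()
	 * on the result recovers the page-aligned physical address. */
	return (PHYS_TO_VM_PAGE(pa));
}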

void	vm_set_page_size();
void /* vm_offset_t */
	vm_page_startup(void /* vm_offset_t start, vm_offset_t end, vm_offset_t vaddr */);
vm_page_t
	vm_page_lookup(vm_object_t object, vm_offset_t offset);
void	vm_page_rename(vm_page_t mem, vm_object_t new_object,
		vm_offset_t new_offset);
vm_page_t
	vm_page_alloc(vm_object_t object, vm_offset_t offset, boolean_t io);
void	vm_page_free(vm_page_t mem);
void	vm_page_wire(vm_page_t mem);
void	vm_page_unwire(vm_page_t mem);
void	vm_page_deactivate(vm_page_t m);
void	vm_page_activate(vm_page_t m);
boolean_t
	vm_page_zero_fill(vm_page_t m);
void	vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
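
/*
 * Illustrative sketch only, not part of the original interface: it shows the
 * typical composition of the naming and allocation functions above, per the
 * description at the top of this file.  The helper name
 * vm_page_find_or_alloc_example and its policy are hypothetical.
 */
extern inline vm_page_t
vm_page_find_or_alloc_example(vm_object_t object, vm_offset_t offset)
{
	vm_page_t m;

	/* A resident page is found by its (object, offset) name. */
	m = vm_page_lookup(object, offset);
	if (m != (vm_page_t) 0)
		return (m);
	/* Otherwise take a page off the free queue and enter it under that
	 * name.  It comes back busy, with invalid ("fake") contents; the
	 * caller is expected to fill it (e.g. with vm_page_zero_fill())
	 * and then release it with PAGE_WAKEUP(), defined below. */
	m = vm_page_alloc(object, offset, FALSE);
	return (m);
}
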
/* "internal" functions used externally by device_pager: */
void	vm_page_remove(vm_page_t mem);
void	vm_page_init(vm_page_t mem, vm_object_t object, vm_offset_t offset);
void	vm_page_insert(vm_page_t mem, vm_object_t object, vm_offset_t offset);

/* set logical page to default state on allocation. */
extern inline void
vm_page_init(vm_page_t mem, vm_object_t object, vm_offset_t offset)
{
	mem->busy = TRUE;
	mem->tabled = FALSE;
	vm_page_insert(mem, object, offset);
	mem->fictitious = FALSE;
	mem->page_lock = VM_PROT_NONE;
	mem->unlock_request = VM_PROT_NONE;
	mem->laundry = FALSE;
	mem->active = FALSE;
	mem->inactive = FALSE;
	mem->wire_count = 0;
	mem->clean = TRUE;
	mem->copy_on_write = FALSE;
	mem->fake = TRUE;
}

/*
 *	Functions implemented as macros
 */

#define PAGE_WAIT(m, s, interruptible)	{ \
	(m)->wanted = TRUE; \
	(void) tsleep((caddr_t) (m), PVM, s, 0); \
}

#define PAGE_WAKEUP(m)	{ \
	(m)->busy = FALSE; \
	if ((m)->wanted) { \
		(m)->wanted = FALSE; \
		wakeup((caddr_t) (m)); \
	} \
}
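
/*
 * Illustrative sketch only, not part of the original interface: it shows the
 * busy/wanted handshake that PAGE_WAIT() and PAGE_WAKEUP() implement.  The
 * helper name vm_page_busy_wait_example is hypothetical, and, like any user
 * of PAGE_WAIT(), it assumes tsleep() and PVM are in scope.
 */
extern inline void
vm_page_busy_wait_example(vm_page_t m)
{
	/* Sleep until the current holder drops exclusive use of the page,
	 * then claim it.  PAGE_WAKEUP() is the matching release: it clears
	 * busy and wakes any sleeping waiter that set wanted. */
	while (m->busy)
		PAGE_WAIT(m, "pgbusy", FALSE);
	m->busy = TRUE;
}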

#define vm_page_set_modified(m)	{ (m)->clean = FALSE; }
#endif	/* KERNEL */
#endif	/* _VM_PAGE_ */