/*	$NetBSD: uvm_page.h,v 1.32 2002/11/08 02:05:16 enami Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

/*
 * uvm_page.h
 */

/*
 *	Resident memory system definitions.
 */

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P) [or both].
 */

/*
 * locking note: the mach version of this data structure had bit
 * fields for the flags, and the bit fields were divided into two
 * items (depending on who locked what).  at some point, in BSD, the bit
 * fields were dumped and all the flags were lumped into one short.
 * that is fine for a single-threaded uniprocessor OS, but bad if you
 * want to actually make use of locking (simple_lock's).  so, we've
 * separated things back out again.
 *
 * note that the page structure has no lock of its own.
 */

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

struct vm_page {
	TAILQ_ENTRY(vm_page)	pageq;		/* queue info for FIFO
						 * queue or free list (P) */
	TAILQ_ENTRY(vm_page)	hashq;		/* hash table links (O) */
	TAILQ_ENTRY(vm_page)	listq;		/* pages in same object (O) */

	struct vm_anon		*uanon;		/* anon (O,P) */
	struct uvm_object	*uobject;	/* object (O,P) */
	voff_t			offset;		/* offset into object (O,P) */
	uint16_t		flags;		/* object flags [O] */
	uint16_t		loan_count;	/* number of active loans
						 * to read: [O or P]
						 * to modify: [O _and_ P] */
	uint16_t		wire_count;	/* wired down map refs [P] */
	uint16_t		pqflags;	/* page queue flags [P] */
	paddr_t			phys_addr;	/* physical address of page */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	char			*owner_tag;	/* why it was set busy */
#endif
};
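
/*
 * Illustrative sketch (not compiled): how the (O)/(P) annotations above are
 * honoured by a caller.  This assumes the object's simple lock is
 * uobj->vmobjlock (as in UVM of this vintage) and uses the uvm_lock_pageq()
 * macro defined later in this file; it is a hedged example, not part of the
 * API.
 */
#if 0
static void
example_loan_bump(struct uvm_object *uobj, struct vm_page *pg)
{
	simple_lock(&uobj->vmobjlock);	/* [O]: covers flags, listq, ... */
	uvm_lock_pageq();		/* [P]: covers pqflags, wire_count */
	pg->loan_count++;		/* modifying loan_count needs O and P */
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
}
#endif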

/*
 * These are the flags defined for vm_page.
 */

/*
 * locking rules:
 *   PG_ ==> locked by the object lock
 *   PQ_ ==> locked by the page queue lock
 *   PQ_FREE is locked by the free queue lock and is mutually exclusive
 *   with all other PQ_ flags.
 *
 * PG_ZERO is used to indicate that a page has been pre-zero'd.  This flag
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 */

#define	PG_BUSY		0x0001		/* page is locked */
#define	PG_WANTED	0x0002		/* someone is waiting for page */
#define	PG_TABLED	0x0004		/* page is in VP table */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x0020		/* page to be freed when unbusied */
#define	PG_FAKE		0x0040		/* page is not yet initialized */
#define	PG_RDONLY	0x0080		/* page must be mapped read-only */
#define	PG_ZERO		0x0100		/* page is pre-zero'd */

#define	PG_PAGER1	0x1000		/* pager-specific flag */

#define	PQ_FREE		0x01		/* page is on free list */
#define	PQ_INACTIVE	0x02		/* page is on inactive list */
#define	PQ_ACTIVE	0x04		/* page is on active list */
#define	PQ_ANON		0x10		/* page is part of an anon, rather
					   than a uvm_object */
#define	PQ_AOBJ		0x20		/* page is part of an anonymous
					   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)
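
/*
 * Illustrative sketch (not compiled): testing the flag words.  pg->flags
 * (PG_*) is read under the object lock and pg->pqflags (PQ_*) under the page
 * queue lock; PQ_SWAPBACKED asks "is this page backed by swap (anon or aobj)
 * rather than by a vnode?".  Hedged example only.
 */
#if 0
	if (pg->flags & PG_BUSY) {
		/* page is locked by someone else; wait or skip it */
	}
	if (pg->pqflags & PQ_SWAPBACKED) {
		/* page belongs to an anon or to an anonymous uvm_object */
	}
#endif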

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 * 	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *      - others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define VM_PSTRAT_RANDOM	1
#define VM_PSTRAT_BSEARCH	2
#define VM_PSTRAT_BIGFIRST	3

/*
 * vm_physseg: describes one segment of physical memory
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	int	free_list;		/* which free list they belong on */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *lastpg;	/* vm_page structure for end */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

#ifdef _KERNEL

/*
 * globals
 */

extern boolean_t vm_page_zero_enable;

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */

extern struct pglist	vm_page_queue_free;	/* memory free queue */
extern struct pglist	vm_page_queue_active;	/* active memory queue */
extern struct pglist	vm_page_queue_inactive;	/* inactive memory queue */
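
/*
 * Illustrative sketch (not compiled): moving a page between the queues above.
 * The queues are protected by the page queue lock, so callers bracket queue
 * operations with the uvm_lock_pageq()/uvm_unlock_pageq() macros defined
 * below.  Hedged example only.
 */
#if 0
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);		/* move pg to the inactive list */
	uvm_unlock_pageq();
#endif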

/*
 * physical memory config is stored in vm_physmem.
 */

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
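
/*
 * Illustrative sketch (not compiled): walking the physical segment table.
 * vm_physmem[0 .. vm_nphysseg-1] describes every managed segment, so summing
 * (end - start) over those entries counts the managed page frames.  Hedged
 * example only.
 */
#if 0
	int lcv;
	paddr_t npages = 0;

	for (lcv = 0; lcv < vm_nphysseg; lcv++)
		npages += vm_physmem[lcv].end - vm_physmem[lcv].start;
#endif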

/*
 * handle inline options
 */

#ifdef UVM_PAGE_INLINE
#define PAGE_INLINE static __inline
#else
#define PAGE_INLINE /* nothing */
#endif /* UVM_PAGE_INLINE */

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init __P((vaddr_t *, vaddr_t *));
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own __P((struct vm_page *, char *));
#endif
#if !defined(PMAP_STEAL_MEMORY)
boolean_t uvm_page_physget __P((paddr_t *));
#endif
void uvm_page_rehash __P((void));
void uvm_page_recolor __P((int));
void uvm_pageidlezero __P((void));

PAGE_INLINE int uvm_lock_fpageq __P((void));
PAGE_INLINE void uvm_unlock_fpageq __P((int));

PAGE_INLINE void uvm_pageactivate __P((struct vm_page *));
vaddr_t uvm_pageboot_alloc __P((vsize_t));
PAGE_INLINE void uvm_pagecopy __P((struct vm_page *, struct vm_page *));
PAGE_INLINE void uvm_pagedeactivate __P((struct vm_page *));
PAGE_INLINE void uvm_pagedequeue __P((struct vm_page *));
void uvm_pagefree __P((struct vm_page *));
void uvm_page_unbusy __P((struct vm_page **, int));
PAGE_INLINE struct vm_page *uvm_pagelookup __P((struct uvm_object *, voff_t));
PAGE_INLINE void uvm_pageunwire __P((struct vm_page *));
PAGE_INLINE void uvm_pagewait __P((struct vm_page *, int));
PAGE_INLINE void uvm_pagewake __P((struct vm_page *));
PAGE_INLINE void uvm_pagewire __P((struct vm_page *));
PAGE_INLINE void uvm_pagezero __P((struct vm_page *));

PAGE_INLINE int uvm_page_lookup_freelist __P((struct vm_page *));

static struct vm_page *PHYS_TO_VM_PAGE __P((paddr_t));
static int vm_physseg_find __P((paddr_t, int *));

/*
 * macros
 */

#define UVM_PAGE_HASH_PENALTY	4	/* XXX: a guess */

#define uvm_lock_pageq()	simple_lock(&uvm.pageqlock)
#define uvm_unlock_pageq()	simple_unlock(&uvm.pageqlock)

#define uvm_pagehash(obj,off) \
	(((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)
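
/*
 * Illustrative sketch (not compiled): the object/offset hash in use.  This
 * mirrors what uvm_pagelookup() does internally, assuming the bucket array
 * is uvm.page_hash[] as in UVM of this vintage.  Hedged example only.
 */
#if 0
	struct pglist *buck;
	struct vm_page *pg;

	buck = &uvm.page_hash[uvm_pagehash(uobj, off)];
	TAILQ_FOREACH(pg, buck, hashq) {
		if (pg->uobject == uobj && pg->offset == off)
			break;			/* found it */
	}
#endif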

#define	UVM_PAGEZERO_TARGET	(uvmexp.free)

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * Compute the page color bucket for a given page.
 */
#define	VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
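
/*
 * Worked example (hypothetical parameters): with four colors
 * (uvmexp.colormask == 3) and 4KB pages, physical addresses 0x0000, 0x1000,
 * 0x2000, 0x3000 and 0x4000 land in buckets 0, 1, 2, 3 and 0 respectively,
 * since the bucket is just the page frame number masked by colormask.
 */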

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

/*
 * vm_physseg_find: find the vm_physseg structure that a PA belongs to
 */
static __inline int
vm_physseg_find(pframe, offp)
	paddr_t pframe;
	int	*offp;
{
#if VM_PHYSSEG_MAX == 1

	/* 'contig' case */
	if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
		if (offp)
			*offp = pframe - vm_physmem[0].start;
		return(0);
	}
	return(-1);

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}
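
/*
 * Illustrative sketch (not compiled): vm_physseg_find() in use.  Given a page
 * frame number it returns the index of the owning segment (or -1), and
 * optionally the page's offset within that segment.  Hedged example only.
 */
#if 0
	int off, psi;

	psi = vm_physseg_find(atop(pa), &off);
	if (psi != -1)
		printf("PA 0x%lx: page %d of segment %d (free list %d)\n",
		    (unsigned long)pa, off, psi, vm_physmem[psi].free_list);
#endif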

/*
 * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap code.
 */

#define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
static __inline struct vm_page *
PHYS_TO_VM_PAGE(pa)
	paddr_t pa;
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);
	if (psi != -1)
		return(&vm_physmem[psi].pgs[off]);
	return(NULL);
}
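
/*
 * Illustrative sketch (not compiled): the usual round trip between a physical
 * address and its vm_page.  trunc_page() and KASSERT() are assumed to be
 * available, as elsewhere in the kernel.  Hedged example only.
 */
#if 0
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);		/* NULL if pa is not managed RAM */
	if (pg != NULL)
		KASSERT(VM_PAGE_TO_PHYS(pg) == trunc_page(pa));
#endif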

#define VM_PAGE_IS_FREE(entry)  ((entry)->pqflags & PQ_FREE)

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */