/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vmmeter.h	8.2 (Berkeley) 7/10/94
 * $FreeBSD: src/sys/sys/vmmeter.h,v 1.21.2.2 2002/10/10 19:28:21 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page2.h,v 1.3 2008/04/14 20:00:29 dillon Exp $
 */

#ifndef _VM_VM_PAGE2_H_
#define _VM_VM_PAGE2_H_

#ifndef _SYS_VMMETER_H_
#include <sys/vmmeter.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _VM_PAGE_H_
#include <vm/vm_page.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>
#endif

#ifdef _KERNEL

/*
 * Return TRUE if we are under our severe low-free-pages threshold.
 *
 * This causes user processes to stall to avoid exhausting memory that
 * the kernel might need.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline
int
vm_page_count_severe(void)
{
    return (vmstats.v_free_severe >
	    vmstats.v_free_count + vmstats.v_cache_count ||
	    vmstats.v_free_reserved > vmstats.v_free_count);
}
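
/*
 * Usage sketch (illustrative, not part of the original header): an
 * allocating path can stall a user process while the severe threshold
 * is breached.  vm_wait_for_pages() is a hypothetical stand-in for
 * whatever block/retry mechanism the caller uses.
 *
 *	while (vm_page_count_severe())
 *		vm_wait_for_pages();
 */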

/*
 * Return TRUE if we are under our minimum low-free-pages threshold.
 * This activates the pageout daemon.  The pageout daemon tries to
 * reach the target but may stop once it satisfies the minimum.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline
int
vm_page_count_min(int donotcount)
{
    return (vmstats.v_free_min + donotcount >
	    (vmstats.v_free_count + vmstats.v_cache_count) ||
	    vmstats.v_free_reserved > vmstats.v_free_count);
}
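
/*
 * Example (illustrative only): the pageout daemon can pass the number
 * of pages it already has in flight as 'donotcount' so those pages do
 * not count toward satisfying the minimum.  'inflight' below is a
 * hypothetical variable.
 *
 *	if (vm_page_count_min(inflight))
 *		... keep paging, the minimum is not yet satisfied ...
 */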

/*
 * Return TRUE if we are under our free page target.  The pageout daemon
 * tries to reach the target but may stop once it gets past the min.
 */
static __inline
int
vm_page_count_target(void)
{
    return (vmstats.v_free_target >
	    (vmstats.v_free_count + vmstats.v_cache_count) ||
	    vmstats.v_free_reserved > vmstats.v_free_count);
}

/*
 * Return the number of pages the pageout daemon needs to move into the
 * cache or free lists.  A negative number means we have sufficient free
 * pages.
 *
 * The target free+cache level is greater than the threshold used by
 * vm_page_count_target().  The frontend uses vm_page_count_target()
 * while the backend continues freeing based on vm_paging_target().
 *
 * This function DOES NOT return TRUE or FALSE.
 */
static __inline
int
vm_paging_target(void)
{
    return (
	(vmstats.v_free_target + vmstats.v_cache_min) -
	(vmstats.v_free_count + vmstats.v_cache_count)
    );
}
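
/*
 * Sketch (illustrative only): because vm_paging_target() returns a
 * page count rather than TRUE/FALSE, the backend can keep freeing
 * until the returned value goes non-positive.  The loop body below is
 * hypothetical.
 *
 *	while (vm_paging_target() > 0)
 *		free_or_cache_one_page();
 */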

/*
 * Return TRUE if hysteresis dictates we should nominally wake up the
 * pageout daemon to start working on freeing up some memory.  This
 * routine should NOT be used to determine when to block on the VM system.
 * We want to wake up the pageout daemon before we might otherwise block.
 *
 * Paging begins when cache+free drops below cache_min + free_min.
 */
static __inline
int
vm_paging_needed(void)
{
    if (vmstats.v_free_min + vmstats.v_cache_min >
	vmstats.v_free_count + vmstats.v_cache_count) {
	    return 1;
    }
    if (vmstats.v_free_min > vmstats.v_free_count)
	    return 1;
    return 0;
}
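
/*
 * Usage sketch (illustrative only): a frontend path about to consume
 * pages can nominally wake the pageout daemon early rather than
 * waiting until it would have to block.  pagedaemon_wakeup() stands
 * in for whatever wakeup hook the caller uses.
 *
 *	if (vm_paging_needed())
 *		pagedaemon_wakeup();
 */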

/*
 * Deliver an event to any actions registered on the page.  The call
 * into vm_page_event_internal() is skipped entirely unless
 * PG_ACTIONLIST indicates at least one action is present.
 */
static __inline
void
vm_page_event(vm_page_t m, vm_page_event_t event)
{
    if (m->flags & PG_ACTIONLIST)
	vm_page_event_internal(m, event);
}

/*
 * Initialize an action structure with its target page, callback
 * function, and callback data.
 */
static __inline
void
vm_page_init_action(vm_page_t m, vm_page_action_t action,
		    void (*func)(vm_page_t, vm_page_action_t), void *data)
{
    action->m = m;
    action->func = func;
    action->data = data;
}
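
/*
 * Example (illustrative only): a subsystem typically fills in an
 * action with vm_page_init_action() and registers it on the page's
 * action list; vm_page_event() then runs the callback when the named
 * event occurs.  The callback, data, and event below are hypothetical
 * placeholders.
 *
 *	vm_page_init_action(m, &act, my_callback, my_data);
 *	... register 'act' via the system's registration routine ...
 *	vm_page_event(m, some_event);
 */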

/*
 * Clear dirty bits in the VM page but truncate the
 * end to a DEV_BSIZE'd boundary.
 *
 * Used when reading data in, typically via getpages.
 * The partial device block at the end of the truncation
 * range should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline
void
vm_page_clear_dirty_end_nonincl(vm_page_t m, int base, int size)
{
    size = (base + size) & ~DEV_BMASK;
    if (base < size)
	vm_page_clear_dirty(m, base, size - base);
}
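
/*
 * Worked example (assuming DEV_BSIZE is 512, so DEV_BMASK is 511):
 * with base = 0 and size = 1636, the end offset 1636 is rounded down
 * to 1536, so only [0, 1536) is cleared and the partial device block
 * starting at 1536 keeps its dirty bit.
 */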

/*
 * Clear dirty bits in the VM page but truncate the
 * beginning to a DEV_BSIZE'd boundary.
 *
 * Used when truncating a buffer.  The partial device
 * block at the beginning of the truncation range
 * should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline
void
vm_page_clear_dirty_beg_nonincl(vm_page_t m, int base, int size)
{
    size += base;
    base = (base + DEV_BMASK) & ~DEV_BMASK;
    if (base < size)
	vm_page_clear_dirty(m, base, size - base);
}
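
/*
 * Worked example (assuming DEV_BSIZE is 512): with base = 100 and
 * size = 1436, the range covers [100, 1536); the start is rounded up
 * to 512, so only [512, 1536) is cleared and the partial device block
 * containing offset 100 keeps its dirty bit.
 */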

/*
 * Lock/unlock the spinlock associated with a page via the shared
 * spinlock pool.
 */
static __inline
void
vm_page_spin_lock(vm_page_t m)
{
    spin_pool_lock(m);
}

static __inline
void
vm_page_spin_unlock(vm_page_t m)
{
    spin_pool_unlock(m);
}
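
/*
 * Usage sketch (illustrative only): the pool spinlock is keyed on the
 * page pointer, so lock/unlock pairs must name the same page.
 *
 *	vm_page_spin_lock(m);
 *	... examine or adjust the page ...
 *	vm_page_spin_unlock(m);
 */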

/*
 * Wire a vm_page that is already wired.  Does not require a busied
 * page.
 */
static __inline
void
vm_page_wire_quick(vm_page_t m)
{
    if (atomic_fetchadd_int(&m->wire_count, 1) == 0)
	panic("vm_page_wire_quick: wire_count was 0");
}

/*
 * Unwire a vm_page quickly; this does not require a busied page.
 *
 * This routine refuses to drop the wire_count to 0 and will return
 * TRUE if it would have had to (instead of decrementing it to 0).
 * The caller can then busy the page and deal with it.
 */
static __inline
int
vm_page_unwire_quick(vm_page_t m)
{
    KKASSERT(m->wire_count > 0);
    for (;;) {
	u_int wire_count = m->wire_count;

	cpu_ccfence();
	if (wire_count == 1)
		return TRUE;
	if (atomic_cmpset_int(&m->wire_count, wire_count, wire_count - 1))
		return FALSE;
    }
}
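
/*
 * Example (illustrative only): a caller holding an existing wiring can
 * stack and drop extra wirings without busying the page, falling back
 * to a heavier busied path only when the count would otherwise hit 0.
 * The fallback below is a hypothetical sketch, not the required
 * procedure.
 *
 *	vm_page_wire_quick(m);
 *	...
 *	if (vm_page_unwire_quick(m)) {
 *		... busy the page, vm_page_unwire(), and re-test ...
 *	}
 */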

#endif	/* _KERNEL */
#endif	/* _VM_VM_PAGE2_H_ */