xref: /dragonfly/sys/vm/vm_meter.c (revision 9348a738)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)vm_meter.c	8.4 (Berkeley) 1/4/94
32  * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $
33  * $DragonFly: src/sys/vm/vm_meter.c,v 1.15 2008/04/28 18:04:08 dillon Exp $
34  */
35 
36 #include <sys/param.h>
37 #include <sys/proc.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/resource.h>
41 #include <sys/vmmeter.h>
42 
43 #include <vm/vm.h>
44 #include <vm/vm_page.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_param.h>
47 #include <sys/lock.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_object.h>
51 #include <sys/sysctl.h>
52 
/*
 * WARNING: vmstats represents the final say, but individual cpu's may
 *	    accumulate adjustments in gd->gd_vmstats_adj.  These are
 *	    synchronized to the global vmstats in hardclock.
 *
 *	    In addition, most individual cpus check vmstats using a local
 *	    copy of the global vmstats in gd->gd_vmstats.  Hardclock also
 *	    synchronizes the copy.  The pageout code and vm_page_alloc will
 *	    also synchronize their local copies as necessary.
 *
 *	    Other consumers should not expect perfect values.
 */
__cachealign struct vmstats vmstats;

/* Sleep time (in seconds) at or beyond which an lwp is considered idle */
static int maxslp = MAXSLP;

/* Writable paging-target knobs, exported under the vm.* sysctl tree */
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
	CTLFLAG_RW, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
	CTLFLAG_RW, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
	CTLFLAG_RW, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg, CTLFLAG_RD,
    &averunnable, loadavg, "Machine loadaverage history");

/* Forward declaration: per-process callback used by do_vmtotal() */
static int do_vmtotal_callback(struct proc *p, void *data);
97 
/*
 * Sysctl handler for vm.vmtotal.  Sums the per-cpu vmtotal memory
 * statistics, scans the process list to fill in the run/sleep/wait
 * buckets, and copies the aggregate out as an opaque struct vmtotal.
 *
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	globaldata_t gd;
	int n;

	bzero(&total, sizeof(total));
	/*
	 * Accumulate the per-cpu memory fields.  The process-state fields
	 * (t_rq/t_dw/t_pw/t_sl/t_sw) and t_free are derived further below.
	 */
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);

		/* total.t_rq calculated separately */
		/* total.t_dw calculated separately */
		/* total.t_pw calculated separately */
		/* total.t_sl calculated separately */
		/* total.t_sw calculated separately */
		total.t_vm += gd->gd_vmtotal.t_vm;
		total.t_avm += gd->gd_vmtotal.t_avm;
		total.t_rm += gd->gd_vmtotal.t_rm;
		total.t_arm += gd->gd_vmtotal.t_arm;
		total.t_vmshr += gd->gd_vmtotal.t_vmshr;
		total.t_avmshr += gd->gd_vmtotal.t_avmshr;
		total.t_rmshr += gd->gd_vmtotal.t_rmshr;
		total.t_armshr += gd->gd_vmtotal.t_armshr;
		/* total.t_free calculated separately */
	}

	/*
	 * Calculate process statistics (fills the run-state buckets).
	 */
	allproc_scan(do_vmtotal_callback, &total);

	/*
	 * Adjust for sysctl return.  Add real memory into virtual memory.
	 * Set t_free.
	 *
	 * t_rm - Real memory
	 * t_vm - Virtual memory (real + swap)
	 */
	total.t_vm += total.t_rm;
	total.t_free = vmstats.v_free_count + vmstats.v_cache_count;

	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}
145 
/*
 * allproc_scan() callback for do_vmtotal(): classify each lwp of a
 * non-system process into the vmtotal run-state buckets (t_dw, t_sl,
 * t_sw, t_rq, t_pw).  Takes p->p_token to stabilize the lwp list.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
	struct vmtotal *totalp = data;
	struct lwp *lp;

	/* System (kernel) processes are not counted */
	if (p->p_flags & P_SYSTEM)
		return(0);

	lwkt_gettoken(&p->p_token);

	FOREACH_LWP_IN_PROC(lp, p) {
		switch (lp->lwp_stat) {
		case LSSTOP:
		case LSSLEEP:
			if ((p->p_flags & P_SWAPPEDOUT) == 0) {
				if ((lp->lwp_flags & LWP_SINTR) == 0)
					totalp->t_dw++;	/* uninterruptible wait */
				else if (lp->lwp_slptime < maxslp)
					totalp->t_sl++;	/* interruptible sleep */
			} else if (lp->lwp_slptime < maxslp) {
				totalp->t_sw++;	/* sleeping, swapped out */
			}
			/*
			 * NOTE(review): an lwp asleep for >= maxslp aborts
			 * the scan of this process's remaining lwps (goto,
			 * not continue) — presumably intentional, treating
			 * the process as idle; confirm before changing.
			 */
			if (lp->lwp_slptime >= maxslp)
				goto out;
			break;

		case LSRUN:
			if (p->p_flags & P_SWAPPEDOUT)
				totalp->t_sw++;	/* runnable, swapped out */
			else
				totalp->t_rq++;	/* on a run queue */
			if (p->p_stat == SIDL)
				goto out;	/* process still being born */
			break;

		default:
			goto out;
		}

		/*
		 * Set while in vm_fault()
		 */
		if (lp->lwp_flags & LWP_PAGING)
			totalp->t_pw++;
	}
out:
	lwkt_reltoken(&p->p_token);
	return(0);
}
196 
197 /*
198  * No requirements.
199  */
200 static int
201 do_vmstats(SYSCTL_HANDLER_ARGS)
202 {
203 	struct vmstats vms = vmstats;
204 	return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
205 }
206 
207 /*
208  * No requirements.
209  */
210 static int
211 do_vmmeter(SYSCTL_HANDLER_ARGS)
212 {
213 	int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
214 	int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
215 	struct vmmeter vmm;
216 	int i;
217 
218 	bzero(&vmm, sizeof(vmm));
219 	for (i = 0; i < ncpus; ++i) {
220 		int off;
221 		struct globaldata *gd = globaldata_find(i);
222 
223 		for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
224 			*(u_int *)((char *)&vmm + off) +=
225 				*(u_int *)((char *)&gd->gd_cnt + off);
226 		}
227 
228 	}
229 	vmm.v_intr += vmm.v_ipi + vmm.v_timer;
230 	return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
231 }
232 
233 /*
234  * vcnt() -	accumulate statistics from the cnt structure for each cpu
235  *
236  *	The vmmeter structure is now per-cpu as well as global.  Those
237  *	statistics which can be kept on a per-cpu basis (to avoid cache
238  *	stalls between cpus) can be moved to the per-cpu vmmeter.  Remaining
239  *	statistics, such as v_free_reserved, are left in the global
240  *	structure.
241  *
242  * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
243  *
244  * No requirements.
245  */
246 static int
247 vcnt(SYSCTL_HANDLER_ARGS)
248 {
249 	int i;
250 	int count = 0;
251 	int offset = arg2;
252 
253 	for (i = 0; i < ncpus; ++i) {
254 		struct globaldata *gd = globaldata_find(i);
255 		count += *(int *)((char *)&gd->gd_cnt + offset);
256 	}
257 	return(SYSCTL_OUT(req, &count, sizeof(int)));
258 }
259 
260 /*
261  * No requirements.
262  */
263 static int
264 vcnt_intr(SYSCTL_HANDLER_ARGS)
265 {
266 	int i;
267 	int count = 0;
268 
269 	for (i = 0; i < ncpus; ++i) {
270 		struct globaldata *gd = globaldata_find(i);
271 
272 		count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi +
273 			 gd->gd_cnt.v_timer;
274 	}
275 	return(SYSCTL_OUT(req, &count, sizeof(int)));
276 }
277 
/* Byte offset of a counter within struct vmmeter, for use as vcnt() arg2 */
#define VMMETEROFF(var)	offsetof(struct vmmeter, var)

/* Opaque aggregate structures exported under vm.* */
SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal",
    "System virtual memory aggregate");
SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmstats), do_vmstats, "S,vmstats",
    "System virtual memory statistics");
SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter",
    "System statistics");
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");

/*
 * Per-counter sysctls under vm.stats.sys and vm.stats.vm, each summed
 * across cpus by vcnt() (vcnt_intr() for v_intr, which also folds in
 * IPI and timer interrupts).
 */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");

/* Read-only views of global vmstats fields (not per-cpu aggregated) */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
	"Page size in bytes");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
	"Total number of pages in system");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_target, CTLFLAG_RD, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
	"Number of pages free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
	"Number of pages wired down");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
	"Number of pages active");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
	"Number of pages inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
	"Number of pages on buffer cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_min, CTLFLAG_RD, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_max, CTLFLAG_RD, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
	"Reserved number of pages for int code");
422 
423 /*
424  * No requirements.
425  */
426 static int
427 do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS)
428 {
429 	int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
430 	int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
431 	struct globaldata *gd = arg1;
432 	struct vmmeter vmm;
433 	int off;
434 
435 	bzero(&vmm, sizeof(vmm));
436 	for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
437 		*(u_int *)((char *)&vmm + off) +=
438 			*(u_int *)((char *)&gd->gd_cnt + off);
439 	}
440 	vmm.v_intr += vmm.v_ipi + vmm.v_timer;
441 	return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
442 }
443 
444 /*
445  * Called from the low level boot code only.
446  */
447 static void
448 vmmeter_init(void *dummy __unused)
449 {
450 	int i;
451 
452 	for (i = 0; i < ncpus; ++i) {
453 		struct sysctl_ctx_list *ctx;
454 		struct sysctl_oid *oid;
455 		struct globaldata *gd;
456 		char name[32];
457 
458 		ksnprintf(name, sizeof(name), "cpu%d", i);
459 
460 		ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK);
461 		sysctl_ctx_init(ctx);
462 		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm),
463 				      OID_AUTO, name, CTLFLAG_RD, 0, "");
464 
465 		gd = globaldata_find(i);
466 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
467 				"vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD,
468 				gd, sizeof(struct vmmeter), do_vmmeter_pcpu,
469 				"S,vmmeter", "System per-cpu statistics");
470 	}
471 }
472 SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0);
473 
474 /*
475  * Rolls up accumulated pcpu adjustments to vmstats counts into the global
476  * structure, copy the global structure into our pcpu structure.  Critical
477  * path checks will use our pcpu structure.
478  *
479  * This is somewhat expensive and only called when needed, and by the
480  * hardclock.
481  */
482 void
483 vmstats_rollup(void)
484 {
485 	int cpu;
486 
487 	for (cpu = 0; cpu < ncpus; ++cpu) {
488 		vmstats_rollup_cpu(globaldata_find(cpu));
489 	}
490 	mycpu->gd_vmstats = vmstats;
491 }
492 
493 void
494 vmstats_rollup_cpu(globaldata_t gd)
495 {
496 	int value;
497 
498 	if (gd->gd_vmstats_adj.v_free_count) {
499 		value = atomic_swap_int(&gd->gd_vmstats_adj.v_free_count, 0);
500 		atomic_add_int(&vmstats.v_free_count, value);
501 	}
502 	if (gd->gd_vmstats_adj.v_cache_count) {
503 		value = atomic_swap_int(&gd->gd_vmstats_adj.v_cache_count, 0);
504 		atomic_add_int(&vmstats.v_cache_count, value);
505 	}
506 	if (gd->gd_vmstats_adj.v_inactive_count) {
507 		value =atomic_swap_int(&gd->gd_vmstats_adj.v_inactive_count, 0);
508 		atomic_add_int(&vmstats.v_inactive_count, value);
509 	}
510 	if (gd->gd_vmstats_adj.v_active_count) {
511 		value = atomic_swap_int(&gd->gd_vmstats_adj.v_active_count, 0);
512 		atomic_add_int(&vmstats.v_active_count, value);
513 	}
514 	if (gd->gd_vmstats_adj.v_wire_count) {
515 		value = atomic_swap_int(&gd->gd_vmstats_adj.v_wire_count, 0);
516 		atomic_add_int(&vmstats.v_wire_count, value);
517 	}
518 }
519