1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1982, 1986, 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * @(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $
 * $DragonFly: src/sys/vm/vm_meter.c,v 1.15 2008/04/28 18:04:08 dillon Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/resource.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <sys/sysctl.h>

/*
 * Global VM statistics.  Snapshotted wholesale by the vm.vmstats sysctl
 * (do_vmstats) and exported field-by-field through the SYSCTL_UINT knobs
 * below and in the vm.stats.vm subtree.
 */
struct vmstats vmstats;

/*
 * Sleep-time threshold used when classifying lwps in do_vmtotal_callback():
 * lwps with lwp_slptime >= maxslp are not counted as sleepers.
 */
static int maxslp = MAXSLP;

/*
 * Read/write tuning knobs under vm.*, each aliasing a field of the
 * global vmstats structure.
 */
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
	CTLFLAG_RW, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
	CTLFLAG_RW, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
	CTLFLAG_RW, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg, CTLFLAG_RD,
	&averunnable, loadavg, "Machine loadaverage history");

/* Per-process accumulator used by do_vmtotal()'s allproc_scan. */
static int do_vmtotal_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
/*
 * do_vmtotal() - sysctl handler for vm.vmtotal.
 *
 * Builds a struct vmtotal: first sums the per-cpu gd_vmtotal memory
 * counters, then scans all processes (do_vmtotal_callback) to fill in
 * the scheduler-related fields, and finally derives t_vm and t_free
 * before copying the result out to userland.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	globaldata_t gd;
	int n;

	bzero(&total, sizeof(total));
	for (n = 0; n < ncpus; ++n) {
		gd = globaldata_find(n);

		/* total.t_rq calculated separately */
		/* total.t_dw calculated separately */
		/* total.t_pw calculated separately */
		/* total.t_sl calculated separately */
		/* total.t_sw calculated separately */
		total.t_vm += gd->gd_vmtotal.t_vm;
		total.t_avm += gd->gd_vmtotal.t_avm;
		total.t_rm += gd->gd_vmtotal.t_rm;
		total.t_arm += gd->gd_vmtotal.t_arm;
		total.t_vmshr += gd->gd_vmtotal.t_vmshr;
		total.t_avmshr += gd->gd_vmtotal.t_avmshr;
		total.t_rmshr += gd->gd_vmtotal.t_rmshr;
		total.t_armshr += gd->gd_vmtotal.t_armshr;
		/* total.t_free calculated separately */
	}

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, &total);

	/*
	 * Adjust for sysctl return.  Add real memory into virtual memory.
	 * Set t_free.
	 *
	 * t_rm - Real memory
	 * t_vm - Virtual memory (real + swap)
	 */
	total.t_vm += total.t_rm;
	total.t_free = vmstats.v_free_count + vmstats.v_cache_count;

	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}

/*
 * allproc_scan() callback for do_vmtotal(): classify each lwp of the
 * process into the run/sleep/swap counters of the struct vmtotal
 * passed in via data.  p->p_token is held across the lwp iteration.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
	struct vmtotal *totalp = data;
	struct lwp *lp;

	/* System processes are excluded from the totals. */
	if (p->p_flags & P_SYSTEM)
		return(0);

	lwkt_gettoken(&p->p_token);

	FOREACH_LWP_IN_PROC(lp, p) {
		switch (lp->lwp_stat) {
		case LSSTOP:
		case LSSLEEP:
			if ((p->p_flags & P_SWAPPEDOUT) == 0) {
				/* t_dw: sleeping without LWP_SINTR set */
				if ((lp->lwp_flags & LWP_SINTR) == 0)
					totalp->t_dw++;
				/* t_sl: interruptible, not a long sleeper */
				else if (lp->lwp_slptime < maxslp)
					totalp->t_sl++;
			} else if (lp->lwp_slptime < maxslp) {
				/* t_sw: sleeping and swapped out */
				totalp->t_sw++;
			}
			/* A long sleeper terminates the scan of this proc. */
			if (lp->lwp_slptime >= maxslp)
				goto out;
			break;

		case LSRUN:
			if (p->p_flags & P_SWAPPEDOUT)
				totalp->t_sw++;	/* runnable but swapped out */
			else
				totalp->t_rq++;	/* on a run queue */
			/* SIDL: process still in creation, stop here. */
			if (p->p_stat == SIDL)
				goto out;
			break;

		default:
			goto out;
		}

		/*
		 * Set while in vm_fault()
		 */
		if (lp->lwp_flags & LWP_PAGING)
			totalp->t_pw++;
	}
out:
	lwkt_reltoken(&p->p_token);
	return(0);
}

/*
 * sysctl handler for vm.vmstats: copy out a snapshot of the global
 * vmstats structure.
 *
 * No requirements.
 */
static int
do_vmstats(SYSCTL_HANDLER_ARGS)
{
	struct vmstats vms = vmstats;
	return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
}

/*
 * No requirements.
197 */ 198 static int 199 do_vmmeter(SYSCTL_HANDLER_ARGS) 200 { 201 int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); 202 int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); 203 struct vmmeter vmm; 204 int i; 205 206 bzero(&vmm, sizeof(vmm)); 207 for (i = 0; i < ncpus; ++i) { 208 int off; 209 struct globaldata *gd = globaldata_find(i); 210 211 for (off = boffset; off <= eoffset; off += sizeof(u_int)) { 212 *(u_int *)((char *)&vmm + off) += 213 *(u_int *)((char *)&gd->gd_cnt + off); 214 } 215 216 } 217 vmm.v_intr += vmm.v_ipi + vmm.v_timer; 218 return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req)); 219 } 220 221 /* 222 * vcnt() - accumulate statistics from the cnt structure for each cpu 223 * 224 * The vmmeter structure is now per-cpu as well as global. Those 225 * statistics which can be kept on a per-cpu basis (to avoid cache 226 * stalls between cpus) can be moved to the per-cpu vmmeter. Remaining 227 * statistics, such as v_free_reserved, are left in the global 228 * structure. 229 * 230 * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) 231 * 232 * No requirements. 233 */ 234 static int 235 vcnt(SYSCTL_HANDLER_ARGS) 236 { 237 int i; 238 int count = 0; 239 int offset = arg2; 240 241 for (i = 0; i < ncpus; ++i) { 242 struct globaldata *gd = globaldata_find(i); 243 count += *(int *)((char *)&gd->gd_cnt + offset); 244 } 245 return(SYSCTL_OUT(req, &count, sizeof(int))); 246 } 247 248 static 249 int 250 vzerocnt(SYSCTL_HANDLER_ARGS) 251 { 252 int count = 0; 253 int i; 254 255 for (i = 0; i < PQ_L2_SIZE; ++i) { 256 count += vm_page_queues[PQ_FREE+i].zero_count; 257 } 258 return(SYSCTL_OUT(req, &count, sizeof(int))); 259 } 260 261 /* 262 * No requirements. 
 */
/*
 * Total interrupts for vm.stats.sys.v_intr: per-cpu sum of hardware
 * interrupts plus IPIs plus LAPIC timer interrupts (mirrors the
 * v_intr adjustment done in do_vmmeter()).
 */
static int
vcnt_intr(SYSCTL_HANDLER_ARGS)
{
	int i;
	int count = 0;

	for (i = 0; i < ncpus; ++i) {
		struct globaldata *gd = globaldata_find(i);

		count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi +
			 gd->gd_cnt.v_timer;
	}
	return(SYSCTL_OUT(req, &count, sizeof(int)));
}

/* Byte offset of a counter within struct vmmeter, passed as arg2 to vcnt(). */
#define VMMETEROFF(var)	offsetof(struct vmmeter, var)

/* Aggregate opaque-structure sysctls. */
SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal",
	"System virtual memory aggregate");
SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmstats), do_vmstats, "S,vmstats",
	"System virtual memory statistics");
SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter",
	"System statistics");

/* vm.stats tree: sys / vm / misc categories. */
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");

/* Per-cpu-summed counters under vm.stats.sys, each served by vcnt(). */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
/* v_intr uses vcnt_intr so hardware+IPI+timer interrupts are combined. */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");

/* Per-cpu-summed VM event counters under vm.stats.vm, served by vcnt(). */
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
	0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");

/* Read-only exports of global vmstats fields under vm.stats.vm. */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
	"Page size in bytes");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
	"Total number of pages in system");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
	"Number of pages reserved for deadlock");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_target, CTLFLAG_RD, &vmstats.v_free_target, 0,
	"Number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
	"Minimum number of pages desired free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
	"Number of pages free");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
	"Number of pages wired down");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
	"Number of pages active");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
	"Number of pages desired inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
	"Number of pages inactive");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
	"Number of pages on buffer cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_min, CTLFLAG_RD, &vmstats.v_cache_min, 0,
	"Min number of pages desired on cache queue");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_cache_max, CTLFLAG_RD, &vmstats.v_cache_max, 0,
	"Max number of pages in cached obj");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
	"Min number pages reserved for kernel");
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
	v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
	"Reserved number of pages for int code");

/* Pre-zeroed free page count; computed on demand by vzerocnt(). */
SYSCTL_PROC(_vm_stats_misc, OID_AUTO, zero_page_count, CTLTYPE_UINT|CTLFLAG_RD,
	0, 0, vzerocnt, "IU", "Pre-zerod VM pages");

/*
 * No requirements.
428 */ 429 static int 430 do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS) 431 { 432 int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); 433 int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); 434 struct globaldata *gd = arg1; 435 struct vmmeter vmm; 436 int off; 437 438 bzero(&vmm, sizeof(vmm)); 439 for (off = boffset; off <= eoffset; off += sizeof(u_int)) { 440 *(u_int *)((char *)&vmm + off) += 441 *(u_int *)((char *)&gd->gd_cnt + off); 442 } 443 vmm.v_intr += vmm.v_ipi + vmm.v_timer; 444 return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req)); 445 } 446 447 /* 448 * Called from the low level boot code only. 449 */ 450 static void 451 vmmeter_init(void *dummy __unused) 452 { 453 int i; 454 455 for (i = 0; i < ncpus; ++i) { 456 struct sysctl_ctx_list *ctx; 457 struct sysctl_oid *oid; 458 struct globaldata *gd; 459 char name[32]; 460 461 ksnprintf(name, sizeof(name), "cpu%d", i); 462 463 ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK); 464 sysctl_ctx_init(ctx); 465 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm), 466 OID_AUTO, name, CTLFLAG_RD, 0, ""); 467 468 gd = globaldata_find(i); 469 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 470 "vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD, 471 gd, sizeof(struct vmmeter), do_vmmeter_pcpu, 472 "S,vmmeter", "System per-cpu statistics"); 473 } 474 } 475 SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0); 476