1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1982, 1986, 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * @(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/vm/vm_meter.c,v 1.34.2.7 2002/10/10 19:28:22 dillon Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/vmmeter.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <sys/sysctl.h>

/*
 * WARNING: vmstats represents the final say, but individual cpus may
 *          accumulate adjustments in gd->gd_vmstats_adj.  These are
 *          synchronized to the global vmstats in hardclock.
 *
 *          In addition, most individual cpus check vmstats using a local
 *          copy of the global vmstats in gd->gd_vmstats.  Hardclock also
 *          synchronizes the copy.  The pageout code and vm_page_alloc will
 *          also synchronize their local copies as necessary.
 *
 *          Other consumers should not expect perfect values.
 */

/*
 * The system-wide, authoritative VM statistics structure.  Per-cpu
 * deltas accumulate in gd->gd_vmstats_adj and are folded in by
 * hardclock (see vmstats_rollup() below).  Placed on its own cache
 * line to avoid false sharing with unrelated globals.
 */
__exclusive_cache_line struct vmstats vmstats;

/* Sleep time (seconds) at or beyond which an lwp stops counting as sleeping */
static int maxslp = MAXSLP;

/*
 * Tunable paging thresholds exported directly from the global vmstats
 * under vm.*.  These are writable (CTLFLAG_RW) knobs.
 */
SYSCTL_ULONG(_vm, VM_V_FREE_MIN, v_free_min,
        CTLFLAG_RW, &vmstats.v_free_min, 0,
        "Minimum number of pages desired free");
SYSCTL_ULONG(_vm, VM_V_PAGING_WAIT, v_paging_wait,
        CTLFLAG_RW, &vmstats.v_paging_wait, 0,
        "Userland slows down allocations");
SYSCTL_ULONG(_vm, VM_V_PAGING_START, v_paging_start,
        CTLFLAG_RW, &vmstats.v_paging_start, 0,
        "Pageout daemon begins running");
SYSCTL_ULONG(_vm, VM_V_PAGING_TARGET1, v_paging_target1,
        CTLFLAG_RW, &vmstats.v_paging_target1, 0,
        "Mid pageout daemon target");
SYSCTL_ULONG(_vm, VM_V_PAGING_TARGET2, v_paging_target2,
        CTLFLAG_RW, &vmstats.v_paging_target2, 0,
        "Final pageout daemon target");
SYSCTL_ULONG(_vm, VM_V_FREE_RESERVED, v_free_reserved,
        CTLFLAG_RW, &vmstats.v_free_reserved, 0,
        "Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
        CTLFLAG_RW, &vmstats.v_inactive_target, 0,
        "Maximum inactive pages during pageout");
SYSCTL_ULONG(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
        CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
        "Min number pages reserved for kernel");
/* Severe free-page shortage threshold (historically no description string) */
SYSCTL_ULONG(_vm, OID_AUTO, v_free_severe,
        CTLFLAG_RW, &vmstats.v_free_severe, 0, "");

/* Read-only export of the load average history; plain copy, no lock needed */
SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg,
        CTLFLAG_RD | CTLFLAG_NOLOCK,
        &averunnable, loadavg, "Machine loadaverage history");

static int do_vmtotal_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
        struct vmtotal total;
        globaldata_t gd;
        int n;

        /*
         * Sum the per-cpu vmtotal counters into one aggregate.  The
         * scheduler-state fields (t_rq/t_dw/t_pw/t_sl/t_sw) are derived
         * from the process scan below rather than from the pcpu copies.
         */
        bzero(&total, sizeof(total));
        for (n = 0; n < ncpus; ++n) {
                gd = globaldata_find(n);

                /* total.t_rq calculated separately */
                /* total.t_dw calculated separately */
                /* total.t_pw calculated separately */
                /* total.t_sl calculated separately */
                /* total.t_sw calculated separately */
                total.t_vm += gd->gd_vmtotal.t_vm;
                total.t_avm += gd->gd_vmtotal.t_avm;
                total.t_rm += gd->gd_vmtotal.t_rm;
                total.t_arm += gd->gd_vmtotal.t_arm;
                total.t_vmshr += gd->gd_vmtotal.t_vmshr;
                total.t_avmshr += gd->gd_vmtotal.t_avmshr;
                total.t_rmshr += gd->gd_vmtotal.t_rmshr;
                total.t_armshr += gd->gd_vmtotal.t_armshr;
                /* total.t_free calculated separately */
        }

        /*
         * Calculate process statistics.
         */
        allproc_scan(do_vmtotal_callback, &total, 0);

        /*
         * Adjust for sysctl return.  Add real memory into virtual memory.
         * Set t_free.
         *
         * t_rm - Real memory
         * t_vm - Virtual memory (real + swap)
         */
        total.t_vm += total.t_rm;
        total.t_free = vmstats.v_free_count + vmstats.v_cache_count;

        return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}

/*
 * Per-process callback for do_vmtotal()'s allproc_scan().  Classifies
 * each lwp into run-queue / disk-wait / sleeping buckets and counts
 * lwps that are currently inside vm_fault() (LWP_PAGING).
 *
 * NOTE: when an lwp has slept >= maxslp seconds, or a runnable lwp
 *       belongs to a process still in SIDL, the scan short-circuits the
 *       remaining lwps of that process (goto out) - historical BSD
 *       accounting behavior; do not "fix" without verifying callers.
 */
static int
do_vmtotal_callback(struct proc *p, void *data)
{
        struct vmtotal *totalp = data;
        struct lwp *lp;

        /* System processes are not included in the totals */
        if (p->p_flags & P_SYSTEM)
                return(0);

        /* Shared token suffices; the lwp list is only read here */
        lwkt_gettoken_shared(&p->p_token);

        FOREACH_LWP_IN_PROC(lp, p) {
                switch (lp->lwp_stat) {
                case LSSTOP:
                case LSSLEEP:
                        if ((lp->lwp_flags & LWP_SINTR) == 0)
                                totalp->t_dw++;         /* uninterruptible wait */
                        else if (lp->lwp_slptime < maxslp)
                                totalp->t_sl++;         /* normal sleep */
                        if (lp->lwp_slptime >= maxslp)
                                goto out;               /* long-idle, skip rest */
                        break;
                case LSRUN:
                        totalp->t_rq++;
                        if (p->p_stat == SIDL)
                                goto out;               /* still being created */
                        break;
                default:
                        goto out;
                }

                /*
                 * Set while in vm_fault()
                 */
                if (lp->lwp_flags & LWP_PAGING)
                        totalp->t_pw++;
        }
out:
        lwkt_reltoken(&p->p_token);
        return(0);
}

/*
 * No requirements.  Hands out a point-in-time snapshot by copying the
 * global vmstats structure before passing it to the sysctl machinery.
 */
static int
do_vmstats(SYSCTL_HANDLER_ARGS)
{
        struct vmstats vms = vmstats;
        return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
}

/*
 * No requirements.
205 */ 206 static int 207 do_vmmeter(SYSCTL_HANDLER_ARGS) 208 { 209 int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); 210 int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); 211 struct vmmeter vmm; 212 int i; 213 214 bzero(&vmm, sizeof(vmm)); 215 for (i = 0; i < ncpus; ++i) { 216 int off; 217 struct globaldata *gd = globaldata_find(i); 218 219 for (off = boffset; off <= eoffset; off += sizeof(u_int)) { 220 *(u_int *)((char *)&vmm + off) += 221 *(u_int *)((char *)&gd->gd_cnt + off); 222 } 223 224 } 225 vmm.v_intr += vmm.v_ipi + vmm.v_timer; 226 return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req)); 227 } 228 229 /* 230 * vcnt() - accumulate statistics from the cnt structure for each cpu 231 * 232 * The vmmeter structure is now per-cpu as well as global. Those 233 * statistics which can be kept on a per-cpu basis (to avoid cache 234 * stalls between cpus) can be moved to the per-cpu vmmeter. Remaining 235 * statistics, such as v_free_reserved, are left in the global 236 * structure. 237 * 238 * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) 239 * 240 * No requirements. 241 */ 242 static int 243 vcnt(SYSCTL_HANDLER_ARGS) 244 { 245 int i; 246 int count = 0; 247 int offset = arg2; 248 249 for (i = 0; i < ncpus; ++i) { 250 struct globaldata *gd = globaldata_find(i); 251 count += *(int *)((char *)&gd->gd_cnt + offset); 252 } 253 return(SYSCTL_OUT(req, &count, sizeof(int))); 254 } 255 256 /* 257 * No requirements. 
258 */ 259 static int 260 vcnt_intr(SYSCTL_HANDLER_ARGS) 261 { 262 int i; 263 int count = 0; 264 265 for (i = 0; i < ncpus; ++i) { 266 struct globaldata *gd = globaldata_find(i); 267 268 count += gd->gd_cnt.v_intr + gd->gd_cnt.v_ipi + 269 gd->gd_cnt.v_timer; 270 } 271 return(SYSCTL_OUT(req, &count, sizeof(int))); 272 } 273 274 #define VMMETEROFF(var) offsetof(struct vmmeter, var) 275 276 SYSCTL_PROC(_vm, OID_AUTO, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD, 277 0, sizeof(struct vmtotal), do_vmtotal, "S,vmtotal", 278 "System virtual memory aggregate"); 279 SYSCTL_PROC(_vm, OID_AUTO, vmstats, CTLTYPE_OPAQUE|CTLFLAG_RD, 280 0, sizeof(struct vmstats), do_vmstats, "S,vmstats", 281 "System virtual memory statistics"); 282 SYSCTL_PROC(_vm, OID_AUTO, vmmeter, CTLTYPE_OPAQUE|CTLFLAG_RD, 283 0, sizeof(struct vmmeter), do_vmmeter, "S,vmmeter", 284 "System statistics"); 285 SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats"); 286 SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0, "VM meter sys stats"); 287 SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, "VM meter vm stats"); 288 SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats"); 289 290 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD, 291 0, VMMETEROFF(v_swtch), vcnt, "IU", "Context switches"); 292 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_coll, CTLTYPE_UINT|CTLFLAG_RD, 293 0, VMMETEROFF(v_intrans_coll), vcnt, "IU", "Intransit map collisions (total)"); 294 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intrans_wait, CTLTYPE_UINT|CTLFLAG_RD, 295 0, VMMETEROFF(v_intrans_wait), vcnt, "IU", "Intransit map collisions which blocked"); 296 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_ints, CTLTYPE_UINT|CTLFLAG_RD, 297 0, VMMETEROFF(v_forwarded_ints), vcnt, "IU", "Forwarded interrupts due to MP lock"); 298 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_hits, CTLTYPE_UINT|CTLFLAG_RD, 299 0, VMMETEROFF(v_forwarded_hits), vcnt, "IU", "Forwarded hits due to MP lock"); 300 
/*
 * Remaining per-cpu u_int counters, summed across cpus via vcnt();
 * v_intr uses vcnt_intr() to fold in ipi+timer counts.
 */
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_forwarded_misses, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_forwarded_misses), vcnt, "IU", "Forwarded misses due to MP lock");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_trap), vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_syscall), vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_intr), vcnt_intr, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_ipi, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_ipi), vcnt, "IU", "Inter-processor interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_timer, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_timer), vcnt, "IU", "LAPIC timer interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_soft), vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vm_faults), vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_cow_faults), vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_cow_optim), vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_zfod), vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_ozfod), vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_swapin), vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_swapout), vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_swappgsin), vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_swappgsout), vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vnodein), vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vnodeout), vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vnodepgsin), vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vnodepgsout), vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_intrans), vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_reactivated), vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_pdwakeups), vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ppwakeups, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_ppwakeups), vcnt, "IU", "vm_wait wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_pdpages), vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_dfree), vcnt, "IU", "Pages freed by daemon");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_pfree), vcnt, "IU", "Pages freed by exiting processes");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_tfree), vcnt, "IU", "Total pages freed");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_forks), vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vforks), vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_rforks), vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_kthreads), vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_forkpages), vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_vforkpages), vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_rforkpages), vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
    0, VMMETEROFF(v_kthreadpages), vcnt, "IU", "VM pages affected by fork() by kernel");

/*
 * Direct exports of global vmstats fields; the values are maintained by
 * hardclock's rollup (see vmstats_rollup() below).
 */
SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
    v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
    "Page size in bytes");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0,
    "Total number of pages in system");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
    "Number of pages reserved for deadlock");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
    "Minimum number of pages desired free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_paging_wait, CTLFLAG_RW, &vmstats.v_paging_wait, 0,
    "Userland slows down allocations");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_paging_start, CTLFLAG_RW, &vmstats.v_paging_start, 0,
    "Pageout daemon begins running");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_paging_target1, CTLFLAG_RW, &vmstats.v_paging_target1, 0,
    "Mid pageout daemon target");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_paging_target2, CTLFLAG_RW, &vmstats.v_paging_target2, 0,
    "Final pageout daemon target");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
    "Number of pages free");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
    "Number of pages wired down");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
    "Number of pages active");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
    "Maximum inactive pages during pageout");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
    "Number of pages inactive");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
    "Number of pages on buffer cache queue");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
    "Min number pages reserved for kernel");
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
    v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
    "Reserved number of pages for int code");

/*
 * No requirements.
 */
static int
do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS)
{
        int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
        int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
        struct globaldata *gd = arg1;	/* cpu whose counters we export */
        struct vmmeter vmm;
        int off;

        /*
         * Copy just the u_int counter range for this single cpu, then
         * fold ipi+timer into v_intr exactly as do_vmmeter() does for
         * the global aggregate.
         */
        bzero(&vmm, sizeof(vmm));
        for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
                *(u_int *)((char *)&vmm + off) +=
                        *(u_int *)((char *)&gd->gd_cnt + off);
        }
        vmm.v_intr += vmm.v_ipi + vmm.v_timer;
        return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req));
}

/*
 * Callback for long-term slow data collection on 10-second interval.
 *
 * Return faults, set data for other entries.
 */

/* Convert a page count to bytes, widening first to avoid overflow */
#define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_vmstats_callback(int n)
{
        /*
         * last_vmm retains the previous sample so per-interval deltas
         * can be computed.  NOTE(review): the static state assumes this
         * callback never runs concurrently with itself - presumably
         * guaranteed by the kcollect framework; confirm before reuse.
         */
        static struct vmmeter last_vmm;
        struct vmmeter cur_vmm;
        const int boffset = offsetof(struct vmmeter, vmmeter_uint_begin);
        const int eoffset = offsetof(struct vmmeter, vmmeter_uint_end);
        uint64_t total;

        /*
         * The hardclock already rolls up vmstats for us.
         */
        kcollect_setvalue(KCOLLECT_MEMFRE, PTOB(vmstats.v_free_count));
        kcollect_setvalue(KCOLLECT_MEMCAC, PTOB(vmstats.v_cache_count));
        kcollect_setvalue(KCOLLECT_MEMINA, PTOB(vmstats.v_inactive_count));
        kcollect_setvalue(KCOLLECT_MEMACT, PTOB(vmstats.v_active_count));
        kcollect_setvalue(KCOLLECT_MEMWIR, PTOB(vmstats.v_wire_count));

        /*
         * Collect pcpu statistics for things like faults.
         *
         * NOTE: the incoming argument 'n' is not used as an input; it
         *	 is reused here as the cpu iteration index.
         */
        bzero(&cur_vmm, sizeof(cur_vmm));
        for (n = 0; n < ncpus; ++n) {
                struct globaldata *gd = globaldata_find(n);
                int off;

                for (off = boffset; off <= eoffset; off += sizeof(u_int)) {
                        *(u_int *)((char *)&cur_vmm + off) +=
                                *(u_int *)((char *)&gd->gd_cnt + off);
                }
        }

        /* Publish the per-interval delta for each tracked counter */
        total = cur_vmm.v_cow_faults - last_vmm.v_cow_faults;
        last_vmm.v_cow_faults = cur_vmm.v_cow_faults;
        kcollect_setvalue(KCOLLECT_COWFAULT, total);

        total = cur_vmm.v_zfod - last_vmm.v_zfod;
        last_vmm.v_zfod = cur_vmm.v_zfod;
        kcollect_setvalue(KCOLLECT_ZFILL, total);

        total = cur_vmm.v_syscall - last_vmm.v_syscall;
        last_vmm.v_syscall = cur_vmm.v_syscall;
        kcollect_setvalue(KCOLLECT_SYSCALLS, total);

        total = cur_vmm.v_intr - last_vmm.v_intr;
        last_vmm.v_intr = cur_vmm.v_intr;
        kcollect_setvalue(KCOLLECT_INTR, total);

        total = cur_vmm.v_ipi - last_vmm.v_ipi;
        last_vmm.v_ipi = cur_vmm.v_ipi;
        kcollect_setvalue(KCOLLECT_IPI, total);

        total = cur_vmm.v_timer - last_vmm.v_timer;
        last_vmm.v_timer = cur_vmm.v_timer;
        kcollect_setvalue(KCOLLECT_TIMER, total);

        /* The fault delta is the value returned for KCOLLECT_VMFAULT */
        total = cur_vmm.v_vm_faults - last_vmm.v_vm_faults;
        last_vmm.v_vm_faults = cur_vmm.v_vm_faults;

        return total;
}

/*
 * Called from the low level boot code only.
 */
static void
vmmeter_init(void *dummy __unused)
{
        int i;

        /*
         * Export vm.cpuN.vmmeter for each cpu.  A sysctl context is
         * allocated per cpu and intentionally never freed; the nodes
         * persist for the life of the system.
         */
        for (i = 0; i < ncpus; ++i) {
                struct sysctl_ctx_list *ctx;
                struct sysctl_oid *oid;
                struct globaldata *gd;
                char name[32];

                ksnprintf(name, sizeof(name), "cpu%d", i);

                ctx = kmalloc(sizeof(*ctx), M_TEMP, M_WAITOK);
                sysctl_ctx_init(ctx);
                oid = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_vm),
                                      OID_AUTO, name, CTLFLAG_RD, 0, "");

                gd = globaldata_find(i);
                SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
                                "vmmeter", CTLTYPE_OPAQUE|CTLFLAG_RD,
                                gd, sizeof(struct vmmeter), do_vmmeter_pcpu,
                                "S,vmmeter", "System per-cpu statistics");
        }

        /*
         * Register the slow-interval kcollect data points.  Only
         * KCOLLECT_VMFAULT supplies a callback; a single invocation of
         * collect_vmstats_callback() fills in the values for all the
         * other ids as a side effect.
         */
        kcollect_register(KCOLLECT_VMFAULT, "fault", collect_vmstats_callback,
                          KCOLLECT_SCALE(KCOLLECT_VMFAULT_FORMAT, 0));
        kcollect_register(KCOLLECT_COWFAULT, "cow", NULL,
                          KCOLLECT_SCALE(KCOLLECT_COWFAULT_FORMAT, 0));
        kcollect_register(KCOLLECT_ZFILL, "zfill", NULL,
                          KCOLLECT_SCALE(KCOLLECT_ZFILL_FORMAT, 0));

        /* Memory-state gauges are scaled against total physical memory */
        kcollect_register(KCOLLECT_MEMFRE, "free", NULL,
                          KCOLLECT_SCALE(KCOLLECT_MEMFRE_FORMAT,
                                         PTOB(vmstats.v_page_count)));
        kcollect_register(KCOLLECT_MEMCAC, "cache", NULL,
                          KCOLLECT_SCALE(KCOLLECT_MEMCAC_FORMAT,
                                         PTOB(vmstats.v_page_count)));
        kcollect_register(KCOLLECT_MEMINA, "inact", NULL,
                          KCOLLECT_SCALE(KCOLLECT_MEMINA_FORMAT,
                                         PTOB(vmstats.v_page_count)));
        kcollect_register(KCOLLECT_MEMACT, "act", NULL,
                          KCOLLECT_SCALE(KCOLLECT_MEMACT_FORMAT,
                                         PTOB(vmstats.v_page_count)));
        kcollect_register(KCOLLECT_MEMWIR, "wired", NULL,
                          KCOLLECT_SCALE(KCOLLECT_MEMWIR_FORMAT,
                                         PTOB(vmstats.v_page_count)));

        kcollect_register(KCOLLECT_SYSCALLS, "syscalls", NULL,
                          KCOLLECT_SCALE(KCOLLECT_SYSCALLS_FORMAT, 0));

        kcollect_register(KCOLLECT_INTR, "intr", NULL,
                          KCOLLECT_SCALE(KCOLLECT_INTR_FORMAT, 0));
        kcollect_register(KCOLLECT_IPI, "ipi", NULL,
                          KCOLLECT_SCALE(KCOLLECT_IPI_FORMAT, 0));
        kcollect_register(KCOLLECT_TIMER, "timer", NULL,
                          KCOLLECT_SCALE(KCOLLECT_TIMER_FORMAT, 0));
}
SYSINIT(vmmeter, SI_SUB_PSEUDO, SI_ORDER_ANY, vmmeter_init, 0);

/*
 * Rolls up accumulated pcpu adjustments to vmstats counts into the global
 * structure, copy the global structure into our pcpu structure.  Critical
 * path checks will use our pcpu structure.
 *
 * This is somewhat expensive and only called when needed, and by the
 * hardclock.
 */
void
vmstats_rollup(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; ++cpu) {
                vmstats_rollup_cpu(globaldata_find(cpu));
        }
        /* Refresh the calling cpu's cached copy of the global stats */
        mycpu->gd_vmstats = vmstats;
}

/*
 * Transfer one cpu's accumulated deltas into the global vmstats.  The
 * swap-to-zero followed by atomic add ensures that adjustments posted
 * concurrently by the owning cpu are not lost.  The unlocked pre-test
 * of each field is a cheap filter; the swap is the authoritative read.
 */
void
vmstats_rollup_cpu(globaldata_t gd)
{
        long value;

        if (gd->gd_vmstats_adj.v_free_count) {
                value = atomic_swap_long(&gd->gd_vmstats_adj.v_free_count, 0);
                atomic_add_long(&vmstats.v_free_count, value);
        }
        if (gd->gd_vmstats_adj.v_cache_count) {
                value = atomic_swap_long(&gd->gd_vmstats_adj.v_cache_count, 0);
                atomic_add_long(&vmstats.v_cache_count, value);
        }
        if (gd->gd_vmstats_adj.v_inactive_count) {
                value = atomic_swap_long(&gd->gd_vmstats_adj.v_inactive_count, 0);
                atomic_add_long(&vmstats.v_inactive_count, value);
        }
        if (gd->gd_vmstats_adj.v_active_count) {
                value = atomic_swap_long(&gd->gd_vmstats_adj.v_active_count, 0);
                atomic_add_long(&vmstats.v_active_count, value);
        }
        if (gd->gd_vmstats_adj.v_wire_count) {
                value = atomic_swap_long(&gd->gd_vmstats_adj.v_wire_count, 0);
                atomic_add_long(&vmstats.v_wire_count, value);
        }
}