/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vmmeter.h	8.2 (Berkeley) 7/10/94
 * $FreeBSD: src/sys/sys/vmmeter.h,v 1.21.2.2 2002/10/10 19:28:21 dillon Exp $
 * $DragonFly: src/sys/sys/vmmeter.h,v 1.10 2006/05/20 02:42:13 dillon Exp $
 */

#ifndef _SYS_VMMETER_H_
#define _SYS_VMMETER_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

struct globaldata;

/*
 * System wide statistics counters.
 */
struct vmmeter {
	/*
	 * General system activity.
	 */
#define vmmeter_uint_begin	v_swtch
	u_int v_swtch;		/* context switches */
	u_int v_trap;		/* calls to trap */
	u_int v_syscall;	/* calls to syscall() */
	u_int v_intr;		/* device interrupts */
	u_int v_ipi;		/* inter-processor interrupts */
	u_int v_timer;		/* LAPIC timer interrupts */
	u_int v_soft;		/* software interrupts */
	/*
	 * Virtual memory activity.
	 */
	u_int v_vm_faults;	/* number of address memory faults */
	u_int v_cow_faults;	/* number of copy-on-writes */
	u_int v_cow_optim;	/* number of optimized copy-on-writes */
	u_int v_zfod;		/* pages zero filled on demand */
	u_int v_ozfod;		/* optimized zero fill pages */
	u_int v_swapin;		/* swap pager pageins */
	u_int v_swapout;	/* swap pager pageouts */
	u_int v_swappgsin;	/* swap pager pages paged in */
	u_int v_swappgsout;	/* swap pager pages paged out */
	u_int v_vnodein;	/* vnode pager pageins */
	u_int v_vnodeout;	/* vnode pager pageouts */
	u_int v_vnodepgsin;	/* vnode pager pages paged in */
	u_int v_vnodepgsout;	/* vnode pager pages paged out */
	u_int v_intrans;	/* intransit blocking page faults */
	u_int v_reactivated;	/* number of pages reactivated from free list */
	u_int v_pdwakeups;	/* number of times daemon has awakened from sleep */
	u_int v_pdpages;	/* number of pages analyzed by daemon */

	u_int v_dfree;		/* pages freed by daemon */
	u_int v_pfree;		/* pages freed by exiting processes */
	u_int v_tfree;		/* total pages freed */
	/*
	 * Fork/vfork/rfork activity.
	 */
	u_int v_forks;		/* number of fork() calls */
	u_int v_vforks;		/* number of vfork() calls */
	u_int v_rforks;		/* number of rfork() calls */
	u_int v_exec;		/* number of exec() calls */
	u_int v_kthreads;	/* number of fork() calls by kernel */
	u_int v_forkpages;	/* number of VM pages affected by fork() */
	u_int v_vforkpages;	/* number of VM pages affected by vfork() */
	u_int v_rforkpages;	/* number of VM pages affected by rfork() */
	u_int v_kthreadpages;	/* number of VM pages affected by fork() by kernel */
	u_int v_intrans_coll;	/* intransit map collisions (total) */
	u_int v_intrans_wait;	/* intransit map collisions which blocked */
	u_int v_forwarded_ints; /* forwarded interrupts due to MP lock */
	u_int v_forwarded_hits;
	u_int v_forwarded_misses;
	u_int v_sendsys;	/* calls to sendsys() */
	u_int v_waitsys;	/* calls to waitsys() */
	u_int v_smpinvltlb;	/* nasty global invltlbs */
	u_int v_ppwakeups;	/* wakeups on processes stalled on VM */
	u_int v_lock_colls;	/* # of token, lock, or spin collisions */
	char  v_lock_name[16];	/* last-colliding token, lock, or spin name */
	u_int v_wakeup_colls;	/* possible spurious wakeup IPIs */
	u_int v_reserved7;
#define vmmeter_uint_end	v_reserved7
};
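
/*
 * Usage sketch (illustrative only, not part of this header): the
 * vmmeter_uint_begin/vmmeter_uint_end markers bracket the u_int counter
 * block so per-cpu copies can be folded together generically with
 * offsetof().  The helper name below is hypothetical.  Note that
 * v_lock_name[] also falls inside the bracketed range, so whatever such
 * a loop accumulates there is meaningless and must be ignored or reset
 * by the caller.
 *
 *	static void
 *	example_vmmeter_fold(struct vmmeter *dst, const struct vmmeter *src)
 *	{
 *		size_t off = offsetof(struct vmmeter, vmmeter_uint_begin);
 *		size_t end = offsetof(struct vmmeter, vmmeter_uint_end);
 *
 *		while (off <= end) {
 *			*(u_int *)((char *)dst + off) +=
 *			    *(const u_int *)((const char *)src + off);
 *			off += sizeof(u_int);
 *		}
 *	}
 */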

/*
 * vmstats structure: the global vmstats is the rollup, while the per-cpu
 * vmstats keep track of minor (generally positive) adjustments.  For
 * moving targets the global vmstats structure represents the smallest
 * likely value.
 *
 * This structure is cache sensitive; keep the nominally read-only
 * elements separate from the variable elements.
 */
struct vmstats {
	/*
	 * Distribution of page usages.
	 */
	u_int v_page_size;	/* page size in bytes */
	u_int v_unused01;
	long v_page_count;	/* total number of pages in system */
	long v_free_reserved;	/* number of pages reserved for deadlock */
	long v_free_target;	/* number of pages desired free */
	long v_free_min;	/* minimum number of pages desired free */

	long v_cache_min;	/* min number of pages desired on cache queue */
	long v_cache_max;	/* max number of pages in cached obj */
	long v_pageout_free_min; /* min number of pages reserved for kernel */
	long v_interrupt_free_min; /* reserved number of pages for int code */
	long v_free_severe;	/* severe depletion of pages below this point */
	long v_dma_pages;	/* total dma-reserved pages */

	long v_unused_fixed[5];

	long v_free_count;	/* number of pages free */
	long v_wire_count;	/* number of pages wired down */
	long v_active_count;	/* number of pages active */
	long v_inactive_target;	/* number of pages desired inactive */
	long v_inactive_count;	/* number of pages inactive */
	long v_cache_count;	/* number of pages on buffer cache queue */
	long v_dma_avail;	/* free dma-reserved pages */

	long v_unused_variable[9];
};
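
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the paging thresholds above are compared against the counts of pages
 * that are cheap to reclaim.  A rough "are we critically short on
 * memory" test built from these fields looks like:
 *
 *	static __inline int
 *	example_free_severe(const struct vmstats *vs)
 *	{
 *		long avail = vs->v_free_count + vs->v_cache_count;
 *
 *		return (avail < vs->v_free_severe);
 *	}
 *
 * The tests the pageout code actually uses are more involved; this only
 * shows how the threshold and count fields relate.
 */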

#define VMMETER_SLOP_COUNT	128

#ifdef _KERNEL

/* note: vmmeter 'cnt' structure is now per-cpu */
extern struct vmstats vmstats;

#endif

/* systemwide totals computed every five seconds */
struct vmtotal {
	long	t_rq;		/* length of the run queue */
	long	t_dw;		/* jobs in ``disk wait'' (neg priority) */
	long	t_pw;		/* jobs in page wait */
	long	t_sl;		/* jobs sleeping in core */
	long	t_sw;		/* swapped out runnable/short block jobs */
	int64_t	t_vm;		/* total virtual memory */
	int64_t	t_avm;		/* active virtual memory */
	long	t_rm;		/* total real memory in use */
	long	t_arm;		/* active real memory */
	int64_t	t_vmshr;	/* shared virtual memory */
	int64_t	t_avmshr;	/* active shared virtual memory */
	long	t_rmshr;	/* shared real memory */
	long	t_armshr;	/* active shared real memory */
	long	t_free;		/* free memory pages */
};
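
/*
 * Usage sketch (illustrative only, not part of this header): userland
 * traditionally obtains a struct vmtotal snapshot through sysctl.  The
 * MIB used below, {CTL_VM, VM_METER} (aka "vm.vmtotal" on FreeBSD-derived
 * systems), is an assumption; verify the exact name on the target system.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/vmmeter.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_VM, VM_METER };
 *	struct vmtotal vt;
 *	size_t len = sizeof(vt);
 *
 *	if (sysctl(mib, 2, &vt, &len, NULL, 0) == 0)
 *		printf("free pages: %ld\n", vt.t_free);
 */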

#ifdef PGINPROF
/*
 * Optional instrumentation.
 */

#define	NDMON	128
#define	NSMON	128

#define	DRES	20
#define	SRES	5

#define	PMONMIN	20
#define	PRES	50
#define	NPMON	64

#define	RMONMIN	130
#define	RRES	5
#define	NRMON	64

/* data and stack size distribution counters */
u_int	dmon[NDMON+1];
u_int	smon[NSMON+1];

/* page in time distribution counters */
u_int	pmon[NPMON+2];

/* reclaim time distribution counters */
u_int	rmon[NRMON+2];

int	pmonmin;
int	pres;
int	rmonmin;
int	rres;

u_int rectime;		/* accumulator for reclaim times */
u_int pgintime;		/* accumulator for page in times */

#endif	/* PGINPROF */

#ifdef _KERNEL

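/*
 * Fold the per-cpu vmstats adjustments into the global vmstats rollup,
 * for all cpus or for a single cpu's globaldata.
 */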
void vmstats_rollup(void);
void vmstats_rollup_cpu(struct globaldata *gd);

#endif
#endif