/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_meter.c	7.14 (Berkeley) 03/03/92
 */

#include "param.h"
#include "proc.h"
#include "systm.h"
#include "kernel.h"
#include "vm.h"

struct	loadavg averunnable;		/* load average of runnable procs */

int	maxslp = MAXSLP;
int	saferss = SAFERSS;

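/*
 * Periodic virtual memory metering, called (once per second in this
 * vintage of the kernel) from the scheduler.  Every 5 seconds the
 * load averages are recomputed, and the swapper (proc0) is awakened
 * if it has been asleep longer than maxslp/2 seconds.
 */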
vmmeter()
{

	if (time.tv_sec % 5 == 0)
		loadav(&averunnable);
	if (proc0.p_slptime > maxslp/2)
		wakeup((caddr_t)&proc0);
}

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
fixpt_t	cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
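/*
 * Each entry above is exp(-dt/T) scaled to fixed point, for sampling
 * interval dt = 5 seconds and averaging windows T of 60, 300 and 900
 * seconds: exp(-5/60) = exp(-1/12), exp(-5/300) = exp(-1/60), and
 * exp(-5/900) = exp(-1/180).  loadav() below applies the
 * exponentially weighted moving average
 *
 *	avg = avg * exp(-dt/T) + nrun * (1 - exp(-dt/T))
 *
 * so a sustained count of nrun runnable processes decays the average
 * toward nrun with time constant T.
 */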

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
loadav(avg)
	register struct loadavg *avg;
{
	register int i, nrun;
	register struct proc *p;

	for (nrun = 0, p = allproc; p != NULL; p = p->p_nxt) {
		switch (p->p_stat) {
		case SSLEEP:
			if (p->p_pri > PZERO || p->p_slptime != 0)
				continue;
			/* fall through */
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
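	/*
	 * Fixed-point update of the averages: cexp[i] and ldavg[i]
	 * each carry a scale factor of FSCALE (1 << FSHIFT), so both
	 * products in the expression below are scaled by FSCALE
	 * squared; the final >> FSHIFT restores a single FSCALE.
	 */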
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
#if defined(COMPAT_43) && (defined(vax) || defined(tahoe))
	for (i = 0; i < 3; i++)
		avenrun[i] = (double) avg->ldavg[i] / FSCALE;
#endif /* COMPAT_43 && (vax || tahoe) */
}

/*
 * Load average information
 */
/* ARGSUSED */
kinfo_loadavg(op, where, acopysize, arg, aneeded)
	int op;
	register char *where;
	int *acopysize, arg, *aneeded;
{
	int buflen, error;

	*aneeded = sizeof(averunnable);
	if (where == NULL)
		return (0);
	/*
	 * Check for enough buffering.
	 */
	buflen = *acopysize;
	if (buflen < sizeof(averunnable)) {
		*acopysize = 0;
		return (0);
	}
	/*
	 * Copyout averunnable structure.
	 */
	averunnable.fscale = FSCALE;
	if (error = copyout((caddr_t)&averunnable, where, sizeof(averunnable)))
		return (error);
	*acopysize = sizeof(averunnable);
	return (0);
}
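
/*
 * A minimal userland sketch of reaching the routine above through
 * getkerninfo(2), assuming the KINFO_LOADAVG operation of this era:
 *
 *	struct loadavg la;
 *	int size = sizeof(la);
 *
 *	if (getkerninfo(KINFO_LOADAVG, (char *)&la, &size, 0) != -1)
 *		printf("%.2f\n", (double)la.ldavg[0] / la.fscale);
 *
 * The fscale member is filled in above so callers need not know the
 * kernel's compiled-in FSCALE.
 */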

/*
 * Calculate and return vmtotals structure.
 */
kinfo_meter(op, where, acopysize, arg, aneeded)
	int op;
	caddr_t where;
	int *acopysize, arg, *aneeded;
{
	struct vmtotal vmtotals;
	int error;

	*aneeded = sizeof(struct vmtotal);
	if (where == NULL)
		return (0);
	if (*acopysize < sizeof(struct vmtotal))
		return (EINVAL);
	vmtotal(&vmtotals);
	if (error = copyout((caddr_t)&vmtotals, where, sizeof(struct vmtotal)))
		return (error);
	*acopysize = sizeof(struct vmtotal);
	return (0);
}

/*
 * Calculate the current state of the system.
 * Done on demand from getkerninfo().
 */
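/*
 * Counters accumulated below (struct vmtotal, <sys/vmmeter.h>):
 *	t_rq	processes on the run queue
 *	t_dw	processes in ``disk wait'' (priority at or below PZERO)
 *	t_pw	processes waiting on paging in progress
 *	t_sl	processes sleeping in core
 *	t_sw	swapped-out runnable or short-sleeping processes
 * plus total/active and shared virtual and real memory sizes, all
 * in pages, and the free page count.
 */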
vmtotal(totalp)
	register struct vmtotal *totalp;
{
	register struct proc *p;
	register vm_map_entry_t	entry;
	register vm_object_t object;
	register vm_map_t map;
	int paging;

	bzero(totalp, sizeof *totalp);
	/*
	 * Mark all objects as inactive.
	 */
	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		object->flags &= ~OBJ_ACTIVE;
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);
	/*
	 * Calculate process statistics.
	 */
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_flag & SSYS)
			continue;
		switch (p->p_stat) {
		case 0:
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_flag & SLOAD) {
				if (p->p_pri <= PZERO)
					totalp->t_dw++;
				else if (p->p_slptime < maxslp)
					totalp->t_sl++;
			} else if (p->p_slptime < maxslp)
				totalp->t_sw++;
			/* long sleepers get no active-object accounting */
			if (p->p_slptime >= maxslp)
				continue;
			break;

		case SRUN:
		case SIDL:
			if (p->p_flag & SLOAD)
				totalp->t_rq++;
			else
				totalp->t_sw++;
			/* nascent processes have no address space to scan */
			if (p->p_stat == SIDL)
				continue;
			break;
		}
		/*
		 * Note active objects: mark every object mapped by
		 * this process, and remember whether any of them has
		 * paging in progress.
		 */
		paging = 0;
		for (map = &p->p_vmspace->vm_map, entry = map->header.next;
		     entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.vm_object == NULL)
				continue;
			entry->object.vm_object->flags |= OBJ_ACTIVE;
			paging |= entry->object.vm_object->paging_in_progress;
		}
		if (paging)
			totalp->t_pw++;
	}
	/*
	 * Calculate object memory usage statistics.
	 */
	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		totalp->t_vm += num_pages(object->size);
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += num_pages(object->size);
			totalp->t_arm += object->resident_page_count;
		}
		if (object->ref_count > 1) {
			/* shared object */
			totalp->t_vmshr += num_pages(object->size);
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += num_pages(object->size);
				totalp->t_armshr += object->resident_page_count;
			}
		}
		object = (vm_object_t) queue_next(&object->object_list);
	}
	totalp->t_free = cnt.v_free_count;
}