/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_meter.c	7.17 (Berkeley) 07/08/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <vm/vm.h>

struct	loadavg averunnable;		/* load average of runnable procs */

int	maxslp = MAXSLP;	/* max sleep time before very swappable */
int	saferss = SAFERSS;	/* nominal ``small'' resident set size */

void	loadav();
void	vmtotal();

/*
 * Periodic resource usage sampling: recompute the load average every
 * five seconds (judging by the modulus test, this routine is invoked
 * once per second) and wake the swap-in scheduler, which sleeps on
 * &proc0, if it has been idle more than maxslp/2 seconds.
 */
void
vmmeter()
{
	if (time.tv_sec % 5 == 0)
		loadav(&averunnable);
	if (proc0.p_slptime > maxslp/2)
		wakeup((caddr_t)&proc0);
}

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
fixpt_t	cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
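
/*
 * Each constant is exp(-t/T) for the 5 second sampling interval t and
 * a decay time T of 60, 300, or 900 seconds: exp(-5/60) = exp(-1/12),
 * and so on.  loadav() below applies the usual exponentially weighted
 * moving average,
 *
 *	avg = avg * exp(-t/T) + nrun * (1 - exp(-t/T)),
 *
 * carried out in fixed point.
 */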

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
void
loadav(avg)
	register struct loadavg *avg;
{
	register int i, nrun;
	register struct proc *p;

	for (nrun = 0, p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
		switch (p->p_stat) {
		case SSLEEP:
			if (p->p_pri > PZERO || p->p_slptime != 0)
				continue;
			/* fall through */
		case SRUN:
		case SIDL:
			nrun++;
		}
	}
	/*
	 * avg and cexp are both scaled by FSCALE, so the product is
	 * scaled by FSCALE*FSCALE; the shift rescales the result.
	 */
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
#if defined(COMPAT_43) && (defined(vax) || defined(tahoe))
	for (i = 0; i < 3; i++)
		avenrun[i] = (double) avg->ldavg[i] / FSCALE;
#endif /* COMPAT_43 && (vax || tahoe) */
}
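
/*
 * A worked example of the fixed point update above, assuming the
 * customary FSHIFT of 11 (FSCALE = 2048): with a current 1 minute
 * average of 1.00 (ldavg[0] = 2048) and nrun = 2,
 *
 *	cexp[0] = 0.9200... * 2048 = 1884
 *	(1884 * 2048 + 2 * 2048 * (2048 - 1884)) >> 11 = 2212
 *
 * and 2212 / 2048 = 1.08, which matches 1.00 * 0.92 + 2 * 0.08 in
 * floating point.
 */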

/*
 * Load average information
 */
/* ARGSUSED */
int
kinfo_loadavg(op, where, acopysize, arg, aneeded)
	int op;
	register char *where;
	int *acopysize, arg, *aneeded;
{
	int buflen, error;

	*aneeded = sizeof(averunnable);
	if (where == NULL)
		return (0);
	/*
	 * Check for enough buffering.
	 */
	buflen = *acopysize;
	if (buflen < sizeof(averunnable)) {
		*acopysize = 0;
		return (0);
	}
	/*
	 * Copyout averunnable structure.
	 */
	averunnable.fscale = FSCALE;
	if (error = copyout((caddr_t)&averunnable, where, sizeof(averunnable)))
		return (error);
	*acopysize = sizeof(averunnable);
	return (0);
}
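
/*
 * A hypothetical user-level sketch, assuming the getkerninfo(2)
 * interface of this period (KINFO_LOADAVG selecting this routine):
 *
 *	struct loadavg la;
 *	int size = sizeof(la);
 *
 *	if (getkerninfo(KINFO_LOADAVG, (char *)&la, &size, 0) != -1)
 *		printf("1 min: %.2f\n", (double)la.ldavg[0] / la.fscale);
 */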

/*
 * Calculate and return vmtotals structure.
 */
int
kinfo_meter(op, where, acopysize, arg, aneeded)
	int op;
	caddr_t where;
	int *acopysize, arg, *aneeded;
{
	struct vmtotal vmtotals;
	int error;

	*aneeded = sizeof(struct vmtotal);
	if (where == NULL)
		return (0);
	if (*acopysize < sizeof(struct vmtotal))
		return (EINVAL);
	vmtotal(&vmtotals);
	if (error = copyout((caddr_t)&vmtotals, where, sizeof(struct vmtotal)))
		return (error);
	*acopysize = sizeof(struct vmtotal);
	return (0);
}
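
/*
 * As above, a hypothetical user-level sketch (KINFO_METER is assumed
 * to be the getkerninfo(2) selector for this routine):
 *
 *	struct vmtotal vt;
 *	int size = sizeof(vt);
 *
 *	if (getkerninfo(KINFO_METER, (char *)&vt, &size, 0) != -1)
 *		printf("%d runnable, %d free pages\n",
 *		    vt.t_rq, vt.t_free);
 */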

/*
 * Calculate the current state of the system.
 * Done on demand from getkerninfo().
 */
void
vmtotal(totalp)
	register struct vmtotal *totalp;
{
	register struct proc *p;
	register vm_map_entry_t	entry;
	register vm_object_t object;
	register vm_map_t map;
	int paging;

	bzero(totalp, sizeof *totalp);
	/*
	 * Mark all objects as inactive.  The process scan below turns
	 * the flag back on for objects mapped by active processes.
	 */
	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		object->flags &= ~OBJ_ACTIVE;
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);
	/*
	 * Calculate process statistics: classify each process into
	 * the totals (t_dw disk wait, t_sl sleeping in core, t_sw
	 * swapped out, t_rq on the run queue) and mark the VM objects
	 * it maps as active.
	 */
	for (p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
		if (p->p_flag & SSYS)		/* skip system processes */
			continue;
		switch (p->p_stat) {
		case 0:
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_flag & SLOAD) {
				if (p->p_pri <= PZERO)
					totalp->t_dw++;
				else if (p->p_slptime < maxslp)
					totalp->t_sl++;
			} else if (p->p_slptime < maxslp)
				totalp->t_sw++;
			if (p->p_slptime >= maxslp)
				continue;
			break;

		case SRUN:
		case SIDL:
			if (p->p_flag & SLOAD)
				totalp->t_rq++;
			else
				totalp->t_sw++;
			if (p->p_stat == SIDL)
				continue;
			break;
		}
		/*
		 * Note active objects.
		 */
		paging = 0;
		for (map = &p->p_vmspace->vm_map, entry = map->header.next;
		     entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.vm_object == NULL)
				continue;
			entry->object.vm_object->flags |= OBJ_ACTIVE;
			paging |= entry->object.vm_object->paging_in_progress;
		}
		if (paging)
			totalp->t_pw++;		/* process is in page wait */
	}
	/*
	 * Calculate object memory usage statistics: t_vm/t_rm are
	 * total and resident pages, t_avm/t_arm count active objects
	 * only, and the *shr variants count objects with more than
	 * one reference.
	 */
	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		totalp->t_vm += num_pages(object->size);
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += num_pages(object->size);
			totalp->t_arm += object->resident_page_count;
		}
		if (object->ref_count > 1) {
			/* shared object */
			totalp->t_vmshr += num_pages(object->size);
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += num_pages(object->size);
				totalp->t_armshr += object->resident_page_count;
			}
		}
		object = (vm_object_t) queue_next(&object->object_list);
	}
	totalp->t_free = cnt.v_free_count;
}