xref: /freebsd/lib/libmemstat/memstat.c (revision 7bd6fde3)
/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

const char *
memstat_strerror(int error)
{

	switch (error) {
	case MEMSTAT_ERROR_NOMEMORY:
		return ("Cannot allocate memory");
	case MEMSTAT_ERROR_VERSION:
		return ("Version mismatch");
	case MEMSTAT_ERROR_PERMISSION:
		return ("Permission denied");
	case MEMSTAT_ERROR_TOOMANYCPUS:
		return ("Too many CPUs");
	case MEMSTAT_ERROR_DATAERROR:
		return ("Data format error");
	case MEMSTAT_ERROR_KVM:
		return ("KVM error");
	case MEMSTAT_ERROR_KVM_NOSYMBOL:
		return ("KVM unable to find symbol");
	case MEMSTAT_ERROR_KVM_SHORTREAD:
		return ("KVM short read");
	case MEMSTAT_ERROR_UNDEFINED:
	default:
		return ("Unknown error");
	}
}

struct memory_type_list *
memstat_mtl_alloc(void)
{
	struct memory_type_list *mtlp;

	mtlp = malloc(sizeof(*mtlp));
	if (mtlp == NULL)
		return (NULL);

	LIST_INIT(&mtlp->mtl_list);
	mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
	return (mtlp);
}

struct memory_type *
memstat_mtl_first(struct memory_type_list *list)
{

	return (LIST_FIRST(&list->mtl_list));
}

struct memory_type *
memstat_mtl_next(struct memory_type *mtp)
{

	return (LIST_NEXT(mtp, mt_list));
}
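
/*
 * Usage sketch (illustrative only): a consumer typically populates a list
 * with one of the memstat_sysctl_*() or memstat_kvm_*() refresh routines
 * and then walks it with memstat_mtl_first()/memstat_mtl_next(), reading
 * fields through the accessors defined later in this file:
 *
 *	struct memory_type *mtp;
 *
 *	for (mtp = memstat_mtl_first(list); mtp != NULL;
 *	    mtp = memstat_mtl_next(mtp))
 *		printf("%s: %ju allocs\n", memstat_get_name(mtp),
 *		    (uintmax_t)memstat_get_numallocs(mtp));
 */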

void
_memstat_mtl_empty(struct memory_type_list *list)
{
	struct memory_type *mtp;

	while ((mtp = LIST_FIRST(&list->mtl_list))) {
		LIST_REMOVE(mtp, mt_list);
		free(mtp);
	}
}

void
memstat_mtl_free(struct memory_type_list *list)
{

	_memstat_mtl_empty(list);
	free(list);
}

int
memstat_mtl_geterror(struct memory_type_list *list)
{

	return (list->mtl_error);
}
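
/*
 * Error-handling sketch (illustrative only): the refresh routines return -1
 * on failure and record the cause on the list, which a caller can retrieve
 * via memstat_mtl_geterror() and format with memstat_strerror():
 *
 *	if (memstat_sysctl_malloc(list, 0) < 0)
 *		errx(1, "memstat_sysctl_malloc: %s",
 *		    memstat_strerror(memstat_mtl_geterror(list)));
 */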

/*
 * Look for an existing memory_type entry in a memory_type list, based on
 * the allocator and name of the type.  If not found, return NULL; neither
 * errno nor the list's memstat error state is modified.
 */
struct memory_type *
memstat_mtl_find(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
		if ((mtp->mt_allocator == allocator ||
		    allocator == ALLOCATOR_ANY) &&
		    strcmp(mtp->mt_name, name) == 0)
			return (mtp);
	}
	return (NULL);
}
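
/*
 * Lookup sketch (illustrative only): a single type can be queried by
 * allocator and name rather than by walking the whole list; the allocator
 * constants (ALLOCATOR_MALLOC, ALLOCATOR_UMA, ALLOCATOR_ANY) come from
 * memstat.h, and "mbuf" below is just an example UMA zone name:
 *
 *	struct memory_type *mtp;
 *
 *	mtp = memstat_mtl_find(list, ALLOCATOR_UMA, "mbuf");
 *	if (mtp != NULL)
 *		printf("mbuf: %ju in use\n",
 *		    (uintmax_t)memstat_get_count(mtp));
 */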

/*
 * Allocate a new memory_type with the specified allocator type and name,
 * then insert it into the list.  The structure will be zeroed.
 *
 * libmemstat(3) internal function.
 */
struct memory_type *
_memstat_mt_allocate(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	mtp = malloc(sizeof(*mtp));
	if (mtp == NULL)
		return (NULL);

	bzero(mtp, sizeof(*mtp));

	mtp->mt_allocator = allocator;
	strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
	LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
	return (mtp);
}

/*
 * Reset any libmemstat(3)-owned statistics in a memory_type record so that
 * it can be reused without incremental addition problems.  Caller-owned
 * memory is left "as-is", and must be updated by the caller if desired.
 *
 * libmemstat(3) internal function.
 */
void
_memstat_mt_reset_stats(struct memory_type *mtp)
{
	int i;

	mtp->mt_countlimit = 0;
	mtp->mt_byteslimit = 0;
	mtp->mt_sizemask = 0;
	mtp->mt_size = 0;

	mtp->mt_memalloced = 0;
	mtp->mt_memfreed = 0;
	mtp->mt_numallocs = 0;
	mtp->mt_numfrees = 0;
	mtp->mt_bytes = 0;
	mtp->mt_count = 0;
	mtp->mt_free = 0;
	mtp->mt_failures = 0;

	mtp->mt_zonefree = 0;
	mtp->mt_kegfree = 0;

	for (i = 0; i < MEMSTAT_MAXCPU; i++) {
		mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
		mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
		mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
		mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
		mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
		mtp->mt_percpu_cache[i].mtp_free = 0;
	}
}
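
/*
 * Backend usage sketch (illustrative only): a refresh routine in the
 * library typically reuses an existing entry when a type has been seen
 * before, allocates one otherwise, and clears the library-owned fields
 * before accumulating fresh statistics:
 *
 *	mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
 *	if (mtp == NULL)
 *		mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC, name);
 *	if (mtp == NULL) {
 *		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
 *		return (-1);
 *	}
 *	_memstat_mt_reset_stats(mtp);
 */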

/*
 * Accessor methods for struct memory_type.  These avoid encoding the
 * structure ABI into the application.
 */
const char *
memstat_get_name(const struct memory_type *mtp)
{

	return (mtp->mt_name);
}

int
memstat_get_allocator(const struct memory_type *mtp)
{

	return (mtp->mt_allocator);
}

uint64_t
memstat_get_countlimit(const struct memory_type *mtp)
{

	return (mtp->mt_countlimit);
}

uint64_t
memstat_get_byteslimit(const struct memory_type *mtp)
{

	return (mtp->mt_byteslimit);
}

uint64_t
memstat_get_sizemask(const struct memory_type *mtp)
{

	return (mtp->mt_sizemask);
}

uint64_t
memstat_get_size(const struct memory_type *mtp)
{

	return (mtp->mt_size);
}

uint64_t
memstat_get_memalloced(const struct memory_type *mtp)
{

	return (mtp->mt_memalloced);
}

uint64_t
memstat_get_memfreed(const struct memory_type *mtp)
{

	return (mtp->mt_memfreed);
}

uint64_t
memstat_get_numallocs(const struct memory_type *mtp)
{

	return (mtp->mt_numallocs);
}

uint64_t
memstat_get_numfrees(const struct memory_type *mtp)
{

	return (mtp->mt_numfrees);
}

uint64_t
memstat_get_bytes(const struct memory_type *mtp)
{

	return (mtp->mt_bytes);
}

uint64_t
memstat_get_count(const struct memory_type *mtp)
{

	return (mtp->mt_count);
}

uint64_t
memstat_get_free(const struct memory_type *mtp)
{

	return (mtp->mt_free);
}

uint64_t
memstat_get_failures(const struct memory_type *mtp)
{

	return (mtp->mt_failures);
}

void *
memstat_get_caller_pointer(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_pointer[index]);
}

void
memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
{

	mtp->mt_caller_pointer[index] = value;
}

uint64_t
memstat_get_caller_uint64(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_uint64[index]);
}

void
memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
{

	mtp->mt_caller_uint64[index] = value;
}

uint64_t
memstat_get_zonefree(const struct memory_type *mtp)
{

	return (mtp->mt_zonefree);
}

uint64_t
memstat_get_kegfree(const struct memory_type *mtp)
{

	return (mtp->mt_kegfree);
}

uint64_t
memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
}

uint64_t
memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
}

uint64_t
memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
}

uint64_t
memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
}

uint64_t
memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
}

void *
memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
}

void
memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
    int index, void *value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
}

uint64_t
memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
}

void
memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
    uint64_t value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
}

uint64_t
memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_cache[cpu].mtp_free);
}
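
/*
 * Per-CPU sketch (illustrative only): the per-CPU accessors take a CPU
 * index that the caller must bound itself, e.g. from the hw.ncpu sysctl
 * ("ncpus" below is such a caller-supplied count, and must not exceed
 * MEMSTAT_MAXCPU).  Summing the per-CPU cache free counts gives the items
 * currently sitting in per-CPU caches:
 *
 *	uint64_t cached = 0;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < ncpus; cpu++)
 *		cached += memstat_get_percpu_free(mtp, cpu);
 */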