xref: /openbsd/sys/dev/kstat.c (revision e6b4f1cd)
1 /* $OpenBSD: kstat.c,v 1.5 2025/01/18 12:31:49 mglocker Exp $ */
2 
3 /*
4  * Copyright (c) 2020 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/types.h>
22 #include <sys/malloc.h>
23 #include <sys/pool.h>
24 #include <sys/time.h>
25 
26 /* for kstat_set_cpu */
27 #include <sys/proc.h>
28 #include <sys/sched.h>
29 
30 #include <sys/kstat.h>
31 
32 RBT_HEAD(kstat_id_tree, kstat);
33 
34 static inline int
kstat_id_cmp(const struct kstat * a,const struct kstat * b)35 kstat_id_cmp(const struct kstat *a, const struct kstat *b)
36 {
37 	if (a->ks_id > b->ks_id)
38 		return (1);
39 	if (a->ks_id < b->ks_id)
40 		return (-1);
41 
42 	return (0);
43 }
44 
45 RBT_PROTOTYPE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp);
46 
47 RBT_HEAD(kstat_pv_tree, kstat);
48 
49 static inline int
kstat_pv_cmp(const struct kstat * a,const struct kstat * b)50 kstat_pv_cmp(const struct kstat *a, const struct kstat *b)
51 {
52 	int rv;
53 
54 	rv = strcmp(a->ks_provider, b->ks_provider);
55 	if (rv != 0)
56 		return (rv);
57 
58 	if (a->ks_instance > b->ks_instance)
59 		return (1);
60 	if (a->ks_instance < b->ks_instance)
61 		return (-1);
62 
63 	rv = strcmp(a->ks_name, b->ks_name);
64 	if (rv != 0)
65 		return (rv);
66 
67 	if (a->ks_unit > b->ks_unit)
68 		return (1);
69 	if (a->ks_unit < b->ks_unit)
70 		return (-1);
71 
72 	return (0);
73 }
74 
75 RBT_PROTOTYPE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp);
76 
77 RBT_HEAD(kstat_nm_tree, kstat);
78 
79 static inline int
kstat_nm_cmp(const struct kstat * a,const struct kstat * b)80 kstat_nm_cmp(const struct kstat *a, const struct kstat *b)
81 {
82 	int rv;
83 
84 	rv = strcmp(a->ks_name, b->ks_name);
85 	if (rv != 0)
86 		return (rv);
87 
88 	if (a->ks_unit > b->ks_unit)
89 		return (1);
90 	if (a->ks_unit < b->ks_unit)
91 		return (-1);
92 
93 	rv = strcmp(a->ks_provider, b->ks_provider);
94 	if (rv != 0)
95 		return (rv);
96 
97 	if (a->ks_instance > b->ks_instance)
98 		return (1);
99 	if (a->ks_instance < b->ks_instance)
100 		return (-1);
101 
102 	return (0);
103 }
104 
105 RBT_PROTOTYPE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp);
106 
/*
 * Each kstat's data is protected by a lock supplied by its owner so
 * ks_read/ks_copy see consistent state. The lock can take several
 * forms (rwlock, mutex, cpu pegging), so enter/leave are indirected
 * through this ops struct; the kstat's opaque ks_lock pointer is
 * passed as the argument.
 */
struct kstat_lock_ops {
	void	(*enter)(void *);
	void	(*leave)(void *);
};

/* take/release the kstat's lock around ks_read/ks_copy */
#define kstat_enter(_ks) (_ks)->ks_lock_ops->enter((_ks)->ks_lock)
#define kstat_leave(_ks) (_ks)->ks_lock_ops->leave((_ks)->ks_lock)

/* ks_lock is a struct rwlock *, taken for reading */
const struct kstat_lock_ops kstat_rlock_ops = {
	(void (*)(void *))rw_enter_read,
	(void (*)(void *))rw_exit_read,
};

/* ks_lock is a struct rwlock *, taken for writing */
const struct kstat_lock_ops kstat_wlock_ops = {
	(void (*)(void *))rw_enter_write,
	(void (*)(void *))rw_exit_write,
};

/* ks_lock is a struct mutex * */
const struct kstat_lock_ops kstat_mutex_ops = {
	(void (*)(void *))mtx_enter,
	(void (*)(void *))mtx_leave,
};

/* ks_lock is a struct cpu_info *; "locking" pegs curproc to that cpu */
void kstat_cpu_enter(void *);
void kstat_cpu_leave(void *);

const struct kstat_lock_ops kstat_cpu_ops = {
	kstat_cpu_enter,
	kstat_cpu_leave,
};
137 
/* serialises the lookup trees, version, and id allocation below */
struct rwlock		kstat_lock = RWLOCK_INITIALIZER("kstat");

/*
 * The global state is versioned so changes to the set of kstats
 * can be detected. This is an int so it can be read atomically on
 * any arch, which is a ridiculous optimisation, really.
 */
unsigned int		kstat_version = 0;

/*
 * kstat structures have a unique identifier so they can be found
 * quickly. Identifiers are 64bit in the hope that it won't wrap
 * during the runtime of a system. The identifiers start at 1 so that
 * 0 can be used as the first value for userland to iterate with.
 */
uint64_t			kstat_next_id = 1;

/* the three lookup indexes: by id, by provider tuple, by name tuple */
struct kstat_id_tree	kstat_id_tree = RBT_INITIALIZER();
struct kstat_pv_tree	kstat_pv_tree = RBT_INITIALIZER();
struct kstat_nm_tree	kstat_nm_tree = RBT_INITIALIZER();
struct pool		kstat_pool;

/* shared by kstats that don't supply their own lock */
struct rwlock		kstat_default_lock = RWLOCK_INITIALIZER("kstatlk");

/* default ks_read/ks_copy implementations */
int	kstat_read(struct kstat *);
int	kstat_copy(struct kstat *, void *);
164 
/*
 * Pseudo-device attach hook for kstat. Nothing is set up here yet.
 */
int
kstatattach(int num)
{
	/* XXX install system stats here */
	return (0);
}
171 
/*
 * Open /dev/kstat: there is no per-open state, so always succeed.
 */
int
kstatopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return (0);
}
177 
/*
 * Close /dev/kstat: nothing to tear down.
 */
int
kstatclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return (0);
}
183 
/*
 * Start a kstat lookup ioctl: take the global lock (interruptibly)
 * and check that userland's idea of the kstat set is still current,
 * unless the request asked to ignore the version.
 *
 * Returns 0 with kstat_lock held for reading; on failure an errno
 * is returned with the lock released.
 */
int
kstatioc_enter(struct kstat_req *ksreq)
{
	int error;

	error = rw_enter(&kstat_lock, RW_READ | RW_INTR);
	if (error != 0)
		return (error);

	if (!ISSET(ksreq->ks_rflags, KSTATIOC_F_IGNVER) &&
	    ksreq->ks_version != kstat_version) {
		/* the set of kstats changed under userland's feet */
		error = EINVAL;
		goto error;
	}

	return (0);

error:
	rw_exit(&kstat_lock);
	return (error);
}
205 
/*
 * Finish a kstat lookup ioctl: translate the found kstat (or the
 * lack of one) into the request structure, optionally reading the
 * kstat's data and copying it out to userland.
 *
 * Called with kstat_lock held (taken by kstatioc_enter()); the
 * lock is released on all paths. The copyout is deferred until
 * after all locks are dropped.
 */
int
kstatioc_leave(struct kstat_req *ksreq, struct kstat *ks)
{
	void *buf = NULL;
	size_t klen = 0, ulen = 0;
	struct timespec updated;
	int error = 0;

	if (ks == NULL) {
		/* nothing matched the lookup */
		error = ENOENT;
		goto error;
	}

	switch (ks->ks_state) {
	case KSTAT_S_CREATED:
		/* created but not installed: metadata only, no data yet */
		ksreq->ks_updated = ks->ks_created;
		ksreq->ks_interval.tv_sec = 0;
		ksreq->ks_interval.tv_nsec = 0;
		ksreq->ks_datalen = 0;
		ksreq->ks_dataver = 0;
		break;

	case KSTAT_S_INSTALLED:
		ksreq->ks_dataver = ks->ks_dataver;
		ksreq->ks_interval = ks->ks_interval;

		if (ksreq->ks_data == NULL) {
			/* userland doesn't want actual data, so shortcut */
			kstat_enter(ks);
			ksreq->ks_datalen = ks->ks_datalen;
			ksreq->ks_updated = ks->ks_updated;
			kstat_leave(ks);
			break;
		}

		klen = ks->ks_datalen; /* KSTAT_F_REALLOC */
		buf = malloc(klen, M_TEMP, M_WAITOK|M_CANFAIL);
		if (buf == NULL) {
			error = ENOMEM;
			goto error;
		}

		/* refresh and snapshot the data under the kstat's lock */
		kstat_enter(ks);
		error = (*ks->ks_read)(ks);
		if (error == 0) {
			updated = ks->ks_updated;

			/* KSTAT_F_REALLOC */
			KASSERTMSG(ks->ks_datalen == klen,
			    "kstat doesn't support resized data yet");

			error = (*ks->ks_copy)(ks, buf);
		}
		kstat_leave(ks);

		if (error != 0)
			goto error;

		ulen = ksreq->ks_datalen; /* size of userland's buffer */
		ksreq->ks_datalen = klen; /* KSTAT_F_REALLOC */
		ksreq->ks_updated = updated;
		break;
	default:
		panic("ks %p unexpected state %u", ks, ks->ks_state);
	}

	ksreq->ks_version = kstat_version;
	ksreq->ks_id = ks->ks_id;

	/* the panics below catch strings that no longer fit the request */
	if (strlcpy(ksreq->ks_provider, ks->ks_provider,
	    sizeof(ksreq->ks_provider)) >= sizeof(ksreq->ks_provider))
		panic("kstat %p provider string has grown", ks);
	ksreq->ks_instance = ks->ks_instance;
	if (strlcpy(ksreq->ks_name, ks->ks_name,
	    sizeof(ksreq->ks_name)) >= sizeof(ksreq->ks_name))
		panic("kstat %p name string has grown", ks);
	ksreq->ks_unit = ks->ks_unit;

	ksreq->ks_created = ks->ks_created;
	ksreq->ks_type = ks->ks_type;
	ksreq->ks_state = ks->ks_state;

error:
	rw_exit(&kstat_lock);

	/* copy out with no locks held; cap at both buffer sizes */
	if (buf != NULL) {
		if (error == 0)
			error = copyout(buf, ksreq->ks_data, min(klen, ulen));

		free(buf, M_TEMP, klen);
	}

	return (error);
}
300 
301 int
kstatioc_find_id(struct kstat_req * ksreq)302 kstatioc_find_id(struct kstat_req *ksreq)
303 {
304 	struct kstat *ks, key;
305 	int error;
306 
307 	error = kstatioc_enter(ksreq);
308 	if (error != 0)
309 		return (error);
310 
311 	key.ks_id = ksreq->ks_id;
312 
313 	ks = RBT_FIND(kstat_id_tree, &kstat_id_tree, &key);
314 
315 	return (kstatioc_leave(ksreq, ks));
316 }
317 
318 int
kstatioc_nfind_id(struct kstat_req * ksreq)319 kstatioc_nfind_id(struct kstat_req *ksreq)
320 {
321 	struct kstat *ks, key;
322 	int error;
323 
324 	error = kstatioc_enter(ksreq);
325 	if (error != 0)
326 		return (error);
327 
328 	key.ks_id = ksreq->ks_id;
329 
330 	ks = RBT_NFIND(kstat_id_tree, &kstat_id_tree, &key);
331 
332 	return (kstatioc_leave(ksreq, ks));
333 }
334 
335 int
kstatioc_find_pv(struct kstat_req * ksreq)336 kstatioc_find_pv(struct kstat_req *ksreq)
337 {
338 	struct kstat *ks, key;
339 	int error;
340 
341 	error = kstatioc_enter(ksreq);
342 	if (error != 0)
343 		return (error);
344 
345 	key.ks_provider = ksreq->ks_provider;
346 	key.ks_instance = ksreq->ks_instance;
347 	key.ks_name = ksreq->ks_name;
348 	key.ks_unit = ksreq->ks_unit;
349 
350 	ks = RBT_FIND(kstat_pv_tree, &kstat_pv_tree, &key);
351 
352 	return (kstatioc_leave(ksreq, ks));
353 }
354 
355 int
kstatioc_nfind_pv(struct kstat_req * ksreq)356 kstatioc_nfind_pv(struct kstat_req *ksreq)
357 {
358 	struct kstat *ks, key;
359 	int error;
360 
361 	error = kstatioc_enter(ksreq);
362 	if (error != 0)
363 		return (error);
364 
365 	key.ks_provider = ksreq->ks_provider;
366 	key.ks_instance = ksreq->ks_instance;
367 	key.ks_name = ksreq->ks_name;
368 	key.ks_unit = ksreq->ks_unit;
369 
370 	ks = RBT_NFIND(kstat_pv_tree, &kstat_pv_tree, &key);
371 
372 	return (kstatioc_leave(ksreq, ks));
373 }
374 
375 int
kstatioc_find_nm(struct kstat_req * ksreq)376 kstatioc_find_nm(struct kstat_req *ksreq)
377 {
378 	struct kstat *ks, key;
379 	int error;
380 
381 	error = kstatioc_enter(ksreq);
382 	if (error != 0)
383 		return (error);
384 
385 	key.ks_name = ksreq->ks_name;
386 	key.ks_unit = ksreq->ks_unit;
387 	key.ks_provider = ksreq->ks_provider;
388 	key.ks_instance = ksreq->ks_instance;
389 
390 	ks = RBT_FIND(kstat_nm_tree, &kstat_nm_tree, &key);
391 
392 	return (kstatioc_leave(ksreq, ks));
393 }
394 
395 int
kstatioc_nfind_nm(struct kstat_req * ksreq)396 kstatioc_nfind_nm(struct kstat_req *ksreq)
397 {
398 	struct kstat *ks, key;
399 	int error;
400 
401 	error = kstatioc_enter(ksreq);
402 	if (error != 0)
403 		return (error);
404 
405 	key.ks_name = ksreq->ks_name;
406 	key.ks_unit = ksreq->ks_unit;
407 	key.ks_provider = ksreq->ks_provider;
408 	key.ks_instance = ksreq->ks_instance;
409 
410 	ks = RBT_NFIND(kstat_nm_tree, &kstat_nm_tree, &key);
411 
412 	return (kstatioc_leave(ksreq, ks));
413 }
414 
/*
 * ioctl handler for /dev/kstat: dispatch the version and lookup
 * requests. The kernel lock is dropped for the duration; the kstat
 * state is protected by kstat_lock instead.
 */
int
kstatioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kstat_req *ksreq = (struct kstat_req *)data;
	int error = 0;

	KERNEL_UNLOCK();

	switch (cmd) {
	case KSTATIOC_VERSION:
		/* an int can be read atomically, so no lock is needed */
		*(unsigned int *)data = kstat_version;
		break;

	case KSTATIOC_FIND_ID:
		error = kstatioc_find_id(ksreq);
		break;
	case KSTATIOC_NFIND_ID:
		error = kstatioc_nfind_id(ksreq);
		break;
	case KSTATIOC_FIND_PROVIDER:
		error = kstatioc_find_pv(ksreq);
		break;
	case KSTATIOC_NFIND_PROVIDER:
		error = kstatioc_nfind_pv(ksreq);
		break;
	case KSTATIOC_FIND_NAME:
		error = kstatioc_find_nm(ksreq);
		break;
	case KSTATIOC_NFIND_NAME:
		error = kstatioc_nfind_nm(ksreq);
		break;

	default:
		error = ENOTTY;
		break;
	}

	KERNEL_LOCK();

	return (error);
}
456 
457 void
kstat_init(void)458 kstat_init(void)
459 {
460 	static int initialized = 0;
461 
462 	if (initialized)
463 		return;
464 
465 	pool_init(&kstat_pool, sizeof(struct kstat), 0, IPL_NONE,
466 	    PR_WAITOK | PR_RWLOCK, "kstatmem", NULL);
467 
468 	initialized = 1;
469 }
470 
471 int
kstat_strcheck(const char * str)472 kstat_strcheck(const char *str)
473 {
474 	size_t i, l;
475 
476 	l = strlen(str);
477 	if (l == 0 || l >= KSTAT_STRLEN)
478 		return (-1);
479 	for (i = 0; i < l; i++) {
480 		int ch = str[i];
481 		if (ch >= 'a' && ch <= 'z')
482 			continue;
483 		if (ch >= 'A' && ch <= 'Z')
484 			continue;
485 		if (ch >= '0' && ch <= '9')
486 			continue;
487 		switch (ch) {
488 		case '-':
489 		case '_':
490 		case '.':
491 			break;
492 		default:
493 			return (-1);
494 		}
495 	}
496 
497 	return (0);
498 }
499 
/*
 * Allocate a kstat and register it under the
 * provider:instance:name:unit tuple. Returns NULL if a kstat with
 * the same tuple already exists, otherwise the new kstat in the
 * CREATED state for the caller to configure and kstat_install().
 *
 * The provider and name strings are referenced, not copied, so
 * they must outlive the kstat.
 */
struct kstat *
kstat_create(const char *provider, unsigned int instance,
    const char *name, unsigned int unit,
    unsigned int type, unsigned int flags)
{
	struct kstat *ks, *oks;

	/* the strings become tree keys, so they must be well formed */
	if (kstat_strcheck(provider) == -1)
		panic("invalid provider string");
	if (kstat_strcheck(name) == -1)
		panic("invalid name string");

	kstat_init();

	ks = pool_get(&kstat_pool, PR_WAITOK|PR_ZERO);

	ks->ks_provider = provider;
	ks->ks_instance = instance;
	ks->ks_name = name;
	ks->ks_unit = unit;
	ks->ks_flags = flags;
	ks->ks_type = type;
	ks->ks_state = KSTAT_S_CREATED;

	getnanouptime(&ks->ks_created);
	ks->ks_updated = ks->ks_created;

	/* default protection and data handling; setters can override */
	ks->ks_lock = &kstat_default_lock;
	ks->ks_lock_ops = &kstat_wlock_ops;
	ks->ks_read = kstat_read;
	ks->ks_copy = kstat_copy;

	rw_enter_write(&kstat_lock);
	ks->ks_id = kstat_next_id;

	/* the pv tree insert detects duplicate tuples */
	oks = RBT_INSERT(kstat_pv_tree, &kstat_pv_tree, ks);
	if (oks == NULL) {
		/* commit */
		kstat_next_id++;
		kstat_version++;

		oks = RBT_INSERT(kstat_nm_tree, &kstat_nm_tree, ks);
		if (oks != NULL)
			panic("kstat name collision! (%llu)", ks->ks_id);

		oks = RBT_INSERT(kstat_id_tree, &kstat_id_tree, ks);
		if (oks != NULL)
			panic("kstat id collision! (%llu)", ks->ks_id);
	}
	rw_exit_write(&kstat_lock);

	if (oks != NULL) {
		/* duplicate tuple: give the memory back */
		pool_put(&kstat_pool, ks);
		return (NULL);
	}

	return (ks);
}
558 
/*
 * Protect the kstat's data with the caller's rwlock, taken for
 * reading. Must be called before kstat_install().
 */
void
kstat_set_rlock(struct kstat *ks, struct rwlock *rwl)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = rwl;
	ks->ks_lock_ops = &kstat_rlock_ops;
}
567 
/*
 * Protect the kstat's data with the caller's rwlock, taken for
 * writing. Must be called before kstat_install().
 */
void
kstat_set_wlock(struct kstat *ks, struct rwlock *rwl)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = rwl;
	ks->ks_lock_ops = &kstat_wlock_ops;
}
576 
/*
 * Protect the kstat's data with the caller's mutex. Must be called
 * before kstat_install().
 */
void
kstat_set_mutex(struct kstat *ks, struct mutex *mtx)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = mtx;
	ks->ks_lock_ops = &kstat_mutex_ops;
}
585 
/*
 * "Lock" a per-cpu kstat by pegging the current proc onto the
 * kstat's cpu, so ks_read/ks_copy run there.
 */
void
kstat_cpu_enter(void *p)
{
	struct cpu_info *ci = p;
	sched_peg_curproc(ci);
}
592 
/*
 * Release the cpu "lock" by unpegging the current proc.
 */
void
kstat_cpu_leave(void *p)
{
	sched_unpeg_curproc();
}
598 
/*
 * Protect the kstat's data by pegging readers to a cpu instead of
 * taking a lock; ks_lock carries the struct cpu_info pointer. Must
 * be called before kstat_install().
 */
void
kstat_set_cpu(struct kstat *ks, struct cpu_info *ci)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = ci;
	ks->ks_lock_ops = &kstat_cpu_ops;
}
607 
/*
 * ks_read implementation for kstats that have no work to do before
 * a copy; deliberately leaves ks_updated alone.
 */
int
kstat_read_nop(struct kstat *ks)
{
	return (0);
}
613 
/*
 * Publish a kstat: move it to the INSTALLED state so the ioctl
 * path will read and copy out its data. Unless the kstat resizes
 * its data (KSTAT_F_REALLOC), it must already provide a copy op or
 * a static data buffer, and a non-zero data length.
 */
void
kstat_install(struct kstat *ks)
{
	if (!ISSET(ks->ks_flags, KSTAT_F_REALLOC)) {
		KASSERTMSG(ks->ks_copy != NULL || ks->ks_data != NULL,
		    "kstat %p %s:%u:%s:%u must provide ks_copy or ks_data", ks,
		    ks->ks_provider, ks->ks_instance, ks->ks_name, ks->ks_unit);
		KASSERT(ks->ks_datalen > 0);
	}

	rw_enter_write(&kstat_lock);
	ks->ks_state = KSTAT_S_INSTALLED;
	rw_exit_write(&kstat_lock);
}
628 
/*
 * Take an installed kstat back to the CREATED state. It stays
 * registered in the lookup trees, but readers only get metadata
 * until it is installed again.
 */
void
kstat_remove(struct kstat *ks)
{
	rw_enter_write(&kstat_lock);
	KASSERTMSG(ks->ks_state == KSTAT_S_INSTALLED,
	    "kstat %p %s:%u:%s:%u is not installed", ks,
	    ks->ks_provider, ks->ks_instance, ks->ks_name, ks->ks_unit);

	ks->ks_state = KSTAT_S_CREATED;
	rw_exit_write(&kstat_lock);
}
640 
/*
 * Unlink a kstat from all three lookup trees and free it. Bumps
 * the global version so userland can notice the change.
 */
void
kstat_destroy(struct kstat *ks)
{
	rw_enter_write(&kstat_lock);
	RBT_REMOVE(kstat_id_tree, &kstat_id_tree, ks);
	RBT_REMOVE(kstat_pv_tree, &kstat_pv_tree, ks);
	RBT_REMOVE(kstat_nm_tree, &kstat_nm_tree, ks);
	kstat_version++;
	rw_exit_write(&kstat_lock);

	pool_put(&kstat_pool, ks);
}
653 
/*
 * Default ks_read implementation: just refresh the update
 * timestamp.
 */
int
kstat_read(struct kstat *ks)
{
	getnanouptime(&ks->ks_updated);
	return (0);
}
660 
/*
 * Default ks_copy implementation: copy the kstat's data buffer
 * into the destination.
 */
int
kstat_copy(struct kstat *ks, void *buf)
{
	memcpy(buf, ks->ks_data, ks->ks_datalen);
	return (0);
}
667 
/* emit the red-black tree code for the three lookup indexes */
RBT_GENERATE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp);
RBT_GENERATE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp);
RBT_GENERATE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp);
671 
672 void
kstat_kv_init(struct kstat_kv * kv,const char * name,enum kstat_kv_type type)673 kstat_kv_init(struct kstat_kv *kv, const char *name, enum kstat_kv_type type)
674 {
675 	memset(kv, 0, sizeof(*kv));
676 	strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? */
677 	kv->kv_type = type;
678 	kv->kv_unit = KSTAT_KV_U_NONE;
679 }
680 
681 void
kstat_kv_unit_init(struct kstat_kv * kv,const char * name,enum kstat_kv_type type,enum kstat_kv_unit unit)682 kstat_kv_unit_init(struct kstat_kv *kv, const char *name,
683     enum kstat_kv_type type, enum kstat_kv_unit unit)
684 {
685 	switch (type) {
686 	case KSTAT_KV_T_COUNTER64:
687 	case KSTAT_KV_T_COUNTER32:
688 	case KSTAT_KV_T_COUNTER16:
689 	case KSTAT_KV_T_UINT64:
690 	case KSTAT_KV_T_INT64:
691 	case KSTAT_KV_T_UINT32:
692 	case KSTAT_KV_T_INT32:
693 	case KSTAT_KV_T_UINT16:
694 	case KSTAT_KV_T_INT16:
695 		break;
696 	default:
697 		panic("kv unit init %s: unit for non-integer type", name);
698 	}
699 
700 	memset(kv, 0, sizeof(*kv));
701 	strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? */
702 	kv->kv_type = type;
703 	kv->kv_unit = unit;
704 }
705