1 /* $OpenBSD: kstat.c,v 1.1 2020/07/06 03:56:51 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 2020 David Gwynne <dlg@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 #include <sys/systm.h> 21 #include <sys/types.h> 22 #include <sys/malloc.h> 23 #include <sys/pool.h> 24 #include <sys/time.h> 25 26 /* for kstat_set_cpu */ 27 #include <sys/proc.h> 28 #include <sys/sched.h> 29 30 #include <sys/kstat.h> 31 32 RBT_HEAD(kstat_id_tree, kstat); 33 34 static inline int 35 kstat_id_cmp(const struct kstat *a, const struct kstat *b) 36 { 37 if (a->ks_id > b->ks_id) 38 return (1); 39 if (a->ks_id < b->ks_id) 40 return (-1); 41 42 return (0); 43 } 44 45 RBT_PROTOTYPE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp); 46 47 RBT_HEAD(kstat_pv_tree, kstat); 48 49 static inline int 50 kstat_pv_cmp(const struct kstat *a, const struct kstat *b) 51 { 52 int rv; 53 54 rv = strcmp(a->ks_provider, b->ks_provider); 55 if (rv != 0) 56 return (rv); 57 58 if (a->ks_instance > b->ks_instance) 59 return (1); 60 if (a->ks_instance < b->ks_instance) 61 return (-1); 62 63 rv = strcmp(a->ks_name, b->ks_name); 64 if (rv != 0) 65 return (rv); 66 67 if (a->ks_unit > b->ks_unit) 68 return (1); 69 if (a->ks_unit < b->ks_unit) 70 return (-1); 71 72 return (0); 73 } 74 75 
RBT_PROTOTYPE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp);

RBT_HEAD(kstat_nm_tree, kstat);

/*
 * Order kstats by name first: kstat name, then unit, then provider
 * name, then instance. This groups kstats that share a name (e.g.
 * the same statistic from different providers) together in the tree.
 */
static inline int
kstat_nm_cmp(const struct kstat *a, const struct kstat *b)
{
	int rv;

	rv = strcmp(a->ks_name, b->ks_name);
	if (rv != 0)
		return (rv);

	if (a->ks_unit > b->ks_unit)
		return (1);
	if (a->ks_unit < b->ks_unit)
		return (-1);

	rv = strcmp(a->ks_provider, b->ks_provider);
	if (rv != 0)
		return (rv);

	if (a->ks_instance > b->ks_instance)
		return (1);
	if (a->ks_instance < b->ks_instance)
		return (-1);

	return (0);
}

RBT_PROTOTYPE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp);

/*
 * Providers serialise access to their kstat data with a lock of
 * their own choosing; this little vtable abstracts over the
 * different lock types so the kstat core can take and release
 * whichever one was configured.
 */
struct kstat_lock_ops {
	void	(*enter)(void *);
	void	(*leave)(void *);
};

#define kstat_enter(_ks) (_ks)->ks_lock_ops->enter((_ks)->ks_lock)
#define kstat_leave(_ks) (_ks)->ks_lock_ops->leave((_ks)->ks_lock)

/* take the provider's rwlock as a reader while reading kstat data */
const struct kstat_lock_ops kstat_rlock_ops = {
	(void (*)(void *))rw_enter_read,
	(void (*)(void *))rw_exit_read,
};

/* take the provider's rwlock as a writer while reading kstat data */
const struct kstat_lock_ops kstat_wlock_ops = {
	(void (*)(void *))rw_enter_write,
	(void (*)(void *))rw_exit_write,
};

/* take the provider's mutex while reading kstat data */
const struct kstat_lock_ops kstat_mutex_ops = {
	(void (*)(void *))mtx_enter,
	(void (*)(void *))mtx_leave,
};

void	kstat_cpu_enter(void *);
void	kstat_cpu_leave(void *);

/* peg the reading process to the provider's cpu while reading */
const struct kstat_lock_ops kstat_cpu_ops = {
	kstat_cpu_enter,
	kstat_cpu_leave,
};

/* serialises the kstat trees, version, and id allocation below */
struct rwlock kstat_lock = RWLOCK_INITIALIZER("kstat");

/*
 * The global state is versioned so changes to the set of kstats
 * can be detected. This is an int so it can be read atomically on
 * any arch, which is a ridiculous optimisation, really.
 */
unsigned int kstat_version = 0;

/*
 * kstat structures have a unique identifier so they can be found
 * quickly. Identifiers are 64bit in the hope that it won't wrap
 * during the runtime of a system.
The identifiers start at 1 so that
 * 0 can be used as the first value for userland to iterate with.
 */
uint64_t kstat_next_id = 1;

/* the three lookup trees, all serialised by kstat_lock */
struct kstat_id_tree kstat_id_tree = RBT_INITIALIZER();
struct kstat_pv_tree kstat_pv_tree = RBT_INITIALIZER();
struct kstat_nm_tree kstat_nm_tree = RBT_INITIALIZER();
struct pool kstat_pool;

/* lock used on behalf of kstats that don't provide their own */
struct rwlock kstat_default_lock = RWLOCK_INITIALIZER("kstatlk");

/* default ks_read/ks_copy implementations, defined later in this file */
int kstat_read(struct kstat *);
int kstat_copy(struct kstat *, void *);

/*
 * /dev/kstat entry points.
 */

int
kstatattach(int num)
{
	/* XXX install system stats here */
	return (0);
}

int
kstatopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return (0);
}

int
kstatclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return (0);
}

/*
 * Common ioctl prologue: take the kstat tree lock (interruptibly,
 * so a signal can abort the wait) and reject stale requests. On
 * success the lock is held and must be released via
 * kstatioc_leave(). Returns EINVAL if the request's version doesn't
 * match the current kstat_version, unless KSTATIOC_F_IGNVER asks
 * for that check to be skipped.
 */
int
kstatioc_enter(struct kstat_req *ksreq)
{
	int error;

	error = rw_enter(&kstat_lock, RW_READ | RW_INTR);
	if (error != 0)
		return (error);

	if (!ISSET(ksreq->ks_rflags, KSTATIOC_F_IGNVER) &&
	    ksreq->ks_version != kstat_version) {
		error = EINVAL;
		goto error;
	}

	return (0);

error:
	rw_exit(&kstat_lock);
	return (error);
}

/*
 * Common ioctl epilogue: fill the request from the kstat the lookup
 * found (if any), copy its data out to userland if it was asked
 * for, and drop the lock taken by kstatioc_enter(). Returns ENOENT
 * when the lookup found nothing.
 */
int
kstatioc_leave(struct kstat_req *ksreq, struct kstat *ks)
{
	void *buf = NULL;
	size_t klen = 0, ulen = 0;
	struct timespec updated;
	int error = 0;

	if (ks == NULL) {
		error = ENOENT;
		goto error;
	}

	switch (ks->ks_state) {
	case KSTAT_S_CREATED:
		/* created but not installed yet: no data to report */
		ksreq->ks_updated = ks->ks_created;
		ksreq->ks_interval.tv_sec = 0;
		ksreq->ks_interval.tv_nsec = 0;
		ksreq->ks_datalen = 0;
		ksreq->ks_dataver = 0;
		break;

	case KSTAT_S_INSTALLED:
		ksreq->ks_dataver = ks->ks_dataver;
		ksreq->ks_interval = ks->ks_interval;

		if (ksreq->ks_data == NULL) {
			/* userland doesn't want actual data, so shortcut */
			kstat_enter(ks);
			ksreq->ks_datalen = ks->ks_datalen;
			ksreq->ks_updated = ks->ks_updated;
			kstat_leave(ks);
			break;
		}

		klen = ks->ks_datalen; /* KSTAT_F_REALLOC */
		buf = malloc(klen, M_TEMP, M_WAITOK|M_CANFAIL);
		if (buf == NULL) {
			error = ENOMEM;
			goto error;
		}

		/*
		 * Have the provider refresh and snapshot its data
		 * while its lock is held, so the copy is consistent.
		 */
		kstat_enter(ks);
		error = (*ks->ks_read)(ks);
		if (error == 0) {
			updated = ks->ks_updated;

			/* KSTAT_F_REALLOC */
			KASSERTMSG(ks->ks_datalen == klen,
			    "kstat doesnt support resized data yet");

			error = (*ks->ks_copy)(ks, buf);
		}
		kstat_leave(ks);

		if (error != 0)
			goto error;

		/* remember the userland buffer size before overwriting it */
		ulen = ksreq->ks_datalen;
		ksreq->ks_datalen = klen; /* KSTAT_F_REALLOC */
		ksreq->ks_updated = updated;
		break;
	default:
		panic("ks %p unexpected state %u", ks, ks->ks_state);
	}

	ksreq->ks_version = kstat_version;
	ksreq->ks_id = ks->ks_id;

	/* the strings were validated at create time, so growth is a bug */
	if (strlcpy(ksreq->ks_provider, ks->ks_provider,
	    sizeof(ksreq->ks_provider)) >= sizeof(ksreq->ks_provider))
		panic("kstat %p provider string has grown", ks);
	ksreq->ks_instance = ks->ks_instance;
	if (strlcpy(ksreq->ks_name, ks->ks_name,
	    sizeof(ksreq->ks_name)) >= sizeof(ksreq->ks_name))
		panic("kstat %p name string has grown", ks);
	ksreq->ks_unit = ks->ks_unit;

	ksreq->ks_created = ks->ks_created;
	ksreq->ks_type = ks->ks_type;
	ksreq->ks_state = ks->ks_state;

error:
	rw_exit(&kstat_lock);

	if (buf != NULL) {
		/* copy out no more than userland gave us room for */
		if (error == 0)
			error = copyout(buf, ksreq->ks_data, min(klen, ulen));

		free(buf, M_TEMP, klen);
	}

	return (error);
}

/* look up a kstat by its exact id */
int
kstatioc_find_id(struct kstat_req *ksreq)
{
	struct kstat *ks, key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_id = ksreq->ks_id;

	ks = RBT_FIND(kstat_id_tree, &kstat_id_tree, &key);

	return (kstatioc_leave(ksreq, ks));
}

/* find the first kstat at or after the requested id, for iteration */
int
kstatioc_nfind_id(struct kstat_req *ksreq)
{
	struct kstat *ks,
key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_id = ksreq->ks_id;

	ks = RBT_NFIND(kstat_id_tree, &kstat_id_tree, &key);

	return (kstatioc_leave(ksreq, ks));
}

/* look up a kstat by its exact provider:instance:name:unit tuple */
int
kstatioc_find_pv(struct kstat_req *ksreq)
{
	struct kstat *ks, key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_provider = ksreq->ks_provider;
	key.ks_instance = ksreq->ks_instance;
	key.ks_name = ksreq->ks_name;
	key.ks_unit = ksreq->ks_unit;

	ks = RBT_FIND(kstat_pv_tree, &kstat_pv_tree, &key);

	return (kstatioc_leave(ksreq, ks));
}

/* find the next kstat at or after the tuple, in provider order */
int
kstatioc_nfind_pv(struct kstat_req *ksreq)
{
	struct kstat *ks, key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_provider = ksreq->ks_provider;
	key.ks_instance = ksreq->ks_instance;
	key.ks_name = ksreq->ks_name;
	key.ks_unit = ksreq->ks_unit;

	ks = RBT_NFIND(kstat_pv_tree, &kstat_pv_tree, &key);

	return (kstatioc_leave(ksreq, ks));
}

/* look up a kstat by tuple, in name-first order */
int
kstatioc_find_nm(struct kstat_req *ksreq)
{
	struct kstat *ks, key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_name = ksreq->ks_name;
	key.ks_unit = ksreq->ks_unit;
	key.ks_provider = ksreq->ks_provider;
	key.ks_instance = ksreq->ks_instance;

	ks = RBT_FIND(kstat_nm_tree, &kstat_nm_tree, &key);

	return (kstatioc_leave(ksreq, ks));
}

/* find the next kstat at or after the tuple, in name-first order */
int
kstatioc_nfind_nm(struct kstat_req *ksreq)
{
	struct kstat *ks, key;
	int error;

	error = kstatioc_enter(ksreq);
	if (error != 0)
		return (error);

	key.ks_name = ksreq->ks_name;
	key.ks_unit = ksreq->ks_unit;
	key.ks_provider = ksreq->ks_provider;
	key.ks_instance = ksreq->ks_instance;

	ks = RBT_NFIND(kstat_nm_tree, &kstat_nm_tree,
&key);

	return (kstatioc_leave(ksreq, ks));
}

/*
 * ioctl dispatch for /dev/kstat. The kernel lock is dropped for the
 * duration since the kstat subsystem does its own locking.
 */
int
kstatioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kstat_req *ksreq = (struct kstat_req *)data;
	int error = 0;

	KERNEL_UNLOCK();

	switch (cmd) {
	case KSTATIOC_VERSION:
		*(unsigned int *)data = kstat_version;
		break;

	case KSTATIOC_FIND_ID:
		error = kstatioc_find_id(ksreq);
		break;
	case KSTATIOC_NFIND_ID:
		error = kstatioc_nfind_id(ksreq);
		break;
	case KSTATIOC_FIND_PROVIDER:
		error = kstatioc_find_pv(ksreq);
		break;
	case KSTATIOC_NFIND_PROVIDER:
		error = kstatioc_nfind_pv(ksreq);
		break;
	case KSTATIOC_FIND_NAME:
		error = kstatioc_find_nm(ksreq);
		break;
	case KSTATIOC_NFIND_NAME:
		error = kstatioc_nfind_nm(ksreq);
		break;

	default:
		error = ENOTTY;
		break;
	}

	KERNEL_LOCK();

	return (error);
}

/*
 * Lazily set up the kstat allocation pool. Safe to call more than
 * once; only the first call does the work.
 * NOTE(review): the initialized flag is not locked — presumably the
 * first callers run during autoconf, before concurrency is possible.
 */
void
kstat_init(void)
{
	static int initialized = 0;

	if (initialized)
		return;

	pool_init(&kstat_pool, sizeof(struct kstat), 0, IPL_NONE,
	    PR_WAITOK | PR_RWLOCK, "kstatmem", NULL);

	initialized = 1;
}

/*
 * Validate a kstat provider/name string: 1 to KSTAT_STRLEN-1
 * characters, each of [A-Za-z0-9._-]. Returns 0 if valid, -1 if not.
 */
int
kstat_strcheck(const char *str)
{
	size_t i, l;

	l = strlen(str);
	if (l == 0 || l >= KSTAT_STRLEN)
		return (-1);
	for (i = 0; i < l; i++) {
		int ch = str[i];
		if (ch >= 'a' && ch <= 'z')
			continue;
		if (ch >= 'A' && ch <= 'Z')
			continue;
		if (ch >= '0' && ch <= '9')
			continue;
		switch (ch) {
		case '-':
		case '_':
		case '.':
			break;
		default:
			return (-1);
		}
	}

	return (0);
}

/*
 * Allocate a kstat and link it into the global trees in the CREATED
 * state. Returns NULL if a kstat with the same
 * provider:instance:name:unit tuple already exists. The provider
 * and name strings are only referenced, not copied, so they must
 * outlive the kstat.
 */
struct kstat *
kstat_create(const char *provider, unsigned int instance,
    const char *name, unsigned int unit,
    unsigned int type, unsigned int flags)
{
	struct kstat *ks, *oks;

	if (kstat_strcheck(provider) == -1)
		panic("invalid provider string");
	if (kstat_strcheck(name) == -1)
		panic("invalid name string");

	kstat_init();

	ks = pool_get(&kstat_pool, PR_WAITOK|PR_ZERO);

	ks->ks_provider = provider;
	ks->ks_instance = instance;
	ks->ks_name = name;
	ks->ks_unit = unit;
	ks->ks_flags = flags;
	ks->ks_type = type;
	ks->ks_state = KSTAT_S_CREATED;

	getnanouptime(&ks->ks_created);
	ks->ks_updated = ks->ks_created;

	/* default locking and data handling; providers may override */
	ks->ks_lock = &kstat_default_lock;
	ks->ks_lock_ops = &kstat_wlock_ops;
	ks->ks_read = kstat_read;
	ks->ks_copy = kstat_copy;

	rw_enter_write(&kstat_lock);
	ks->ks_id = kstat_next_id;

	/*
	 * The pv tree insert is the duplicate check; only bump the
	 * id and version, and insert into the other trees, once it
	 * has succeeded.
	 */
	oks = RBT_INSERT(kstat_pv_tree, &kstat_pv_tree, ks);
	if (oks == NULL) {
		/* commit */
		kstat_next_id++;
		kstat_version++;

		oks = RBT_INSERT(kstat_nm_tree, &kstat_nm_tree, ks);
		if (oks != NULL)
			panic("kstat name collision! (%llu)", ks->ks_id);

		oks = RBT_INSERT(kstat_id_tree, &kstat_id_tree, ks);
		if (oks != NULL)
			panic("kstat id collision! (%llu)", ks->ks_id);
	}
	rw_exit_write(&kstat_lock);

	if (oks != NULL) {
		/* duplicate tuple: give the allocation back */
		pool_put(&kstat_pool, ks);
		return (NULL);
	}

	return (ks);
}

/* serialise data access with the provider's rwlock, taken as reader */
void
kstat_set_rlock(struct kstat *ks, struct rwlock *rwl)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = rwl;
	ks->ks_lock_ops = &kstat_rlock_ops;
}

/* serialise data access with the provider's rwlock, taken as writer */
void
kstat_set_wlock(struct kstat *ks, struct rwlock *rwl)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = rwl;
	ks->ks_lock_ops = &kstat_wlock_ops;
}

/* serialise data access with the provider's mutex */
void
kstat_set_mutex(struct kstat *ks, struct mutex *mtx)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = mtx;
	ks->ks_lock_ops = &kstat_mutex_ops;
}

/* peg the current process to the kstat's cpu while its data is read */
void
kstat_cpu_enter(void *p)
{
	struct cpu_info *ci = p;
	sched_peg_curproc(ci);
}

void
kstat_cpu_leave(void *p)
{
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}

/* read per-cpu kstat data from the cpu it lives on */
void
kstat_set_cpu(struct kstat *ks, struct cpu_info *ci)
{
	KASSERT(ks->ks_state == KSTAT_S_CREATED);

	ks->ks_lock = ci;
	ks->ks_lock_ops = &kstat_cpu_ops;
}

/* ks_read implementation for kstats whose data is always current */
int
kstat_read_nop(struct kstat *ks)
{
	return (0);
}

/*
 * Move a kstat to the INSTALLED state, making its data visible to
 * userland reads. Without KSTAT_F_REALLOC the provider must have
 * set up either ks_copy or ks_data, and a non-zero ks_datalen,
 * before installing.
 */
void
kstat_install(struct kstat *ks)
{
	if (!ISSET(ks->ks_flags, KSTAT_F_REALLOC)) {
		KASSERTMSG(ks->ks_copy != NULL || ks->ks_data != NULL,
		    "kstat %p %s:%u:%s:%u must provide ks_copy or ks_data", ks,
		    ks->ks_provider, ks->ks_instance, ks->ks_name, ks->ks_unit);
		KASSERT(ks->ks_datalen > 0);
	}

	rw_enter_write(&kstat_lock);
	ks->ks_state = KSTAT_S_INSTALLED;
	rw_exit_write(&kstat_lock);
}

/*
 * Unlink a kstat from all three trees and free it. Bumps
 * kstat_version so userland can notice the set of kstats changed.
 */
void
kstat_destroy(struct kstat *ks)
{
	rw_enter_write(&kstat_lock);
	RBT_REMOVE(kstat_id_tree, &kstat_id_tree, ks);
	RBT_REMOVE(kstat_pv_tree, &kstat_pv_tree, ks);
	RBT_REMOVE(kstat_nm_tree, &kstat_nm_tree, ks);
	kstat_version++;
	rw_exit_write(&kstat_lock);

	pool_put(&kstat_pool, ks);
}

/* default ks_read: just timestamp the snapshot */
int
kstat_read(struct kstat *ks)
{
	getnanouptime(&ks->ks_updated);
	return (0);
}

/* default ks_copy: ks_data holds ks_datalen bytes of current data */
int
kstat_copy(struct kstat *ks, void *buf)
{
	memcpy(buf, ks->ks_data, ks->ks_datalen);
	return (0);
}

RBT_GENERATE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp);
RBT_GENERATE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp);
RBT_GENERATE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp);

/* initialise a key/value pair with no unit annotation */
void
kstat_kv_init(struct kstat_kv *kv, const char *name, enum kstat_kv_type type)
{
	memset(kv, 0, sizeof(*kv));
	strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated?
*/
	kv->kv_type = type;
	kv->kv_unit = KSTAT_KV_U_NONE;
}

/*
 * Initialise a key/value pair with a unit annotation. Units only
 * make sense on the integer types; anything else is a bug in the
 * provider and panics.
 */
void
kstat_kv_unit_init(struct kstat_kv *kv, const char *name,
    enum kstat_kv_type type, enum kstat_kv_unit unit)
{
	switch (type) {
	case KSTAT_KV_T_COUNTER64:
	case KSTAT_KV_T_COUNTER32:
	case KSTAT_KV_T_UINT64:
	case KSTAT_KV_T_INT64:
	case KSTAT_KV_T_UINT32:
	case KSTAT_KV_T_INT32:
		break;
	default:
		panic("kv unit init %s: unit for non-integer type", name);
	}

	memset(kv, 0, sizeof(*kv));
	strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? */
	kv->kv_type = type;
	kv->kv_unit = unit;
}