1 /* 2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey M. Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of The DragonFly Project nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific, prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.4 2005/07/13 16:06:04 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/globaldata.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/objcache.h>
#include <sys/thread.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine");

/* Default number of object slots per magazine when the caller passes 0. */
#define INITIAL_MAG_CAPACITY 256

/*
 * A magazine is a fixed-capacity stack of object pointers.  'rounds' is
 * the number of objects currently held; 'objects' is a flexible array
 * member sized to 'capacity' entries at allocation time (see mag_alloc()).
 */
struct magazine {
	int			rounds;
	int			capacity;
	SLIST_ENTRY(magazine)	nextmagazine;
	void			*objects[];
};

SLIST_HEAD(magazinelist, magazine);

/*
 * Per-cluster cache of magazines.
 * All fields in this structure are protected by the token.
 */
struct magazinedepot {
	/*
	 * The per-cpu object caches only exchange completely full or
	 * completely empty magazines with the depot layer, so only these
	 * two types of magazines need to be cached here.
	 */
	struct magazinelist fullmagazines;
	struct magazinelist emptymagazines;
	int		magcapacity;	/* object slots per magazine */

	/* protect this structure */
	struct lwkt_token token;

	/*
	 * Objects not yet allocated towards the cluster limit; set to -1
	 * by objcache_create() to disable the limit entirely.
	 */
	int		unallocated_objects;

	/* infrequently used fields */
	int		waiting;	/* waiting for another cpu to
					 * return a full magazine to
					 * the depot */
	int		contested;	/* depot contention count */
};

/*
 * Per-cpu object cache.
 * All fields in this structure are protected by crit_enter().
 */
struct percpu_objcache {
	struct magazine	*loaded_magazine;	/* active magazine */
	struct magazine	*previous_magazine;	/* backup magazine */

	/* statistics */
	int		gets_cumulative;	/* total calls to get */
	int		gets_null;		/* objcache_get returned NULL */
	int		puts_cumulative;	/* total calls to put */
	int		puts_othercluster;	/* returned to other cluster */

	/* infrequently used fields */
	int		waiting;	/* waiting for a thread on this cpu to
					 * return an obj to the per-cpu cache */
};

/* only until we have NUMA cluster topology information XXX */
#define MAXCLUSTERS 1
#define myclusterid 0
#define CLUSTER_OF(obj) 0

/*
 * Two-level object cache consisting of NUMA cluster-level depots of
 * fully loaded or completely empty magazines and cpu-level caches of
 * individual objects.
 */
struct objcache {
	char		*name;	/* strdup'd with M_TEMP in objcache_create() */

	/* object constructor and destructor from blank storage */
	objcache_ctor_fn *ctor;
	objcache_dtor_fn *dtor;
	void		*private;	/* opaque cookie passed to ctor/dtor */

	/* interface to underlying allocator */
	objcache_alloc_fn *alloc;
	objcache_free_fn *free;
	void		*allocator_args;

	SLIST_ENTRY(objcache) oc_next;	/* entry on the allobjcaches list */

	/* NUMA-cluster level caches */
	struct magazinedepot depot[MAXCLUSTERS];

	struct percpu_objcache cache_percpu[];	/* per-cpu caches */
};

/* Global list of all object caches, protected by objcachelist_token. */
static struct lwkt_token objcachelist_token;
static SLIST_HEAD(objcachelist, objcache) allobjcaches;

/*
 * Allocate a zeroed, empty magazine with room for 'capacity' object
 * pointers.  The magazine itself is never returned to the caller of the
 * objcache API; it only circulates between the per-cpu caches and the
 * depot.
 */
static struct magazine *
mag_alloc(int capacity)
{
	struct magazine *mag;

	mag = malloc(__offsetof(struct magazine, objects[capacity]),
			M_OBJMAG, M_INTWAIT | M_ZERO);
	mag->capacity = capacity;
	mag->rounds = 0;
	return (mag);
}

/*
 * Create an object cache.
154 */ 155 struct objcache * 156 objcache_create(char *name, int cluster_limit, int mag_capacity, 157 objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private, 158 objcache_alloc_fn *alloc, objcache_free_fn *free, 159 void *allocator_args) 160 { 161 struct objcache *oc; 162 struct magazinedepot *depot; 163 lwkt_tokref olock; 164 int cpuid; 165 166 /* allocate object cache structure */ 167 oc = malloc(__offsetof(struct objcache, cache_percpu[ncpus]), 168 M_OBJCACHE, M_WAITOK | M_ZERO); 169 oc->name = strdup(name, M_TEMP); 170 oc->ctor = ctor; 171 oc->dtor = dtor; 172 oc->private = private; 173 oc->free = free; 174 oc->allocator_args = allocator_args; 175 176 /* initialize depots */ 177 depot = &oc->depot[0]; 178 179 lwkt_token_init(&depot->token); 180 SLIST_INIT(&depot->fullmagazines); 181 SLIST_INIT(&depot->emptymagazines); 182 183 if (mag_capacity == 0) 184 mag_capacity = INITIAL_MAG_CAPACITY; 185 depot->magcapacity = mag_capacity; 186 187 /* 188 * The cluster_limit must be sufficient to have three magazines per 189 * cpu. 
190 */ 191 if (cluster_limit == 0) { 192 depot->unallocated_objects = -1; 193 } else { 194 if (cluster_limit < mag_capacity * ncpus * 3) 195 cluster_limit = mag_capacity * ncpus * 3; 196 depot->unallocated_objects = cluster_limit; 197 } 198 oc->alloc = alloc; 199 200 /* initialize per-cpu caches */ 201 for (cpuid = 0; cpuid < ncpus; cpuid++) { 202 struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid]; 203 204 cache_percpu->loaded_magazine = mag_alloc(mag_capacity); 205 cache_percpu->previous_magazine = mag_alloc(mag_capacity); 206 } 207 lwkt_gettoken(&olock, &objcachelist_token); 208 SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next); 209 lwkt_reltoken(&olock); 210 211 return (oc); 212 } 213 214 #define MAGAZINE_EMPTY(mag) (mag->rounds == 0) 215 #define MAGAZINE_NOTEMPTY(mag) (mag->rounds != 0) 216 #define MAGAZINE_FULL(mag) (mag->rounds == mag->capacity) 217 218 #define swap(x, y) ({ struct magazine *t = x; x = y; y = t; }) 219 220 /* 221 * Get an object from the object cache. 222 */ 223 void * 224 objcache_get(struct objcache *oc, int ocflags) 225 { 226 struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; 227 struct magazine *loadedmag; 228 struct magazine *emptymag; 229 void *obj; 230 struct magazinedepot *depot; 231 lwkt_tokref ilock; 232 233 crit_enter(); 234 ++cpucache->gets_cumulative; 235 236 retry: 237 /* 238 * Loaded magazine has an object. This is the hot path. 239 * It is lock-free and uses a critical section to block 240 * out interrupt handlers on the same processor. 241 */ 242 loadedmag = cpucache->loaded_magazine; 243 if (MAGAZINE_NOTEMPTY(loadedmag)) { 244 obj = loadedmag->objects[--loadedmag->rounds]; 245 crit_exit(); 246 return (obj); 247 } 248 249 /* Previous magazine has an object. 
*/ 250 if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) { 251 swap(cpucache->loaded_magazine, cpucache->previous_magazine); 252 loadedmag = cpucache->loaded_magazine; 253 obj = loadedmag->objects[--loadedmag->rounds]; 254 crit_exit(); 255 return (obj); 256 } 257 258 /* 259 * Both magazines empty. Get a full magazine from the depot and 260 * move one of the empty ones to the depot. Do this even if we 261 * block on the token to avoid a non-optimal corner case. 262 * 263 * Obtain the depot token. 264 */ 265 depot = &oc->depot[myclusterid]; 266 #if 0 267 if (!lwkt_trytoken(&ilock, &depot->token)) { 268 lwkt_gettoken(&ilock, &depot->token); 269 ++depot->contested; 270 } 271 #else 272 lwkt_gettoken(&ilock, &depot->token); 273 #endif 274 275 /* Check if depot has a full magazine. */ 276 if (!SLIST_EMPTY(&depot->fullmagazines)) { 277 emptymag = cpucache->previous_magazine; 278 cpucache->previous_magazine = cpucache->loaded_magazine; 279 cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines); 280 SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine); 281 282 /* 283 * Return emptymag to the depot. Due to blocking it may 284 * not be entirely empty. 285 */ 286 if (MAGAZINE_EMPTY(emptymag)) { 287 SLIST_INSERT_HEAD(&depot->emptymagazines, 288 emptymag, nextmagazine); 289 } else { 290 /* 291 * NOTE: magazine is not necessarily entirely full 292 */ 293 SLIST_INSERT_HEAD(&depot->fullmagazines, 294 emptymag, nextmagazine); 295 if (depot->waiting) 296 wakeup(depot); 297 } 298 lwkt_reltoken(&ilock); 299 goto retry; 300 } 301 302 /* 303 * The depot does not have any non-empty magazines. If we have 304 * not hit our object limit we can allocate a new object using 305 * the back-end allocator. 306 * 307 * note: unallocated_objects can be initialized to -1, which has 308 * the effect of removing any allocation limits. 
309 */ 310 if (depot->unallocated_objects) { 311 --depot->unallocated_objects; 312 lwkt_reltoken(&ilock); 313 crit_exit(); 314 315 obj = oc->alloc(oc->allocator_args, ocflags); 316 if (obj) { 317 if (oc->ctor(obj, oc->private, ocflags)) 318 return (obj); 319 oc->free(obj, oc->allocator_args); 320 lwkt_gettoken(&ilock, &depot->token); 321 ++depot->unallocated_objects; 322 if (depot->waiting) 323 wakeup(depot); 324 lwkt_reltoken(&ilock); 325 obj = NULL; 326 } 327 if (obj == NULL) { 328 crit_enter(); 329 /* 330 * makes debugging easier when gets_cumulative does 331 * not include gets_null. 332 */ 333 ++cpucache->gets_null; 334 --cpucache->gets_cumulative; 335 crit_exit(); 336 } 337 return(obj); 338 } 339 340 /* 341 * Otherwise block if allowed to. 342 */ 343 if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) { 344 ++cpucache->waiting; 345 ++depot->waiting; 346 tsleep(depot, PCATCH, "objcache_get", 0); 347 --cpucache->waiting; 348 --depot->waiting; 349 lwkt_reltoken(&ilock); 350 goto retry; 351 } 352 353 /* 354 * Otherwise fail 355 */ 356 ++cpucache->gets_null; 357 --cpucache->gets_cumulative; 358 crit_exit(); 359 lwkt_reltoken(&ilock); 360 return (NULL); 361 } 362 363 /* 364 * Wrapper for malloc allocation routines. 365 */ 366 void * 367 objcache_malloc_alloc(void *allocator_args, int ocflags) 368 { 369 struct objcache_malloc_args *alloc_args = allocator_args; 370 371 return (malloc(alloc_args->objsize, alloc_args->mtype, 372 ocflags & OC_MFLAGS)); 373 } 374 375 void 376 objcache_malloc_free(void *obj, void *allocator_args) 377 { 378 struct objcache_malloc_args *alloc_args = allocator_args; 379 380 free(obj, alloc_args->mtype); 381 } 382 383 /* 384 * Wrapper for allocation policies that pre-allocate at initialization time 385 * and don't do run-time allocation. 
 */
void *
objcache_nop_alloc(void *allocator_args, int ocflags)
{
	return (NULL);
}

void
objcache_nop_free(void *obj, void *allocator_args)
{
}

/*
 * Return an object to the object cache.
 */
void
objcache_put(struct objcache *oc, void *obj)
{
	struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
	struct magazine *loadedmag;
	struct magazinedepot *depot;
	lwkt_tokref ilock;

	crit_enter();
	++cpucache->puts_cumulative;

	/* CLUSTER_OF() is currently hardwired to 0, so this never fires */
	if (CLUSTER_OF(obj) != myclusterid) {
#ifdef notyet
		/* use lazy IPI to send object to owning cluster XXX todo */
		++cpucache->puts_othercluster;
		crit_exit();
		return;
#endif
	}

retry:
	/*
	 * Free slot available in loaded magazine.  This is the hot path.
	 * It is lock-free and uses a critical section to block out interrupt
	 * handlers on the same processor.
	 */
	loadedmag = cpucache->loaded_magazine;
	if (!MAGAZINE_FULL(loadedmag)) {
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}

	/*
	 * Current magazine full, but previous magazine has room.  XXX
	 */
	if (!MAGAZINE_FULL(cpucache->previous_magazine)) {
		swap(cpucache->loaded_magazine, cpucache->previous_magazine);
		loadedmag = cpucache->loaded_magazine;
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}

	/*
	 * Both magazines full.  Get an empty magazine from the depot and
	 * move a full loaded magazine to the depot.  Even though the
	 * magazine may wind up with space available after we block on
	 * the token, we still cycle it through to avoid the non-optimal
	 * corner-case.
	 *
	 * Obtain the depot token.
	 */
	depot = &oc->depot[myclusterid];
#if 0
	if (!lwkt_trytoken(&ilock, &depot->token)) {
		lwkt_gettoken(&ilock, &depot->token);
		++depot->contested;
	}
#else
	lwkt_gettoken(&ilock, &depot->token);
#endif

	/*
	 * If an empty magazine is available in the depot, cycle it
	 * through and retry.
	 */
	if (!SLIST_EMPTY(&depot->emptymagazines)) {
		loadedmag = cpucache->previous_magazine;
		cpucache->previous_magazine = cpucache->loaded_magazine;
		cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines);
		SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine);

		/*
		 * Return loadedmag to the depot.  Due to blocking it may
		 * not be entirely full and could even be empty.
		 */
		if (MAGAZINE_EMPTY(loadedmag)) {
			SLIST_INSERT_HEAD(&depot->emptymagazines,
					  loadedmag, nextmagazine);
		} else {
			SLIST_INSERT_HEAD(&depot->fullmagazines,
					  loadedmag, nextmagazine);
			if (depot->waiting)
				wakeup(depot);
		}
		lwkt_reltoken(&ilock);
		goto retry;
	}

	/*
	 * An empty mag is not available.  This is a corner case which can
	 * occur due to cpus holding partially full magazines.  Do not try
	 * to allocate a mag, just free the object.  The freed slot is
	 * credited back to unallocated_objects and waiters are woken.
	 */
	++depot->unallocated_objects;
	if (depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
	crit_exit();
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}

/*
 * The object is being put back into the cache, but the caller has
 * indicated that the object is not in any shape to be reused and should
 * be dtor'd immediately.
 */
void
objcache_dtor(struct objcache *oc, void *obj)
{
	struct magazinedepot *depot;
	lwkt_tokref ilock;

	depot = &oc->depot[myclusterid];
#if 0
	if (!lwkt_trytoken(&ilock, &depot->token)) {
		lwkt_gettoken(&ilock, &depot->token);
		++depot->contested;
	}
#else
	lwkt_gettoken(&ilock, &depot->token);
#endif
	/* credit the freed slot back to the depot and wake any waiters */
	++depot->unallocated_objects;
	if (depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}

/*
 * Utility routine for objects that don't require any de-construction.
 */
void
null_dtor(void *obj, void *private)
{
	/* do nothing */
}

/*
 * De-construct and de-allocate objects in a magazine.
 * Returns the number of objects freed.
 * Does not de-allocate the magazine itself.
 *
 * The critical section is dropped around each dtor/free call (they may
 * block), then re-entered before the magazine is examined again.
 * NOTE(review): while the crit section is dropped an interrupt on this
 * cpu could manipulate a per-cpu magazine being purged — confirm callers
 * only pass magazines that are no longer reachable, or accept the race.
 */
static int
mag_purge(struct objcache *oc, struct magazine *mag)
{
	int ndeleted;
	void *obj;

	ndeleted = 0;
	crit_enter();
	while (mag->rounds) {
		obj = mag->objects[--mag->rounds];
		crit_exit();
		oc->dtor(obj, oc->private);
		oc->free(obj, oc->allocator_args);
		++ndeleted;
		crit_enter();
	}
	crit_exit();
	return(ndeleted);
}

/*
 * De-allocate all magazines in a magazine list.
 * Returns number of objects de-allocated.
 *
 * If purgeall is FALSE, stop after the first magazine that yields at
 * least one object (used by objcache_reclaimlist() to free "some"
 * memory rather than everything).
 */
static int
maglist_purge(struct objcache *oc, struct magazinelist *maglist,
	      boolean_t purgeall)
{
	struct magazine *mag;
	int ndeleted = 0;

	/* can't use SLIST_FOREACH because blocking releases the depot token */
	while ((mag = SLIST_FIRST(maglist))) {
		SLIST_REMOVE_HEAD(maglist, nextmagazine);
		ndeleted += mag_purge(oc, mag);		/* could block! */
		free(mag, M_OBJMAG);			/* could block! */
		if (!purgeall && ndeleted > 0)
			break;
	}
	return (ndeleted);
}

/*
 * De-allocates all magazines on the full and empty magazine lists.
 */
static void
depot_purge(struct magazinedepot *depot, struct objcache *oc)
{
	/* caller must hold (or not yet need) the depot token; see callers */
	depot->unallocated_objects +=
		maglist_purge(oc, &depot->fullmagazines, TRUE);
	depot->unallocated_objects +=
		maglist_purge(oc, &depot->emptymagazines, TRUE);
	if (depot->unallocated_objects && depot->waiting)
		wakeup(depot);
}

#ifdef notneeded
void
objcache_reclaim(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu = &oc->cache_percpu[myclusterid];
	struct magazinedepot *depot = &oc->depot[myclusterid];

	mag_purge(oc, cache_percpu->loaded_magazine);
	mag_purge(oc, cache_percpu->previous_magazine);

	/* XXX need depot token */
	depot_purge(depot, oc);
}
#endif

/*
 * Try to free up some memory.  Return as soon as some free memory is found.
 * For each object cache on the reclaim list, first try the current per-cpu
 * cache, then the full magazine depot.
 * Returns TRUE if any objects were freed, FALSE otherwise.
 * (ocflags is currently unused.)
 */
boolean_t
objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags)
{
	struct objcache *oc;
	struct percpu_objcache *cpucache;
	struct magazinedepot *depot;
	lwkt_tokref ilock;
	int i, ndel;

	for (i = 0; i < nlist; i++) {
		oc = oclist[i];
		cpucache = &oc->cache_percpu[mycpuid];
		depot = &oc->depot[myclusterid];

		/* purge this cpu's magazines first; credit slots to depot */
		crit_enter();
		if ((ndel = mag_purge(oc, cpucache->loaded_magazine)) > 0 ||
		    (ndel = mag_purge(oc, cpucache->previous_magazine)) > 0) {
			crit_exit();
			lwkt_gettoken(&ilock, &depot->token);
			depot->unallocated_objects += ndel;
			if (depot->unallocated_objects && depot->waiting)
				wakeup(depot);
			lwkt_reltoken(&ilock);
			return (TRUE);
		}
		crit_exit();
		/* per-cpu magazines were empty; try the depot's full list */
		lwkt_gettoken(&ilock, &depot->token);
		if ((ndel =
		     maglist_purge(oc, &depot->fullmagazines, FALSE)) > 0) {
			depot->unallocated_objects += ndel;
			if (depot->unallocated_objects && depot->waiting)
				wakeup(depot);
			lwkt_reltoken(&ilock);
			return (TRUE);
		}
		lwkt_reltoken(&ilock);
	}
	return (FALSE);
}

/*
 * Destroy an object cache.  Must have no existing references.
 * XXX Not clear this is a useful API function.
 */
void
objcache_destroy(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu;
	int clusterid, cpuid;

	/* XXX need depot token? */
	for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++)
		depot_purge(&oc->depot[clusterid], oc);

	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		cache_percpu = &oc->cache_percpu[cpuid];

		mag_purge(oc, cache_percpu->loaded_magazine);
		free(cache_percpu->loaded_magazine, M_OBJMAG);

		mag_purge(oc, cache_percpu->previous_magazine);
		free(cache_percpu->previous_magazine, M_OBJMAG);
	}

	/* name was strdup'd with M_TEMP in objcache_create() */
	free(oc->name, M_TEMP);
	free(oc, M_OBJCACHE);
}

#if 0
/*
 * Populate the per-cluster depot with elements from a linear block
 * of memory.  Must be called individually for each cluster.
 * Populated depots should not be destroyed.
 */
void
objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size)
{
	char *p = base;
	char *end = (char *)base + (nelts * size);
	struct magazinedepot *depot = &oc->depot[myclusterid];
	lwkt_tokref ilock;
	/*
	 * capacity == 0 makes the sentinel appear FULL on the first loop
	 * iteration, forcing a real magazine to be allocated immediately.
	 */
	struct magazine sentinelfullmag = { 0, 0 };
	struct magazine *emptymag = &sentinelfullmag;

	lwkt_gettoken(&ilock, &depot->token);
	while (p < end) {
		if (MAGAZINE_FULL(emptymag)) {
			emptymag = mag_alloc(depot->magcapacity);
			SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
					  nextmagazine);
		}
		emptymag->objects[emptymag->rounds++] = p;
		p += size;
	}
	depot->unallocated_objects += nelts;
	if (depot->unallocated_objects && depot->waiting)
		wakeup(depot);
	lwkt_reltoken(&ilock);
}
#endif

#if 0
/*
 * Check depot contention once a minute.
 * 2 contested locks per second allowed.
 */
static int objcache_rebalance_period;
static const int objcache_contention_rate = 120;
static struct callout objcache_callout;

/* upper bound on magazine growth performed by objcache_timer() */
#define MAXMAGSIZE 512

/*
 * Check depot contention and increase magazine size if necessary.
 * Runs periodically via objcache_callout; doubles the magazine size of
 * any depot whose contention count exceeded the threshold during the
 * last period, after purging its existing (now wrongly-sized) magazines.
 */
static void
objcache_timer(void *dummy)
{
	struct objcache *oc;
	struct magazinedepot *depot;
	lwkt_tokref olock, dlock;

	lwkt_gettoken(&olock, &objcachelist_token);
	SLIST_FOREACH(oc, &allobjcaches, oc_next) {
		depot = &oc->depot[myclusterid];
		if (depot->magcapacity < MAXMAGSIZE) {
			if (depot->contested > objcache_contention_rate) {
				lwkt_gettoken(&dlock, &depot->token);
				depot_purge(depot, oc);
				depot->magcapacity *= 2;
				lwkt_reltoken(&dlock);
				printf("objcache_timer: increasing cache %s"
				       " magsize to %d, contested %d times\n",
				       oc->name, depot->magcapacity,
				       depot->contested);
			}
			depot->contested = 0;
		}
	}
	lwkt_reltoken(&olock);

	/* re-arm for the next period */
	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
}

#endif

/*
 * Subsystem initialization, run via SYSINIT at SI_SUB_CPU time.
 * Only the global cache-list token is initialized; the rebalance
 * callout is currently disabled (#if 0).
 */
static void
objcache_init(void)
{
	lwkt_token_init(&objcachelist_token);
#if 0
	callout_init(&objcache_callout);
	objcache_rebalance_period = 60 * hz;
	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
#endif
}
SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0);