/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
31 * 32 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.7 2006/06/01 06:10:50 dillon Exp $ 33 */ 34 35 #include <sys/param.h> 36 #include <sys/kernel.h> 37 #include <sys/systm.h> 38 #include <sys/callout.h> 39 #include <sys/globaldata.h> 40 #include <sys/malloc.h> 41 #include <sys/queue.h> 42 #include <sys/objcache.h> 43 #include <sys/thread.h> 44 #include <sys/thread2.h> 45 46 static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache"); 47 static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine"); 48 49 #define INITIAL_MAG_CAPACITY 256 50 51 struct magazine { 52 int rounds; 53 int capacity; 54 SLIST_ENTRY(magazine) nextmagazine; 55 void *objects[]; 56 }; 57 58 SLIST_HEAD(magazinelist, magazine); 59 60 /* 61 * per-cluster cache of magazines 62 * All fields in this structure are protected by the token. 63 */ 64 struct magazinedepot { 65 /* 66 * The per-cpu object caches only exchanges completely full or 67 * completely empty magazines with the depot layer, so only have 68 * to cache these two types of magazines. 69 */ 70 struct magazinelist fullmagazines; 71 struct magazinelist emptymagazines; 72 int magcapacity; 73 74 /* protect this structure */ 75 struct lwkt_token token; 76 77 /* magazines not yet allocated towards limit */ 78 int unallocated_objects; 79 80 /* infrequently used fields */ 81 int waiting; /* waiting for another cpu to 82 * return a full magazine to 83 * the depot */ 84 int contested; /* depot contention count */ 85 }; 86 87 /* 88 * per-cpu object cache 89 * All fields in this structure are protected by crit_enter(). 
90 */ 91 struct percpu_objcache { 92 struct magazine *loaded_magazine; /* active magazine */ 93 struct magazine *previous_magazine; /* backup magazine */ 94 95 /* statistics */ 96 int gets_cumulative; /* total calls to get */ 97 int gets_null; /* objcache_get returned NULL */ 98 int puts_cumulative; /* total calls to put */ 99 int puts_othercluster; /* returned to other cluster */ 100 101 /* infrequently used fields */ 102 int waiting; /* waiting for a thread on this cpu to 103 * return an obj to the per-cpu cache */ 104 }; 105 106 /* only until we have NUMA cluster topology information XXX */ 107 #define MAXCLUSTERS 1 108 #define myclusterid 0 109 #define CLUSTER_OF(obj) 0 110 111 /* 112 * Two-level object cache consisting of NUMA cluster-level depots of 113 * fully loaded or completely empty magazines and cpu-level caches of 114 * individual objects. 115 */ 116 struct objcache { 117 char *name; 118 119 /* object constructor and destructor from blank storage */ 120 objcache_ctor_fn *ctor; 121 objcache_dtor_fn *dtor; 122 void *private; 123 124 /* interface to underlying allocator */ 125 objcache_alloc_fn *alloc; 126 objcache_free_fn *free; 127 void *allocator_args; 128 129 SLIST_ENTRY(objcache) oc_next; 130 131 /* NUMA-cluster level caches */ 132 struct magazinedepot depot[MAXCLUSTERS]; 133 134 struct percpu_objcache cache_percpu[]; /* per-cpu caches */ 135 }; 136 137 static struct lwkt_token objcachelist_token; 138 static SLIST_HEAD(objcachelist, objcache) allobjcaches; 139 140 static struct magazine * 141 mag_alloc(int capacity) 142 { 143 struct magazine *mag; 144 145 mag = malloc(__offsetof(struct magazine, objects[capacity]), 146 M_OBJMAG, M_INTWAIT | M_ZERO); 147 mag->capacity = capacity; 148 mag->rounds = 0; 149 return (mag); 150 } 151 152 /* 153 * Create an object cache. 
154 */ 155 struct objcache * 156 objcache_create(const char *name, int cluster_limit, int mag_capacity, 157 objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private, 158 objcache_alloc_fn *alloc, objcache_free_fn *free, 159 void *allocator_args) 160 { 161 struct objcache *oc; 162 struct magazinedepot *depot; 163 lwkt_tokref olock; 164 int cpuid; 165 166 /* allocate object cache structure */ 167 oc = malloc(__offsetof(struct objcache, cache_percpu[ncpus]), 168 M_OBJCACHE, M_WAITOK | M_ZERO); 169 oc->name = strdup(name, M_TEMP); 170 oc->ctor = ctor; 171 oc->dtor = dtor; 172 oc->private = private; 173 oc->free = free; 174 oc->allocator_args = allocator_args; 175 176 /* initialize depots */ 177 depot = &oc->depot[0]; 178 179 lwkt_token_init(&depot->token); 180 SLIST_INIT(&depot->fullmagazines); 181 SLIST_INIT(&depot->emptymagazines); 182 183 if (mag_capacity == 0) 184 mag_capacity = INITIAL_MAG_CAPACITY; 185 depot->magcapacity = mag_capacity; 186 187 /* 188 * The cluster_limit must be sufficient to have three magazines per 189 * cpu. 
190 */ 191 if (cluster_limit == 0) { 192 depot->unallocated_objects = -1; 193 } else { 194 if (cluster_limit < mag_capacity * ncpus * 3) 195 cluster_limit = mag_capacity * ncpus * 3; 196 depot->unallocated_objects = cluster_limit; 197 } 198 oc->alloc = alloc; 199 200 /* initialize per-cpu caches */ 201 for (cpuid = 0; cpuid < ncpus; cpuid++) { 202 struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid]; 203 204 cache_percpu->loaded_magazine = mag_alloc(mag_capacity); 205 cache_percpu->previous_magazine = mag_alloc(mag_capacity); 206 } 207 lwkt_gettoken(&olock, &objcachelist_token); 208 SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next); 209 lwkt_reltoken(&olock); 210 211 return (oc); 212 } 213 214 struct objcache * 215 objcache_create_simple(malloc_type_t mtype, size_t objsize) 216 { 217 struct objcache_malloc_args *margs; 218 struct objcache *oc; 219 220 margs = malloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO); 221 margs->objsize = objsize; 222 margs->mtype = mtype; 223 oc = objcache_create(mtype->ks_shortdesc, 0, 0, 224 null_ctor, null_dtor, NULL, 225 objcache_malloc_alloc, objcache_malloc_free, 226 margs); 227 return (oc); 228 } 229 230 #define MAGAZINE_EMPTY(mag) (mag->rounds == 0) 231 #define MAGAZINE_NOTEMPTY(mag) (mag->rounds != 0) 232 #define MAGAZINE_FULL(mag) (mag->rounds == mag->capacity) 233 234 #define swap(x, y) ({ struct magazine *t = x; x = y; y = t; }) 235 236 /* 237 * Get an object from the object cache. 238 */ 239 void * 240 objcache_get(struct objcache *oc, int ocflags) 241 { 242 struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; 243 struct magazine *loadedmag; 244 struct magazine *emptymag; 245 void *obj; 246 struct magazinedepot *depot; 247 lwkt_tokref ilock; 248 249 crit_enter(); 250 ++cpucache->gets_cumulative; 251 252 retry: 253 /* 254 * Loaded magazine has an object. This is the hot path. 255 * It is lock-free and uses a critical section to block 256 * out interrupt handlers on the same processor. 
257 */ 258 loadedmag = cpucache->loaded_magazine; 259 if (MAGAZINE_NOTEMPTY(loadedmag)) { 260 obj = loadedmag->objects[--loadedmag->rounds]; 261 crit_exit(); 262 return (obj); 263 } 264 265 /* Previous magazine has an object. */ 266 if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) { 267 swap(cpucache->loaded_magazine, cpucache->previous_magazine); 268 loadedmag = cpucache->loaded_magazine; 269 obj = loadedmag->objects[--loadedmag->rounds]; 270 crit_exit(); 271 return (obj); 272 } 273 274 /* 275 * Both magazines empty. Get a full magazine from the depot and 276 * move one of the empty ones to the depot. 277 * 278 * Obtain the depot token. 279 */ 280 depot = &oc->depot[myclusterid]; 281 lwkt_gettoken(&ilock, &depot->token); 282 283 /* 284 * We might have blocked obtaining the token, we must recheck 285 * the cpucache before potentially falling through to the blocking 286 * code or we might deadlock the tsleep() on a low-memory machine. 287 */ 288 if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) || 289 MAGAZINE_NOTEMPTY(cpucache->previous_magazine) 290 ) { 291 lwkt_reltoken(&ilock); 292 goto retry; 293 } 294 295 /* Check if depot has a full magazine. */ 296 if (!SLIST_EMPTY(&depot->fullmagazines)) { 297 emptymag = cpucache->previous_magazine; 298 cpucache->previous_magazine = cpucache->loaded_magazine; 299 cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines); 300 SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine); 301 302 /* 303 * Return emptymag to the depot. 304 */ 305 KKASSERT(MAGAZINE_EMPTY(emptymag)); 306 SLIST_INSERT_HEAD(&depot->emptymagazines, 307 emptymag, nextmagazine); 308 lwkt_reltoken(&ilock); 309 goto retry; 310 } 311 312 /* 313 * The depot does not have any non-empty magazines. If we have 314 * not hit our object limit we can allocate a new object using 315 * the back-end allocator. 316 * 317 * note: unallocated_objects can be initialized to -1, which has 318 * the effect of removing any allocation limits. 
319 */ 320 if (depot->unallocated_objects) { 321 --depot->unallocated_objects; 322 lwkt_reltoken(&ilock); 323 crit_exit(); 324 325 obj = oc->alloc(oc->allocator_args, ocflags); 326 if (obj) { 327 if (oc->ctor(obj, oc->private, ocflags)) 328 return (obj); 329 oc->free(obj, oc->allocator_args); 330 lwkt_gettoken(&ilock, &depot->token); 331 ++depot->unallocated_objects; 332 if (depot->waiting) 333 wakeup(depot); 334 lwkt_reltoken(&ilock); 335 obj = NULL; 336 } 337 if (obj == NULL) { 338 crit_enter(); 339 /* 340 * makes debugging easier when gets_cumulative does 341 * not include gets_null. 342 */ 343 ++cpucache->gets_null; 344 --cpucache->gets_cumulative; 345 crit_exit(); 346 } 347 return(obj); 348 } 349 350 /* 351 * Otherwise block if allowed to. 352 */ 353 if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) { 354 ++cpucache->waiting; 355 ++depot->waiting; 356 tsleep(depot, 0, "objcache_get", 0); 357 --cpucache->waiting; 358 --depot->waiting; 359 lwkt_reltoken(&ilock); 360 goto retry; 361 } 362 363 /* 364 * Otherwise fail 365 */ 366 ++cpucache->gets_null; 367 --cpucache->gets_cumulative; 368 crit_exit(); 369 lwkt_reltoken(&ilock); 370 return (NULL); 371 } 372 373 /* 374 * Wrapper for malloc allocation routines. 375 */ 376 void * 377 objcache_malloc_alloc(void *allocator_args, int ocflags) 378 { 379 struct objcache_malloc_args *alloc_args = allocator_args; 380 381 return (malloc(alloc_args->objsize, alloc_args->mtype, 382 ocflags & OC_MFLAGS)); 383 } 384 385 void 386 objcache_malloc_free(void *obj, void *allocator_args) 387 { 388 struct objcache_malloc_args *alloc_args = allocator_args; 389 390 free(obj, alloc_args->mtype); 391 } 392 393 /* 394 * Wrapper for allocation policies that pre-allocate at initialization time 395 * and don't do run-time allocation. 
396 */ 397 void * 398 objcache_nop_alloc(void *allocator_args, int ocflags) 399 { 400 return (NULL); 401 } 402 403 void 404 objcache_nop_free(void *obj, void *allocator_args) 405 { 406 } 407 408 /* 409 * Return an object to the object cache. 410 */ 411 void 412 objcache_put(struct objcache *oc, void *obj) 413 { 414 struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid]; 415 struct magazine *loadedmag; 416 struct magazinedepot *depot; 417 lwkt_tokref ilock; 418 419 crit_enter(); 420 ++cpucache->puts_cumulative; 421 422 if (CLUSTER_OF(obj) != myclusterid) { 423 #ifdef notyet 424 /* use lazy IPI to send object to owning cluster XXX todo */ 425 ++cpucache->puts_othercluster; 426 crit_exit(); 427 return; 428 #endif 429 } 430 431 retry: 432 /* 433 * Free slot available in loaded magazine. This is the hot path. 434 * It is lock-free and uses a critical section to block out interrupt 435 * handlers on the same processor. 436 */ 437 loadedmag = cpucache->loaded_magazine; 438 if (!MAGAZINE_FULL(loadedmag)) { 439 loadedmag->objects[loadedmag->rounds++] = obj; 440 if (cpucache->waiting) 441 wakeup_mycpu(&oc->depot[myclusterid]); 442 crit_exit(); 443 return; 444 } 445 446 /* 447 * Current magazine full, but previous magazine has room. XXX 448 */ 449 if (!MAGAZINE_FULL(cpucache->previous_magazine)) { 450 swap(cpucache->loaded_magazine, cpucache->previous_magazine); 451 loadedmag = cpucache->loaded_magazine; 452 loadedmag->objects[loadedmag->rounds++] = obj; 453 if (cpucache->waiting) 454 wakeup_mycpu(&oc->depot[myclusterid]); 455 crit_exit(); 456 return; 457 } 458 459 /* 460 * Both magazines full. Get an empty magazine from the depot and 461 * move a full loaded magazine to the depot. Even though the 462 * magazine may wind up with space available after we block on 463 * the token, we still cycle it through to avoid the non-optimal 464 * corner-case. 465 * 466 * Obtain the depot token. 
467 */ 468 depot = &oc->depot[myclusterid]; 469 lwkt_gettoken(&ilock, &depot->token); 470 471 /* 472 * If an empty magazine is available in the depot, cycle it 473 * through and retry. 474 */ 475 if (!SLIST_EMPTY(&depot->emptymagazines)) { 476 loadedmag = cpucache->previous_magazine; 477 cpucache->previous_magazine = cpucache->loaded_magazine; 478 cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines); 479 SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine); 480 481 /* 482 * Return loadedmag to the depot. Due to blocking it may 483 * not be entirely full and could even be empty. 484 */ 485 if (MAGAZINE_EMPTY(loadedmag)) { 486 SLIST_INSERT_HEAD(&depot->emptymagazines, 487 loadedmag, nextmagazine); 488 } else { 489 SLIST_INSERT_HEAD(&depot->fullmagazines, 490 loadedmag, nextmagazine); 491 if (depot->waiting) 492 wakeup(depot); 493 } 494 lwkt_reltoken(&ilock); 495 goto retry; 496 } 497 498 /* 499 * An empty mag is not available. This is a corner case which can 500 * occur due to cpus holding partially full magazines. Do not try 501 * to allocate a mag, just free the object. 502 */ 503 ++depot->unallocated_objects; 504 if (depot->waiting) 505 wakeup(depot); 506 lwkt_reltoken(&ilock); 507 crit_exit(); 508 oc->dtor(obj, oc->private); 509 oc->free(obj, oc->allocator_args); 510 } 511 512 /* 513 * The object is being put back into the cache, but the caller has 514 * indicated that the object is not in any shape to be reused and should 515 * be dtor'd immediately. 516 */ 517 void 518 objcache_dtor(struct objcache *oc, void *obj) 519 { 520 struct magazinedepot *depot; 521 lwkt_tokref ilock; 522 523 depot = &oc->depot[myclusterid]; 524 lwkt_gettoken(&ilock, &depot->token); 525 ++depot->unallocated_objects; 526 if (depot->waiting) 527 wakeup(depot); 528 lwkt_reltoken(&ilock); 529 oc->dtor(obj, oc->private); 530 oc->free(obj, oc->allocator_args); 531 } 532 533 /* 534 * Utility routine for objects that don't require any de-construction. 
535 */ 536 void 537 null_dtor(void *obj, void *private) 538 { 539 /* do nothing */ 540 } 541 542 boolean_t 543 null_ctor(void *obj, void *private, int ocflags) 544 { 545 return TRUE; 546 } 547 548 /* 549 * De-construct and de-allocate objects in a magazine. 550 * Returns the number of objects freed. 551 * Does not de-allocate the magazine itself. 552 */ 553 static int 554 mag_purge(struct objcache *oc, struct magazine *mag) 555 { 556 int ndeleted; 557 void *obj; 558 559 ndeleted = 0; 560 crit_enter(); 561 while (mag->rounds) { 562 obj = mag->objects[--mag->rounds]; 563 crit_exit(); 564 oc->dtor(obj, oc->private); 565 oc->free(obj, oc->allocator_args); 566 ++ndeleted; 567 crit_enter(); 568 } 569 crit_exit(); 570 return(ndeleted); 571 } 572 573 /* 574 * De-allocate all magazines in a magazine list. 575 * Returns number of objects de-allocated. 576 */ 577 static int 578 maglist_purge(struct objcache *oc, struct magazinelist *maglist, 579 boolean_t purgeall) 580 { 581 struct magazine *mag; 582 int ndeleted = 0; 583 584 /* can't use SLIST_FOREACH because blocking releases the depot token */ 585 while ((mag = SLIST_FIRST(maglist))) { 586 SLIST_REMOVE_HEAD(maglist, nextmagazine); 587 ndeleted += mag_purge(oc, mag); /* could block! */ 588 free(mag, M_OBJMAG); /* could block! */ 589 if (!purgeall && ndeleted > 0) 590 break; 591 } 592 return (ndeleted); 593 } 594 595 /* 596 * De-allocates all magazines on the full and empty magazine lists. 
597 */ 598 static void 599 depot_purge(struct magazinedepot *depot, struct objcache *oc) 600 { 601 depot->unallocated_objects += 602 maglist_purge(oc, &depot->fullmagazines, TRUE); 603 depot->unallocated_objects += 604 maglist_purge(oc, &depot->emptymagazines, TRUE); 605 if (depot->unallocated_objects && depot->waiting) 606 wakeup(depot); 607 } 608 609 #ifdef notneeded 610 void 611 objcache_reclaim(struct objcache *oc) 612 { 613 struct percpu_objcache *cache_percpu = &oc->cache_percpu[myclusterid]; 614 struct magazinedepot *depot = &oc->depot[myclusterid]; 615 616 mag_purge(oc, cache_percpu->loaded_magazine); 617 mag_purge(oc, cache_percpu->previous_magazine); 618 619 /* XXX need depot token */ 620 depot_purge(depot, oc); 621 } 622 #endif 623 624 /* 625 * Try to free up some memory. Return as soon as some free memory found. 626 * For each object cache on the reclaim list, first try the current per-cpu 627 * cache, then the full magazine depot. 628 */ 629 boolean_t 630 objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags) 631 { 632 struct objcache *oc; 633 struct percpu_objcache *cpucache; 634 struct magazinedepot *depot; 635 lwkt_tokref ilock; 636 int i, ndel; 637 638 for (i = 0; i < nlist; i++) { 639 oc = oclist[i]; 640 cpucache = &oc->cache_percpu[mycpuid]; 641 depot = &oc->depot[myclusterid]; 642 643 crit_enter(); 644 if ((ndel = mag_purge(oc, cpucache->loaded_magazine)) > 0 || 645 (ndel = mag_purge(oc, cpucache->previous_magazine)) > 0) { 646 crit_exit(); 647 lwkt_gettoken(&ilock, &depot->token); 648 depot->unallocated_objects += ndel; 649 if (depot->unallocated_objects && depot->waiting) 650 wakeup(depot); 651 lwkt_reltoken(&ilock); 652 return (TRUE); 653 } 654 crit_exit(); 655 lwkt_gettoken(&ilock, &depot->token); 656 if ((ndel = 657 maglist_purge(oc, &depot->fullmagazines, FALSE)) > 0) { 658 depot->unallocated_objects += ndel; 659 if (depot->unallocated_objects && depot->waiting) 660 wakeup(depot); 661 lwkt_reltoken(&ilock); 662 return 
(TRUE); 663 } 664 lwkt_reltoken(&ilock); 665 } 666 return (FALSE); 667 } 668 669 /* 670 * Destroy an object cache. Must have no existing references. 671 * XXX Not clear this is a useful API function. 672 */ 673 void 674 objcache_destroy(struct objcache *oc) 675 { 676 struct percpu_objcache *cache_percpu; 677 int clusterid, cpuid; 678 679 /* XXX need depot token? */ 680 for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) 681 depot_purge(&oc->depot[clusterid], oc); 682 683 for (cpuid = 0; cpuid < ncpus; cpuid++) { 684 cache_percpu = &oc->cache_percpu[cpuid]; 685 686 mag_purge(oc, cache_percpu->loaded_magazine); 687 free(cache_percpu->loaded_magazine, M_OBJMAG); 688 689 mag_purge(oc, cache_percpu->previous_magazine); 690 free(cache_percpu->previous_magazine, M_OBJMAG); 691 } 692 693 free(oc->name, M_TEMP); 694 free(oc, M_OBJCACHE); 695 } 696 697 #if 0 698 /* 699 * Populate the per-cluster depot with elements from a linear block 700 * of memory. Must be called for individually for each cluster. 701 * Populated depots should not be destroyed. 702 */ 703 void 704 objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size) 705 { 706 char *p = base; 707 char *end = (char *)base + (nelts * size); 708 struct magazinedepot *depot = &oc->depot[myclusterid]; 709 lwkt_tokref ilock; 710 struct magazine sentinelfullmag = { 0, 0 }; 711 struct magazine *emptymag = &sentinelfullmag; 712 713 lwkt_gettoken(&ilock, &depot->token); 714 while (p < end) { 715 if (MAGAZINE_FULL(emptymag)) { 716 emptymag = mag_alloc(depot->magcapacity); 717 SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag, 718 nextmagazine); 719 } 720 emptymag->objects[emptymag->rounds++] = p; 721 p += size; 722 } 723 depot->unallocated_objects += nelts; 724 if (depot->unallocated_objects && depot->waiting) 725 wakeup(depot); 726 lwkt_reltoken(&ilock); 727 } 728 #endif 729 730 #if 0 731 /* 732 * Check depot contention once a minute. 733 * 2 contested locks per second allowed. 
734 */ 735 static int objcache_rebalance_period; 736 static const int objcache_contention_rate = 120; 737 static struct callout objcache_callout; 738 739 #define MAXMAGSIZE 512 740 741 /* 742 * Check depot contention and increase magazine size if necessary. 743 */ 744 static void 745 objcache_timer(void *dummy) 746 { 747 struct objcache *oc; 748 struct magazinedepot *depot; 749 lwkt_tokref olock, dlock; 750 751 lwkt_gettoken(&olock, &objcachelist_token); 752 SLIST_FOREACH(oc, &allobjcaches, oc_next) { 753 depot = &oc->depot[myclusterid]; 754 if (depot->magcapacity < MAXMAGSIZE) { 755 if (depot->contested > objcache_contention_rate) { 756 lwkt_gettoken(&dlock, &depot->token); 757 depot_purge(depot, oc); 758 depot->magcapacity *= 2; 759 lwkt_reltoken(&dlock); 760 printf("objcache_timer: increasing cache %s" 761 " magsize to %d, contested %d times\n", 762 oc->name, depot->magcapacity, 763 depot->contested); 764 } 765 depot->contested = 0; 766 } 767 } 768 lwkt_reltoken(&olock); 769 770 callout_reset(&objcache_callout, objcache_rebalance_period, 771 objcache_timer, NULL); 772 } 773 774 #endif 775 776 static void 777 objcache_init(void) 778 { 779 lwkt_token_init(&objcachelist_token); 780 #if 0 781 callout_init(&objcache_callout); 782 objcache_rebalance_period = 60 * hz; 783 callout_reset(&objcache_callout, objcache_rebalance_period, 784 objcache_timer, NULL); 785 #endif 786 } 787 SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0); 788