/**
 * This module contains all functions related to an object's lifetime:
 * allocation, resizing, deallocation, and finalization.
 *
 * Copyright: Copyright Digital Mars 2000 - 2012.
 * License: Distributed under the
 *      $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
 *    (See accompanying file LICENSE)
 * Authors:   Walter Bright, Sean Kelly, Steven Schveighoffer
 * Source: $(DRUNTIMESRC src/rt/_lifetime.d)
 */

module rt.lifetime;

import core.stdc.stdlib;
import core.stdc.string;
import core.stdc.stdarg;
import core.bitop;
import core.memory;
debug(PRINTF) import core.stdc.stdio;
static import rt.tlsgc;

alias BlkInfo = GC.BlkInfo;
alias BlkAttr = GC.BlkAttr;
import core.exception : onOutOfMemoryError, onFinalizeError, onInvalidMemoryOperationError;

private
{
    // Optional user hook consulted during collection of class objects;
    // installed/read via rt_setCollectHandler / rt_getCollectHandler below.
    alias bool function(Object) CollectHandler;
    __gshared CollectHandler collectHandler = null;

    extern (C) void _d_monitordelete(Object h, bool det);

    // Layout constants for APPENDABLE array blocks.  See the long comment
    // above __setArrayAllocLength for the full description of the layout.
    enum : size_t
    {
        PAGESIZE = 4096,
        BIGLENGTHMASK = ~(PAGESIZE - 1),
        SMALLPAD = 1,                        // trailing 8-bit "used length" for blocks <= 256 bytes
        MEDPAD = ushort.sizeof,              // trailing 16-bit "used length" for blocks < PAGESIZE
        LARGEPREFIX = 16, // 16 bytes padding at the front of the array
        LARGEPAD = LARGEPREFIX + 1,          // front prefix plus one trailing sentinel byte
        MAXSMALLSIZE = 256-SMALLPAD,         // largest payload that still fits a "small" block
        MAXMEDSIZE = (PAGESIZE / 2) - MEDPAD // largest payload that still fits a "medium" block
    }
}

extern (C) void lifetime_init()
{
    // this is run before static ctors, so it is safe to modify immutables
}

/**
 * Allocate sz bytes from the GC heap with default attributes.
 */
extern (C) void* _d_allocmemory(size_t sz)
{
    return GC.malloc(sz);
}

/**
 * Allocate storage for a new instance of the class described by ci and copy
 * the class's static initializer into it.  COM classes come from the C heap
 * (they are reference counted, not collected); all others are GC-allocated.
 */
extern (C) Object _d_newclass(const ClassInfo ci)
{
    void* p;

    debug(PRINTF) printf("_d_newclass(ci = %p, %s)\n", ci, cast(char *)ci.name);
    if (ci.m_flags & TypeInfo_Class.ClassFlags.isCOMclass)
    {   /* COM objects are not garbage collected, they are reference counted
         * using AddRef() and Release().  They get free'd by C's free()
         * function called by Release() when Release()'s reference count goes
         * to zero.
         */
        p = malloc(ci.initializer.length);
        if (!p)
            onOutOfMemoryError();
    }
    else
    {
        // TODO: should this be + 1 to avoid having pointers to the next block?
        BlkAttr attr = BlkAttr.NONE;
        // extern(C++) classes don't have a classinfo pointer in their vtable so the GC can't finalize them
        if (ci.m_flags & TypeInfo_Class.ClassFlags.hasDtor
            && !(ci.m_flags & TypeInfo_Class.ClassFlags.isCPPclass))
            attr |= BlkAttr.FINALIZE;
        if (ci.m_flags & TypeInfo_Class.ClassFlags.noPointers)
            attr |= BlkAttr.NO_SCAN;
        p = GC.malloc(ci.initializer.length, attr, ci);
        debug(PRINTF) printf(" p = %p\n", p);
    }

    debug(PRINTF)
    {
        printf("p = %p\n", p);
        printf("ci = %p, ci.init.ptr = %p, len = %llu\n", ci, ci.initializer.ptr, cast(ulong)ci.initializer.length);
        printf("vptr = %p\n", *cast(void**) ci.initializer);
        printf("vtbl[0] = %p\n", (*cast(void***) ci.initializer)[0]);
        printf("vtbl[1] = %p\n", (*cast(void***) ci.initializer)[1]);
        printf("init[0] = %x\n", (cast(uint*) ci.initializer)[0]);
        printf("init[1] = %x\n", (cast(uint*) ci.initializer)[1]);
        printf("init[2] = %x\n", (cast(uint*) ci.initializer)[2]);
        printf("init[3] = %x\n", (cast(uint*) ci.initializer)[3]);
        printf("init[4] = %x\n", (cast(uint*) ci.initializer)[4]);
    }

    // initialize it
    p[0 .. ci.initializer.length] = ci.initializer[];

    debug(PRINTF) printf("initialization done\n");
    return cast(Object) p;
}


/**
 * Delete the interface reference *p: recover the underlying Object via the
 * interface's offset, delete that object, and null out the reference.
 */
extern (C) void _d_delinterface(void** p)
{
    if (*p)
    {
        Interface* pi = **cast(Interface ***)*p;
        Object o = cast(Object)(*p - pi.offset);

        _d_delclass(&o);
        *p = null;
    }
}


// used for deletion
private extern (D) alias void function (Object) fp_t;


/**
 * Finalize and free the class instance *p, then null out the reference.
 * When the class defines a deallocator, it is invoked instead of GC.free.
 */
extern (C) void _d_delclass(Object* p)
{
    if (*p)
    {
        debug(PRINTF) printf("_d_delclass(%p)\n", *p);

        ClassInfo **pc = cast(ClassInfo **)*p;
        if (*pc)
        {
            ClassInfo c = **pc;

            rt_finalize(cast(void*) *p);

            if (c.deallocator)
            {
                fp_t fp = cast(fp_t)c.deallocator;
                (*fp)(*p); // call deallocator
                *p = null;
                return;
            }
        }
        else
        {
            // vtable pointer already cleared (object was finalized earlier);
            // rt_finalize handles this case.
            rt_finalize(cast(void*) *p);
        }
        GC.free(cast(void*) *p);
        *p = null;
    }
}

/**
 * This is called for a delete statement where the value
 * being deleted is a pointer to a struct with a destructor
 * but doesn't have an overloaded delete operator.
 */
extern (C) void _d_delstruct(void** p, TypeInfo_Struct inf)
{
    if (*p)
    {
        debug(PRINTF) printf("_d_delstruct(%p, %p)\n", *p, cast(void*)inf);

        inf.destroy(*p);
        GC.free(*p);
        *p = null;
    }
}

// strip const/immutable/shared/inout from type info
inout(TypeInfo) unqualify(inout(TypeInfo) cti) pure nothrow @nogc
{
    TypeInfo ti = cast() cti;
    while (ti)
    {
        // avoid dynamic type casts
        auto tti = typeid(ti);
        if (tti is typeid(TypeInfo_Const))
            ti = (cast(TypeInfo_Const)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Invariant))
            ti = (cast(TypeInfo_Invariant)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Shared))
            ti = (cast(TypeInfo_Shared)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Inout))
            ti = (cast(TypeInfo_Inout)cast(void*)ti).base;
        else
            break;
    }
    return ti;
}

// size used to store the TypeInfo at the end of an allocation for structs that have a destructor
size_t structTypeInfoSize(const TypeInfo ti) pure nothrow @nogc
{
    if (ti && typeid(ti) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
    {
        auto sti = cast(TypeInfo_Struct)cast(void*)ti;
        if (sti.xdtor)
            return size_t.sizeof;
        // no destructor => nothing needs to be stored
    }
    return 0;
}

/** dummy class used to lock for shared array appending */
private class ArrayAllocLengthLock
{}


/**
  Set the allocated length of the array block.  This is called
  any time an array is appended to or its length is set.

  The allocated block looks like this for blocks < PAGESIZE:

  |elem0|elem1|elem2|...|elemN-1|emptyspace|N*elemsize|


  The size of the allocated length at the end depends on the block size:

  a block of 16 to 256 bytes has an 8-bit length.

  a block with 512 to pagesize/2 bytes has a 16-bit length.

  For blocks >= pagesize, the length is a size_t and is at the beginning of the
  block.  The reason we have to do this is because the block can extend into
  more pages, so we cannot trust the block length if it sits at the end of the
  block, because it might have just been extended.  If we can prove in the
  future that the block is unshared, we may be able to change this, but I'm not
  sure it's important.

  In order to do put the length at the front, we have to provide 16 bytes
  buffer space in case the block has to be aligned properly.  In x86, certain
  SSE instructions will only work if the data is 16-byte aligned.  In addition,
  we need the sentinel byte to prevent accidental pointers to the next block.
  Because of the extra overhead, we only do this for page size and above, where
  the overhead is minimal compared to the block size.

  So for those blocks, it looks like:

  |N*elemsize|padding|elem0|elem1|...|elemN-1|emptyspace|sentinelbyte|

  where elem0 starts 16 bytes after the first byte.

  Params:
    info      = block to update (base, size, attr)
    newlength = new "used bytes" value to record
    isshared  = when true and oldlength is given, the update is done with a CAS
    tinext    = unqualified element type; if it is a struct with a dtor, its
                TypeInfo is also stored in the block for the finalizer
    oldlength = expected current value (compare-and-swap semantics); ~0 means
                "initial set, no comparison"
  Returns: true if the length was recorded, false if the new size does not fit
           or the compare against oldlength failed.
 */
bool __setArrayAllocLength(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength = ~0) pure nothrow
{
    import core.atomic;

    size_t typeInfoSize = structTypeInfoSize(tinext);

    if (info.size <= 256)
    {
        import core.checkedint;

        bool overflow;
        auto newlength_padded = addu(newlength,
                                     addu(SMALLPAD, typeInfoSize, overflow),
                                     overflow);

        if (newlength_padded > info.size || overflow)
            // new size does not fit inside block
            return false;

        auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD);
        if (oldlength != ~0)
        {
            if (isshared)
            {
                return cas(cast(shared)length, cast(ubyte)oldlength, cast(ubyte)newlength);
            }
            else
            {
                if (*length == cast(ubyte)oldlength)
                    *length = cast(ubyte)newlength;
                else
                    return false;
            }
        }
        else
        {
            // setting the initial length, no cas needed
            *length = cast(ubyte)newlength;
        }
        if (typeInfoSize)
        {
            // store the TypeInfo in the last size_t of the block for finalization
            auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
            *typeInfo = cast() tinext;
        }
    }
    else if (info.size < PAGESIZE)
    {
        // NOTE(review): unlike the small-block path above, this addition is not
        // overflow-checked with core.checkedint; presumably callers bound
        // newlength first -- confirm before relying on it for huge values.
        if (newlength + MEDPAD + typeInfoSize > info.size)
            // new size does not fit inside block
            return false;
        auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD);
        if (oldlength != ~0)
        {
            if (isshared)
            {
                return cas(cast(shared)length, cast(ushort)oldlength, cast(ushort)newlength);
            }
            else
            {
                if (*length == oldlength)
                    *length = cast(ushort)newlength;
                else
                    return false;
            }
        }
        else
        {
            // setting the initial length, no cas needed
            *length = cast(ushort)newlength;
        }
        if (typeInfoSize)
        {
            auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof);
            *typeInfo = cast() tinext;
        }
    }
    else
    {
        // large block: the length lives at the front (see the doc comment above)
        if (newlength + LARGEPAD > info.size)
            // new size does not fit inside block
            return false;
        auto length = cast(size_t *)(info.base);
        if (oldlength != ~0)
        {
            if (isshared)
            {
                return cas(cast(shared)length, cast(size_t)oldlength, cast(size_t)newlength);
            }
            else
            {
                if (*length == oldlength)
                    *length = newlength;
                else
                    return false;
            }
        }
        else
        {
            // setting the initial length, no cas needed
            *length = newlength;
        }
        if (typeInfoSize)
        {
            // TypeInfo is stored right after the front length word
            auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof);
            *typeInfo = cast()tinext;
        }
    }
    return true; // resize succeeded
}

/**
  get the allocation size of the array for the given block (without padding or type info)
  */
size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow
{
    if (info.size <= 256)
        return *cast(ubyte *)(info.base + info.size - structTypeInfoSize(tinext) - SMALLPAD);

    if (info.size < PAGESIZE)
        return *cast(ushort *)(info.base + info.size - structTypeInfoSize(tinext) - MEDPAD);

    // large block: length word sits at the front
    return *cast(size_t *)(info.base);
}

/**
  get the start of the array for the given block
  */
void *__arrayStart(BlkInfo info) nothrow pure
{
    // blocks of PAGESIZE or more keep the length (+ padding) at the front,
    // so element 0 begins LARGEPREFIX bytes in
    return info.base + ((info.size & BIGLENGTHMASK) ? LARGEPREFIX : 0);
}

/**
  get the padding required to allocate size bytes.  Note that the padding is
  NOT included in the passed in size.  Therefore, do NOT call this function
  with the size of an allocated block.
  */
size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted
{
    return size > MAXMEDSIZE ? LARGEPAD : ((size > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + structTypeInfoSize(tinext));
}

/**
  allocate an array memory block by applying the proper padding and
  assigning block attributes if not inherited from the existing block.
  Returns a zero-initialized BlkInfo on arithmetic overflow.
  */
BlkInfo __arrayAlloc(size_t arrsize, const TypeInfo ti, const TypeInfo tinext) nothrow pure
{
    import core.checkedint;

    size_t typeInfoSize = structTypeInfoSize(tinext);
    size_t padsize = arrsize > MAXMEDSIZE ? LARGEPAD : ((arrsize > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + typeInfoSize);

    bool overflow;
    auto padded_size = addu(arrsize, padsize, overflow);

    if (overflow)
        return BlkInfo();

    // tinext.flags & 1 indicates the element type may contain pointers
    uint attr = (!(tinext.flags & 1) ? BlkAttr.NO_SCAN : 0) | BlkAttr.APPENDABLE;
    if (typeInfoSize)
        attr |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;
    return GC.qalloc(padded_size, attr, ti);
}

/// ditto, but inherits block attributes from an existing block when one exists
BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const TypeInfo ti, const TypeInfo tinext)
{
    import core.checkedint;

    if (!info.base)
        return __arrayAlloc(arrsize, ti, tinext);

    bool overflow;
    auto padded_size = addu(arrsize, __arrayPad(arrsize, tinext), overflow);
    if (overflow)
    {
        return BlkInfo();
    }

    return GC.qalloc(padded_size, info.attr, ti);
}

/**
  cache for the lookup of the block info
  */
enum N_CACHE_BLOCKS=8;

// note this is TLS, so no need to sync.
BlkInfo *__blkcache_storage;

static if (N_CACHE_BLOCKS==1)
{
    version=single_cache;
}
else
{
    //version=simple_cache; // uncomment to test simple cache strategy
    //version=random_cache; // uncomment to test random cache strategy

    // ensure N_CACHE_BLOCKS is power of 2.
    static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS));

    version (random_cache)
    {
        int __nextRndNum = 0;
    }
    int __nextBlkIdx;
}

/// Lazily allocated (C heap, zero-filled) per-thread block-info cache.
@property BlkInfo *__blkcache() nothrow
{
    if (!__blkcache_storage)
    {
        // allocate the block cache for the first time
        immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
        __blkcache_storage = cast(BlkInfo *)malloc(size);
        memset(__blkcache_storage, 0, size);
    }
    return __blkcache_storage;
}

// called when thread is exiting.
static ~this()
{
    // free the blkcache
    if (__blkcache_storage)
    {
        free(__blkcache_storage);
        __blkcache_storage = null;
    }
}


// we expect this to be called with the lock in place
void processGCMarks(BlkInfo* cache, scope rt.tlsgc.IsMarkedDg isMarked) nothrow
{
    // called after the mark routine to eliminate block cache data when it
    // might be ready to sweep

    debug(PRINTF) printf("processing GC Marks, %x\n", cache);
    if (cache)
    {
        debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS)
        {
            printf("cache entry %d has base ptr %x\tsize %d\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr);
        }
        auto cache_end = cache + N_CACHE_BLOCKS;
        for (;cache < cache_end; ++cache)
        {
            if (cache.base != null && !isMarked(cache.base))
            {
                debug(PRINTF) printf("clearing cache entry at %x\n", cache.base);
                cache.base = null; // clear that data.
            }
        }
    }
}

unittest
{
    // Bugzilla 10701 - segfault in GC
    ubyte[] result; result.length = 4096;
    GC.free(result.ptr);
    GC.collect();
}

/**
  Get the cached block info of an interior pointer.  Returns null if the
  interior pointer's block is not cached.

  NOTE: The base ptr in this struct can be cleared asynchronously by the GC,
        so any use of the returned BlkInfo should copy it and then check the
        base ptr of the copy before actually using it.

  TODO: Change this function so the caller doesn't have to be aware of this
        issue.  Either return by value and expect the caller to always check
        the base ptr as an indication of whether the struct is valid, or set
        the BlkInfo as a side-effect and return a bool to indicate success.
  */
BlkInfo *__getBlkInfo(void *interior) nothrow
{
    BlkInfo *ptr = __blkcache;
    version (single_cache)
    {
        if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
            return ptr;
        return null; // not in cache.
    }
    else version (simple_cache)
    {
        foreach (i; 0..N_CACHE_BLOCKS)
        {
            if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
                return ptr;
            ptr++;
        }
    }
    else
    {
        // try to do a smart lookup, using __nextBlkIdx as the "head"
        // (most recently inserted entry first, then wrap around)
        auto curi = ptr + __nextBlkIdx;
        for (auto i = curi; i >= ptr; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }

        for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }
    }
    return null; // not in cache.
}

/// Insert bi into the per-thread cache; curpos is the entry to overwrite
/// (from a prior __getBlkInfo hit), or null to evict per the active strategy.
void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow
{
    version (single_cache)
    {
        *__blkcache = bi;
    }
    else
    {
        version (simple_cache)
        {
            if (curpos)
                *curpos = bi;
            else
            {
                // note, this is a super-simple algorithm that does not care about
                // most recently used.  It simply uses a round-robin technique to
                // cache block info.  This means that the ordering of the cache
                // doesn't mean anything.  Certain patterns of allocation may
                // render the cache near-useless.
                __blkcache[__nextBlkIdx] = bi;
                __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
            }
        }
        else version (random_cache)
        {
            // strategy: if the block currently is in the cache, move the
            // current block index to the a random element and evict that
            // element.
            auto cache = __blkcache;
            if (!curpos)
            {
                // linear congruential generator picks the victim slot
                __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1);
                curpos = cache + __nextBlkIdx;
            }
            else
            {
                __nextBlkIdx = curpos - cache;
            }
            *curpos = bi;
        }
        else
        {
            //
            // strategy: If the block currently is in the cache, swap it with
            // the head element.  Otherwise, move the head element up by one,
            // and insert it there.
            //
            auto cache = __blkcache;
            if (!curpos)
            {
                __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
                curpos = cache + __nextBlkIdx;
            }
            else if (curpos !is cache + __nextBlkIdx)
            {
                *curpos = cache[__nextBlkIdx];
                curpos = cache + __nextBlkIdx;
            }
            *curpos = bi;
        }
    }
}

/**
 * Shrink the "allocated" length of an array to be the exact size of the array.
 * It doesn't matter what the current allocated length of the array is, the
 * user is telling the runtime that he knows what he is doing.
 */
extern(C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) /+nothrow+/
{
    // note, we do not care about shared.  We are setting the length no matter
    // what, so no lock is required.
    debug(PRINTF) printf("_d_arrayshrinkfit, elemsize = %d, arr.ptr = x%x arr.length = %d\n", ti.next.tsize, arr.ptr, arr.length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;                  // array element size
    auto cursize = arr.length * size;
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo(arr.ptr);
    auto info = bic ? *bic : GC.query(arr.ptr);
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        auto newsize = (arr.ptr - __arrayStart(info)) + cursize;

        debug(PRINTF) printf("setting allocated size to %d\n", (arr.ptr - info.base) + cursize);

        // destroy structs that become unused memory when array size is shrinked
        if (typeid(tinext) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
        {
            auto sti = cast(TypeInfo_Struct)cast(void*)tinext;
            if (sti.xdtor)
            {
                auto oldsize = __arrayAllocLength(info, tinext);
                if (oldsize > cursize)
                    finalize_array(arr.ptr + cursize, oldsize - cursize, sti);
            }
        }
        // Note: Since we "assume" the append is safe, it means it is not shared.
        // Since it is not shared, we also know it won't throw (no lock).
        if (!__setArrayAllocLength(info, newsize, false, tinext))
            onInvalidMemoryOperationError();

        // cache the block if not already done.
        if (!isshared && !bic)
            __insertBlkInfoCache(info, null);
    }
}

/// True if ti overrides TypeInfo.postblit (i.e. elements need postblitting on copy).
package bool hasPostblit(in TypeInfo ti)
{
    return (&ti.postblit).funcptr !is &TypeInfo.postblit;
}

/// Run the element postblit over the len bytes of array data starting at ptr.
void __doPostblit(void *ptr, size_t len, const TypeInfo ti)
{
    if (!hasPostblit(ti))
        return;

    if (auto tis = cast(TypeInfo_Struct)ti)
    {
        // this is a struct, check the xpostblit member
        auto pblit = tis.xpostblit;
        if (!pblit)
            // postblit not specified, no point in looping.
            return;

        // optimized for struct, call xpostblit directly for each element
        immutable size = ti.tsize;
        const eptr = ptr + len;
        for (;ptr < eptr;ptr += size)
            pblit(ptr);
    }
    else
    {
        // generic case, call the typeinfo's postblit function
        immutable size = ti.tsize;
        const eptr = ptr + len;
        for (;ptr < eptr;ptr += size)
            ti.postblit(ptr);
    }
}


/**
 * set the array capacity.  If the array capacity isn't currently large enough
 * to hold the requested capacity (in number of elements), then the array is
 * resized/reallocated to the appropriate size.  Pass in a requested capacity
 * of 0 to get the current capacity.  Returns the number of elements that can
 * actually be stored once the resizing is done.
 */
extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p)
in
{
    assert(ti);
    assert(!(*p).length || (*p).ptr);
}
body
{
    // step 1, get the block
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo((*p).ptr);
    auto info = bic ? *bic : GC.query((*p).ptr);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;
    // compute reqsize = newcapacity * size with overflow detection;
    // on x86/x86-64 the asm uses the carry flag from mul, otherwise checkedint
    version (D_InlineAsm_X86)
    {
        size_t reqsize = void;

        asm
        {
            mov EAX, newcapacity;
            mul EAX, size;
            mov reqsize, EAX;
            jnc Lcontinue;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        size_t reqsize = void;

        asm
        {
            mov RAX, newcapacity;
            mul RAX, size;
            mov reqsize, RAX;
            jnc Lcontinue;
        }
    }
    else
    {
        import core.checkedint : mulu;

        bool overflow = false;
        size_t reqsize = mulu(size, newcapacity, overflow);
        if (!overflow)
            goto Lcontinue;
    }
Loverflow:
    onOutOfMemoryError();
    assert(0);
Lcontinue:

    // step 2, get the actual "allocated" size.  If the allocated size does not
    // match what we expect, then we will need to reallocate anyways.

    // TODO: this probably isn't correct for shared arrays
    size_t curallocsize = void;
    size_t curcapacity = void;
    size_t offset = void;
    size_t arraypad = void;
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        if (info.size <= 256)
        {
            arraypad = SMALLPAD + structTypeInfoSize(tinext);
            curallocsize = *(cast(ubyte *)(info.base + info.size - arraypad));
        }
        else if (info.size < PAGESIZE)
        {
            arraypad = MEDPAD + structTypeInfoSize(tinext);
            curallocsize = *(cast(ushort *)(info.base + info.size - arraypad));
        }
        else
        {
            curallocsize = *(cast(size_t *)(info.base));
            arraypad = LARGEPAD;
        }


        offset = (*p).ptr - __arrayStart(info);
        if (offset + (*p).length * size != curallocsize)
        {
            // array is not at the end of the used data: it cannot grow in place
            curcapacity = 0;
        }
        else
        {
            // figure out the current capacity of the block from the point
            // of view of the array.
            curcapacity = info.size - offset - arraypad;
        }
    }
    else
    {
        curallocsize = curcapacity = offset = 0;
    }
    debug(PRINTF) printf("_d_arraysetcapacity, p = x%d,%d, newcapacity=%d, info.size=%d, reqsize=%d, curallocsize=%d, curcapacity=%d, offset=%d\n", (*p).ptr, (*p).length, newcapacity, info.size, reqsize, curallocsize, curcapacity, offset);

    if (curcapacity >= reqsize)
    {
        // no problems, the current allocated size is large enough.
        return curcapacity / size;
    }

    // step 3, try to extend the array in place.
    if (info.size >= PAGESIZE && curcapacity != 0)
    {
        auto extendsize = reqsize + offset + LARGEPAD - info.size;
        auto u = GC.extend(info.base, extendsize, extendsize);
        if (u)
        {
            // extend worked, save the new current allocated size
            if (bic)
                bic.size = u; // update cache
            curcapacity = u - offset - LARGEPAD;
            return curcapacity / size;
        }
    }

    // step 4, if extending doesn't work, allocate a new array with at least the requested allocated size.
    auto datasize = (*p).length * size;
    // copy attributes from original block, or from the typeinfo if the
    // original block doesn't exist.
    info = __arrayAlloc(reqsize, info, ti, tinext);
    if (info.base is null)
        goto Loverflow;
    // copy the data over.
    // note that malloc will have initialized the data we did not request to 0.
    auto tgt = __arrayStart(info);
    memcpy(tgt, (*p).ptr, datasize);

    // handle postblit
    __doPostblit(tgt, datasize, tinext);

    if (!(info.attr & BlkAttr.NO_SCAN))
    {
        // need to memset the newly requested data, except for the data that
        // malloc returned that we didn't request.
        void *endptr = tgt + reqsize;
        void *begptr = tgt + datasize;

        // sanity check
        assert(endptr >= begptr);
        memset(begptr, 0, endptr - begptr);
    }

    // set up the correct length
    __setArrayAllocLength(info, datasize, isshared, tinext);
    if (!isshared)
        __insertBlkInfoCache(info, bic);

    *p = (cast(void*)tgt)[0 .. (*p).length];

    // determine the padding.  This has to be done manually because __arrayPad
    // assumes you are not counting the pad size, and info.size does include
    // the pad.
    if (info.size <= 256)
        arraypad = SMALLPAD + structTypeInfoSize(tinext);
    else if (info.size < PAGESIZE)
        arraypad = MEDPAD + structTypeInfoSize(tinext);
    else
        arraypad = LARGEPAD;

    curcapacity = info.size - arraypad;
    return curcapacity / size;
}

/**
 * Allocate a new uninitialized array of length elements.
 * ti is the type of the resulting array, or pointer to element.
 */
extern (C) void[] _d_newarrayU(const TypeInfo ti, size_t length) pure nothrow
{
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    debug(PRINTF) printf("_d_newarrayU(length = x%x, size = %d)\n", length, size);
    if (length == 0 || size == 0)
        return null;

    // compute total byte size = size * length, overflow-checked
    // (asm uses the carry flag from mul; otherwise core.checkedint)
    version (D_InlineAsm_X86)
    {
        asm pure nothrow @nogc
        {
            mov EAX,size        ;
            mul EAX,length      ;
            mov size,EAX        ;
            jnc Lcontinue       ;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        asm pure nothrow @nogc
        {
            mov RAX,size        ;
            mul RAX,length      ;
            mov size,RAX        ;
            jnc Lcontinue       ;
        }
    }
    else
    {
        import core.checkedint : mulu;

        bool overflow = false;
        size = mulu(size, length, overflow);
        if (!overflow)
            goto Lcontinue;
    }
Loverflow:
    onOutOfMemoryError();
    assert(0);
Lcontinue:

    auto info = __arrayAlloc(size, ti, tinext);
    if (!info.base)
        goto Loverflow;
    debug(PRINTF) printf(" p = %p\n", info.base);
    // update the length of the array
    auto arrstart = __arrayStart(info);
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    __setArrayAllocLength(info, size, isshared, tinext);
    return arrstart[0..length];
}

/**
 * Allocate a new array of length elements.
 * ti is the type of the resulting array, or pointer to element.
 * (For when the array is initialized to 0)
 */
extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow
{
    void[] result = _d_newarrayU(ti, length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    // zero-fill the payload (memset of a null/0-length result is a no-op)
    memset(result.ptr, 0, size * length);
    return result;
}

/**
 * For when the array has a non-zero initializer.
 */
extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow
{
    import core.internal.traits : AliasSeq;

    void[] result = _d_newarrayU(ti, length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    auto init = tinext.initializer();

    // fast paths: initializers of word-like sizes are splatted with a slice
    // assignment; anything else is memcpy'd element by element
    switch (init.length)
    {
    foreach (T; AliasSeq!(ubyte, ushort, uint, ulong))
    {
    case T.sizeof:
        (cast(T*)result.ptr)[0 .. size * length / T.sizeof] = *cast(T*)init.ptr;
        return result;
    }

    default:
    {
        immutable sz = init.length;
        for (size_t u = 0; u < size * length; u += sz)
            memcpy(result.ptr + u, init.ptr, sz);
        return result;
    }
    }
}


/**
 * Recursively allocate a multidimensional array; op is the single-dimension
 * allocator used at the innermost level (_d_newarrayT or _d_newarrayiT).
 */
void[] _d_newarrayOpT(alias op)(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarrayOpT(ndims = %d)\n", dims.length);
    if (dims.length == 0)
        return null;

    void[] foo(const TypeInfo ti, size_t[] dims)
    {
        auto tinext = unqualify(ti.next);
        auto dim = dims[0];

        debug(PRINTF) printf("foo(ti = %p, ti.next = %p, dim = %d, ndims = %d\n", ti, ti.next, dim, dims.length);
        if (dims.length == 1)
        {
            // innermost dimension: delegate to the element allocator
            auto r = op(ti, dim);
            return *cast(void[]*)(&r);
        }

        // outer dimension: an array of dim slices, each filled recursively
        auto allocsize = (void[]).sizeof * dim;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        auto p = __arrayStart(info)[0 .. dim];

        foreach (i; 0..dim)
        {
            (cast(void[]*)p.ptr)[i] = foo(tinext, dims[1..$]);
        }
        return p;
    }

    auto result = foo(ti, dims);
    debug(PRINTF) printf("result = %llx\n", result.ptr);

    return result;
}


/**
 * Allocate a zero-initialized multidimensional array with the given dims.
 */
extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarraymT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayT)(ti, dims);
    }
}


/**
 * Allocate a multidimensional array whose elements get their (non-zero)
 * default initializer.
 */
extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarraymiT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayiT)(ti, dims);
    }
}

/**
 * Allocate an uninitialized non-array item.
 * This is an optimization to avoid things needed for arrays like the __arrayPad(size).
 */
extern (C) void* _d_newitemU(in TypeInfo _ti)
{
    auto ti = unqualify(_ti);
    auto flags = !(ti.flags & 1) ? BlkAttr.NO_SCAN : 0;
    immutable tiSize = structTypeInfoSize(ti);
    immutable size = ti.tsize + tiSize;
    if (tiSize)
        flags |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;

    auto blkInf = GC.qalloc(size, flags, ti);
    auto p = blkInf.base;

    if (tiSize)
        // stash the TypeInfo at the end of the block so the GC can finalize it
        *cast(TypeInfo*)(p + blkInf.size - tiSize) = cast() ti;

    return p;
}

/// Same as above, zero initializes the item.
extern (C) void* _d_newitemT(in TypeInfo _ti)
{
    auto p = _d_newitemU(_ti);
    memset(p, 0, _ti.tsize);
    return p;
}

/// Same as above, for item with non-zero initializer.
1098 extern (C) void* _d_newitemiT(in TypeInfo _ti) 1099 { 1100 auto p = _d_newitemU(_ti); 1101 auto init = _ti.initializer(); 1102 assert(init.length <= _ti.tsize); 1103 memcpy(p, init.ptr, init.length); 1104 return p; 1105 } 1106 1107 /** 1108 * 1109 */ 1110 struct Array 1111 { 1112 size_t length; 1113 byte* data; 1114 } 1115 1116 1117 /** 1118 * This function has been replaced by _d_delarray_t 1119 */ 1120 extern (C) void _d_delarray(void[]* p) 1121 { 1122 _d_delarray_t(p, null); 1123 } 1124 1125 debug(PRINTF) 1126 { 1127 extern(C) void printArrayCache() 1128 { 1129 auto ptr = __blkcache; 1130 printf("CACHE: \n"); 1131 foreach (i; 0 .. N_CACHE_BLOCKS) 1132 { 1133 printf(" %d\taddr:% .8x\tsize:% .10d\tflags:% .8x\n", i, ptr[i].base, ptr[i].size, ptr[i].attr); 1134 } 1135 } 1136 } 1137 1138 /** 1139 * 1140 */ 1141 extern (C) void _d_delarray_t(void[]* p, const TypeInfo_Struct ti) 1142 { 1143 if (p) 1144 { 1145 auto bic = __getBlkInfo(p.ptr); 1146 auto info = bic ? *bic : GC.query(p.ptr); 1147 1148 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1149 { 1150 if (ti) // ti non-null only if ti is a struct with dtor 1151 { 1152 void* start = __arrayStart(info); 1153 size_t length = __arrayAllocLength(info, ti); 1154 finalize_array(start, length, ti); 1155 } 1156 1157 // if p is in the cache, clear it there as well 1158 if (bic) 1159 bic.base = null; 1160 1161 GC.free(info.base); 1162 *p = null; 1163 } 1164 } 1165 } 1166 1167 unittest 1168 { 1169 __gshared size_t countDtor = 0; 1170 struct S 1171 { 1172 int x; 1173 ~this() { countDtor++; } 1174 } 1175 // destroy large array with x.ptr not base address of allocation 1176 auto x = new S[10000]; 1177 void* p = x.ptr; 1178 assert(GC.addrOf(p) != null); 1179 delete x; 1180 assert(GC.addrOf(p) == null); 1181 assert(countDtor == 10000); 1182 1183 // destroy full array even if only slice passed 1184 auto y = new S[400]; 1185 auto z = y[200 .. 
300];
    p = z.ptr;
    assert(GC.addrOf(p) != null);
    delete z;                       // deleting a slice frees the whole block
    assert(GC.addrOf(p) == null);
    assert(countDtor == 10000 + 400);
}

/**
 * Implementation of `delete` on a raw pointer: free the GC block and
 * null out the caller's pointer. No finalization is performed here.
 */
extern (C) void _d_delmemory(void* *p)
{
    if (*p)
    {
        GC.free(*p);
        *p = null;
    }
}


/**
 * Finalize the object behind an interface reference: recover the Object
 * by subtracting the interface's vtbl offset, then run rt_finalize on it.
 */
extern (C) void _d_callinterfacefinalizer(void *p)
{
    if (p)
    {
        Interface *pi = **cast(Interface ***)p;
        Object o = cast(Object)(p - pi.offset);
        rt_finalize(cast(void*)o);
    }
}


/**
 * Thin wrapper: finalize the class object at p.
 */
extern (C) void _d_callfinalizer(void* p)
{
    rt_finalize( p );
}


/**
 * Install the handler consulted by rt_finalize2 during non-deterministic
 * (GC) collection of class objects.
 */
extern (C) void rt_setCollectHandler(CollectHandler h)
{
    collectHandler = h;
}


/**
 * Return the currently installed collect handler (may be null).
 */
extern (C) CollectHandler rt_getCollectHandler()
{
    return collectHandler;
}


/**
 * Report whether the finalizer of the allocation at p lies inside the
 * given code segment. Dispatches on the block attributes: STRUCTFINAL
 * blocks hold a TypeInfo_Struct (array or single item), anything else is
 * treated as a class object whose ClassInfo chain is walked.
 * NOTE(review): presumably used when unloading shared libraries so the GC
 * can finalize objects whose dtor code is about to disappear — confirm
 * against the caller.
 */
extern (C) int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, in void[] segment) nothrow
{
    if (attr & BlkAttr.STRUCTFINAL)
    {
        if (attr & BlkAttr.APPENDABLE)
            return hasArrayFinalizerInSegment(p, size, segment);
        return hasStructFinalizerInSegment(p, size, segment);
    }

    // otherwise class
    auto ppv = cast(void**) p;
    if (!p || !*ppv)            // null object or already-finalized (vptr cleared)
        return false;

    auto c = *cast(ClassInfo*)*ppv;
    do
    {
        auto pf = c.destructor;
        // unsigned compare doubles as a range check: pf in [segment.ptr, ptr+length)
        if (cast(size_t)(pf - segment.ptr) < segment.length) return true;
    }
    while ((c = c.base) !is null);

    return false;
}

// For a single struct item the TypeInfo_Struct pointer is stored in the
// last word of the block (written there by _d_newitemU); test whether its
// xdtor falls inside the segment.
int hasStructFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow
{
    if (!p)
        return false;

    auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    return cast(size_t)(cast(void*)ti.xdtor - segment.ptr) < segment.length;
}

int hasArrayFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow
{
    if (!p)
        return false;

    // Location of the stashed TypeInfo_Struct depends on the size class:
    // small/medium blocks keep it in the trailing word, PAGESIZE-and-up
    // blocks keep it just after the size_t length prefix at the front.
    TypeInfo_Struct si = void;
    if (size < PAGESIZE)
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    else
        si = *cast(TypeInfo_Struct*)(p + size_t.sizeof);

    return cast(size_t)(cast(void*)si.xdtor - segment.ptr) < segment.length;
}

// called by the GC
// Finalize an appendable array block: decode the element TypeInfo and the
// used length from the block's padding area, then destroy each element.
// The length encoding matches __setArrayAllocLength's size classes:
//   <= 256:      ubyte length before the trailing TypeInfo word (SMALLPAD)
//   < PAGESIZE:  ushort length before the trailing TypeInfo word (MEDPAD)
//   otherwise:   size_t length at the block front, data after LARGEPREFIX
void finalize_array2(void* p, size_t size) nothrow
{
    debug(PRINTF) printf("rt_finalize_array2(p = %p)\n", p);

    TypeInfo_Struct si = void;
    if (size <= 256)
    {
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
        size = *cast(ubyte*)(p + size - size_t.sizeof - SMALLPAD);
    }
    else if (size < PAGESIZE)
    {
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
        size = *cast(ushort*)(p + size - size_t.sizeof - MEDPAD);
    }
    else
    {
        si = *cast(TypeInfo_Struct*)(p + size_t.sizeof);
        size = *cast(size_t*)p;
        p += LARGEPREFIX;       // skip the 16-byte metadata prefix to reach element 0
    }

    try
    {
        finalize_array(p, size, si);
    }
    catch (Exception e)
    {
        onFinalizeError(si, e);
    }
}

// Run the struct destructor on every element of the array at p
// (size = used bytes, si = element type).
void finalize_array(void* p, size_t size, const TypeInfo_Struct si)
{
    // Due to the fact that the delete operator calls destructors
    // for arrays from the last element to the first, we maintain
    // compatibility here by doing the same.
    auto tsize = si.tsize;
    // walk backwards: last element's destructor runs first
    for (auto curP = p + size - tsize; curP >= p; curP -= tsize)
    {
        // call destructor
        si.destroy(curP);
    }
}

// called by the GC
// Finalize a single struct item: the TypeInfo_Struct pointer sits in the
// last word of the block (see _d_newitemU).
void finalize_struct(void* p, size_t size) nothrow
{
    debug(PRINTF) printf("finalize_struct(p = %p)\n", p);

    auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    try
    {
        ti.destroy(p); // call destructor
    }
    catch (Exception e)
    {
        onFinalizeError(ti, e);
    }
}

/**
 * Finalize a class object at p:
 *  - run the destructor chain from most-derived to base (skipped during a
 *    GC collection, det == false, if the collect handler vetoes it),
 *  - delete the monitor if one was allocated,
 *  - optionally reset the memory to the class init image,
 *  - always null the vptr so double finalization is a no-op.
 * Exceptions escaping a destructor are routed to onFinalizeError.
 */
extern (C) void rt_finalize2(void* p, bool det = true, bool resetMemory = true) nothrow
{
    debug(PRINTF) printf("rt_finalize2(p = %p)\n", p);

    auto ppv = cast(void**) p;
    if (!p || !*ppv)            // null or vptr already cleared: nothing to do
        return;

    auto pc = cast(ClassInfo*) *ppv;
    try
    {
        if (det || collectHandler is null || collectHandler(cast(Object) p))
        {
            auto c = *pc;
            do
            {
                if (c.destructor)
                    (cast(fp_t) c.destructor)(cast(Object) p); // call destructor
            }
            while ((c = c.base) !is null);
        }

        if (ppv[1]) // if monitor is not null
            _d_monitordelete(cast(Object) p, det);

        if (resetMemory)
        {
            auto w = (*pc).initializer;
            p[0 .. w.length] = w[];
        }
    }
    catch (Exception e)
    {
        onFinalizeError(*pc, e);
    }
    finally
    {
        *ppv = null; // zero vptr even if `resetMemory` is false
    }
}

/// Deterministic finalization entry point (always resets memory).
extern (C) void rt_finalize(void* p, bool det = true)
{
    rt_finalize2(p, det, true);
}

// GC-driven finalization: dispatch on block attributes to the class,
// struct-array, or single-struct finalizer.
extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr)
{
    // to verify: reset memory necessary?
1412 if (!(attr & BlkAttr.STRUCTFINAL)) 1413 rt_finalize2(p, false, false); // class 1414 else if (attr & BlkAttr.APPENDABLE) 1415 finalize_array2(p, size); // array of structs 1416 else 1417 finalize_struct(p, size); // struct 1418 } 1419 1420 1421 /** 1422 * Resize dynamic arrays with 0 initializers. 1423 */ 1424 extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p) 1425 in 1426 { 1427 assert(ti); 1428 assert(!(*p).length || (*p).ptr); 1429 } 1430 body 1431 { 1432 debug(PRINTF) 1433 { 1434 //printf("_d_arraysetlengthT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength); 1435 if (p) 1436 printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length); 1437 } 1438 1439 void* newdata = void; 1440 if (newlength) 1441 { 1442 if (newlength <= (*p).length) 1443 { 1444 *p = (*p)[0 .. newlength]; 1445 newdata = (*p).ptr; 1446 return newdata[0 .. newlength]; 1447 } 1448 auto tinext = unqualify(ti.next); 1449 size_t sizeelem = tinext.tsize; 1450 version (D_InlineAsm_X86) 1451 { 1452 size_t newsize = void; 1453 1454 asm pure nothrow @nogc 1455 { 1456 mov EAX, newlength; 1457 mul EAX, sizeelem; 1458 mov newsize, EAX; 1459 jc Loverflow; 1460 } 1461 } 1462 else version (D_InlineAsm_X86_64) 1463 { 1464 size_t newsize = void; 1465 1466 asm pure nothrow @nogc 1467 { 1468 mov RAX, newlength; 1469 mul RAX, sizeelem; 1470 mov newsize, RAX; 1471 jc Loverflow; 1472 } 1473 } 1474 else 1475 { 1476 import core.checkedint : mulu; 1477 1478 bool overflow = false; 1479 size_t newsize = mulu(sizeelem, newlength, overflow); 1480 if (overflow) 1481 goto Loverflow; 1482 } 1483 1484 debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength); 1485 1486 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 1487 1488 if ((*p).ptr) 1489 { 1490 newdata = (*p).ptr; 1491 if (newlength > (*p).length) 1492 { 1493 size_t size = (*p).length * sizeelem; 1494 auto bic = isshared ? null : __getBlkInfo((*p).ptr); 1495 auto info = bic ? 
*bic : GC.query((*p).ptr); 1496 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1497 { 1498 // calculate the extent of the array given the base. 1499 size_t offset = (*p).ptr - __arrayStart(info); 1500 if (info.size >= PAGESIZE) 1501 { 1502 // size of array is at the front of the block 1503 if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1504 { 1505 // check to see if it failed because there is not 1506 // enough space 1507 if (*(cast(size_t*)info.base) == size + offset) 1508 { 1509 // not enough space, try extending 1510 auto extendsize = newsize + offset + LARGEPAD - info.size; 1511 auto u = GC.extend(info.base, extendsize, extendsize); 1512 if (u) 1513 { 1514 // extend worked, now try setting the length 1515 // again. 1516 info.size = u; 1517 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1518 { 1519 if (!isshared) 1520 __insertBlkInfoCache(info, bic); 1521 goto L1; 1522 } 1523 } 1524 } 1525 1526 // couldn't do it, reallocate 1527 goto L2; 1528 } 1529 else if (!isshared && !bic) 1530 { 1531 // add this to the cache, it wasn't present previously. 1532 __insertBlkInfoCache(info, null); 1533 } 1534 } 1535 else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1536 { 1537 // could not resize in place 1538 goto L2; 1539 } 1540 else if (!isshared && !bic) 1541 { 1542 // add this to the cache, it wasn't present previously. 
1543 __insertBlkInfoCache(info, null); 1544 } 1545 } 1546 else 1547 { 1548 if (info.base) 1549 { 1550 L2: 1551 if (bic) 1552 { 1553 // a chance that flags have changed since this was cached, we should fetch the most recent flags 1554 info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE; 1555 } 1556 info = __arrayAlloc(newsize, info, ti, tinext); 1557 } 1558 else 1559 { 1560 info = __arrayAlloc(newsize, ti, tinext); 1561 } 1562 1563 if (info.base is null) 1564 goto Loverflow; 1565 1566 __setArrayAllocLength(info, newsize, isshared, tinext); 1567 if (!isshared) 1568 __insertBlkInfoCache(info, bic); 1569 newdata = cast(byte *)__arrayStart(info); 1570 newdata[0 .. size] = (*p).ptr[0 .. size]; 1571 1572 // do postblit processing 1573 __doPostblit(newdata, size, tinext); 1574 } 1575 L1: 1576 memset(newdata + size, 0, newsize - size); 1577 } 1578 } 1579 else 1580 { 1581 // pointer was null, need to allocate 1582 auto info = __arrayAlloc(newsize, ti, tinext); 1583 if (info.base is null) 1584 goto Loverflow; 1585 __setArrayAllocLength(info, newsize, isshared, tinext); 1586 if (!isshared) 1587 __insertBlkInfoCache(info, null); 1588 newdata = cast(byte *)__arrayStart(info); 1589 memset(newdata, 0, newsize); 1590 } 1591 } 1592 else 1593 { 1594 newdata = (*p).ptr; 1595 } 1596 1597 *p = newdata[0 .. newlength]; 1598 return *p; 1599 1600 Loverflow: 1601 onOutOfMemoryError(); 1602 assert(0); 1603 } 1604 1605 1606 /** 1607 * Resize arrays for non-zero initializers. 1608 * p pointer to array lvalue to be updated 1609 * newlength new .length property of array 1610 * sizeelem size of each element of array 1611 * initsize size of initializer 1612 * ... 
initializer 1613 */ 1614 extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p) 1615 in 1616 { 1617 assert(!(*p).length || (*p).ptr); 1618 } 1619 body 1620 { 1621 void* newdata; 1622 auto tinext = unqualify(ti.next); 1623 auto sizeelem = tinext.tsize; 1624 auto initializer = tinext.initializer(); 1625 auto initsize = initializer.length; 1626 1627 assert(sizeelem); 1628 assert(initsize); 1629 assert(initsize <= sizeelem); 1630 assert((sizeelem / initsize) * initsize == sizeelem); 1631 1632 debug(PRINTF) 1633 { 1634 printf("_d_arraysetlengthiT(p = %p, sizeelem = %d, newlength = %d, initsize = %d)\n", p, sizeelem, newlength, initsize); 1635 if (p) 1636 printf("\tp.data = %p, p.length = %d\n", (*p).ptr, (*p).length); 1637 } 1638 1639 if (newlength) 1640 { 1641 version (D_InlineAsm_X86) 1642 { 1643 size_t newsize = void; 1644 1645 asm 1646 { 1647 mov EAX,newlength ; 1648 mul EAX,sizeelem ; 1649 mov newsize,EAX ; 1650 jc Loverflow ; 1651 } 1652 } 1653 else version (D_InlineAsm_X86_64) 1654 { 1655 size_t newsize = void; 1656 1657 asm 1658 { 1659 mov RAX,newlength ; 1660 mul RAX,sizeelem ; 1661 mov newsize,RAX ; 1662 jc Loverflow ; 1663 } 1664 } 1665 else 1666 { 1667 import core.checkedint : mulu; 1668 1669 bool overflow = false; 1670 size_t newsize = mulu(sizeelem, newlength, overflow); 1671 if (overflow) 1672 goto Loverflow; 1673 } 1674 debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength); 1675 1676 1677 size_t size = (*p).length * sizeelem; 1678 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 1679 if ((*p).ptr) 1680 { 1681 newdata = (*p).ptr; 1682 if (newlength > (*p).length) 1683 { 1684 auto bic = isshared ? null : __getBlkInfo((*p).ptr); 1685 auto info = bic ? *bic : GC.query((*p).ptr); 1686 1687 // calculate the extent of the array given the base. 
1688 size_t offset = (*p).ptr - __arrayStart(info); 1689 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1690 { 1691 if (info.size >= PAGESIZE) 1692 { 1693 // size of array is at the front of the block 1694 if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1695 { 1696 // check to see if it failed because there is not 1697 // enough space 1698 if (*(cast(size_t*)info.base) == size + offset) 1699 { 1700 // not enough space, try extending 1701 auto extendsize = newsize + offset + LARGEPAD - info.size; 1702 auto u = GC.extend(info.base, extendsize, extendsize); 1703 if (u) 1704 { 1705 // extend worked, now try setting the length 1706 // again. 1707 info.size = u; 1708 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1709 { 1710 if (!isshared) 1711 __insertBlkInfoCache(info, bic); 1712 goto L1; 1713 } 1714 } 1715 } 1716 1717 // couldn't do it, reallocate 1718 goto L2; 1719 } 1720 else if (!isshared && !bic) 1721 { 1722 // add this to the cache, it wasn't present previously. 1723 __insertBlkInfoCache(info, null); 1724 } 1725 } 1726 else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1727 { 1728 // could not resize in place 1729 goto L2; 1730 } 1731 else if (!isshared && !bic) 1732 { 1733 // add this to the cache, it wasn't present previously. 1734 __insertBlkInfoCache(info, null); 1735 } 1736 } 1737 else 1738 { 1739 // not appendable or not part of the heap yet. 
1740 if (info.base) 1741 { 1742 L2: 1743 if (bic) 1744 { 1745 // a chance that flags have changed since this was cached, we should fetch the most recent flags 1746 info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE; 1747 } 1748 info = __arrayAlloc(newsize, info, ti, tinext); 1749 } 1750 else 1751 { 1752 info = __arrayAlloc(newsize, ti, tinext); 1753 } 1754 __setArrayAllocLength(info, newsize, isshared, tinext); 1755 if (!isshared) 1756 __insertBlkInfoCache(info, bic); 1757 newdata = cast(byte *)__arrayStart(info); 1758 newdata[0 .. size] = (*p).ptr[0 .. size]; 1759 1760 // do postblit processing 1761 __doPostblit(newdata, size, tinext); 1762 } 1763 L1: ; 1764 } 1765 } 1766 else 1767 { 1768 // length was zero, need to allocate 1769 auto info = __arrayAlloc(newsize, ti, tinext); 1770 __setArrayAllocLength(info, newsize, isshared, tinext); 1771 if (!isshared) 1772 __insertBlkInfoCache(info, null); 1773 newdata = cast(byte *)__arrayStart(info); 1774 } 1775 1776 auto q = initializer.ptr; // pointer to initializer 1777 1778 if (newsize > size) 1779 { 1780 if (initsize == 1) 1781 { 1782 debug(PRINTF) printf("newdata = %p, size = %d, newsize = %d, *q = %d\n", newdata, size, newsize, *cast(byte*)q); 1783 memset(newdata + size, *cast(byte*)q, newsize - size); 1784 } 1785 else 1786 { 1787 for (size_t u = size; u < newsize; u += initsize) 1788 { 1789 memcpy(newdata + u, q, initsize); 1790 } 1791 } 1792 } 1793 } 1794 else 1795 { 1796 newdata = (*p).ptr; 1797 } 1798 1799 *p = newdata[0 .. 
newlength]; 1800 return *p; 1801 1802 Loverflow: 1803 onOutOfMemoryError(); 1804 assert(0); 1805 } 1806 1807 1808 /** 1809 * Append y[] to array x[] 1810 */ 1811 extern (C) void[] _d_arrayappendT(const TypeInfo ti, ref byte[] x, byte[] y) 1812 { 1813 auto length = x.length; 1814 auto tinext = unqualify(ti.next); 1815 auto sizeelem = tinext.tsize; // array element size 1816 _d_arrayappendcTX(ti, x, y.length); 1817 memcpy(x.ptr + length * sizeelem, y.ptr, y.length * sizeelem); 1818 1819 // do postblit 1820 __doPostblit(x.ptr + length * sizeelem, y.length * sizeelem, tinext); 1821 return x; 1822 } 1823 1824 1825 /** 1826 * 1827 */ 1828 size_t newCapacity(size_t newlength, size_t size) 1829 { 1830 version (none) 1831 { 1832 size_t newcap = newlength * size; 1833 } 1834 else 1835 { 1836 /* 1837 * Better version by Dave Fladebo: 1838 * This uses an inverse logorithmic algorithm to pre-allocate a bit more 1839 * space for larger arrays. 1840 * - Arrays smaller than PAGESIZE bytes are left as-is, so for the most 1841 * common cases, memory allocation is 1 to 1. The small overhead added 1842 * doesn't affect small array perf. (it's virtually the same as 1843 * current). 1844 * - Larger arrays have some space pre-allocated. 1845 * - As the arrays grow, the relative pre-allocated space shrinks. 1846 * - The logorithmic algorithm allocates relatively more space for 1847 * mid-size arrays, making it very fast for medium arrays (for 1848 * mid-to-large arrays, this turns out to be quite a bit faster than the 1849 * equivalent realloc() code in C, on Linux at least. Small arrays are 1850 * just as fast as GCC). 1851 * - Perhaps most importantly, overall memory usage and stress on the GC 1852 * is decreased significantly for demanding environments. 
1853 */ 1854 size_t newcap = newlength * size; 1855 size_t newext = 0; 1856 1857 if (newcap > PAGESIZE) 1858 { 1859 //double mult2 = 1.0 + (size / log10(pow(newcap * 2.0,2.0))); 1860 1861 // redo above line using only integer math 1862 1863 /*static int log2plus1(size_t c) 1864 { int i; 1865 1866 if (c == 0) 1867 i = -1; 1868 else 1869 for (i = 1; c >>= 1; i++) 1870 { 1871 } 1872 return i; 1873 }*/ 1874 1875 /* The following setting for mult sets how much bigger 1876 * the new size will be over what is actually needed. 1877 * 100 means the same size, more means proportionally more. 1878 * More means faster but more memory consumption. 1879 */ 1880 //long mult = 100 + (1000L * size) / (6 * log2plus1(newcap)); 1881 //long mult = 100 + (1000L * size) / log2plus1(newcap); 1882 long mult = 100 + (1000L) / (bsr(newcap) + 1); 1883 1884 // testing shows 1.02 for large arrays is about the point of diminishing return 1885 // 1886 // Commented out because the multipler will never be < 102. In order for it to be < 2, 1887 // then 1000L / (bsr(x) + 1) must be > 2. The highest bsr(x) + 1 1888 // could be is 65 (64th bit set), and 1000L / 64 is much larger 1889 // than 2. We need 500 bit integers for 101 to be achieved :) 1890 /*if (mult < 102) 1891 mult = 102;*/ 1892 /*newext = cast(size_t)((newcap * mult) / 100); 1893 newext -= newext % size;*/ 1894 // This version rounds up to the next element, and avoids using 1895 // mod. 1896 newext = cast(size_t)((newlength * mult + 99) / 100) * size; 1897 debug(PRINTF) printf("mult: %2.2f, alloc: %2.2f\n",mult/100.0,newext / cast(double)size); 1898 } 1899 newcap = newext > newcap ? newext : newcap; 1900 debug(PRINTF) printf("newcap = %d, newlength = %d, size = %d\n", newcap, newlength, size); 1901 } 1902 return newcap; 1903 } 1904 1905 1906 /************************************** 1907 * Extend an array by n elements. 1908 * Caller must initialize those elements. 
1909 */ 1910 extern (C) 1911 byte[] _d_arrayappendcTX(const TypeInfo ti, ref byte[] px, size_t n) 1912 { 1913 // This is a cut&paste job from _d_arrayappendT(). Should be refactored. 1914 1915 // only optimize array append where ti is not a shared type 1916 auto tinext = unqualify(ti.next); 1917 auto sizeelem = tinext.tsize; // array element size 1918 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 1919 auto bic = isshared ? null : __getBlkInfo(px.ptr); 1920 auto info = bic ? *bic : GC.query(px.ptr); 1921 auto length = px.length; 1922 auto newlength = length + n; 1923 auto newsize = newlength * sizeelem; 1924 auto size = length * sizeelem; 1925 size_t newcap = void; // for scratch space 1926 1927 // calculate the extent of the array given the base. 1928 size_t offset = cast(void*)px.ptr - __arrayStart(info); 1929 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1930 { 1931 if (info.size >= PAGESIZE) 1932 { 1933 // size of array is at the front of the block 1934 if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1935 { 1936 // check to see if it failed because there is not 1937 // enough space 1938 newcap = newCapacity(newlength, sizeelem); 1939 if (*(cast(size_t*)info.base) == size + offset) 1940 { 1941 // not enough space, try extending 1942 auto extendoffset = offset + LARGEPAD - info.size; 1943 auto u = GC.extend(info.base, newsize + extendoffset, newcap + extendoffset); 1944 if (u) 1945 { 1946 // extend worked, now try setting the length 1947 // again. 
1948 info.size = u; 1949 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1950 { 1951 if (!isshared) 1952 __insertBlkInfoCache(info, bic); 1953 goto L1; 1954 } 1955 } 1956 } 1957 1958 // couldn't do it, reallocate 1959 goto L2; 1960 } 1961 else if (!isshared && !bic) 1962 { 1963 __insertBlkInfoCache(info, null); 1964 } 1965 } 1966 else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1967 { 1968 // could not resize in place 1969 newcap = newCapacity(newlength, sizeelem); 1970 goto L2; 1971 } 1972 else if (!isshared && !bic) 1973 { 1974 __insertBlkInfoCache(info, null); 1975 } 1976 } 1977 else 1978 { 1979 // not appendable or is null 1980 newcap = newCapacity(newlength, sizeelem); 1981 if (info.base) 1982 { 1983 L2: 1984 if (bic) 1985 { 1986 // a chance that flags have changed since this was cached, we should fetch the most recent flags 1987 info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE; 1988 } 1989 info = __arrayAlloc(newcap, info, ti, tinext); 1990 } 1991 else 1992 { 1993 info = __arrayAlloc(newcap, ti, tinext); 1994 } 1995 __setArrayAllocLength(info, newsize, isshared, tinext); 1996 if (!isshared) 1997 __insertBlkInfoCache(info, bic); 1998 auto newdata = cast(byte *)__arrayStart(info); 1999 memcpy(newdata, px.ptr, length * sizeelem); 2000 // do postblit processing 2001 __doPostblit(newdata, length * sizeelem, tinext); 2002 (cast(void **)(&px))[1] = newdata; 2003 } 2004 2005 L1: 2006 *cast(size_t *)&px = newlength; 2007 return px; 2008 } 2009 2010 2011 /** 2012 * Append dchar to char[] 2013 */ 2014 extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c) 2015 { 2016 // c could encode into from 1 to 4 characters 2017 char[4] buf = void; 2018 byte[] appendthis; // passed to appendT 2019 if (c <= 0x7F) 2020 { 2021 buf.ptr[0] = cast(char)c; 2022 appendthis = (cast(byte *)buf.ptr)[0..1]; 2023 } 2024 else if (c <= 0x7FF) 2025 { 2026 buf.ptr[0] = cast(char)(0xC0 | (c >> 6)); 2027 buf.ptr[1] 
= cast(char)(0x80 | (c & 0x3F)); 2028 appendthis = (cast(byte *)buf.ptr)[0..2]; 2029 } 2030 else if (c <= 0xFFFF) 2031 { 2032 buf.ptr[0] = cast(char)(0xE0 | (c >> 12)); 2033 buf.ptr[1] = cast(char)(0x80 | ((c >> 6) & 0x3F)); 2034 buf.ptr[2] = cast(char)(0x80 | (c & 0x3F)); 2035 appendthis = (cast(byte *)buf.ptr)[0..3]; 2036 } 2037 else if (c <= 0x10FFFF) 2038 { 2039 buf.ptr[0] = cast(char)(0xF0 | (c >> 18)); 2040 buf.ptr[1] = cast(char)(0x80 | ((c >> 12) & 0x3F)); 2041 buf.ptr[2] = cast(char)(0x80 | ((c >> 6) & 0x3F)); 2042 buf.ptr[3] = cast(char)(0x80 | (c & 0x3F)); 2043 appendthis = (cast(byte *)buf.ptr)[0..4]; 2044 } 2045 else 2046 { 2047 import core.exception : onUnicodeError; 2048 onUnicodeError("Invalid UTF-8 sequence", 0); // invalid utf character 2049 } 2050 2051 // 2052 // TODO: This always assumes the array type is shared, because we do not 2053 // get a typeinfo from the compiler. Assuming shared is the safest option. 2054 // Once the compiler is fixed, the proper typeinfo should be forwarded. 2055 // 2056 return _d_arrayappendT(typeid(shared char[]), x, appendthis); 2057 } 2058 2059 unittest 2060 { 2061 import core.exception : UnicodeException; 2062 2063 /* Using inline try {} catch {} blocks fails to catch the UnicodeException 2064 * thrown. 
2065 * https://issues.dlang.org/show_bug.cgi?id=16799 2066 */ 2067 static void assertThrown(T : Throwable = Exception, E)(lazy E expr, string msg) 2068 { 2069 try 2070 expr; 2071 catch (T e) { 2072 assert(e.msg == msg); 2073 return; 2074 } 2075 } 2076 2077 static void f() 2078 { 2079 string ret; 2080 int i = -1; 2081 ret ~= i; 2082 } 2083 2084 assertThrown!UnicodeException(f(), "Invalid UTF-8 sequence"); 2085 } 2086 2087 2088 /** 2089 * Append dchar to wchar[] 2090 */ 2091 extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c) 2092 { 2093 // c could encode into from 1 to 2 w characters 2094 wchar[2] buf = void; 2095 byte[] appendthis; // passed to appendT 2096 if (c <= 0xFFFF) 2097 { 2098 buf.ptr[0] = cast(wchar) c; 2099 // note that although we are passing only 1 byte here, appendT 2100 // interprets this as being an array of wchar, making the necessary 2101 // casts. 2102 appendthis = (cast(byte *)buf.ptr)[0..1]; 2103 } 2104 else 2105 { 2106 buf.ptr[0] = cast(wchar) ((((c - 0x10000) >> 10) & 0x3FF) + 0xD800); 2107 buf.ptr[1] = cast(wchar) (((c - 0x10000) & 0x3FF) + 0xDC00); 2108 // ditto from above. 2109 appendthis = (cast(byte *)buf.ptr)[0..2]; 2110 } 2111 2112 // 2113 // TODO: This always assumes the array type is shared, because we do not 2114 // get a typeinfo from the compiler. Assuming shared is the safest option. 2115 // Once the compiler is fixed, the proper typeinfo should be forwarded. 
2116 // 2117 return _d_arrayappendT(typeid(shared wchar[]), x, appendthis); 2118 } 2119 2120 2121 /** 2122 * 2123 */ 2124 extern (C) byte[] _d_arraycatT(const TypeInfo ti, byte[] x, byte[] y) 2125 out (result) 2126 { 2127 auto tinext = unqualify(ti.next); 2128 auto sizeelem = tinext.tsize; // array element size 2129 debug(PRINTF) printf("_d_arraycatT(%d,%p ~ %d,%p sizeelem = %d => %d,%p)\n", x.length, x.ptr, y.length, y.ptr, sizeelem, result.length, result.ptr); 2130 assert(result.length == x.length + y.length); 2131 2132 // If a postblit is involved, the contents of result might rightly differ 2133 // from the bitwise concatenation of x and y. 2134 if (!hasPostblit(tinext)) 2135 { 2136 for (size_t i = 0; i < x.length * sizeelem; i++) 2137 assert((cast(byte*)result)[i] == (cast(byte*)x)[i]); 2138 for (size_t i = 0; i < y.length * sizeelem; i++) 2139 assert((cast(byte*)result)[x.length * sizeelem + i] == (cast(byte*)y)[i]); 2140 } 2141 2142 size_t cap = GC.sizeOf(result.ptr); 2143 assert(!cap || cap > result.length * sizeelem); 2144 } 2145 body 2146 { 2147 version (none) 2148 { 2149 /* Cannot use this optimization because: 2150 * char[] a, b; 2151 * char c = 'a'; 2152 * b = a ~ c; 2153 * c = 'b'; 2154 * will change the contents of b. 2155 */ 2156 if (!y.length) 2157 return x; 2158 if (!x.length) 2159 return y; 2160 } 2161 2162 auto tinext = unqualify(ti.next); 2163 auto sizeelem = tinext.tsize; // array element size 2164 debug(PRINTF) printf("_d_arraycatT(%d,%p ~ %d,%p sizeelem = %d)\n", x.length, x.ptr, y.length, y.ptr, sizeelem); 2165 size_t xlen = x.length * sizeelem; 2166 size_t ylen = y.length * sizeelem; 2167 size_t len = xlen + ylen; 2168 2169 if (!len) 2170 return null; 2171 2172 auto info = __arrayAlloc(len, ti, tinext); 2173 byte* p = cast(byte*)__arrayStart(info); 2174 p[len] = 0; // guessing this is to optimize for null-terminated arrays? 
2175 memcpy(p, x.ptr, xlen); 2176 memcpy(p + xlen, y.ptr, ylen); 2177 // do postblit processing 2178 __doPostblit(p, xlen + ylen, tinext); 2179 2180 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 2181 __setArrayAllocLength(info, len, isshared, tinext); 2182 return p[0 .. x.length + y.length]; 2183 } 2184 2185 2186 /** 2187 * 2188 */ 2189 extern (C) void[] _d_arraycatnTX(const TypeInfo ti, byte[][] arrs) 2190 { 2191 size_t length; 2192 auto tinext = unqualify(ti.next); 2193 auto size = tinext.tsize; // array element size 2194 2195 foreach (b; arrs) 2196 length += b.length; 2197 2198 if (!length) 2199 return null; 2200 2201 auto allocsize = length * size; 2202 auto info = __arrayAlloc(allocsize, ti, tinext); 2203 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 2204 __setArrayAllocLength(info, allocsize, isshared, tinext); 2205 void *a = __arrayStart (info); 2206 2207 size_t j = 0; 2208 foreach (b; arrs) 2209 { 2210 if (b.length) 2211 { 2212 memcpy(a + j, b.ptr, b.length * size); 2213 j += b.length * size; 2214 } 2215 } 2216 2217 // do postblit processing 2218 __doPostblit(a, j, tinext); 2219 2220 return a[0..length]; 2221 } 2222 2223 2224 /** 2225 * Allocate the array, rely on the caller to do the initialization of the array. 
 */
extern (C)
void* _d_arrayliteralTX(const TypeInfo ti, size_t length)
{
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    void* result;

    debug(PRINTF) printf("_d_arrayliteralTX(sizeelem = %d, length = %d)\n", sizeelem, length);
    if (length == 0 || sizeelem == 0)
        result = null;                          // empty literal: no allocation
    else
    {
        auto allocsize = length * sizeelem;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        // record the used length so the block is appendable afterwards
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        result = __arrayStart(info);
    }
    return result;                              // caller fills in the elements
}


unittest
{
    int[] a;
    int[] b;
    int i;

    a = new int[3];
    a[0] = 1; a[1] = 2; a[2] = 3;
    b = a.dup;
    assert(b.length == 3);
    for (i = 0; i < 3; i++)
        assert(b[i] == i + 1);

    // test slice appending
    b = a[0..1];
    b ~= 4;
    for (i = 0; i < 3; i++)
        assert(a[i] == i + 1);

    // test reserving
    char[] arr = new char[4093];
    for (i = 0; i < arr.length; i++)
        arr[i] = cast(char)(i % 256);

    // note that these two commands used to cause corruption, which may not be
    // detected.
    arr.reserve(4094);
    auto arr2 = arr ~ "123";
    assert(arr2[0..arr.length] == arr);
    assert(arr2[arr.length..$] == "123");

    // test postblit on array concat, append, length, etc.
2281 static struct S 2282 { 2283 int x; 2284 int pad; 2285 this(this) 2286 { 2287 ++x; 2288 } 2289 } 2290 void testPostBlit(T)() 2291 { 2292 auto sarr = new T[1]; 2293 debug(SENTINEL) {} else 2294 assert(sarr.capacity == 1); 2295 2296 // length extend 2297 auto sarr2 = sarr; 2298 assert(sarr[0].x == 0); 2299 sarr2.length += 1; 2300 assert(sarr2[0].x == 1); 2301 assert(sarr[0].x == 0); 2302 2303 // append 2304 T s; 2305 sarr2 = sarr; 2306 sarr2 ~= s; 2307 assert(sarr2[0].x == 1); 2308 assert(sarr2[1].x == 1); 2309 assert(sarr[0].x == 0); 2310 assert(s.x == 0); 2311 2312 // concat 2313 sarr2 = sarr ~ sarr; 2314 assert(sarr2[0].x == 1); 2315 assert(sarr2[1].x == 1); 2316 assert(sarr[0].x == 0); 2317 2318 // concat multiple (calls different method) 2319 sarr2 = sarr ~ sarr ~ sarr; 2320 assert(sarr2[0].x == 1); 2321 assert(sarr2[1].x == 1); 2322 assert(sarr2[2].x == 1); 2323 assert(sarr[0].x == 0); 2324 2325 // reserve capacity 2326 sarr2 = sarr; 2327 sarr2.reserve(2); 2328 assert(sarr2[0].x == 1); 2329 assert(sarr[0].x == 0); 2330 } 2331 testPostBlit!(S)(); 2332 testPostBlit!(const(S))(); 2333 } 2334 2335 // cannot define structs inside unit test block, or they become nested structs. 
version (unittest)
{
    // Test fixtures for the struct-allocation unittest below.
    // S1: default field initializer only (no ctor).
    struct S1
    {
        int x = 5;
    }
    // S2: explicit constructor taking an int.
    struct S2
    {
        int x;
        this(int x) {this.x = x;}
    }
    // S3: static array member, filled by the constructor; sizeof == 16.
    struct S3
    {
        int[4] x;
        this(int x)
        {this.x[] = x;}
    }
    // S4: contains a pointer, so its GC block must be scanned (no NO_SCAN).
    struct S4
    {
        int *x;
    }

}

// Verify that `new` on a struct runs the field initializer / constructor and
// that the GC block attributes reflect whether the struct contains pointers.
unittest
{
    auto s1 = new S1;
    assert(s1.x == 5);
    assert(GC.getAttr(s1) == BlkAttr.NO_SCAN);

    auto s2 = new S2(3);
    assert(s2.x == 3);
    assert(GC.getAttr(s2) == BlkAttr.NO_SCAN);

    auto s3 = new S3(1);
    assert(s3.x == [1,1,1,1]);
    assert(GC.getAttr(s3) == BlkAttr.NO_SCAN);
    debug(SENTINEL) {} else
    // S3 is int[4] == 16 bytes; SENTINEL builds pad allocations, so skip there.
    assert(GC.sizeOf(s3) == 16);

    auto s4 = new S4;
    assert(s4.x == null);
    // S4 holds a pointer, so NO_SCAN must NOT be set.
    assert(GC.getAttr(s4) == 0);
}

unittest
{
    // Bugzilla 3454 - Inconsistent flag setting in GC.realloc()
    // Shrinking a block via realloc must preserve the requested attributes,
    // both for small blocks (multiplier 1) and page-sized ones (1 MiB).
    static void test(size_t multiplier)
    {
        auto p = GC.malloc(8 * multiplier, BlkAttr.NO_SCAN);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
        p = GC.realloc(p, 2 * multiplier, BlkAttr.NO_SCAN);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
    }
    test(1);
    test(1024 * 1024);
}

// An impossibly large array allocation must throw OutOfMemoryError
// (which is deliberately swallowed here), not crash or wrap around.
unittest
{
    import core.exception;
    try
    {
        size_t x = size_t.max;
        byte[] big_buf = new byte[x];
    }
    catch (OutOfMemoryError)
    {
    }
}

unittest
{
    // bugzilla 13854
    // Appending to a slice of a large (page-sized) array must reallocate into
    // a small block whose base has no LARGEPREFIX offset.
    auto arr = new ubyte[PAGESIZE]; // ensure page size
    auto info1 = GC.query(arr.ptr);
    assert(info1.base !is arr.ptr); // offset is required for page size or larger

    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0; // add a byte
    assert(arr2.ptr !is arr.ptr); // reallocated
    auto info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[PAGESIZE];
    info1 = GC.query(carr.ptr);
    assert(info1.base !is carr.ptr); // offset is required for page size or larger

    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0; // add a byte
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.
}

unittest
{
    // bugzilla 13878
    // When an append reallocates a block, the new block must keep the
    // attributes of the old one (here: a cleared NO_SCAN must stay cleared).
    auto arr = new ubyte[1];
    auto info = GC.query(arr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    arr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
    assert(arr.ptr is info.base);
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0;
    assert(arr2.ptr !is arr.ptr);
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    arr = new ubyte[1];
    arr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[1];
    info = GC.query(carr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    carr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
    assert(carr.ptr is info.base);
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0;
    assert(carr2.ptr !is carr.ptr);
    info = GC.query(carr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    carr = new char[1];
    carr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is carr.ptr); // reallocated
    info = GC.query(carr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks
}

// test struct finalizers
debug(SENTINEL) {} else
unittest
{
    __gshared int dtorCount;
    // Counting destructor: each finalized S1 increments dtorCount.
    static struct S1
    {
        int x;

        ~this()
        {
            dtorCount++;
        }
    }

    // `delete` on a single struct pointer runs exactly one destructor.
    dtorCount = 0;
    S1* s1 = new S1;
    delete s1;
    assert(dtorCount == 1);

    // `delete` on an array runs the destructor for every element.
    dtorCount = 0;
    S1[] arr1 = new S1[7];
    delete arr1;
    assert(dtorCount == 7);

    // GC.runFinalizers with S1's xdtor address range finalizes live S1 blocks.
    dtorCount = 0;
    S1* s2 = new S1;
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(s2);

    // const-qualified instances are finalized the same way ...
    dtorCount = 0;
    const(S1)* s3 = new const(S1);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(cast(void*)s3);

    // ... as are shared-qualified instances.
    dtorCount = 0;
    shared(S1)* s4 = new shared(S1);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(cast(void*)s4);

    // An array of const elements finalizes one destructor per element.
    dtorCount = 0;
    const(S1)[] carr1 = new const(S1)[5];
    BlkInfo blkinf1 = GC.query(carr1.ptr);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 5);
    GC.free(blkinf1.base);

    dtorCount = 0;
    S1[] arr2 = new S1[10];
    arr2.length = 6;
    arr2.assumeSafeAppend;
    assert(dtorCount == 4); // destructors of the 4 trimmed elements run explicitly

    // The 6 remaining elements are finalized by runFinalizers.
    dtorCount = 0;
    BlkInfo blkinf = GC.query(arr2.ptr);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 6);
    GC.free(blkinf.base);

    // associative arrays
    import rt.aaA : entryDtor;
    // throw away all existing AA entries with dtor
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);

    // AA with struct values: dropping the AA and finalizing entries
    // destroys each stored value.
    S1[int] aa1;
    aa1[0] = S1(0);
    aa1[1] = S1(1);
    dtorCount = 0;
    aa1 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 2);

    // AA with struct keys: each stored key is destroyed.
    int[S1] aa2;
    aa2[S1(0)] = 0;
    aa2[S1(1)] = 1;
    aa2[S1(2)] = 2;
    dtorCount = 0;
    aa2 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 3);

    // AA with static-array-of-struct values: every array element is destroyed.
    S1[2][int] aa3;
    aa3[0] = [S1(0),S1(2)];
    aa3[1] = [S1(1),S1(3)];
    dtorCount = 0;
    aa3 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 4);
}

// test class finalizers exception handling
unittest
{
    // Returns true iff finalizing a class whose dtor throws E surfaces as a
    // FinalizeError. The asserts below show plain Exception is wrapped, while
    // InvalidMemoryOperationError is not (it escapes as E, caught and ignored).
    bool test(E)()
    {
        import core.exception;
        static class C1
        {
            E exc;
            this(E exc) { this.exc = exc; }
            ~this() { throw exc; }
        }

        bool caught = false;
        C1 c = new C1(new E("test onFinalizeError"));
        try
        {
            GC.runFinalizers((cast(uint*)&C1.__dtor)[0..1]);
        }
        catch (FinalizeError err)
        {
            caught = true;
        }
        catch (E)
        {
        }
        // Object was already finalized above; just release the memory.
        GC.free(cast(void*)c);
        return caught;
    }

    assert( test!Exception);
    import core.exception : InvalidMemoryOperationError;
    assert(!test!InvalidMemoryOperationError);
}

// test struct finalizers exception handling
debug(SENTINEL) {} else
unittest
{
    // Same contract as the class test above, but for a struct destructor:
    // a throwing struct dtor is wrapped in FinalizeError, except for
    // InvalidMemoryOperationError.
    bool test(E)()
    {
        import core.exception;
        static struct S1
        {
            E exc;
            ~this() { throw exc; }
        }

        bool caught = false;
        S1* s = new S1(new E("test onFinalizeError"));
        try
        {
            GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
        }
        catch (FinalizeError err)
        {
            caught = true;
        }
        catch (E)
        {
        }
        // Struct was already finalized above; just release the memory.
        GC.free(s);
        return caught;
    }

    assert( test!Exception);
    import core.exception : InvalidMemoryOperationError;
    assert(!test!InvalidMemoryOperationError);
}

// test bug 14126
unittest
{
    // Each element's destructor must be invoked on the element's own address:
    // the dtor asserts that `&this` equals the pointer stored at construction.
    static struct S
    {
        S* thisptr;
        ~this() { assert(&this == thisptr); thisptr = null;}
    }

    S[] test14126 = new S[2048]; // make sure we allocate at least a PAGE
    foreach (ref s; test14126)
    {
        s.thisptr = &s;
    }
}