///
module std.experimental.allocator.building_blocks.region;

import std.experimental.allocator.building_blocks.null_allocator;
import std.experimental.allocator.common;
import std.typecons : Flag, Yes, No;

version (OSX)
    version = Darwin;
else version (iOS)
    version = Darwin;
else version (TVOS)
    version = Darwin;
else version (WatchOS)
    version = Darwin;

/**
A $(D Region) allocator allocates memory straight from one contiguous chunk.
Deallocation is not supported, save for the most recently allocated block and
for wholesale release via $(D deallocateAll); once the region is full,
allocation requests return $(D null). Therefore, $(D Region)s are often used
(a) in conjunction with more sophisticated allocators; or (b) for batch-style
very fast allocations that deallocate everything at once.

The region stores only three pointers, corresponding to the current position
in the store and its limits. One allocation entails rounding up the allocation
size for alignment purposes, bumping the current pointer, and comparing it
against the limit.

If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
deallocates the chunk of memory during destruction.

The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
sizes of all allocation requests are rounded up to a multiple of $(D minAlign).
Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
control alignment externally.
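
A minimal usage sketch over a caller-provided buffer (the constructor rounds
the store to $(D minAlign) internally, so any buffer works):

----
ubyte[1024] storage;
auto r = Region!()(storage[]);
auto a = r.allocate(64);
assert(a.length == 64);
// Only the most recent allocation can be deallocated.
assert(r.deallocate(a));
----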

*/
struct Region(ParentAllocator = NullAllocator,
    uint minAlign = platformAlignment,
    Flag!"growDownwards" growDownwards = No.growDownwards)
{
    static assert(minAlign.isGoodStaticAlignment);
    static assert(ParentAllocator.alignment >= minAlign);

    import std.traits : hasMember;
    import std.typecons : Ternary;

    // state
    /**
    The _parent allocator. Depending on whether $(D ParentAllocator) holds state
    or not, this is a member variable or an alias for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator)
    {
        ParentAllocator parent;
    }
    else
    {
        alias parent = ParentAllocator.instance;
    }
    private void* _current, _begin, _end;

    /**
    Constructs a region backed by a user-provided store. Assumes $(D store) is
    aligned at $(D minAlign). Also assumes the memory was allocated with $(D
    ParentAllocator) (if different from $(D NullAllocator)).

    Params:
    store = User-provided store backing up the region. $(D store) must be
    aligned at $(D minAlign) (enforced with $(D assert)). If $(D
    ParentAllocator) is different from $(D NullAllocator), memory is assumed to
    have been allocated with $(D ParentAllocator).
    n = Bytes to allocate using $(D ParentAllocator). This constructor is only
    defined if $(D ParentAllocator) is different from $(D NullAllocator). If
    $(D parent.allocate(n)) returns $(D null), the region will be initialized
    as empty (correctly initialized but unable to allocate).
    */
    this(ubyte[] store)
    {
        store = cast(ubyte[])(store.roundUpToAlignment(alignment));
        store = store[0 .. $.roundDownToAlignment(alignment)];
        assert(store.ptr.alignedAt(minAlign));
        assert(store.length % minAlign == 0);
        _begin = store.ptr;
        _end = store.ptr + store.length;
        static if (growDownwards)
            _current = _end;
        else
            _current = store.ptr;
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator))
    this(size_t n)
    {
        this(cast(ubyte[])(parent.allocate(n.roundUpToAlignment(alignment))));
    }

    /*
    TODO: The postblit of $(D Region) should be disabled because such objects
    should not be copied around naively.
    */

    /**
    If `ParentAllocator` is not `NullAllocator` and defines `deallocate`,
    the region defines a destructor that uses `ParentAllocator.deallocate`
    to free the memory chunk.
    */
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(_begin[0 .. _end - _begin]);
    }

    /**
    Alignment offered.
    */
    alias alignment = minAlign;

    /**
    Allocates $(D n) bytes of memory. The shortest path involves an alignment
    adjustment (if $(D alignment > 1)), an increment, and a comparison.

    Params:
    n = number of bytes to allocate

    Returns:
    A properly-aligned buffer of size $(D n), or $(D null) if the request could
    not be satisfied.
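
    A sketch of the fast path, over a small stack buffer:

    ----
    ubyte[128] buf;
    auto r = Region!()(buf[]);
    auto b = r.allocate(10);
    assert(b.length == 10);
    // The bump pointer advanced by the rounded size, not the raw size.
    assert(r.available <= 128 - 10);
    ----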
    */
    void[] allocate(size_t n)
    {
        static if (growDownwards)
        {
            if (available < n) return null;
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            assert(available >= rounded);
            auto result = (_current - rounded)[0 .. n];
            assert(result.ptr >= _begin);
            _current = result.ptr;
            assert(owns(result) == Ternary.yes);
            return result;
        }
        else
        {
            auto result = _current[0 .. n];
            static if (minAlign > 1)
                const rounded = n.roundUpToAlignment(alignment);
            else
                alias rounded = n;
            _current += rounded;
            if (_current <= _end) return result;
            // Slow path, backtrack
            _current -= rounded;
            return null;
        }
    }

    /**
    Allocates $(D n) bytes of memory aligned at alignment $(D a).

    Params:
    n = number of bytes to allocate
    a = alignment for the allocated block

    Returns:
    Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
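
    For instance (a sketch; the region itself only guarantees $(D minAlign)):

    ----
    ubyte[4096] buf;
    auto r = Region!()(buf[]);
    auto b = r.alignedAllocate(100, 256);
    assert(b.length == 100);
    assert(cast(size_t) b.ptr % 256 == 0);
    ----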
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        import std.math : isPowerOf2;
        assert(a.isPowerOf2);
        static if (growDownwards)
        {
            const available = _current - _begin;
            if (available < n) return null;
            auto result = (_current - n).alignDownTo(a)[0 .. n];
            if (result.ptr >= _begin)
            {
                _current = result.ptr;
                return result;
            }
        }
        else
        {
            // Just bump the pointer to the next good allocation
            auto save = _current;
            _current = _current.alignUpTo(a);
            auto result = allocate(n);
            if (result.ptr)
            {
                assert(result.length == n);
                return result;
            }
            // Failed, rollback
            _current = save;
        }
        return null;
    }

    /// Allocates and returns all memory available to this region.
    void[] allocateAll()
    {
        static if (growDownwards)
        {
            auto result = _begin[0 .. available];
            _current = _begin;
        }
        else
        {
            auto result = _current[0 .. available];
            _current = _end;
        }
        return result;
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated. Defined only if `growDownwards` is
    `No.growDownwards`.
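
    For example (a sketch using sizes that stay multiples of the alignment):

    ----
    ubyte[256] buf;
    auto r = Region!()(buf[]);
    auto b = r.allocate(16);
    // The last allocation can be grown in place...
    assert(r.expand(b, 16));
    assert(b.length == 32);
    // ...but not a block that is no longer on top.
    auto c = r.allocate(16);
    assert(c.length == 16);
    assert(!r.expand(b, 1));
    ----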
    */
    static if (growDownwards == No.growDownwards)
    bool expand(ref void[] b, size_t delta)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        assert(b.ptr + b.length <= _current || b.ptr is null);
        if (!b.ptr) return delta == 0;
        auto newLength = b.length + delta;
        if (_current < b.ptr + b.length + alignment)
        {
            // This was the last allocation! Allocate some more and we're done.
            if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
                || allocate(delta).length == delta)
            {
                b = b.ptr[0 .. newLength];
                // The expanded block must still end at or before the bump
                // pointer.
                assert(b.ptr + b.length <= _current);
                return true;
            }
        }
        return false;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the result of
    the last call to $(D allocate); otherwise (i.e. another allocation has
    occurred since) it does nothing and returns $(D false).

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
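
    A sketch of the LIFO discipline:

    ----
    ubyte[128] buf;
    auto r = Region!()(buf[]);
    auto a = r.allocate(16);
    auto b = r.allocate(16);
    assert(!r.deallocate(a)); // `a` is not the most recent allocation
    assert(r.deallocate(b));  // the top block can be freed...
    assert(r.deallocate(a));  // ...which puts `a` back on top
    ----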
    */
    bool deallocate(void[] b)
    {
        assert(owns(b) == Ternary.yes || b.ptr is null);
        static if (growDownwards)
        {
            if (b.ptr == _current)
            {
                _current += this.goodAllocSize(b.length);
                return true;
            }
        }
        else
        {
            if (b.ptr + this.goodAllocSize(b.length) == _current)
            {
                assert(b.ptr !is null || _current is null);
                _current = b.ptr;
                return true;
            }
        }
        return false;
    }

    /**
    Deallocates all memory allocated by this region, which can be subsequently
    reused for new allocations.
    */
    bool deallocateAll()
    {
        static if (growDownwards)
        {
            _current = _end;
        }
        else
        {
            _current = _begin;
        }
        return true;
    }

    /**
    Queries whether $(D b) has been allocated with this region.

    Params:
    b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
    returns $(D Ternary.no)).

    Returns:
    $(D Ternary.yes) if $(D b) has been allocated with this region, $(D
    Ternary.no) otherwise.
    */
    Ternary owns(void[] b) const
    {
        return Ternary(b.ptr >= _begin && b.ptr + b.length <= _end);
    }

    /**
    Returns `Ternary.yes` if no memory has been allocated in this region,
    `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
    */
    Ternary empty() const
    {
        static if (growDownwards)
            return Ternary(_current == _end);
        else
            return Ternary(_current == _begin);
    }

    /// Nonstandard property that returns bytes available for allocation.
    size_t available() const
    {
        static if (growDownwards)
        {
            return _current - _begin;
        }
        else
        {
            return _end - _current;
        }
    }
}

///
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a scalable list of regions. Each gets at least 1MB at a time by
    // using malloc.
    auto batchAllocator = AllocatorList!(
        (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
    )();
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    // This will cause a second allocation
    b = batchAllocator.allocate(2 * 1024 * 1024);
    assert(b.length == 2 * 1024 * 1024);
    // Destructor will free the memory
}

@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    // Create a 64 KB region allocated with malloc
    auto reg = Region!(Mallocator, Mallocator.alignment,
        Yes.growDownwards)(1024 * 64);
    const b = reg.allocate(101);
    assert(b.length == 101);
    // Destructor will free the memory
}

/**

$(D InSituRegion) is a convenient region that carries its storage within itself
(in the form of a statically-sized array).

The first template argument is the size of the region and the second is the
needed alignment. Depending on the alignment requested and platform details,
the actual available storage may be smaller than the compile-time parameter. To
make sure that at least $(D n) bytes are available in the region, use
$(D InSituRegion!(n + a - 1, a)).
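
For instance (a sketch):

----
// Guarantee at least 4096 usable bytes aligned at 64:
InSituRegion!(4096 + 64 - 1, 64) r;
assert(r.available >= 4096);
----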

Given that the most frequent use of `InSituRegion` is as a stack allocator, it
allocates starting at the end on systems where the stack grows downwards, such
that hot memory is used first.

*/
struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
{
    import std.algorithm.comparison : max;
    import std.conv : to;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size >= minAlign);

    version (X86) enum growDownwards = Yes.growDownwards;
    else version (X86_64) enum growDownwards = Yes.growDownwards;
    else version (ARM) enum growDownwards = Yes.growDownwards;
    else version (AArch64) enum growDownwards = Yes.growDownwards;
    else version (HPPA) enum growDownwards = No.growDownwards;
    else version (PPC) enum growDownwards = Yes.growDownwards;
    else version (PPC64) enum growDownwards = Yes.growDownwards;
    else version (MIPS32) enum growDownwards = Yes.growDownwards;
    else version (MIPS64) enum growDownwards = Yes.growDownwards;
    else version (RISCV32) enum growDownwards = Yes.growDownwards;
    else version (RISCV64) enum growDownwards = Yes.growDownwards;
    else version (SPARC) enum growDownwards = Yes.growDownwards;
    else version (SPARC64) enum growDownwards = Yes.growDownwards;
    else version (SystemZ) enum growDownwards = Yes.growDownwards;
    else static assert(0, "Dunno how the stack grows on this architecture.");

    @disable this(this);

    // state {
    private Region!(NullAllocator, minAlign, growDownwards) _impl;
    union
    {
        private ubyte[size] _store = void;
        private double _forAlignmentOnly1 = void;
    }
    // }

    /**
    An alias for $(D minAlign), which must be a valid alignment (nonzero power
    of 2). The start of the region and all allocation requests will be rounded
    up to a multiple of the alignment.

    ----
    InSituRegion!(4096) a1;
    assert(a1.alignment == platformAlignment);
    InSituRegion!(4096, 64) a2;
    assert(a2.alignment == 64);
    ----
    */
    alias alignment = minAlign;

    private void lazyInit()
    {
        assert(!_impl._current);
        _impl = typeof(_impl)(_store);
        assert(_impl._current.alignedAt(alignment));
    }

    /**
    Allocates $(D n) bytes and returns them, or $(D null) if the region cannot
    accommodate the request. For efficiency reasons, if $(D n == 0) the
    function returns an empty non-null slice.
    */
    void[] allocate(size_t n)
    {
        // Fast path
    entry:
        auto result = _impl.allocate(n);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    As above, but the memory allocated is aligned at $(D a) bytes.
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        // Fast path
    entry:
        auto result = _impl.alignedAllocate(n, a);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the result of
    the last call to $(D allocate); otherwise (i.e. another allocation has
    occurred since) it does nothing and returns $(D false).

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    bool deallocate(void[] b)
    {
        if (!_impl._current) return b is null;
        return _impl.deallocate(b);
    }

    /**
    Returns `Ternary.yes` if `b` is the result of a previous allocation,
    `Ternary.no` otherwise.
    */
    Ternary owns(void[] b)
    {
        if (!_impl._current) return Ternary.no;
        return _impl.owns(b);
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated.
    */
    static if (hasMember!(typeof(_impl), "expand"))
    bool expand(ref void[] b, size_t delta)
    {
        if (!_impl._current) lazyInit;
        return _impl.expand(b, delta);
    }

    /**
    Deallocates all memory allocated with this allocator.
    */
    bool deallocateAll()
    {
        // We don't care to lazily init the region
        return _impl.deallocateAll;
    }

    /**
    Allocates all memory available with this allocator.
    */
    void[] allocateAll()
    {
        if (!_impl._current) lazyInit;
        return _impl.allocateAll;
    }

    /**
    Nonstandard function that returns the bytes available for allocation.
    */
    size_t available()
    {
        if (!_impl._current) lazyInit;
        return _impl.available;
    }
}

///
@system unittest
{
    // 128KB region, aligned at 16 bytes
    InSituRegion!(128 * 1024, 16) r1;
    auto a1 = r1.allocate(101);
    assert(a1.length == 101);

    // 128KB region, with fallback to the garbage collector.
    import std.experimental.allocator.building_blocks.fallback_allocator
        : FallbackAllocator;
    import std.experimental.allocator.building_blocks.free_list
        : FreeList;
    import std.experimental.allocator.building_blocks.bitmapped_block
        : BitmappedBlock;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
    const a2 = r2.allocate(102);
    assert(a2.length == 102);

    // Reap with GC fallback.
    InSituRegion!(128 * 1024, 8) tmp3;
    FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
    r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[])(tmp3.allocateAll()));
    const a3 = r3.allocate(103);
    assert(a3.length == 103);

    // Reap/GC with a freelist for small objects up to 16 bytes.
    InSituRegion!(128 * 1024, 64) tmp4;
    FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
    r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[])(tmp4.allocateAll()));
    const a4 = r4.allocate(104);
    assert(a4.length == 104);
}

@system unittest
{
    InSituRegion!(4096, 1) r1;
    auto a = r1.allocate(2001);
    assert(a.length == 2001);
    import std.conv : text;
    assert(r1.available == 2095, text(r1.available));

    InSituRegion!(65_536, 1024*4) r2;
    assert(r2.available <= 65_536);
    a = r2.allocate(2001);
    assert(a.length == 2001);
}

version (CRuntime_Musl)
{
    // sbrk and brk are disabled in Musl:
    // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
    // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
}
else version (DragonFlyBSD)
{
    // sbrk is deprecated in favor of mmap (we could implement a
    // mmap + MAP_NORESERVE + PROT_NONE version); brk has been removed.
    // https://www.dragonflydigest.com/2019/02/22/22586.html
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/dc676eaefa61b0f47bbea1c53eab86fd5ccd78c6
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/4b5665564ef37dc939a3a9ffbafaab9894c18885
    // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/8618d94a0e2ff8303ad93c123a3fa598c26a116e
}
else
{
    private extern(C) void* sbrk(long) nothrow @nogc;
    private extern(C) int brk(shared void*) nothrow @nogc;
}

/**

Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
for Posix systems. Because $(D sbrk) is not thread-safe
$(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
$(D SbrkRegion) uses a mutex internally. This implies that uncontrolled calls
to $(D brk) and $(D sbrk) may affect the workings of $(D SbrkRegion)
adversely.
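
A quick usage sketch through the shared singleton:

----
alias alloc = SbrkRegion!().instance;
auto b = alloc.allocate(4096);
assert(b.length == 4096);
----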

*/
version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
{
    import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
        pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
        PTHREAD_MUTEX_INITIALIZER;
    private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
    import std.typecons : Ternary;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size_t.sizeof == (void*).sizeof);
    private shared void* _brkInitial, _brkCurrent;

    /**
    Instance shared by all callers.
    */
    static shared SbrkRegion instance;

    /**
    Standard allocator primitives.
    */
    enum uint alignment = minAlign;

    /// Ditto
    void[] allocate(size_t bytes) shared
    {
        static if (minAlign > 1)
            const rounded = bytes.roundUpToMultipleOf(alignment);
        else
            alias rounded = bytes;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        // Assume sbrk returns the old break. Most online documentation confirms
        // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
        // which claims the returned value is not portable.
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        if (!_brkInitial)
        {
            _brkInitial = cast(shared) p;
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[0 .. bytes];
    }

    /// Ditto
    void[] alignedAllocate(size_t bytes, uint a) shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (!_brkInitial)
        {
            // This is one extra call, but it'll happen only once.
            _brkInitial = cast(shared) sbrk(0);
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
            (_brkInitial != cast(void*) -1) || assert(0);
            _brkCurrent = _brkInitial;
        }
        immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
            cast(size_t) _brkCurrent, a) - _brkCurrent;
        // Still must make sure the total size is aligned to the allocator's
        // alignment.
        immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);

        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[delta .. delta + bytes];
    }

    /**

    The $(D expand) method may only succeed if the argument is the last block
    allocated. In that case, $(D expand) attempts to push the break pointer to
    the right.
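
    A sketch (success depends on nothing else having moved the break, so the
    result is checked rather than assumed):

    ----
    alias alloc = SbrkRegion!().instance;
    auto b = alloc.allocate(4096);
    // Expansion succeeds only while `b` is still the top block and its
    // length is a multiple of the alignment.
    if (alloc.expand(b, 4096))
        assert(b.length == 8192);
    ----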

    */
    bool expand(ref void[] b, size_t delta) shared
    {
        if (b is null) return delta == 0;
        assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + b.length) return false;
        // Great, can expand the last block
        static if (minAlign > 1)
            const rounded = delta.roundUpToMultipleOf(alignment);
        else
            alias rounded = delta;
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return false;
        }
        _brkCurrent = cast(shared) (p + rounded);
        b = b.ptr[0 .. b.length + delta];
        return true;
    }

    /// Ditto
    Ternary owns(void[] b) shared
    {
        // No need to lock here.
        assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
        return Ternary(_brkInitial && b.ptr >= _brkInitial);
    }

    /**
    The $(D deallocate) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such calls. In addition, the
    argument must be the last block allocated.
    */
    bool deallocate(void[] b) shared
    {
        static if (minAlign > 1)
            const rounded = b.length.roundUpToMultipleOf(alignment);
        else
            const rounded = b.length;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        if (_brkCurrent != b.ptr + rounded) return false;
        assert(b.ptr >= _brkInitial);
        if (sbrk(-rounded) == cast(void*) -1)
            return false;
        _brkCurrent = cast(shared) b.ptr;
        return true;
    }

    /**
    The $(D deallocateAll) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such calls.
    */
    bool deallocateAll() shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        return !_brkInitial || brk(_brkInitial) == 0;
    }

    /// Standard allocator API.
    Ternary empty() shared
    {
        // Also works when they're both null.
        return Ternary(_brkCurrent == _brkInitial);
    }
}

version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) @system nothrow @nogc unittest
{
    // Let's test the assumption that sbrk(n) returns the old address
    const p1 = sbrk(0);
    const p2 = sbrk(4096);
    assert(p1 == p2);
    const p3 = sbrk(0);
    assert(p3 == p2 + 4096);
    // Try to reset brk, but don't make a fuss if it doesn't work
    sbrk(-4096);
}

version (CRuntime_Musl) {} else
version (DragonFlyBSD) {} else
version (Posix) @system nothrow @nogc unittest
{
    import std.typecons : Ternary;
    alias alloc = SbrkRegion!(8).instance;
    auto a = alloc.alignedAllocate(2001, 4096);
    assert(a.length == 2001);
    auto b = alloc.allocate(2001);
    assert(b.length == 2001);
    assert(alloc.owns(a) == Ternary.yes);
    assert(alloc.owns(b) == Ternary.yes);
    // reducing the brk does not work on OSX
    version (Darwin) {} else
    {
        assert(alloc.deallocate(b));
        assert(alloc.deallocateAll);
    }
}