1 //////////////////////////////////////////////////////////////////////////////
2 //
3 // (C) Copyright Ion Gaztanaga 2007-2013. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org/libs/container for documentation.
8 //
9 //////////////////////////////////////////////////////////////////////////////
10 
11 
12 #define BOOST_CONTAINER_SOURCE
13 #include <boost/container/detail/alloc_lib.h>
14 
15 #include "errno.h"   //dlmalloc bug EINVAL is used in posix_memalign without checking LACKS_ERRNO_H
16 #include "limits.h"  //CHAR_BIT
17 #ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
18 #define FOOTERS      1
19 #endif
20 #define USE_LOCKS    1
21 #define MSPACES      1
22 #define NO_MALLINFO  1
23 #define NO_MALLOC_STATS 1
24 
25 
26 #if !defined(NDEBUG)
27    #if !defined(DEBUG)
28       #define DEBUG 1
29       #define DL_DEBUG_DEFINED
30    #endif
31 #endif
32 
33 #define USE_DL_PREFIX
34 
35 #ifdef __GNUC__
36 #define FORCEINLINE inline
37 #endif
38 #include "dlmalloc_2_8_6.c"
39 
40 #ifdef _MSC_VER
41 #pragma warning (push)
42 #pragma warning (disable : 4127)
43 #pragma warning (disable : 4267)
45 #pragma warning (disable : 4702)
46 #pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
47 #pragma warning (disable : 4251 4231 4660) /*dll warnings*/
48 #endif
49 
50 #define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))
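/* Descriptive note: DL_SIZE_IMPL(p) yields the usable payload size of an
   allocation p, i.e. the full chunk size minus the per-chunk bookkeeping
   overhead, so it is always at least the size that was requested. The public
   boost_cont_size() below is implemented directly on top of it. */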
51 
52 static size_t s_allocated_memory;
53 
54 ///////////////////////////////////////////////////////////////
55 ///////////////////////////////////////////////////////////////
56 ///////////////////////////////////////////////////////////////
57 //
58 //         SLIGHTLY MODIFIED DLMALLOC FUNCTIONS
59 //
60 ///////////////////////////////////////////////////////////////
61 ///////////////////////////////////////////////////////////////
62 ///////////////////////////////////////////////////////////////
63 
64 //This function is equal to mspace_free, but with PREACTION replaced by 0 and
65 //POSTACTION by nothing, as the caller is expected to already hold the lock
66 static void mspace_free_lockless(mspace msp, void* mem)
67 {
68   if (mem != 0) {
69     mchunkptr p  = mem2chunk(mem);
70 #if FOOTERS
71     mstate fm = get_mstate_for(p);
72     msp = msp; /* placate people compiling -Wunused */
73 #else /* FOOTERS */
74     mstate fm = (mstate)msp;
75 #endif /* FOOTERS */
76     if (!ok_magic(fm)) {
77       USAGE_ERROR_ACTION(fm, p);
78       return;
79     }
80     if (!0){//PREACTION(fm)) {
81       check_inuse_chunk(fm, p);
82       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
83         size_t psize = chunksize(p);
84         mchunkptr next = chunk_plus_offset(p, psize);
85         s_allocated_memory -= psize;
86         if (!pinuse(p)) {
87           size_t prevsize = p->prev_foot;
88           if (is_mmapped(p)) {
89             psize += prevsize + MMAP_FOOT_PAD;
90             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
91               fm->footprint -= psize;
92             goto postaction;
93           }
94           else {
95             mchunkptr prev = chunk_minus_offset(p, prevsize);
96             psize += prevsize;
97             p = prev;
98             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
99               if (p != fm->dv) {
100                 unlink_chunk(fm, p, prevsize);
101               }
102               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
103                 fm->dvsize = psize;
104                 set_free_with_pinuse(p, psize, next);
105                 goto postaction;
106               }
107             }
108             else
109               goto erroraction;
110           }
111         }
112 
113         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
114           if (!cinuse(next)) {  /* consolidate forward */
115             if (next == fm->top) {
116               size_t tsize = fm->topsize += psize;
117               fm->top = p;
118               p->head = tsize | PINUSE_BIT;
119               if (p == fm->dv) {
120                 fm->dv = 0;
121                 fm->dvsize = 0;
122               }
123               if (should_trim(fm, tsize))
124                 sys_trim(fm, 0);
125               goto postaction;
126             }
127             else if (next == fm->dv) {
128               size_t dsize = fm->dvsize += psize;
129               fm->dv = p;
130               set_size_and_pinuse_of_free_chunk(p, dsize);
131               goto postaction;
132             }
133             else {
134               size_t nsize = chunksize(next);
135               psize += nsize;
136               unlink_chunk(fm, next, nsize);
137               set_size_and_pinuse_of_free_chunk(p, psize);
138               if (p == fm->dv) {
139                 fm->dvsize = psize;
140                 goto postaction;
141               }
142             }
143           }
144           else
145             set_free_with_pinuse(p, psize, next);
146 
147           if (is_small(psize)) {
148             insert_small_chunk(fm, p, psize);
149             check_free_chunk(fm, p);
150           }
151           else {
152             tchunkptr tp = (tchunkptr)p;
153             insert_large_chunk(fm, tp, psize);
154             check_free_chunk(fm, p);
155             if (--fm->release_checks == 0)
156               release_unused_segments(fm);
157           }
158           goto postaction;
159         }
160       }
161     erroraction:
162       USAGE_ERROR_ACTION(fm, p);
163     postaction:
164       ;//POSTACTION(fm);
165     }
166   }
167 }
168 
169 //This function is equal to mspace_malloc, but with PREACTION replaced by 0 and
170 //POSTACTION by nothing, as the caller is expected to already hold the lock
171 void* mspace_malloc_lockless(mspace msp, size_t bytes)
172 {
173   mstate ms = (mstate)msp;
174   if (!ok_magic(ms)) {
175     USAGE_ERROR_ACTION(ms,ms);
176     return 0;
177   }
178     if (!0){//PREACTION(ms)) {
179     void* mem;
180     size_t nb;
181     if (bytes <= MAX_SMALL_REQUEST) {
182       bindex_t idx;
183       binmap_t smallbits;
184       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
185       idx = small_index(nb);
186       smallbits = ms->smallmap >> idx;
187 
188       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
189         mchunkptr b, p;
190         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
191         b = smallbin_at(ms, idx);
192         p = b->fd;
193         assert(chunksize(p) == small_index2size(idx));
194         unlink_first_small_chunk(ms, b, p, idx);
195         set_inuse_and_pinuse(ms, p, small_index2size(idx));
196         mem = chunk2mem(p);
197         check_malloced_chunk(ms, mem, nb);
198         goto postaction;
199       }
200 
201       else if (nb > ms->dvsize) {
202         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
203           mchunkptr b, p, r;
204           size_t rsize;
205           bindex_t i;
206           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
207           binmap_t leastbit = least_bit(leftbits);
208           compute_bit2idx(leastbit, i);
209           b = smallbin_at(ms, i);
210           p = b->fd;
211           assert(chunksize(p) == small_index2size(i));
212           unlink_first_small_chunk(ms, b, p, i);
213           rsize = small_index2size(i) - nb;
214           /* Fit here cannot be remainderless with 4-byte sizes */
215           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
216             set_inuse_and_pinuse(ms, p, small_index2size(i));
217           else {
218             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
219             r = chunk_plus_offset(p, nb);
220             set_size_and_pinuse_of_free_chunk(r, rsize);
221             replace_dv(ms, r, rsize);
222           }
223           mem = chunk2mem(p);
224           check_malloced_chunk(ms, mem, nb);
225           goto postaction;
226         }
227 
228         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
229           check_malloced_chunk(ms, mem, nb);
230           goto postaction;
231         }
232       }
233     }
234     else if (bytes >= MAX_REQUEST)
235       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
236     else {
237       nb = pad_request(bytes);
238       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
239         check_malloced_chunk(ms, mem, nb);
240         goto postaction;
241       }
242     }
243 
244     if (nb <= ms->dvsize) {
245       size_t rsize = ms->dvsize - nb;
246       mchunkptr p = ms->dv;
247       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
248         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
249         ms->dvsize = rsize;
250         set_size_and_pinuse_of_free_chunk(r, rsize);
251         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
252       }
253       else { /* exhaust dv */
254         size_t dvs = ms->dvsize;
255         ms->dvsize = 0;
256         ms->dv = 0;
257         set_inuse_and_pinuse(ms, p, dvs);
258       }
259       mem = chunk2mem(p);
260       check_malloced_chunk(ms, mem, nb);
261       goto postaction;
262     }
263 
264     else if (nb < ms->topsize) { /* Split top */
265       size_t rsize = ms->topsize -= nb;
266       mchunkptr p = ms->top;
267       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
268       r->head = rsize | PINUSE_BIT;
269       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
270       mem = chunk2mem(p);
271       check_top_chunk(ms, ms->top);
272       check_malloced_chunk(ms, mem, nb);
273       goto postaction;
274     }
275 
276     mem = sys_alloc(ms, nb);
277 
278   postaction:
279       ;//POSTACTION(ms);
280     return mem;
281   }
282 
283   return 0;
284 }
285 
286 //This function is equal to try_realloc_chunk, but handles both a
287 //minimum and a desired byte count
288 static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
289 {
290   mchunkptr newp = 0;
291   size_t oldsize = chunksize(p);
292   mchunkptr next = chunk_plus_offset(p, oldsize);
293   if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
294               ok_next(p, next) && ok_pinuse(next))) {
295     if (is_mmapped(p)) {
296       newp = mmap_resize(m, p, des_nb, can_move);
297       if(!newp)   //mmap does not return how many bytes we could reallocate, so fall back to the minimum
298          newp = mmap_resize(m, p, min_nb, can_move);
299     }
300     else if (oldsize >= min_nb) {             /* already big enough */
301       size_t nb = oldsize >= des_nb ? des_nb : oldsize;
302       size_t rsize = oldsize - nb;
303       if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
304         mchunkptr r = chunk_plus_offset(p, nb);
305         set_inuse(m, p, nb);
306         set_inuse(m, r, rsize);
307         dispose_chunk(m, r, rsize);
308       }
309       newp = p;
310     }
311     else if (next == m->top) {  /* extend into top */
312       if (oldsize + m->topsize > min_nb) {
313         size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
314         size_t newsize = oldsize + m->topsize;
315         size_t newtopsize = newsize - nb;
316         mchunkptr newtop = chunk_plus_offset(p, nb);
317         set_inuse(m, p, nb);
318         newtop->head = newtopsize |PINUSE_BIT;
319         m->top = newtop;
320         m->topsize = newtopsize;
321         newp = p;
322       }
323     }
324     else if (next == m->dv) { /* extend into dv */
325       size_t dvs = m->dvsize;
326       if (oldsize + dvs >= min_nb) {
327         size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
328         size_t dsize = oldsize + dvs - nb;
329         if (dsize >= MIN_CHUNK_SIZE) {
330           mchunkptr r = chunk_plus_offset(p, nb);
331           mchunkptr n = chunk_plus_offset(r, dsize);
332           set_inuse(m, p, nb);
333           set_size_and_pinuse_of_free_chunk(r, dsize);
334           clear_pinuse(n);
335           m->dvsize = dsize;
336           m->dv = r;
337         }
338         else { /* exhaust dv */
339           size_t newsize = oldsize + dvs;
340           set_inuse(m, p, newsize);
341           m->dvsize = 0;
342           m->dv = 0;
343         }
344         newp = p;
345       }
346     }
347     else if (!cinuse(next)) { /* extend into next free chunk */
348       size_t nextsize = chunksize(next);
349       if (oldsize + nextsize >= min_nb) {
350         size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
351         size_t rsize = oldsize + nextsize - nb;
352         unlink_chunk(m, next, nextsize);
353         if (rsize < MIN_CHUNK_SIZE) {
354           size_t newsize = oldsize + nextsize;
355           set_inuse(m, p, newsize);
356         }
357         else {
358           mchunkptr r = chunk_plus_offset(p, nb);
359           set_inuse(m, p, nb);
360           set_inuse(m, r, rsize);
361           dispose_chunk(m, r, rsize);
362         }
363         newp = p;
364       }
365     }
366   }
367   else {
368     USAGE_ERROR_ACTION(m, chunk2mem(p));
369   }
370   return newp;
371 }
372 
373 #define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
374    *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
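/* Descriptive note: a boost_cont_memchain is an intrusive singly-linked list
   whose "next" pointer lives in the first sizeof(void*) bytes of each buffer.
   The macro above copies NEXTMEM's stored next pointer into THISMEM, i.e. it
   unlinks NEXTMEM from the chain once its chunk has been merged into THISMEM's
   chunk (see internal_multialloc_free below). */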
375 
376 //This function is based on internal_bulk_free
377 //replacing iteration over array[] with boost_cont_memchain.
378 //Nodes that cannot be deallocated here (e.g. nodes owned by another mspace when
379 //FOOTERS is enabled) are returned back in the chain. After forward merging, backward merging is also tried.
380 static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
381 {
382 #if FOOTERS
383   boost_cont_memchain ret_chain;
384   BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
385 #endif
386   if (!PREACTION(m)) {
387     boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
388     while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
389       void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
390       mchunkptr a_p = mem2chunk(a_mem);
391       size_t psize = chunksize(a_p);
392 #if FOOTERS
393       if (get_mstate_for(a_p) != m) {
394          BOOST_CONTAINER_MEMIT_NEXT(a_it);
395          BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
396          continue;
397       }
398 #endif
399       check_inuse_chunk(m, a_p);
400       if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
401          while(1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
402             boost_cont_memchain_it b_it = a_it;
403             BOOST_CONTAINER_MEMIT_NEXT(b_it);
404             if(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)){
405                void *b_mem   = BOOST_CONTAINER_MEMIT_ADDR(b_it);
406                mchunkptr b_p = mem2chunk(b_mem);
407                if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
408                   psize += chunksize(b_p);
409                   set_inuse(m, a_p, psize);
410                   BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
411                   continue;
412                }
413                if(RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))){
414                   /* b chunk is contiguous and previous so a's size can be added to b */
415                   if(a_p == next_chunk(b_p)) {
416                      psize += chunksize(b_p);
417                      set_inuse(m, b_p, psize);
418                      a_it = b_it;
419                      a_p = b_p;
420                      a_mem = b_mem;
421                      continue;
422                   }
423                }
424             }
425             /* Normal deallocation starts again in the outer loop */
426             a_it = b_it;
427             s_allocated_memory -= psize;
428             dispose_chunk(m, a_p, psize);
429             break;
430          }
431        }
432        else {
433          CORRUPTION_ERROR_ACTION(m);
434          break;
435        }
436     }
437     if (should_trim(m, m->topsize))
438       sys_trim(m, 0);
439     POSTACTION(m);
440   }
441 #if FOOTERS
442   {
443    boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
444    BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
445    BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
446          (pchain
447          , last_pchain
448          , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
449          , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
450          , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
451          );
452    }
453 #endif
454 }
455 
456 ///////////////////////////////////////////////////////////////
457 ///////////////////////////////////////////////////////////////
458 ///////////////////////////////////////////////////////////////
459 //
460 //         NEW FUNCTIONS BASED ON DLMALLOC INTERNALS
461 //
462 ///////////////////////////////////////////////////////////////
463 ///////////////////////////////////////////////////////////////
464 ///////////////////////////////////////////////////////////////
465 
466 #define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO)     ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
467 #define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO)       ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
468 #define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~(ROUNDTO-1)))
469 #define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO)   (((ORIG_SIZE - 1) & (~(ROUNDTO-1))) + ROUNDTO)
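/* Worked examples of the size-rounding macros above. The *_PO2_* variants
   additionally require ROUNDTO to be a power of two, and the ROUNDED variants
   are intended for ORIG_SIZE > 0 (ORIG_SIZE - 1 wraps for zero):
      GET_TRUNCATED_SIZE(13, 4)     == 12
      GET_ROUNDED_SIZE(13, 4)       == 16   (GET_ROUNDED_SIZE(12, 4) == 12)
      GET_TRUNCATED_PO2_SIZE(13, 8) ==  8
      GET_ROUNDED_PO2_SIZE(13, 8)   == 16
*/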
470 
471 /* Greatest common divisor and least common multiple
472    gcd is an algorithm that calculates the greatest common divisor of two
473    integers, using Euclid's algorithm.
474 
475    Pre: A > 0 && B > 0
476    Recommended: A > B*/
477 #define CALCULATE_GCD(A, B, OUT)\
478 {\
479    size_t a = A;\
480    size_t b = B;\
481    do\
482    {\
483       size_t tmp = b;\
484       b = a % b;\
485       a = tmp;\
486    } while (b != 0);\
487 \
488    OUT = a;\
489 }
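/* Worked example: CALCULATE_GCD(18, 12, g) iterates (a, b) = (18, 12) ->
   (12, 6) -> (6, 0) and leaves g == 6. */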
490 
491 /* lcm is an algorithm that calculates the least common multiple of two
492    integers.
493 
494    Pre: A > 0 && B > 0
495    Recommended: A > B*/
496 #define CALCULATE_LCM(A, B, OUT)\
497 {\
498    CALCULATE_GCD(A, B, OUT);\
499    OUT = (A / OUT)*B;\
500 }
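/* Worked example: CALCULATE_LCM(18, 12, l) first computes the gcd (6) and then
   l = (18/6)*12 == 36. Note that CALCULATE_LCM evaluates A and B twice, so it
   should only be used with side-effect-free arguments. */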
501 
502 static int calculate_lcm_and_needs_backwards_lcmed
503    (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
504     size_t *plcm, size_t *pneeds_backwards_lcmed)
505 {
506    /* Now calculate lcm */
507    size_t max = backwards_multiple;
508    size_t min = MALLOC_ALIGNMENT;
509    size_t needs_backwards;
510    size_t needs_backwards_lcmed;
511    size_t lcm;
512    size_t current_forward;
513    /*Swap if necessary*/
514    if(max < min){
515       size_t tmp = min;
516       min = max;
517       max = tmp;
518    }
519    /*Check if it's power of two*/
520    if((backwards_multiple & (backwards_multiple-1)) == 0){
521       if(0 != (size_to_achieve & ((backwards_multiple-1)))){
522          USAGE_ERROR_ACTION(m, oldp);
523          return 0;
524       }
525 
526       lcm = max;
527       /*We want to obtain a buffer between minbytes and maxbytes, falling
528       back towards minbytes if maxbytes can't be achieved, so calculate
529       the biggest of all possibilities*/
530       current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
531       needs_backwards = size_to_achieve - current_forward;
532       assert((needs_backwards % backwards_multiple) == 0);
533       needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
534       *plcm = lcm;
535       *pneeds_backwards_lcmed = needs_backwards_lcmed;
536       return 1;
537    }
538    /*Check if it's multiple of alignment*/
539    else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
540       lcm = backwards_multiple;
541       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
542       //No need to round needs_backwards because backwards_multiple == lcm
543       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
544       assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
545       *plcm = lcm;
546       *pneeds_backwards_lcmed = needs_backwards_lcmed;
547       return 1;
548    }
549    /*Check if it's a multiple of half of the alignment*/
550    else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
551       lcm = backwards_multiple*2u;
552       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
553       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
554       if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
555       //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
556          needs_backwards_lcmed += backwards_multiple;
557       assert((needs_backwards_lcmed % lcm) == 0);
558       *plcm = lcm;
559       *pneeds_backwards_lcmed = needs_backwards_lcmed;
560       return 1;
561    }
562    /*Check if it's a multiple of a quarter of the alignment*/
563    else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
564       size_t remainder;
565       lcm = backwards_multiple*4u;
566       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
567       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
568       //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
569          //needs_backwards_lcmed += backwards_multiple;
570       if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
571          if(backwards_multiple & MALLOC_ALIGNMENT/2u){
572             needs_backwards_lcmed += (remainder)*backwards_multiple;
573          }
574          else{
575             needs_backwards_lcmed += (4-remainder)*backwards_multiple;
576          }
577       }
578       assert((needs_backwards_lcmed % lcm) == 0);
579       *plcm = lcm;
580       *pneeds_backwards_lcmed = needs_backwards_lcmed;
581       return 1;
582    }
583    else{
584       CALCULATE_LCM(max, min, lcm);
585       /*We want to obtain a buffer between minbytes and maxbytes, falling
586       back towards minbytes if maxbytes can't be achieved, so calculate
587       the biggest of all possibilities*/
588       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
589       needs_backwards = size_to_achieve - current_forward;
590       assert((needs_backwards % backwards_multiple) == 0);
591       needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
592       *plcm = lcm;
593       *pneeds_backwards_lcmed = needs_backwards_lcmed;
594       return 1;
595    }
596 }
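/* Illustrative example, assuming MALLOC_ALIGNMENT == 16 (the usual value on
   64-bit targets): backwards_multiple = 4 (a power of two), received_size = 10
   and size_to_achieve = 24 take the first branch, so current_forward =
   GET_TRUNCATED_PO2_SIZE(10, 4) == 8, needs_backwards = 24 - 8 == 16, lcm == 16
   and *pneeds_backwards_lcmed == 16, i.e. 16 extra bytes are needed before the
   current block. */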
597 
598 static void *internal_grow_both_sides
599                          (mstate m
600                          ,allocation_type command
601                          ,void *oldmem
602                          ,size_t minbytes
603                          ,size_t maxbytes
604                          ,size_t *received_size
605                          ,size_t backwards_multiple
606                          ,int only_preferred_backwards)
607 {
608    mchunkptr oldp = mem2chunk(oldmem);
609    size_t oldsize = chunksize(oldp);
610    *received_size = oldsize - overhead_for(oldp);
611    if(minbytes <= *received_size)
612       return oldmem;
613 
614    if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
615       if(command & BOOST_CONTAINER_EXPAND_FWD){
616          if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
617             check_inuse_chunk(m, oldp);
618             *received_size = DL_SIZE_IMPL(oldmem);
619             s_allocated_memory += chunksize(oldp) - oldsize;
620             return oldmem;
621          }
622       }
623       else{
624          *received_size = DL_SIZE_IMPL(oldmem);
625          if(*received_size >= maxbytes)
626             return oldmem;
627       }
628 /*
629       Should we check this?
630       if(backwards_multiple &&
631          (0 != (minbytes % backwards_multiple) &&
632           0 != (maxbytes % backwards_multiple)) ){
633          USAGE_ERROR_ACTION(m, oldp);
634          return 0;
635       }
636 */
637       /* We reach here only if forward expansion fails */
638       if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
639          return 0;
640       }
641       {
642          size_t prevsize = oldp->prev_foot;
643          if ((prevsize & USE_MMAP_BIT) != 0){
644             /*Return failure if the previous chunk was mmapped.
645               mremap does not allow expanding to a fixed address (MREMAP_FIXED) without
646               allowing the mapping to move (MREMAP_MAYMOVE must also be set).*/
647             return 0;
648          }
649          else {
650             mchunkptr prev = chunk_minus_offset(oldp, prevsize);
651             size_t dsize = oldsize + prevsize;
652             size_t needs_backwards_lcmed;
653             size_t lcm;
654 
655             /* Let's calculate the number of extra bytes of data before the current
656             block's beginning. The value is a multiple of backwards_multiple
657             and the alignment*/
658             if(!calculate_lcm_and_needs_backwards_lcmed
659                ( backwards_multiple, *received_size
660                , only_preferred_backwards ? maxbytes : minbytes
661                , &lcm, &needs_backwards_lcmed)
662                || !RTCHECK(ok_address(m, prev))){
663                USAGE_ERROR_ACTION(m, oldp);
664                return 0;
665             }
666             /* Check if previous block has enough size */
667             else if(prevsize < needs_backwards_lcmed){
668                /* preferred size? */
669                return 0;
670             }
671             /* Now take all next space. This must succeed, as we've previously calculated the correct size */
672             if(command & BOOST_CONTAINER_EXPAND_FWD){
673                if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
674                   assert(0);
675                }
676                check_inuse_chunk(m, oldp);
677                *received_size = DL_SIZE_IMPL(oldmem);
678                s_allocated_memory += chunksize(oldp) - oldsize;
679                oldsize = chunksize(oldp);
680                dsize = oldsize + prevsize;
681             }
682             /* We need a minimum size to split the previous one */
683             if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
684                mchunkptr r  = chunk_minus_offset(oldp, needs_backwards_lcmed);
685                size_t rsize = oldsize + needs_backwards_lcmed;
686                size_t newprevsize = dsize - rsize;
687                int prev_was_dv = prev == m->dv;
688 
689                assert(newprevsize >= MIN_CHUNK_SIZE);
690 
691                if (prev_was_dv) {
692                   m->dvsize = newprevsize;
693                }
694                else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
695                   unlink_chunk(m, prev, prevsize);
696                   insert_chunk(m, prev, newprevsize);
697                }
698 
699                set_size_and_pinuse_of_free_chunk(prev, newprevsize);
700                clear_pinuse(r);
701                set_inuse(m, r, rsize);
702                check_malloced_chunk(m, chunk2mem(r), rsize);
703                *received_size = chunksize(r) - overhead_for(r);
704                s_allocated_memory += chunksize(r) - oldsize;
705                return chunk2mem(r);
706             }
707             /* Check if there is no place to create a new block and
708                the whole new block is a multiple of the backwards expansion multiple */
709             else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
710                /* Just merge the whole previous block */
711                /* prevsize is multiple of lcm (and backwards_multiple)*/
712                *received_size  += prevsize;
713 
714                if (prev != m->dv) {
715                   unlink_chunk(m, prev, prevsize);
716                }
717                else{
718                   m->dvsize = 0;
719                   m->dv     = 0;
720                }
721                set_inuse(m, prev, dsize);
722                check_malloced_chunk(m, chunk2mem(prev), dsize);
723                s_allocated_memory += chunksize(prev) - oldsize;
724                return chunk2mem(prev);
725             }
726             else{
727                /* Previous block was big enough but there is no room
728                   to create an empty block and taking the whole block does
729                   not fulfill alignment requirements */
730                return 0;
731             }
732          }
733       }
734    }
735    else{
736       USAGE_ERROR_ACTION(m, oldmem);
737       return 0;
738    }
739    return 0;
740 }
741 
742 /* This is similar to mmap_resize but:
743    * Only to shrink
744    * It takes min and max sizes
745    * Takes additional 'do_commit' argument to obtain the final
746      size before doing the real shrink operation.
747 */
748 static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
749 {
750   size_t oldsize = chunksize(oldp);
751   *received_size = oldsize;
752   #if HAVE_MREMAP
753   if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
754     return 0;
755   {
756    size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
757    /* Keep old chunk if big enough but not too big */
758    if (oldsize >= effective_min + SIZE_T_SIZE &&
759          (oldsize - effective_min) <= (mparams.granularity << 1))
760       return 0;
761    /* Now calculate new sizes */
762    {
763       size_t offset = oldp->prev_foot;
764       size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
765       size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
766       *received_size = newmmsize;
767       if(!do_commit){
768          const int flags = 0; /* placate people compiling -Wunused */
769          char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
770                                        oldmmsize, newmmsize, flags);
771          /*This must always succeed */
772          if(!cp){
773             USAGE_ERROR_ACTION(m, m);
774             return 0;
775          }
776          {
777          mchunkptr newp = (mchunkptr)(cp + offset);
778          size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
779          newp->head = psize;
780          mark_inuse_foot(m, newp, psize);
781          chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
782          chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
783 
784          if (cp < m->least_addr)
785             m->least_addr = cp;
786          if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
787             m->max_footprint = m->footprint;
788          check_mmapped_chunk(m, newp);
789          }
790       }
791     }
792     return 1;
793   }
794   #else  //#if HAVE_MREMAP
795   (void)m;
796   (void)oldp;
797   (void)nbmin;
798   (void)nbmax;
799   (void)received_size;
800   (void)do_commit;
801   return 0;
802   #endif //#if HAVE_MREMAP
803 }
804 
805 static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
806 {
807    *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
808    if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
809       MALLOC_FAILURE_ACTION;
810       return 0;
811    }
812    else if(minbytes < MIN_REQUEST){
813       minbytes = MIN_REQUEST;
814    }
815    if (minbytes > maxbytes) {
816       return 0;
817    }
818 
819    {
820       mchunkptr oldp = mem2chunk(oldmem);
821       size_t oldsize = chunksize(oldp);
822       mchunkptr next = chunk_plus_offset(oldp, oldsize);
823       void* extra = 0;
824 
825       /* Try to either shrink or extend into top. Else malloc-copy-free*/
826       if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
827                   ok_next(oldp, next) && ok_pinuse(next))) {
828          size_t nbmin = request2size(minbytes);
829          size_t nbmax = request2size(maxbytes);
830 
831          if (nbmin > oldsize){
832             /* Return error if old size is too small */
833          }
834          else if (is_mmapped(oldp)){
835             return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
836          }
837          else{ // nbmin <= oldsize /* already big enough*/
838             size_t nb = nbmin;
839             size_t rsize = oldsize - nb;
840             if (rsize >= MIN_CHUNK_SIZE) {
841                if(do_commit){
842                   mchunkptr remainder = chunk_plus_offset(oldp, nb);
843                   set_inuse(m, oldp, nb);
844                   set_inuse(m, remainder, rsize);
845                   extra = chunk2mem(remainder);
846                }
847                *received_size = nb - overhead_for(oldp);
848                if(!do_commit)
849                   return 1;
850             }
851          }
852       }
853       else {
854          USAGE_ERROR_ACTION(m, oldmem);
855          return 0;
856       }
857 
858       if (extra != 0 && do_commit) {
859          mspace_free_lockless(m, extra);
860          check_inuse_chunk(m, oldp);
861          return 1;
862       }
863       else {
864          return 0;
865       }
866    }
867 }
868 
869 
870 #define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096
871 
872 #define SQRT_MAX_SIZE_T           (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
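/* Descriptive note: on a 64-bit target (CHAR_BIT == 8, sizeof(size_t) == 8)
   SQRT_MAX_SIZE_T is 0xFFFFFFFF. The overflow checks below rely on the fact
   that a product of two size_t factors can only overflow when at least one
   factor exceeds SQRT_MAX_SIZE_T; only in that case is the exact
   MAX_SIZE_T/a < b division test evaluated. */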
873 
874 static int internal_node_multialloc
875    (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
876    void*     mem;            /* malloced aggregate space */
877    mchunkptr p;              /* corresponding chunk */
878    size_t    remainder_size; /* remaining bytes while splitting */
879    flag_t    was_enabled;    /* to disable mmap */
880    size_t    elements_per_segment = 0;
881    size_t    element_req_size = request2size(element_size);
882    boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
883 
884    /*Error if wrong element_size parameter */
885    if( !element_size ||
886       /*OR Error if n_elements is less than contiguous_elements */
887       ((contiguous_elements + 1) > (DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
888       /* OR Error if integer overflow */
889       (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
890          (MAX_SIZE_T/element_req_size) < contiguous_elements)){
891       return 0;
892    }
893    switch(contiguous_elements){
894       case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
895       {
896          /* Default contiguous, just check that we can store at least one element */
897          elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM/element_req_size;
898          elements_per_segment += (size_t)(!elements_per_segment);
899       }
900       break;
901       case DL_MULTIALLOC_ALL_CONTIGUOUS:
902          /* All elements should be allocated in a single call */
903          elements_per_segment = n_elements;
904       break;
905       default:
906          /* Allocate in chunks of "contiguous_elements" */
907          elements_per_segment = contiguous_elements;
908    }
909 
910    {
911       size_t    i;
912       size_t next_i;
913       /*
914          Allocate the aggregate chunk.  First disable direct-mmapping so
915          malloc won't use it, since we would not be able to later
916          free/realloc space internal to a segregated mmap region.
917       */
918       was_enabled = use_mmap(m);
919       disable_mmap(m);
920       for(i = 0; i != n_elements; i = next_i)
921       {
922          size_t accum_size;
923          size_t n_elements_left = n_elements - i;
924          next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
925          accum_size = element_req_size*(next_i - i);
926 
927          mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
928          if (mem == 0){
929             BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
930             while(i--){
931                void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
932                BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
933                mspace_free_lockless(m, addr);
934             }
935             if (was_enabled)
936                enable_mmap(m);
937             return 0;
938          }
939          p = mem2chunk(mem);
940          remainder_size = chunksize(p);
941          s_allocated_memory += remainder_size;
942 
943          assert(!is_mmapped(p));
944          {  /* split out elements */
945             void *mem_orig = mem;
946             boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
947             size_t num_elements = next_i-i;
948 
949             size_t num_loops = num_elements - 1;
950             remainder_size -= element_req_size*num_loops;
951             while(num_loops--){
952                void **mem_prev = ((void**)mem);
953                set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
954                p = chunk_plus_offset(p, element_req_size);
955                mem = chunk2mem(p);
956                *mem_prev = mem;
957             }
958             set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
959             BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
960          }
961       }
962       if (was_enabled)
963          enable_mmap(m);
964    }
965    return 1;
966 }
967 
968 static int internal_multialloc_arrays
969    (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
970    void*     mem;            /* malloced aggregate space */
971    mchunkptr p;              /* corresponding chunk */
972    size_t    remainder_size; /* remaining bytes while splitting */
973    flag_t    was_enabled;    /* to disable mmap */
974    size_t    size;
975    size_t boost_cont_multialloc_segmented_malloc_size;
976    size_t max_size;
977 
978    /* Check overflow */
979    if(!element_size){
980       return 0;
981    }
982    max_size = MAX_REQUEST/element_size;
983    /* Different sizes*/
984    switch(contiguous_elements){
985       case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
986          /* Use default contiguous mem */
987          boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
988       break;
989       case DL_MULTIALLOC_ALL_CONTIGUOUS:
990          boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
991       break;
992       default:
993          if(max_size < contiguous_elements){
994             return 0;
995          }
996          else{
997             /* The suggested buffer is just the element count times the element size */
998             boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
999          }
1000    }
1001 
1002    {
1003       size_t    i;
1004       size_t next_i;
1005       /*
1006          Allocate the aggregate chunk.  First disable direct-mmapping so
1007          malloc won't use it, since we would not be able to later
1008          free/realloc space internal to a segregated mmap region.
1009       */
1010       was_enabled = use_mmap(m);
1011       disable_mmap(m);
1012       for(i = 0, next_i = 0; i != n_elements; i = next_i)
1013       {
1014          int error = 0;
1015          size_t accum_size;
1016          for(accum_size = 0; next_i != n_elements; ++next_i){
1017             size_t cur_array_size   = sizes[next_i];
1018             if(max_size < cur_array_size){
1019                error = 1;
1020                break;
1021             }
1022             else{
1023                size_t reqsize = request2size(cur_array_size*element_size);
1024                if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
1025                   if(!accum_size){
1026                      accum_size += reqsize;
1027                      ++next_i;
1028                   }
1029                   break;
1030                }
1031                accum_size += reqsize;
1032             }
1033          }
1034 
1035          mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
1036          if (mem == 0){
1037             boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
1038             while(i--){
1039                void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
1040                BOOST_CONTAINER_MEMIT_NEXT(it);
1041                mspace_free_lockless(m, addr);
1042             }
1043             if (was_enabled)
1044                enable_mmap(m);
1045             return 0;
1046          }
1047          p = mem2chunk(mem);
1048          remainder_size = chunksize(p);
1049          s_allocated_memory += remainder_size;
1050 
1051          assert(!is_mmapped(p));
1052 
1053          {  /* split out elements */
1054             void *mem_orig = mem;
1055             boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
1056             size_t num_elements = next_i-i;
1057 
1058             for(++i; i != next_i; ++i) {
1059                void **mem_prev = ((void**)mem);
1060                size = request2size(sizes[i]*element_size);
1061                remainder_size -= size;
1062                set_size_and_pinuse_of_inuse_chunk(m, p, size);
1063                p = chunk_plus_offset(p, size);
1064                mem = chunk2mem(p);
1065                *mem_prev = mem;
1066             }
1067             set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
1068             BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
1069          }
1070       }
1071       if (was_enabled)
1072          enable_mmap(m);
1073    }
1074    return 1;
1075 }
1076 
1077 BOOST_CONTAINER_DECL int boost_cont_multialloc_arrays
1078    (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1079 {
1080    int ret = 0;
1081    mstate ms = (mstate)gm;
1082    ensure_initialization();
1083    if (!ok_magic(ms)) {
1084       USAGE_ERROR_ACTION(ms,ms);
1085    }
1086    else if (!PREACTION(ms)) {
1087       ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain);
1088       POSTACTION(ms);
1089    }
1090    return ret;
1091 }
1092 
1093 
1094 /*Doug Lea malloc extensions*/
1095 static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
1096 {
1097    boost_cont_malloc_stats_t ret;
1098    ensure_initialization();
1099    if (!PREACTION(m)) {
1100       size_t maxfp = 0;
1101       size_t fp = 0;
1102       size_t used = 0;
1103       check_malloc_state(m);
1104       if (is_initialized(m)) {
1105          msegmentptr s = &m->seg;
1106          maxfp = m->max_footprint;
1107          fp = m->footprint;
1108          used = fp - (m->topsize + TOP_FOOT_SIZE);
1109 
1110          while (s != 0) {
1111             mchunkptr q = align_as_chunk(s->base);
1112             while (segment_holds(s, q) &&
1113                   q != m->top && q->head != FENCEPOST_HEAD) {
1114                if (!cinuse(q))
1115                used -= chunksize(q);
1116                q = next_chunk(q);
1117             }
1118             s = s->next;
1119          }
1120       }
1121 
1122       ret.max_system_bytes   = maxfp;
1123       ret.system_bytes       = fp;
1124       ret.in_use_bytes       = used;
1125       POSTACTION(m);
1126    }
1127    return ret;
1128 }
1129 
1130 BOOST_CONTAINER_DECL size_t boost_cont_size(const void *p)
1131 {  return DL_SIZE_IMPL(p);  }
1132 
1133 BOOST_CONTAINER_DECL void* boost_cont_malloc(size_t bytes)
1134 {
1135    size_t received_bytes;
1136    ensure_initialization();
1137    return boost_cont_allocation_command
1138       (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first;
1139 }
1140 
1141 BOOST_CONTAINER_DECL void boost_cont_free(void* mem)
1142 {
1143    mstate ms = (mstate)gm;
1144    if (!ok_magic(ms)) {
1145       USAGE_ERROR_ACTION(ms,ms);
1146    }
1147    else if (!PREACTION(ms)) {
1148       mspace_free_lockless(ms, mem);
1149       POSTACTION(ms);
1150    }
1151 }
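/* Illustrative usage sketch (kept out of the build with #if 0): a minimal
   malloc/size/free round trip through the public boost_cont_* API defined in
   this file. The helper name example_basic_usage is hypothetical. */
#if 0
static void example_basic_usage(void)
{
   void *p = boost_cont_malloc(100);        /* at least 100 usable bytes       */
   if(p){
      size_t usable = boost_cont_size(p);   /* exact usable size, >= 100       */
      (void)usable;
      boost_cont_free(p);                   /* also updates s_allocated_memory */
   }
}
#endif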
1152 
1153 BOOST_CONTAINER_DECL void* boost_cont_memalign(size_t bytes, size_t alignment)
1154 {
1155    void *addr;
1156    ensure_initialization();
1157    addr = mspace_memalign(gm, alignment, bytes);
1158    if(addr){
1159       s_allocated_memory += chunksize(mem2chunk(addr));
1160    }
1161    return addr;
1162 }
1163 
1164 BOOST_CONTAINER_DECL int boost_cont_multialloc_nodes
1165    (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1166 {
1167    int ret = 0;
1168    mstate ms = (mstate)gm;
1169    ensure_initialization();
1170    if (!ok_magic(ms)) {
1171       USAGE_ERROR_ACTION(ms,ms);
1172    }
1173    else if (!PREACTION(ms)) {
1174       ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain);
1175       POSTACTION(ms);
1176    }
1177    return ret;
1178 }
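/* Illustrative usage sketch (kept out of the build with #if 0): allocate ten
   64-byte nodes in segmented bursts and release them with a single call. The
   helper name example_multialloc is hypothetical; the memchain type and macros
   come from boost/container/detail/alloc_lib.h included above. */
#if 0
static void example_multialloc(void)
{
   boost_cont_memchain chain;
   BOOST_CONTAINER_MEMCHAIN_INIT(&chain);
   if(boost_cont_multialloc_nodes(10, 64, DL_MULTIALLOC_DEFAULT_CONTIGUOUS, &chain)){
      /* ...link the returned nodes into a container... */
      boost_cont_multidealloc(&chain);
   }
}
#endif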
1179 
1180 BOOST_CONTAINER_DECL size_t boost_cont_footprint()
1181 {
1182    return ((mstate)gm)->footprint;
1183 }
1184 
1185 BOOST_CONTAINER_DECL size_t boost_cont_allocated_memory()
1186 {
1187    size_t alloc_mem = 0;
1188    mstate m = (mstate)gm;
1189    ensure_initialization();
1190    if (!ok_magic(m)) {
1191       USAGE_ERROR_ACTION(m,m);
1192    }
1193 
1194 
1195    if (!PREACTION(m)) {
1196       check_malloc_state(m);
1197       if (is_initialized(m)) {
1198       size_t nfree = SIZE_T_ONE; /* top always free */
1199       size_t mfree = m->topsize + TOP_FOOT_SIZE;
1200       size_t sum = mfree;
1201       msegmentptr s = &m->seg;
1202       while (s != 0) {
1203          mchunkptr q = align_as_chunk(s->base);
1204          while (segment_holds(s, q) &&
1205                q != m->top && q->head != FENCEPOST_HEAD) {
1206             size_t sz = chunksize(q);
1207             sum += sz;
1208             if (!is_inuse(q)) {
1209             mfree += sz;
1210             ++nfree;
1211             }
1212             q = next_chunk(q);
1213          }
1214          s = s->next;
1215       }
1216       {
1217          size_t uordblks = m->footprint - mfree;
1218          if(nfree)
1219             alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE);
1220          else
1221             alloc_mem = uordblks;
1222          }
1223       }
1224 
1225       POSTACTION(m);
1226    }
1227    return alloc_mem;
1228 }
1229 
1230 BOOST_CONTAINER_DECL size_t boost_cont_chunksize(const void *p)
1231 {  return chunksize(mem2chunk(p));   }
1232 
1233 BOOST_CONTAINER_DECL int boost_cont_all_deallocated()
1234 {  return !s_allocated_memory;  }
1235 
1236 BOOST_CONTAINER_DECL boost_cont_malloc_stats_t boost_cont_malloc_stats()
1237 {
1238   mstate ms = (mstate)gm;
1239   if (ok_magic(ms)) {
1240     return get_malloc_stats(ms);
1241   }
1242   else {
1243     boost_cont_malloc_stats_t r = { 0, 0, 0 };
1244     USAGE_ERROR_ACTION(ms,ms);
1245     return r;
1246   }
1247 }
1248 
1249 BOOST_CONTAINER_DECL size_t boost_cont_in_use_memory()
1250 {  return s_allocated_memory;   }
1251 
1252 BOOST_CONTAINER_DECL int boost_cont_trim(size_t pad)
1253 {
1254    ensure_initialization();
1255    return dlmalloc_trim(pad);
1256 }
1257 
1258 BOOST_CONTAINER_DECL int boost_cont_grow
1259    (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
1260 {
1261    mstate ms = (mstate)gm;
1262    if (!ok_magic(ms)) {
1263       USAGE_ERROR_ACTION(ms,ms);
1264       return 0;
1265    }
1266 
1267    if (!PREACTION(ms)) {
1268       mchunkptr p = mem2chunk(oldmem);
1269       size_t oldsize = chunksize(p);
1270       p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
1271       POSTACTION(ms);
1272       if(p){
1273          check_inuse_chunk(ms, p);
1274          *received = DL_SIZE_IMPL(oldmem);
1275          s_allocated_memory += chunksize(p) - oldsize;
1276       }
1277       return 0 != p;
1278    }
1279    return 0;
1280 }
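/* Illustrative usage sketch (kept out of the build with #if 0): try to expand
   an existing allocation in place to at least 200 and preferably 500 usable
   bytes. On success *received reports the exact usable size obtained; on
   failure the original buffer is left untouched. The helper name example_grow
   is hypothetical. */
#if 0
static void example_grow(void *p)
{
   size_t received = 0;
   if(boost_cont_grow(p, 200, 500, &received)){
      /* p now offers at least 200 usable bytes; 'received' holds the exact amount */
   }
}
#endif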
1281 
1282 BOOST_CONTAINER_DECL int boost_cont_shrink
1283    (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit)
1284 {
1285    mstate ms = (mstate)gm;
1286    if (!ok_magic(ms)) {
1287       USAGE_ERROR_ACTION(ms,ms);
1288       return 0;
1289    }
1290 
1291    if (!PREACTION(ms)) {
1292       int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit);
1293       POSTACTION(ms);
1294       return 0 != ret;
1295    }
1296    return 0;
1297 }
1298 
1299 
1300 BOOST_CONTAINER_DECL void* boost_cont_alloc
1301    (size_t minbytes, size_t preferred_bytes, size_t *received_bytes)
1302 {
1303    //ensure_initialization provided by boost_cont_allocation_command
1304    return boost_cont_allocation_command
1305       (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first;
1306 }
1307 
1308 BOOST_CONTAINER_DECL void boost_cont_multidealloc(boost_cont_memchain *pchain)
1309 {
1310    mstate ms = (mstate)gm;
1311    if (!ok_magic(ms)) {
1312       (void)ms;
1313       USAGE_ERROR_ACTION(ms,ms);
1314    }
1315    internal_multialloc_free(ms, pchain);
1316 }
1317 
1318 BOOST_CONTAINER_DECL int boost_cont_malloc_check()
1319 {
1320 #ifdef DEBUG
1321    mstate ms = (mstate)gm;
1322    ensure_initialization();
1323    if (!ok_magic(ms)) {
1324       (void)ms;
1325       USAGE_ERROR_ACTION(ms,ms);
1326       return 0;
1327    }
1328    check_malloc_state(ms);
1329    return 1;
1330 #else
1331    return 1;
1332 #endif
1333 }
1334 
1335 
1336 BOOST_CONTAINER_DECL boost_cont_command_ret_t boost_cont_allocation_command
1337    (allocation_type command, size_t sizeof_object, size_t limit_size
1338    , size_t preferred_size, size_t *received_size, void *reuse_ptr)
1339 {
1340    boost_cont_command_ret_t ret = { 0, 0 };
1341    ensure_initialization();
1342    if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
1343       int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
1344                              , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
1345       ret.first = success ? reuse_ptr : 0;
1346       return ret;
1347    }
1348 
1349    *received_size = 0;
1350 
1351    if(limit_size > preferred_size)
1352       return ret;
1353 
1354    {
1355       mstate ms = (mstate)gm;
1356 
1357       /*Expand in place*/
1358       if (!PREACTION(ms)) {
1359          #if FOOTERS
1360          if(reuse_ptr){
1361             mstate m = get_mstate_for(mem2chunk(reuse_ptr));
1362             if (!ok_magic(m)) {
1363                USAGE_ERROR_ACTION(m, reuse_ptr);
1364                return ret;
1365             }
1366          }
1367          #endif
1368          if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1369             void *r = internal_grow_both_sides
1370                ( ms, command, reuse_ptr, limit_size
1371                , preferred_size, received_size, sizeof_object, 1);
1372             if(r){
1373                ret.first  = r;
1374                ret.second = 1;
1375                goto postaction;
1376             }
1377          }
1378 
1379          if(command & BOOST_CONTAINER_ALLOCATE_NEW){
1380             void *addr = mspace_malloc_lockless(ms, preferred_size);
1381             if(!addr)   addr = mspace_malloc_lockless(ms, limit_size);
1382             if(addr){
1383                s_allocated_memory += chunksize(mem2chunk(addr));
1384                *received_size = DL_SIZE_IMPL(addr); /* only valid for a non-null allocation */
1385             }
1386             ret.first  = addr;
1387             ret.second = 0;
1388             if(addr){
1389                goto postaction;
1390             }
1391          }
1392 
1393          //Now try to expand both sides with min size
1394          if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1395             void *r = internal_grow_both_sides
1396                ( ms, command, reuse_ptr, limit_size
1397                , preferred_size, received_size, sizeof_object, 0);
1398             if(r){
1399                ret.first  = r;
1400                ret.second = 1;
1401                goto postaction;
1402             }
1403          }
1404          postaction:
1405          POSTACTION(ms);
1406       }
1407    }
1408    return ret;
1409 }
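/* Illustrative usage sketch (kept out of the build with #if 0): request forward
   in-place expansion of an existing buffer to between 300 and 400 usable bytes,
   falling back to a fresh allocation. ret.second is nonzero when the original
   buffer was reused in place and zero when a new buffer was returned. The helper
   name example_allocation_command and the parameter old_ptr are hypothetical. */
#if 0
static void example_allocation_command(void *old_ptr)
{
   size_t received = 0;
   boost_cont_command_ret_t ret = boost_cont_allocation_command
      ( BOOST_CONTAINER_ALLOCATE_NEW | BOOST_CONTAINER_EXPAND_FWD
      , 1        /* sizeof_object                  */
      , 300      /* limit_size: minimum acceptable */
      , 400      /* preferred_size                 */
      , &received
      , old_ptr);
   if(ret.first && !ret.second){
      /* a brand new buffer was returned; old_ptr was not modified */
   }
}
#endif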
1410 
1411 BOOST_CONTAINER_DECL int boost_cont_mallopt(int param_number, int value)
1412 {
1413   return change_mparam(param_number, value);
1414 }
1415 
1416 //#ifdef DL_DEBUG_DEFINED
1417 //   #undef DEBUG
1418 //#endif
1419 
1420 #ifdef _MSC_VER
1421 #pragma warning (pop)
1422 #endif
1423