1 // Copyright (C) 2000, 2001 Stephen Cleary
2 //
3 // Distributed under the Boost Software License, Version 1.0. (See
4 // accompanying file LICENSE_1_0.txt or copy at
5 // http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org for updates, documentation, and revision history.
8 
9 #ifndef BOOST_POOL_HPP
10 #define BOOST_POOL_HPP
11 
12 #include <boost/config.hpp>  // for workarounds
13 
14 // std::less, std::less_equal, std::greater
15 #include <functional>
16 // new[], delete[], std::nothrow
17 #include <new>
18 // std::size_t, std::ptrdiff_t
19 #include <cstddef>
20 // std::malloc, std::free
21 #include <cstdlib>
// std::exception
23 #include <exception>
24 // std::max
25 #include <algorithm>
26 
27 #include <boost/pool/poolfwd.hpp>
28 
29 // boost::details::pool::ct_lcm
30 #include <boost/pool/detail/ct_gcd_lcm.hpp>
31 // boost::details::pool::lcm
32 #include <boost/pool/detail/gcd_lcm.hpp>
33 // boost::simple_segregated_storage
34 #include <boost/pool/simple_segregated_storage.hpp>
35 
36 #ifdef BOOST_NO_STDC_NAMESPACE
37  namespace std { using ::malloc; using ::free; }
38 #endif
39 
40 // There are a few places in this file where the expression "this->m" is used.
41 // This expression is used to force instantiation-time name lookup, which I am
42 //   informed is required for strict Standard compliance.  It's only necessary
43 //   if "m" is a member of a base class that is dependent on a template
44 //   parameter.
45 // Thanks to Jens Maurer for pointing this out!
46 
47 namespace boost {
48 
// Default UserAllocator: obtains raw storage with array new/delete.
struct default_user_allocator_new_delete
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  // Allocate 'bytes' bytes of raw storage; returns 0 on failure
  //  (nothrow new) instead of throwing std::bad_alloc.
  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { return new (std::nothrow) char[bytes]; }
  // Release a block previously returned by malloc() above.
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { delete [] block; }
};
59 
// Alternative UserAllocator: obtains raw storage with std::malloc/std::free.
struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  // Allocate 'bytes' bytes of raw storage; std::malloc returns 0 on failure.
  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { return static_cast<char *>(std::malloc(bytes)); }
  // Release a block previously returned by malloc() above.
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { std::free(block); }
};
70 
71 namespace details {
72 
73 // PODptr is a class that pretends to be a "pointer" to different class types
74 //  that don't really exist.  It provides member functions to access the "data"
75 //  of the "object" it points to.  Since these "class" types are of variable
76 //  size, and contains some information at the *end* of its memory (for
77 //  alignment reasons), PODptr must contain the size of this "class" as well as
78 //  the pointer to this "object".
template <typename SizeType>
class PODptr
{
  public:
    typedef SizeType size_type;

  private:
    char * ptr;    // start of the memory block (0 when !valid())
    size_type sz;  // total size of the block, trailer included

    // Address of the trailing "size of next block" field: the last
    //  sizeof(size_type) bytes of the block.
    char * ptr_next_size() const
    { return (ptr + sz - sizeof(size_type)); }
    // Address of the trailing "pointer to next block" field, placed
    //  lcm(sizeof(size_type), sizeof(void *)) bytes before the size field
    //  so that both trailer fields are suitably aligned.
    char * ptr_next_ptr() const
    {
      return (ptr_next_size() -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

  public:
    PODptr(char * const nptr, const size_type nsize)
    :ptr(nptr), sz(nsize) { }
    // Default-constructed PODptr is invalid (null pointer, zero size).
    PODptr()
    :ptr(0), sz(0) { }

    // A PODptr is "valid" iff it points at a block.
    bool valid() const { return (begin() != 0); }
    void invalidate() { begin() = 0; }
    char * & begin() { return ptr; }
    char * begin() const { return ptr; }
    // One-past-the-end of the chunk area (i.e., start of the trailer).
    char * end() const { return ptr_next_ptr(); }
    size_type total_size() const { return sz; }
    // Bytes usable for chunks: total size minus the trailer
    //  (aligned next-pointer slot plus the next-size field).
    size_type element_size() const
    {
      return (sz - sizeof(size_type) -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

    // Reference to the "size of next block" field stored in the trailer.
    size_type & next_size() const
    {
      return *(static_cast<size_type *>(static_cast<void*>((ptr_next_size()))));
    }
    // Reference to the "pointer to next block" field stored in the trailer.
    char * & next_ptr() const
    { return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr()))); }

    // Read the link stored in this block's trailer as a PODptr.
    PODptr next() const
    { return PODptr<size_type>(next_ptr(), next_size()); }
    // Store 'arg' into this block's trailer link fields.
    void next(const PODptr & arg) const
    {
      next_ptr() = arg.begin();
      next_size() = arg.total_size();
    }
};
130 
131 } // namespace details
132 
// A fast memory allocator for chunks of a fixed requested_size.
//  Storage is obtained from UserAllocator in geometrically growing blocks;
//  individual chunks are handed out from the inherited free list.
template <typename UserAllocator>
class pool: protected simple_segregated_storage<
    typename UserAllocator::size_type>
{
  public:
    typedef UserAllocator user_allocator;
    typedef typename UserAllocator::size_type size_type;
    typedef typename UserAllocator::difference_type difference_type;

  private:
    // Chunks are rounded up to a multiple of this so that a free chunk can
    //  always hold a void* free-list link and trailer fields stay aligned.
    BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
        (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );

    // Returns 0 if out-of-memory
    // Called if malloc/ordered_malloc needs to resize the free list
    void * malloc_need_resize();
    void * ordered_malloc_need_resize();

  protected:
    // Head of the singly-linked list of memory blocks; the links live in
    //  each block's PODptr trailer.
    details::PODptr<size_type> list;

    // Access the simple_segregated_storage base (the free-chunk list).
    simple_segregated_storage<size_type> & store() { return *this; }
    const simple_segregated_storage<size_type> & store() const { return *this; }
    const size_type requested_size;  // chunk size requested by the user
    size_type next_size;             // chunks to request in the next block
    size_type start_size;            // value next_size is reset to
    size_type max_size;              // growth cap in chunks (0 == unlimited)

    // finds which POD in the list 'chunk' was allocated from
    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    {
      // We use std::less_equal and std::less to test 'chunk'
      //  against the array bounds because standard operators
      //  may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      //  defined for pointers to objects that are 1) in the same array, or
      //  2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8]
//WAS:
//      return (std::less_equal<void *>()(static_cast<void *>(i), chunk)
//          && std::less<void *>()(chunk,
//              static_cast<void *>(i + sizeof_i)));
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    // Actual per-chunk size used internally: requested_size rounded up to
    //  a multiple of min_alloc_size via lcm.
    size_type alloc_size() const
    {
      const unsigned min_size = min_alloc_size;
      return details::pool::lcm<size_type>(requested_size, min_size);
    }

    // for the sake of code readability :)
    static void * & nextof(void * const ptr)
    { return *(static_cast<void **>(ptr)); }

  public:
    // The second parameter here is an extension!
    // pre: npartition_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32,
        const size_type nmax_size = 0)
    :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size)
    { }

    // Destructor releases every block still owned by the pool.
    ~pool() { purge_memory(); }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    // These functions are extensions!
    size_type get_next_size() const { return next_size; }
    // Also resets start_size, so release/purge restart growth from here.
    void set_next_size(const size_type nnext_size) { next_size = start_size = nnext_size; }
    size_type get_max_size() const { return max_size; }
    void set_max_size(const size_type nmax_size) { max_size = nmax_size; }
    size_type get_requested_size() const { return requested_size; }

    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return malloc_need_resize();
    }

    // Like malloc(), but keeps the free list ordered by address.
    void * ordered_malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
    { (store().free)(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void ordered_free(void * const chunk)
    { store().ordered_free(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n)
    {
      // NOTE(review): n * requested_size can wrap for huge n (same unsigned
      //  arithmetic as ordered_malloc(n)) — callers must pass a sane n.
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      // Round the requested byte count up to whole chunks.
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    {
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      // Round the requested byte count up to whole chunks.
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }

    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    {
      return (find_POD(chunk).valid());
    }
};
285 
// Walks the (address-ordered) block list in parallel with the
//  (address-ordered) free-chunk list, releasing every block whose chunks
//  are all free.  Returns true iff at least one block was released.
template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{
  // This is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  //  Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free_p = this->first;
  void * prev_free_p = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free_p points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free_p points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free_p == 0)
      break;

    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free_p;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      //  (both lists are address-ordered, so the free chunks of this block
      //   must appear consecutively at the front of the free list)
      if (i != free_p)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free_p might have travelled outside ptr
        free_p = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free_p because we are in the same block
      free_p = nextof(free_p);
    }

    // post: if the memory block has any chunks, free_p points to one of them
    // otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      if (is_from(free_p, ptr.begin(), ptr.element_size()))
      {
        // Skip past the remaining free chunks of this (partially used)
        //  block so free_p lands in a later block (or becomes 0).
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free_p = free_p;
          free_p = nextof(free_p);
        } while (free_p && lt(free_p, end));
      }
      // This invariant is now restored:
      //     free_p points to the first free chunk in some next memory block, or
      //       0 if there is no such chunk.
      //     prev_free_p points to the last free chunk in this memory block.

      // We are just about to advance ptr.  Maintain the invariant:
      // prev is the PODptr whose next() is ptr, or !valid()
      // if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      //  (free_p already points past them, thanks to the scan above)
      if (prev_free_p != 0)
        nextof(prev_free_p) = free_p;
      else
        this->first = free_p;

      // And release memory
      (UserAllocator::free)(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  // Restart geometric growth from the configured starting size.
  next_size = start_size;
  return ret;
}
408 
409 template <typename UserAllocator>
purge_memory()410 bool pool<UserAllocator>::purge_memory()
411 {
412   details::PODptr<size_type> iter = list;
413 
414   if (!iter.valid())
415     return false;
416 
417   do
418   {
419     // hold "next" pointer
420     const details::PODptr<size_type> next = iter.next();
421 
422     // delete the storage
423     (UserAllocator::free)(iter.begin());
424 
425     // increment iter
426     iter = next;
427   } while (iter.valid());
428 
429   list.invalidate();
430   this->first = 0;
431   next_size = start_size;
432 
433   return true;
434 }
435 
// Slow path of malloc(): the free list is empty, so obtain a fresh block
//  from UserAllocator, carve it into chunks, link it into the block list,
//  and return one chunk.  Returns 0 if UserAllocator fails.
template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  // Block layout: next_size chunks followed by the PODptr trailer
  //  (aligned next-pointer slot plus the next-size field).
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);

  // Grow geometrically: double next_size, capped (when max_size != 0) so
  //  the next block never exceeds max_size requested-size objects.
  BOOST_USING_STD_MIN();
  if(!max_size)
    next_size <<= 1;
  else if( next_size*partition_size/requested_size < max_size)
    next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);

  //  initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  node.next(list);
  list = node;

  //  and return a chunk from it.
  return (store().malloc)();
}
464 
// Slow path of ordered_malloc(): like malloc_need_resize(), but both the
//  free list and the block list are kept ordered by address.
//  Returns 0 if UserAllocator fails.
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  // Block layout: next_size chunks followed by the PODptr trailer.
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);

  // Grow geometrically: double next_size, capped (when max_size != 0).
  BOOST_USING_STD_MIN();
  if(!max_size)
    next_size <<= 1;
  else if( next_size*partition_size/requested_size < max_size)
    next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);

  //  initialize it,
  //  (add_ordered_block keeps the chunks of this block sorted; the chunks
  //  of a single fresh block are contiguous, and merging them into the
  //  empty-or-ordered free list preserves the ordered invariant)
  store().add_ordered_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the address-ordered block list,
  //   handle border case (empty list, or new block belongs at the front)
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return a chunk from it.
  return (store().malloc)();
}
518 
519 template <typename UserAllocator>
ordered_malloc(const size_type n)520 void * pool<UserAllocator>::ordered_malloc(const size_type n)
521 {
522   const size_type partition_size = alloc_size();
523   const size_type total_req_size = n * requested_size;
524   const size_type num_chunks = total_req_size / partition_size +
525       ((total_req_size % partition_size) ? true : false);
526 
527   void * ret = store().malloc_n(num_chunks, partition_size);
528 
529   if (ret != 0)
530     return ret;
531 
532   // Not enougn memory in our storages; make a new storage,
533   BOOST_USING_STD_MAX();
534   next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
535   const size_type POD_size = next_size * partition_size +
536       details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
537   char * const ptr = (UserAllocator::malloc)(POD_size);
538   if (ptr == 0)
539     return 0;
540   const details::PODptr<size_type> node(ptr, POD_size);
541 
542   // Split up block so we can use what wasn't requested
543   //  (we can use "add_block" here because we know that
544   //  the free list is empty, so we don't have to use
545   //  the slower ordered version)
546   if (next_size > num_chunks)
547     store().add_ordered_block(node.begin() + num_chunks * partition_size,
548         node.element_size() - num_chunks * partition_size, partition_size);
549 
550   BOOST_USING_STD_MIN();
551   if(!max_size)
552     next_size <<= 1;
553   else if( next_size*partition_size/requested_size < max_size)
554     next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);
555 
556   //  insert it into the list,
557   //   handle border case
558   if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
559   {
560     node.next(list);
561     list = node;
562   }
563   else
564   {
565     details::PODptr<size_type> prev = list;
566 
567     while (true)
568     {
569       // if we're about to hit the end or
570       //  if we've found where "node" goes
571       if (prev.next_ptr() == 0
572           || std::greater<void *>()(prev.next_ptr(), node.begin()))
573         break;
574 
575       prev = prev.next();
576     }
577 
578     node.next(prev.next());
579     prev.next(node);
580   }
581 
582   //  and return it.
583   return node.begin();
584 }
585 
586 template <typename UserAllocator>
587 details::PODptr<typename pool<UserAllocator>::size_type>
find_POD(void * const chunk) const588 pool<UserAllocator>::find_POD(void * const chunk) const
589 {
590   // We have to find which storage this chunk is from.
591   details::PODptr<size_type> iter = list;
592   while (iter.valid())
593   {
594     if (is_from(chunk, iter.begin(), iter.element_size()))
595       return iter;
596     iter = iter.next();
597   }
598 
599   return iter;
600 }
601 
602 } // namespace boost
603 
604 #endif
605