1 // Bitmapped Allocator. -*- C++ -*-
2 
3 // Copyright (C) 2004 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library.  This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 // GNU General Public License for more details.
15 
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING.  If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
19 // USA.
20 
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction.  Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License.  This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
29 
30 
31 
32 #if !defined _BITMAP_ALLOCATOR_H
33 #define _BITMAP_ALLOCATOR_H 1
34 
35 #include <cstddef>
36 //For std::size_t, and ptrdiff_t.
37 #include <utility>
38 //For std::pair.
39 #include <algorithm>
40 //std::find_if, and std::lower_bound.
41 #include <vector>
//For the free list of exponentially growing memory blocks. At most,
//the size of the vector should not exceed the number of bits in an
//int or an unsigned int.
45 #include <functional>
46 //For greater_equal, and less_equal.
47 #include <new>
48 //For operator new.
49 #include <bits/gthr.h>
50 //For __gthread_mutex_t, __gthread_mutex_lock and __gthread_mutex_unlock.
51 #include <ext/new_allocator.h>
52 //For __gnu_cxx::new_allocator for std::vector.
53 
54 #include <cassert>
55 #define NDEBUG
56 
57 //#define CHECK_FOR_ERRORS
58 //#define __CPU_HAS_BACKWARD_BRANCH_PREDICTION
59 
60 namespace __gnu_cxx
61 {
62   namespace {
63 #if defined __GTHREADS
64     bool const __threads_enabled = __gthread_active_p();
65 #endif
66 
67   }
68 
69 #if defined __GTHREADS
70   class _Mutex {
71     __gthread_mutex_t _M_mut;
72     //Prevent Copying and assignment.
73     _Mutex (_Mutex const&);
74     _Mutex& operator= (_Mutex const&);
75   public:
_Mutex()76     _Mutex ()
77     {
78       if (__threads_enabled)
79 	{
80 #if !defined __GTHREAD_MUTEX_INIT
81 	  __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mut);
82 #else
83 	  __gthread_mutex_t __mtemp = __GTHREAD_MUTEX_INIT;
84 	  _M_mut = __mtemp;
85 #endif
86 	}
87     }
~_Mutex()88     ~_Mutex ()
89     {
90       //Gthreads does not define a Mutex Destruction Function.
91     }
_M_get()92     __gthread_mutex_t *_M_get() { return &_M_mut; }
93   };
94 
95   class _Lock {
96     _Mutex* _M_pmt;
97     bool _M_locked;
98     //Prevent Copying and assignment.
99     _Lock (_Lock const&);
100     _Lock& operator= (_Lock const&);
101   public:
_Lock(_Mutex * __mptr)102     _Lock(_Mutex* __mptr)
103       : _M_pmt(__mptr), _M_locked(false)
104     { this->_M_lock(); }
_M_lock()105     void _M_lock()
106     {
107       if (__threads_enabled)
108 	{
109 	  _M_locked = true;
110 	  __gthread_mutex_lock(_M_pmt->_M_get());
111 	}
112     }
_M_unlock()113     void _M_unlock()
114     {
115       if (__threads_enabled)
116 	{
117 	  if (__builtin_expect(_M_locked, true))
118 	    {
119 	      __gthread_mutex_unlock(_M_pmt->_M_get());
120 	      _M_locked = false;
121 	    }
122 	}
123     }
~_Lock()124     ~_Lock() { this->_M_unlock(); }
125   };
126 #endif
127 
128 
129 
130   namespace __aux_balloc {
131     static const unsigned int _Bits_Per_Byte = 8;
132     static const unsigned int _Bits_Per_Block = sizeof(unsigned int) * _Bits_Per_Byte;
133 
134     template <typename _Addr_Pair_t>
__balloc_num_blocks(_Addr_Pair_t __ap)135     inline size_t __balloc_num_blocks (_Addr_Pair_t __ap)
136     {
137       return (__ap.second - __ap.first) + 1;
138     }
139 
140     template <typename _Addr_Pair_t>
__balloc_num_bit_maps(_Addr_Pair_t __ap)141     inline size_t __balloc_num_bit_maps (_Addr_Pair_t __ap)
142     {
143       return __balloc_num_blocks(__ap) / _Bits_Per_Block;
144     }
145 
146     //T should be a pointer type.
147     template <typename _Tp>
148     class _Inclusive_between : public std::unary_function<typename std::pair<_Tp, _Tp>, bool> {
149       typedef _Tp pointer;
150       pointer _M_ptr_value;
151       typedef typename std::pair<_Tp, _Tp> _Block_pair;
152 
153     public:
_Inclusive_between(pointer __ptr)154       _Inclusive_between (pointer __ptr) : _M_ptr_value(__ptr) { }
operator()155       bool operator () (_Block_pair __bp) const throw ()
156       {
157 	if (std::less_equal<pointer> ()(_M_ptr_value, __bp.second) &&
158 	    std::greater_equal<pointer> ()(_M_ptr_value, __bp.first))
159 	  return true;
160 	else
161 	  return false;
162       }
163     };
164 
165     //Used to pass a Functor to functions by reference.
166     template <typename _Functor>
167     class _Functor_Ref :
168       public std::unary_function<typename _Functor::argument_type, typename _Functor::result_type> {
169       _Functor& _M_fref;
170 
171     public:
172       typedef typename _Functor::argument_type argument_type;
173       typedef typename _Functor::result_type result_type;
174 
_Functor_Ref(_Functor & __fref)175       _Functor_Ref (_Functor& __fref) : _M_fref(__fref) { }
operator()176       result_type operator() (argument_type __arg) { return _M_fref (__arg); }
177     };
178 
179 
180     //T should be a pointer type, and A is the Allocator for the vector.
181     template <typename _Tp, typename _Alloc>
182     class _Ffit_finder
183       : public std::unary_function<typename std::pair<_Tp, _Tp>, bool> {
184       typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
185       typedef typename _BPVector::difference_type _Counter_type;
186       typedef typename std::pair<_Tp, _Tp> _Block_pair;
187 
188       unsigned int *_M_pbitmap;
189       unsigned int _M_data_offset;
190 
191     public:
_Ffit_finder()192       _Ffit_finder ()
193 	: _M_pbitmap (0), _M_data_offset (0)
194       { }
195 
operator()196       bool operator() (_Block_pair __bp) throw()
197       {
198 	//Set the _rover to the last unsigned integer, which is the
199 	//bitmap to the first free block. Thus, the bitmaps are in exact
200 	//reverse order of the actual memory layout. So, we count down
201 	//the bimaps, which is the same as moving up the memory.
202 
203 	//If the used count stored at the start of the Bit Map headers
204 	//is equal to the number of Objects that the current Block can
205 	//store, then there is definitely no space for another single
206 	//object, so just return false.
207 	_Counter_type __diff = __gnu_cxx::__aux_balloc::__balloc_num_bit_maps (__bp);
208 
209 	assert (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) <=
210 		__gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp));
211 
212 	if (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) ==
213 	    __gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp))
214 	  return false;
215 
216 	unsigned int *__rover = reinterpret_cast<unsigned int*>(__bp.first) - 1;
217 	for (_Counter_type __i = 0; __i < __diff; ++__i)
218 	  {
219 	    _M_data_offset = __i;
220 	    if (*__rover)
221 	      {
222 		_M_pbitmap = __rover;
223 		return true;
224 	      }
225 	    --__rover;
226 	  }
227 	return false;
228       }
229 
_M_get()230       unsigned int *_M_get () { return _M_pbitmap; }
_M_offset()231       unsigned int _M_offset () { return _M_data_offset * _Bits_Per_Block; }
232     };
233 
234     //T should be a pointer type.
235     template <typename _Tp, typename _Alloc>
236     class _Bit_map_counter {
237 
238       typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
239       typedef typename _BPVector::size_type _Index_type;
240       typedef _Tp pointer;
241 
242       _BPVector& _M_vbp;
243       unsigned int *_M_curr_bmap;
244       unsigned int *_M_last_bmap_in_block;
245       _Index_type _M_curr_index;
246 
247     public:
248       //Use the 2nd parameter with care. Make sure that such an entry
249       //exists in the vector before passing that particular index to
250       //this ctor.
251       _Bit_map_counter (_BPVector& Rvbp, int __index = -1)
_M_vbp(Rvbp)252 	: _M_vbp(Rvbp)
253       {
254 	this->_M_reset(__index);
255       }
256 
throw()257       void _M_reset (int __index = -1) throw()
258       {
259 	if (__index == -1)
260 	  {
261 	    _M_curr_bmap = 0;
262 	    _M_curr_index = (_Index_type)-1;
263 	    return;
264 	  }
265 
266 	_M_curr_index = __index;
267 	_M_curr_bmap = reinterpret_cast<unsigned int*>(_M_vbp[_M_curr_index].first) - 1;
268 
269 	assert (__index <= (int)_M_vbp.size() - 1);
270 
271 	_M_last_bmap_in_block = _M_curr_bmap -
272 	  ((_M_vbp[_M_curr_index].second - _M_vbp[_M_curr_index].first + 1) / _Bits_Per_Block - 1);
273       }
274 
275       //Dangerous Function! Use with extreme care. Pass to this
276       //function ONLY those values that are known to be correct,
277       //otherwise this will mess up big time.
_M_set_internal_bit_map(unsigned int * __new_internal_marker)278       void _M_set_internal_bit_map (unsigned int *__new_internal_marker) throw()
279       {
280 	_M_curr_bmap = __new_internal_marker;
281       }
282 
_M_finished()283       bool _M_finished () const throw()
284       {
285 	return (_M_curr_bmap == 0);
286       }
287 
288       _Bit_map_counter& operator++ () throw()
289       {
290 	if (_M_curr_bmap == _M_last_bmap_in_block)
291 	  {
292 	    if (++_M_curr_index == _M_vbp.size())
293 	      {
294 		_M_curr_bmap = 0;
295 	      }
296 	    else
297 	      {
298 		this->_M_reset (_M_curr_index);
299 	      }
300 	  }
301 	else
302 	  {
303 	    --_M_curr_bmap;
304 	  }
305 	return *this;
306       }
307 
_M_get()308       unsigned int *_M_get ()
309       {
310 	return _M_curr_bmap;
311       }
312 
_M_base()313       pointer _M_base () { return _M_vbp[_M_curr_index].first; }
_M_offset()314       unsigned int _M_offset ()
315       {
316 	return _Bits_Per_Block * ((reinterpret_cast<unsigned int*>(this->_M_base()) - _M_curr_bmap) - 1);
317       }
318 
_M_where()319       unsigned int _M_where () { return _M_curr_index; }
320     };
321   }
322 
323   //Generic Version of the bsf instruction.
324   typedef unsigned int _Bit_map_type;
_Bit_scan_forward(register _Bit_map_type __num)325   static inline unsigned int _Bit_scan_forward (register _Bit_map_type __num)
326   {
327     return static_cast<unsigned int>(__builtin_ctz(__num));
328   }
329 
330   struct _OOM_handler {
331     static std::new_handler _S_old_handler;
332     static bool _S_handled_oom;
333     typedef void (*_FL_clear_proc)(void);
334     static _FL_clear_proc _S_oom_fcp;
335 
_OOM_handler_OOM_handler336     _OOM_handler (_FL_clear_proc __fcp)
337     {
338       _S_oom_fcp = __fcp;
339       _S_old_handler = std::set_new_handler (_S_handle_oom_proc);
340       _S_handled_oom = false;
341     }
342 
_S_handle_oom_proc_OOM_handler343     static void _S_handle_oom_proc()
344     {
345       _S_oom_fcp();
346       std::set_new_handler (_S_old_handler);
347       _S_handled_oom = true;
348     }
349 
~_OOM_handler_OOM_handler350     ~_OOM_handler ()
351     {
352       if (!_S_handled_oom)
353 	std::set_new_handler (_S_old_handler);
354     }
355   };
356 
357   std::new_handler _OOM_handler::_S_old_handler;
358   bool _OOM_handler::_S_handled_oom = false;
359   _OOM_handler::_FL_clear_proc _OOM_handler::_S_oom_fcp = 0;
360 
361 
362   class _BA_free_list_store {
363     struct _LT_pointer_compare {
364       template <typename _Tp>
operator_LT_pointer_compare365       bool operator() (_Tp* __pt, _Tp const& __crt) const throw()
366       {
367 	return *__pt < __crt;
368       }
369     };
370 
371 #if defined __GTHREADS
372     static _Mutex _S_bfl_mutex;
373 #endif
374     static std::vector<unsigned int*> _S_free_list;
375     typedef std::vector<unsigned int*>::iterator _FLIter;
376 
_S_validate_free_list(unsigned int * __addr)377     static void _S_validate_free_list(unsigned int *__addr) throw()
378     {
379       const unsigned int __max_size = 64;
380       if (_S_free_list.size() >= __max_size)
381 	{
382 	  //Ok, the threshold value has been reached.
383 	  //We determine which block to remove from the list of free
384 	  //blocks.
385 	  if (*__addr >= *_S_free_list.back())
386 	    {
387 	      //Ok, the new block is greater than or equal to the last
388 	      //block in the list of free blocks. We just free the new
389 	      //block.
390 	      operator delete((void*)__addr);
391 	      return;
392 	    }
393 	  else
394 	    {
395 	      //Deallocate the last block in the list of free lists, and
396 	      //insert the new one in it's correct position.
397 	      operator delete((void*)_S_free_list.back());
398 	      _S_free_list.pop_back();
399 	    }
400 	}
401 
402       //Just add the block to the list of free lists
403       //unconditionally.
404       _FLIter __temp = std::lower_bound(_S_free_list.begin(), _S_free_list.end(),
405 					*__addr, _LT_pointer_compare ());
406       //We may insert the new free list before _temp;
407       _S_free_list.insert(__temp, __addr);
408     }
409 
_S_should_i_give(unsigned int __block_size,unsigned int __required_size)410     static bool _S_should_i_give(unsigned int __block_size, unsigned int __required_size) throw()
411     {
412       const unsigned int __max_wastage_percentage = 36;
413       if (__block_size >= __required_size &&
414 	  (((__block_size - __required_size) * 100 / __block_size) < __max_wastage_percentage))
415 	return true;
416       else
417 	return false;
418     }
419 
420   public:
421     typedef _BA_free_list_store _BFL_type;
422 
_S_insert_free_list(unsigned int * __addr)423     static inline void _S_insert_free_list(unsigned int *__addr) throw()
424     {
425 #if defined __GTHREADS
426       _Lock __bfl_lock(&_S_bfl_mutex);
427 #endif
428       //Call _S_validate_free_list to decide what should be done with this
429       //particular free list.
430       _S_validate_free_list(--__addr);
431     }
432 
_S_get_free_list(unsigned int __sz)433     static unsigned int *_S_get_free_list(unsigned int __sz) throw (std::bad_alloc)
434     {
435 #if defined __GTHREADS
436       _Lock __bfl_lock(&_S_bfl_mutex);
437 #endif
438       _FLIter __temp = std::lower_bound(_S_free_list.begin(), _S_free_list.end(),
439 					__sz, _LT_pointer_compare());
440       if (__temp == _S_free_list.end() || !_S_should_i_give (**__temp, __sz))
441 	{
442 	  //We hold the lock because the OOM_Handler is a stateless
443 	  //entity.
444 	  _OOM_handler __set_handler(_BFL_type::_S_clear);
445 	  unsigned int *__ret_val = reinterpret_cast<unsigned int*>
446 	    (operator new (__sz + sizeof(unsigned int)));
447 	  *__ret_val = __sz;
448 	  return ++__ret_val;
449 	}
450       else
451 	{
452 	  unsigned int* __ret_val = *__temp;
453 	  _S_free_list.erase (__temp);
454 	  return ++__ret_val;
455 	}
456     }
457 
458     //This function just clears the internal Free List, and gives back
459     //all the memory to the OS.
_S_clear()460     static void _S_clear()
461     {
462 #if defined __GTHREADS
463       _Lock __bfl_lock(&_S_bfl_mutex);
464 #endif
465       _FLIter __iter = _S_free_list.begin();
466       while (__iter != _S_free_list.end())
467 	{
468 	  operator delete((void*)*__iter);
469 	  ++__iter;
470 	}
471       _S_free_list.clear();
472     }
473 
474   };
475 
476 #if defined __GTHREADS
477   _Mutex _BA_free_list_store::_S_bfl_mutex;
478 #endif
479   std::vector<unsigned int*> _BA_free_list_store::_S_free_list;
480 
481   template <typename _Tp> class bitmap_allocator;
482   // specialize for void:
483   template <> class bitmap_allocator<void> {
484   public:
485     typedef void*       pointer;
486     typedef const void* const_pointer;
487     //  reference-to-void members are impossible.
488     typedef void  value_type;
489     template <typename _Tp1> struct rebind { typedef bitmap_allocator<_Tp1> other; };
490   };
491 
492   template <typename _Tp> class bitmap_allocator : private _BA_free_list_store {
493   public:
494     typedef size_t    size_type;
495     typedef ptrdiff_t difference_type;
496     typedef _Tp*        pointer;
497     typedef const _Tp*  const_pointer;
498     typedef _Tp&        reference;
499     typedef const _Tp&  const_reference;
500     typedef _Tp         value_type;
501     template <typename _Tp1> struct rebind { typedef bitmap_allocator<_Tp1> other; };
502 
503   private:
504     static const unsigned int _Bits_Per_Byte = 8;
505     static const unsigned int _Bits_Per_Block = sizeof(unsigned int) * _Bits_Per_Byte;
506 
_S_bit_allocate(unsigned int * __pbmap,unsigned int __pos)507     static inline void _S_bit_allocate(unsigned int *__pbmap, unsigned int __pos) throw()
508     {
509       unsigned int __mask = 1 << __pos;
510       __mask = ~__mask;
511       *__pbmap &= __mask;
512     }
513 
_S_bit_free(unsigned int * __pbmap,unsigned int __pos)514     static inline void _S_bit_free(unsigned int *__pbmap, unsigned int __pos) throw()
515     {
516       unsigned int __mask = 1 << __pos;
517       *__pbmap |= __mask;
518     }
519 
_S_memory_get(size_t __sz)520     static inline void *_S_memory_get(size_t __sz) throw (std::bad_alloc)
521     {
522       return operator new(__sz);
523     }
524 
_S_memory_put(void * __vptr)525     static inline void _S_memory_put(void *__vptr) throw ()
526     {
527       operator delete(__vptr);
528     }
529 
530     typedef typename std::pair<pointer, pointer> _Block_pair;
531     typedef typename __gnu_cxx::new_allocator<_Block_pair> _BPVec_allocator_type;
532     typedef typename std::vector<_Block_pair, _BPVec_allocator_type> _BPVector;
533 
534 
535 #if defined CHECK_FOR_ERRORS
536     //Complexity: O(lg(N)). Where, N is the number of block of size
537     //sizeof(value_type).
_S_check_for_free_blocks()538     static void _S_check_for_free_blocks() throw()
539     {
540       typedef typename __gnu_cxx::__aux_balloc::_Ffit_finder<pointer, _BPVec_allocator_type> _FFF;
541       _FFF __fff;
542       typedef typename _BPVector::iterator _BPiter;
543       _BPiter __bpi = std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(),
544 				   __gnu_cxx::__aux_balloc::_Functor_Ref<_FFF>(__fff));
545       assert(__bpi == _S_mem_blocks.end());
546     }
547 #endif
548 
549 
550     //Complexity: O(1), but internally depends upon the complexity of
551     //the function _BA_free_list_store::_S_get_free_list. The part
552     //where the bitmap headers are written is of worst case complexity:
553     //O(X),where X is the number of blocks of size sizeof(value_type)
554     //within the newly acquired block. Having a tight bound.
_S_refill_pool()555     static void _S_refill_pool() throw (std::bad_alloc)
556     {
557 #if defined CHECK_FOR_ERRORS
558       _S_check_for_free_blocks();
559 #endif
560 
561       const unsigned int __num_bit_maps = _S_block_size / _Bits_Per_Block;
562       const unsigned int __size_to_allocate = sizeof(unsigned int) +
563 	_S_block_size * sizeof(value_type) + __num_bit_maps*sizeof(unsigned int);
564 
565       unsigned int *__temp =
566 	reinterpret_cast<unsigned int*>(_BA_free_list_store::_S_get_free_list(__size_to_allocate));
567       *__temp = 0;
568       ++__temp;
569 
570       //The Header information goes at the Beginning of the Block.
571       _Block_pair __bp = std::make_pair(reinterpret_cast<pointer>(__temp + __num_bit_maps),
572 				       reinterpret_cast<pointer>(__temp + __num_bit_maps)
573 					+ _S_block_size - 1);
574 
575       //Fill the Vector with this information.
576       _S_mem_blocks.push_back(__bp);
577 
578       unsigned int __bit_mask = 0; //0 Indicates all Allocated.
579       __bit_mask = ~__bit_mask; //1 Indicates all Free.
580 
581       for (unsigned int __i = 0; __i < __num_bit_maps; ++__i)
582 	__temp[__i] = __bit_mask;
583 
584       //On some implementations, operator new might throw bad_alloc, or
585       //malloc might fail if the size passed is too large, therefore, we
586       //limit the size passed to malloc or operator new.
587       _S_block_size *= 2;
588     }
589 
590     static _BPVector _S_mem_blocks;
591     static unsigned int _S_block_size;
592     static __gnu_cxx::__aux_balloc::_Bit_map_counter<pointer, _BPVec_allocator_type> _S_last_request;
593     static typename _BPVector::size_type _S_last_dealloc_index;
594 #if defined __GTHREADS
595     static _Mutex _S_mut;
596 #endif
597 
598     //Complexity: Worst case complexity is O(N), but that is hardly ever
599     //hit. if and when this particular case is encountered, the next few
600     //cases are guaranteed to have a worst case complexity of O(1)!
601     //That's why this function performs very well on the average. you
602     //can consider this function to be having a complexity refrred to
603     //commonly as: Amortized Constant time.
_S_allocate_single_object()604     static pointer _S_allocate_single_object()
605     {
606 #if defined __GTHREADS
607       _Lock __bit_lock(&_S_mut);
608 #endif
609 
610       //The algorithm is something like this: The last_requst variable
611       //points to the last accessed Bit Map. When such a condition
612       //occurs, we try to find a free block in the current bitmap, or
613       //succeeding bitmaps until the last bitmap is reached. If no free
614       //block turns up, we resort to First Fit method.
615 
616       //WARNING: Do not re-order the condition in the while statement
617       //below, because it relies on C++'s short-circuit
618       //evaluation. The return from _S_last_request->_M_get() will NOT
619       //be dereferenceable if _S_last_request->_M_finished() returns
620       //true. This would inevitibly lead to a NULL pointer dereference
621       //if tinkered with.
622       while (_S_last_request._M_finished() == false && (*(_S_last_request._M_get()) == 0))
623 	{
624 	  _S_last_request.operator++();
625 	}
626 
627       if (__builtin_expect(_S_last_request._M_finished() == true, false))
628 	{
629 	  //Fall Back to First Fit algorithm.
630 	  typedef typename __gnu_cxx::__aux_balloc::_Ffit_finder<pointer, _BPVec_allocator_type> _FFF;
631 	  _FFF __fff;
632 	  typedef typename _BPVector::iterator _BPiter;
633 	  _BPiter __bpi = std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(),
634 				      __gnu_cxx::__aux_balloc::_Functor_Ref<_FFF>(__fff));
635 
636 	  if (__bpi != _S_mem_blocks.end())
637 	    {
638 	      //Search was successful. Ok, now mark the first bit from
639 	      //the right as 0, meaning Allocated. This bit is obtained
640 	      //by calling _M_get() on __fff.
641 	      unsigned int __nz_bit = _Bit_scan_forward(*__fff._M_get());
642 	      _S_bit_allocate(__fff._M_get(), __nz_bit);
643 
644 	      _S_last_request._M_reset(__bpi - _S_mem_blocks.begin());
645 
646 	      //Now, get the address of the bit we marked as allocated.
647 	      pointer __ret_val = __bpi->first + __fff._M_offset() + __nz_bit;
648 	      unsigned int *__puse_count = reinterpret_cast<unsigned int*>(__bpi->first) -
649 		(__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(*__bpi) + 1);
650 	      ++(*__puse_count);
651 	      return __ret_val;
652 	    }
653 	  else
654 	    {
655 	      //Search was unsuccessful. We Add more memory to the pool
656 	      //by calling _S_refill_pool().
657 	      _S_refill_pool();
658 
659 	      //_M_Reset the _S_last_request structure to the first free
660 	      //block's bit map.
661 	      _S_last_request._M_reset(_S_mem_blocks.size() - 1);
662 
663 	      //Now, mark that bit as allocated.
664 	    }
665 	}
666       //_S_last_request holds a pointer to a valid bit map, that points
667       //to a free block in memory.
668       unsigned int __nz_bit = _Bit_scan_forward(*_S_last_request._M_get());
669       _S_bit_allocate(_S_last_request._M_get(), __nz_bit);
670 
671       pointer __ret_val = _S_last_request._M_base() + _S_last_request._M_offset() + __nz_bit;
672 
673       unsigned int *__puse_count = reinterpret_cast<unsigned int*>
674 	(_S_mem_blocks[_S_last_request._M_where()].first) -
675 	(__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(_S_mem_blocks[_S_last_request._M_where()]) + 1);
676       ++(*__puse_count);
677       return __ret_val;
678     }
679 
680     //Complexity: O(lg(N)), but the worst case is hit quite often! I
681     //need to do something about this. I'll be able to work on it, only
682     //when I have some solid figures from a few real apps.
_S_deallocate_single_object(pointer __p)683     static void _S_deallocate_single_object(pointer __p) throw()
684     {
685 #if defined __GTHREADS
686       _Lock __bit_lock(&_S_mut);
687 #endif
688 
689       typedef typename _BPVector::iterator _Iterator;
690       typedef typename _BPVector::difference_type _Difference_type;
691 
692       _Difference_type __diff;
693       int __displacement;
694 
695       assert(_S_last_dealloc_index >= 0);
696 
697       if (__gnu_cxx::__aux_balloc::_Inclusive_between<pointer>(__p)(_S_mem_blocks[_S_last_dealloc_index]))
698 	{
699 	  assert(_S_last_dealloc_index <= _S_mem_blocks.size() - 1);
700 
701 	  //Initial Assumption was correct!
702 	  __diff = _S_last_dealloc_index;
703 	  __displacement = __p - _S_mem_blocks[__diff].first;
704 	}
705       else
706 	{
707 	  _Iterator _iter = (std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(),
708 					  __gnu_cxx::__aux_balloc::_Inclusive_between<pointer>(__p)));
709 	  assert(_iter != _S_mem_blocks.end());
710 
711 	  __diff = _iter - _S_mem_blocks.begin();
712 	  __displacement = __p - _S_mem_blocks[__diff].first;
713 	  _S_last_dealloc_index = __diff;
714 	}
715 
716       //Get the position of the iterator that has been found.
717       const unsigned int __rotate = __displacement % _Bits_Per_Block;
718       unsigned int *__bit_mapC = reinterpret_cast<unsigned int*>(_S_mem_blocks[__diff].first) - 1;
719       __bit_mapC -= (__displacement / _Bits_Per_Block);
720 
721       _S_bit_free(__bit_mapC, __rotate);
722       unsigned int *__puse_count = reinterpret_cast<unsigned int*>
723 	(_S_mem_blocks[__diff].first) -
724 	(__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(_S_mem_blocks[__diff]) + 1);
725 
726       assert(*__puse_count != 0);
727 
728       --(*__puse_count);
729 
730       if (__builtin_expect(*__puse_count == 0, false))
731 	{
732 	  _S_block_size /= 2;
733 
734 	  //We may safely remove this block.
735 	  _Block_pair __bp = _S_mem_blocks[__diff];
736 	  _S_insert_free_list(__puse_count);
737 	  _S_mem_blocks.erase(_S_mem_blocks.begin() + __diff);
738 
739 	  //We reset the _S_last_request variable to reflect the erased
740 	  //block. We do this to protect future requests after the last
741 	  //block has been removed from a particular memory Chunk,
742 	  //which in turn has been returned to the free list, and
743 	  //hence had been erased from the vector, so the size of the
744 	  //vector gets reduced by 1.
745 	  if ((_Difference_type)_S_last_request._M_where() >= __diff--)
746 	    {
747 	      _S_last_request._M_reset(__diff);
748 	      //	      assert(__diff >= 0);
749 	    }
750 
751 	  //If the Index into the vector of the region of memory that
752 	  //might hold the next address that will be passed to
753 	  //deallocated may have been invalidated due to the above
754 	  //erase procedure being called on the vector, hence we try
755 	  //to restore this invariant too.
756 	  if (_S_last_dealloc_index >= _S_mem_blocks.size())
757 	    {
758 	      _S_last_dealloc_index =(__diff != -1 ? __diff : 0);
759 	      assert(_S_last_dealloc_index >= 0);
760 	    }
761 	}
762     }
763 
764   public:
bitmap_allocator()765     bitmap_allocator() throw()
766     { }
767 
bitmap_allocator(const bitmap_allocator &)768     bitmap_allocator(const bitmap_allocator&) { }
769 
bitmap_allocator(const bitmap_allocator<_Tp1> &)770     template <typename _Tp1> bitmap_allocator(const bitmap_allocator<_Tp1>&) throw()
771     { }
772 
throw()773     ~bitmap_allocator() throw()
774     { }
775 
776     //Complexity: O(1), but internally the complexity depends upon the
777     //complexity of the function(s) _S_allocate_single_object and
778     //_S_memory_get.
allocate(size_type __n)779     pointer allocate(size_type __n)
780     {
781       if (__builtin_expect(__n == 1, true))
782 	return _S_allocate_single_object();
783       else
784 	return reinterpret_cast<pointer>(_S_memory_get(__n * sizeof(value_type)));
785     }
786 
787     //Complexity: Worst case complexity is O(N) where N is the number of
788     //blocks of size sizeof(value_type) within the free lists that the
789     //allocator holds. However, this worst case is hit only when the
790     //user supplies a bogus argument to hint. If the hint argument is
791     //sensible, then the complexity drops to O(lg(N)), and in extreme
792     //cases, even drops to as low as O(1). So, if the user supplied
793     //argument is good, then this function performs very well.
allocate(size_type __n,typename bitmap_allocator<void>::const_pointer)794     pointer allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
795     {
796       return allocate(__n);
797     }
798 
deallocate(pointer __p,size_type __n)799     void deallocate(pointer __p, size_type __n) throw()
800     {
801       if (__builtin_expect(__n == 1, true))
802 	_S_deallocate_single_object(__p);
803       else
804 	_S_memory_put(__p);
805     }
806 
address(reference r)807     pointer address(reference r) const { return &r; }
address(const_reference r)808     const_pointer address(const_reference r) const { return &r; }
809 
max_size(void)810     size_type max_size(void) const throw() { return (size_type()-1)/sizeof(value_type); }
811 
construct(pointer p,const_reference __data)812     void construct (pointer p, const_reference __data)
813     {
814       ::new(p) value_type(__data);
815     }
816 
destroy(pointer p)817     void destroy (pointer p)
818     {
819       p->~value_type();
820     }
821 
822   };
823 
824   template <typename _Tp>
825   typename bitmap_allocator<_Tp>::_BPVector bitmap_allocator<_Tp>::_S_mem_blocks;
826 
827   template <typename _Tp>
828   unsigned int bitmap_allocator<_Tp>::_S_block_size = bitmap_allocator<_Tp>::_Bits_Per_Block;
829 
830   template <typename _Tp>
831   typename __gnu_cxx::bitmap_allocator<_Tp>::_BPVector::size_type
832   bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;
833 
834   template <typename _Tp>
835   __gnu_cxx::__aux_balloc::_Bit_map_counter
836   <typename bitmap_allocator<_Tp>::pointer, typename bitmap_allocator<_Tp>::_BPVec_allocator_type>
837   bitmap_allocator<_Tp>::_S_last_request(_S_mem_blocks);
838 
839 #if defined __GTHREADS
840   template <typename _Tp>
841   __gnu_cxx::_Mutex
842   bitmap_allocator<_Tp>::_S_mut;
843 #endif
844 
845   template <typename _Tp1, typename _Tp2>
846   bool operator== (const bitmap_allocator<_Tp1>&, const bitmap_allocator<_Tp2>&) throw()
847   {
848     return true;
849   }
850 
851   template <typename _Tp1, typename _Tp2>
852   bool operator!= (const bitmap_allocator<_Tp1>&, const bitmap_allocator<_Tp2>&) throw()
853   {
854     return false;
855   }
856 }
857 
858 
859 #endif //_BITMAP_ALLOCATOR_H
860