// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <cstdlib>
#include <ext/pool_allocator.h>

namespace
{
  __gnu_cxx::__mutex palloc_init_mutex;
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  // Definitions for __pool_alloc_base.
  __pool_alloc_base::_Obj* volatile*
  __pool_alloc_base::_M_get_free_list(size_t __bytes)
  {
    size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
    return _S_free_list + __i;
  }

  __mutex&
  __pool_alloc_base::_M_get_mutex()
  { return palloc_init_mutex; }

  // Allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  Assume that __n is properly aligned.  We hold the
  // allocation lock.
  char*
  __pool_alloc_base::_M_allocate_chunk(size_t __n, int& __nobjs)
  {
    char* __result;
    size_t __total_bytes = __n * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes)
      {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else if (__bytes_left >= __n)
      {
        __nobjs = (int)(__bytes_left / __n);
        __total_bytes = __n * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else
      {
        // Try to make use of the left-over piece.
        if (__bytes_left > 0)
          {
            _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
            ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
            *__free_list = (_Obj*)(void*)_S_start_free;
          }

        size_t __bytes_to_get = (2 * __total_bytes
                                 + _M_round_up(_S_heap_size >> 4));
        try
          {
            _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
          }
        catch (...)
          {
            // Try to make do with what we have.  That can't hurt.  We
            // do not try smaller requests, since that tends to result
            // in disaster on multi-process machines.
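            // Scavenge: walk the free lists for this size and larger,
            // recycle the first available block as the new chunk, and
            // retry the allocation from it.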
            size_t __i = __n;
            for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
              {
                _Obj* volatile* __free_list = _M_get_free_list(__i);
                _Obj* __p = *__free_list;
                if (__p != 0)
                  {
                    *__free_list = __p->_M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return _M_allocate_chunk(__n, __nobjs);
                    // Any leftover piece will eventually make it to the
                    // right free list.
                  }
              }
            // What we have wasn't enough.  Rethrow.
            _S_start_free = _S_end_free = 0;   // We have no chunk.
            __throw_exception_again;
          }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return _M_allocate_chunk(__n, __nobjs);
      }
  }

  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  void*
  __pool_alloc_base::_M_refill(size_t __n)
  {
    int __nobjs = 20;
    char* __chunk = _M_allocate_chunk(__n, __nobjs);
    _Obj* volatile* __free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;

    if (__nobjs == 1)
      return __chunk;
    __free_list = _M_get_free_list(__n);

    // Build free list in chunk.
    __result = (_Obj*)(void*)__chunk;
    *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
    for (int __i = 1; ; __i++)
      {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i)
          {
            __current_obj->_M_free_list_link = 0;
            break;
          }
        else
          __current_obj->_M_free_list_link = __next_obj;
      }
    return __result;
  }

  __pool_alloc_base::_Obj* volatile __pool_alloc_base::_S_free_list[_S_free_list_size];

  char* __pool_alloc_base::_S_start_free = 0;

  char* __pool_alloc_base::_S_end_free = 0;

  size_t __pool_alloc_base::_S_heap_size = 0;

  // Instantiations.
  template class __pool_alloc<char>;
  template class __pool_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
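
// Illustrative usage sketch (kept as a comment so it is not part of this
// translation unit): __pool_alloc is an extension allocator declared in
// <ext/pool_allocator.h>, and a program can opt into it by naming it as a
// container's allocator template argument.  The container and loop below
// are only an example, not code from this library.
//
//   #include <list>
//   #include <ext/pool_allocator.h>
//
//   int main()
//   {
//     std::list<int, __gnu_cxx::__pool_alloc<int> > l;
//     for (int i = 0; i < 100; ++i)
//       l.push_back(i);   // node allocations are small and are typically
//                         // served from the pool's free lists
//     return 0;
//   }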