1 // Allocators -*- C++ -*-
2 
3 // Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
4 // 2010, 2011
5 // Free Software Foundation, Inc.
6 //
7 // This file is part of the GNU ISO C++ Library.  This library is free
8 // software; you can redistribute it and/or modify it under the
9 // terms of the GNU General Public License as published by the
10 // Free Software Foundation; either version 3, or (at your option)
11 // any later version.
12 
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 // GNU General Public License for more details.
17 
18 // Under Section 7 of GPL version 3, you are granted additional
19 // permissions described in the GCC Runtime Library Exception, version
20 // 3.1, as published by the Free Software Foundation.
21 
22 // You should have received a copy of the GNU General Public License and
23 // a copy of the GCC Runtime Library Exception along with this program;
24 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
25 // <http://www.gnu.org/licenses/>.
26 
27 /*
28  * Copyright (c) 1996-1997
29  * Silicon Graphics Computer Systems, Inc.
30  *
31  * Permission to use, copy, modify, distribute and sell this software
32  * and its documentation for any purpose is hereby granted without fee,
33  * provided that the above copyright notice appear in all copies and
34  * that both that copyright notice and this permission notice appear
35  * in supporting documentation.  Silicon Graphics makes no
36  * representations about the suitability of this software for any
37  * purpose.  It is provided "as is" without express or implied warranty.
38  */
39 
40 /** @file ext/pool_allocator.h
41  *  This file is a GNU extension to the Standard C++ Library.
42  */
43 
44 #ifndef _POOL_ALLOCATOR_H
45 #define _POOL_ALLOCATOR_H 1
46 
47 #include <bits/c++config.h>
48 #include <cstdlib>
49 #include <new>
50 #include <bits/functexcept.h>
51 #include <ext/atomicity.h>
52 #include <ext/concurrence.h>
53 #include <bits/move.h>
54 
55 namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
56 {
57 _GLIBCXX_BEGIN_NAMESPACE_VERSION
58 
59   using std::size_t;
60   using std::ptrdiff_t;
61 
62   /**
63    *  @brief  Base class for __pool_alloc.
64    *
65    *  Uses various allocators to fulfill underlying requests (and makes as
66    *  few requests as possible when in default high-speed pool mode).
67    *
68    *  Important implementation properties:
69    *  0. If globally mandated, then allocate objects from new
70    *  1. If the clients request an object of size > _S_max_bytes, the resulting
71    *     object will be obtained directly from new
72    *  2. In all other cases, we allocate an object of size exactly
73    *     _S_round_up(requested_size).  Thus the client has enough size
74    *     information that we can return the object to the proper free list
75    *     without permanently losing part of the object.
76    */
    class __pool_alloc_base
    {
    protected:

      // Alignment granularity: every pooled allocation is rounded up to
      // a multiple of this many bytes.
      enum { _S_align = 8 };
      // Requests larger than this bypass the pool and go directly to
      // operator new / operator delete.
      enum { _S_max_bytes = 128 };
      // One free list per multiple of _S_align, up to _S_max_bytes.
      enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

      // While a chunk sits on a free list, its storage is reused to hold
      // the link to the next free chunk.
      union _Obj
      {
	union _Obj* _M_free_list_link;    // Next chunk on the free list.
	char        _M_client_data[1];    // The client sees this.
      };

      // Free lists shared by all instantiations; mutation is guarded by
      // the mutex returned from _M_get_mutex().
      static _Obj* volatile         _S_free_list[_S_free_list_size];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      // Round __bytes up to the next multiple of _S_align.
      size_t
      _M_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

      // Return the free list serving requests of size __bytes.  Marked
      // _GLIBCXX_CONST: a pure lookup with no side effects.
      _GLIBCXX_CONST _Obj* volatile*
      _M_get_free_list(size_t __bytes) throw ();

      // Return the single mutex guarding all free-list manipulation.
      __mutex&
      _M_get_mutex() throw ();

      // Returns an object of size __n, and optionally adds to size __n
      // free list.
      void*
      _M_refill(size_t __n);

      // Allocates a chunk for nobjs of size size.  nobjs may be reduced
      // if it is inconvenient to allocate the requested number.
      char*
      _M_allocate_chunk(size_t __n, int& __nobjs);
    };
118 
119 
120   /**
121    * @brief  Allocator using a memory pool with a single lock.
122    * @ingroup allocators
123    */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      // Tri-state switch, decided on first allocate(): 0 = undecided,
      // >0 = GLIBCXX_FORCE_NEW is set (always use operator new),
      // <0 = not set (use the pool for small requests).
      static _Atomic_word	    _S_force_new;

    public:
      // Standard allocator member types.
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

      // The allocator is stateless: all constructors are trivial.
      __pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __pool_alloc(const __pool_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      // Largest __n accepted by allocate(__n).
      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      // C++0x variadic construct/destroy: forward arguments to any type's
      // constructor via placement new.
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
	{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif

      // Allocate storage for __n objects; defined out of line below.
      pointer
      allocate(size_type __n, const void* = 0);

      // Release storage previously obtained from allocate(__n).
      void
      deallocate(pointer __p, size_type __n);
    };
190 
191   template<typename _Tp>
192     inline bool
193     operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
194     { return true; }
195 
196   template<typename _Tp>
197     inline bool
198     operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
199     { return false; }
200 
  // Definition of the per-instantiation force-new flag.  Zero-initialized,
  // meaning "undecided" until the first call to allocate() consults the
  // GLIBCXX_FORCE_NEW environment variable.
  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;
204 
  // Allocate storage for __n objects of type _Tp.  Throws std::bad_alloc
  // if __n exceeds max_size() or the underlying allocation fails.
  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
	{
	  // Rejecting __n > max_size() also guarantees that
	  // __n * sizeof(_Tp) below cannot overflow.
	  if (__n > this->max_size())
	    std::__throw_bad_alloc();

	  // If there is a race through here, assume answer from getenv
	  // will resolve in same direction.  Inspired by techniques
	  // to efficiently support threading found in basic_string.h.
	  if (_S_force_new == 0)
	    {
	      if (std::getenv("GLIBCXX_FORCE_NEW"))
		__atomic_add_dispatch(&_S_force_new, 1);
	      else
		__atomic_add_dispatch(&_S_force_new, -1);
	    }

	  const size_t __bytes = __n * sizeof(_Tp);
	  // Large requests, or forced-new mode, bypass the pool entirely.
	  if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
	    __ret = static_cast<_Tp*>(::operator new(__bytes));
	  else
	    {
	      // Pure lookup (declared _GLIBCXX_CONST), safe outside the lock.
	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);

	      __scoped_lock sentry(_M_get_mutex());
	      _Obj* __restrict__ __result = *__free_list;
	      if (__builtin_expect(__result == 0, 0))
		// Free list empty: refill with objects of the rounded-up
		// size and return the first of them.
		__ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
	      else
		{
		  // Pop the head of the free list.
		  *__free_list = __result->_M_free_list_link;
		  __ret = reinterpret_cast<_Tp*>(__result);
		}
	      if (__ret == 0)
		std::__throw_bad_alloc();
	    }
	}
      return __ret;
    }
248 
249   template<typename _Tp>
250     void
251     __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
252     {
253       if (__builtin_expect(__n != 0 && __p != 0, true))
254 	{
255 	  const size_t __bytes = __n * sizeof(_Tp);
256 	  if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
257 	    ::operator delete(__p);
258 	  else
259 	    {
260 	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);
261 	      _Obj* __q = reinterpret_cast<_Obj*>(__p);
262 
263 	      __scoped_lock sentry(_M_get_mutex());
264 	      __q ->_M_free_list_link = *__free_list;
265 	      *__free_list = __q;
266 	    }
267 	}
268     }
269 
270 _GLIBCXX_END_NAMESPACE_VERSION
271 } // namespace
272 
273 #endif
274