// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  @if maint
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new
   *  1. If the clients request an object of size > _S_max_bytes, the resulting
   *     object will be obtained directly from new
   *  2. In all other cases, we allocate an object of size exactly
   *     _M_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   *
   *  @endif
   */
    class __pool_alloc_base
    {
    protected:

      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

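      // A free-list node.  While a block sits on a free list its first
      // bytes hold the link to the next free block; once it is handed to
      // a client those same bytes become ordinary object storage, so the
      // free lists need no bookkeeping space beyond the blocks themselves.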
      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_S_free_list_size];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      size_t
      _M_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
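      // For example, with _S_align == 8: requests of 1-8 bytes round up
      // to 8, 9-16 to 16, and so on up to 128, so each pooled request
      // size maps onto exactly one of the _S_free_list_size (128 / 8 == 16)
      // free lists.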

      _Obj* volatile*
      _M_get_free_list(size_t __bytes);

      __mutex&
      _M_get_mutex();

      // Returns an object of size __n, and optionally adds further
      // objects of size __n to the free list.
      void*
      _M_refill(size_t __n);

      // Allocates a chunk of memory for __nobjs objects of size __n.
      // __nobjs may be reduced if it is inconvenient to allocate the
      // requested number.
      char*
      _M_allocate_chunk(size_t __n, int& __nobjs);
    };


  /// @brief  An allocator using shared, per-size free lists for small requests.
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word       _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

      __pool_alloc() throw() { }

      __pool_alloc(const __pool_alloc&) throw() { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) throw() { }

      ~__pool_alloc() throw() { }

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };

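  // An illustrative sketch, not part of the library itself: __pool_alloc
  // satisfies the allocator requirements, so it can be named explicitly as
  // a container's allocator.  Element allocations of at most _S_max_bytes
  // are then served from the shared free lists, while setting the
  // GLIBCXX_FORCE_NEW environment variable before the first allocation
  // sends every request straight to operator new.
  //
  //   #include <vector>
  //   #include <ext/pool_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
  //     v.push_back(42);   // small block comes from the pool
  //   }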
  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;

  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
        {
          if (__builtin_expect(__n > this->max_size(), false))
            std::__throw_bad_alloc();

          // If there is a race through here, assume answer from getenv
          // will resolve in same direction.  Inspired by techniques
          // to efficiently support threading found in basic_string.h.
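          // _S_force_new is effectively tri-state: 0 means not yet
          // decided, 1 means always use new and delete, and -1 means
          // use the pool for small requests.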
          if (_S_force_new == 0)
            {
              if (std::getenv("GLIBCXX_FORCE_NEW"))
                __atomic_add_dispatch(&_S_force_new, 1);
              else
                __atomic_add_dispatch(&_S_force_new, -1);
            }

          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > size_t(_S_max_bytes) || _S_force_new == 1)
            __ret = static_cast<_Tp*>(::operator new(__bytes));
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);

              __scoped_lock sentry(_M_get_mutex());
              _Obj* __restrict__ __result = *__free_list;
              if (__builtin_expect(__result == 0, 0))
                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
              else
                {
                  *__free_list = __result->_M_free_list_link;
                  __ret = reinterpret_cast<_Tp*>(__result);
                }
              if (__builtin_expect(__ret == 0, 0))
                std::__throw_bad_alloc();
            }
        }
      return __ret;
    }

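  // Deallocation mirrors allocation: blocks larger than _S_max_bytes (or
  // any block when GLIBCXX_FORCE_NEW is in effect) go back to operator
  // delete, while pooled blocks are pushed onto the matching free list for
  // reuse; the pool itself does not return its memory to the system.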
  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
        {
          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new == 1)
            ::operator delete(__p);
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
              _Obj* __q = reinterpret_cast<_Obj*>(__p);

              __scoped_lock sentry(_M_get_mutex());
              __q->_M_free_list_link = *__free_list;
              *__free_list = __q;
            }
        }
    }

_GLIBCXX_END_NAMESPACE

#endif