// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file stl_alloc.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef __GLIBCPP_INTERNAL_ALLOC_H
#define __GLIBCPP_INTERNAL_ALLOC_H

/**
 *  @defgroup Allocators Memory Allocators
 *  @if maint
 *  stl_alloc.h implements some node allocators.  These are NOT the same as
 *  allocators in the C++ standard, nor in the original H-P STL.  They do not
 *  encapsulate different pointer types; we assume that there is only one
 *  pointer type.  The C++ standard allocators are intended to allocate
 *  individual objects, not pools or arenas.
 *
 *  In this file allocators are of two different styles:  "standard" and
 *  "SGI" (quotes included).  "Standard" allocators conform to 20.4.  "SGI"
 *  allocators differ in AT LEAST the following ways (add to this list as you
 *  discover them):
 *
 *   - "Standard" allocate() takes two parameters (n_count,hint=0) but "SGI"
 *     allocate() takes one parameter (n_size).
 *   - Likewise, "standard" deallocate() takes a count, but "SGI"
 *     deallocate() takes a byte size.
 *   - max_size(), construct(), and destroy() are missing in "SGI" allocators.
 *   - reallocate(p,oldsz,newsz) is added in "SGI", and behaves as
 *     if p=realloc(p,newsz).
 *
 *  "SGI" allocators may be wrapped in __allocator to convert the interface
 *  into a "standard" one.
 *  @endif
 *
 *  @note The @c reallocate member functions have been deprecated for 3.2
 *        and will be removed in 3.4.  You must define @c _GLIBCPP_DEPRECATED
 *        to make this visible in 3.2; see c++config.h.
 *
 *  The canonical description of these classes is in docs/html/ext/howto.html
 *  or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
*/
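
// A minimal sketch (not part of the library) contrasting the two styles
// described above.  Both classes used here are defined later in this file;
// the sizes and counts are illustrative only:
//
//   // "SGI" style: static members, sizes measured in bytes.
//   void* __raw = std::__new_alloc::allocate(32 * sizeof(int));
//   std::__new_alloc::deallocate(__raw, 32 * sizeof(int));
//
//   // "Standard" style (20.4): instances, counts of objects, optional hint.
//   std::allocator<int> __a;
//   int* __p = __a.allocate(32);        // 32 ints, not 32 bytes
//   __a.deallocate(__p, 32);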

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <bits/functexcept.h>   // For __throw_bad_alloc
#include <bits/stl_threads.h>

#include <bits/atomicity.h>

namespace std
{
  /**
   *  @if maint
   *  A new-based allocator, as required by the standard.  Allocation and
   *  deallocation forward to global new and delete.  "SGI" style, minus
   *  reallocate().
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  class __new_alloc
  {
  public:
    static void*
    allocate(size_t __n)
    { return ::operator new(__n); }

    static void
    deallocate(void* __p, size_t)
    { ::operator delete(__p); }
  };


  /**
   *  @if maint
   *  A malloc-based allocator.  Typically slower than the
   *  __default_alloc_template (below).  Typically thread-safe and more
   *  storage efficient.  The template argument is unused and is only present
   *  to permit multiple instantiations (but see __default_alloc_template
   *  for caveats).  "SGI" style, plus __set_malloc_handler for OOM conditions.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<int __inst>
    class __malloc_alloc_template
    {
    private:
      static void* _S_oom_malloc(size_t);
      static void* _S_oom_realloc(void*, size_t);
      static void (* __malloc_alloc_oom_handler)();

    public:
      static void*
      allocate(size_t __n)
      {
        void* __result = malloc(__n);
        if (__builtin_expect(__result == 0, 0))
          __result = _S_oom_malloc(__n);
        return __result;
      }

      static void
      deallocate(void* __p, size_t /* __n */)
      { free(__p); }

      static void*
      reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
      {
        void* __result = realloc(__p, __new_sz);
        if (__builtin_expect(__result == 0, 0))
          __result = _S_oom_realloc(__p, __new_sz);
        return __result;
      }

      static void (* __set_malloc_handler(void (*__f)()))()
      {
        void (* __old)() = __malloc_alloc_oom_handler;
        __malloc_alloc_oom_handler = __f;
        return __old;
      }
    };
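
  // A minimal sketch (illustrative only) of installing an out-of-memory
  // handler, in the spirit of std::set_new_handler.  The reclamation
  // routine named here is hypothetical:
  //
  //   void __release_some_cache();   // hypothetical: frees spare memory
  //
  //   typedef __malloc_alloc_template<0> __malloc_alloc;
  //   __malloc_alloc::__set_malloc_handler(&__release_some_cache);
  //
  // On a failed malloc(), _S_oom_malloc (below) repeatedly calls the
  // handler and retries; with no handler installed it throws bad_alloc.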

  // malloc_alloc out-of-memory handling
  template<int __inst>
    void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;

  template<int __inst>
    void*
    __malloc_alloc_template<__inst>::
    _S_oom_malloc(size_t __n)
    {
      void (* __my_malloc_handler)();
      void* __result;

      for (;;)
        {
          __my_malloc_handler = __malloc_alloc_oom_handler;
          if (__builtin_expect(__my_malloc_handler == 0, 0))
            __throw_bad_alloc();
          (*__my_malloc_handler)();
          __result = malloc(__n);
          if (__result)
            return __result;
        }
    }

  template<int __inst>
    void*
    __malloc_alloc_template<__inst>::
    _S_oom_realloc(void* __p, size_t __n)
    {
      void (* __my_malloc_handler)();
      void* __result;

      for (;;)
        {
          __my_malloc_handler = __malloc_alloc_oom_handler;
          if (__builtin_expect(__my_malloc_handler == 0, 0))
            __throw_bad_alloc();
          (*__my_malloc_handler)();
          __result = realloc(__p, __n);
          if (__result)
            return __result;
        }
    }

  // Should not be referenced within the library anymore.
  typedef __new_alloc                 __mem_interface;

  /**
   *  @if maint
   *  This is used primarily (only?) in _Alloc_traits and other places to
   *  help provide the _Alloc_type typedef.  All it does is forward the
   *  requests after some minimal checking.
   *
   *  This is neither "standard"-conforming nor "SGI".  The _Alloc parameter
   *  must be "SGI" style.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp, typename _Alloc>
    class __simple_alloc
    {
    public:
      static _Tp*
      allocate(size_t __n)
      {
        _Tp* __ret = 0;
        if (__n)
          __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
        return __ret;
      }

      static _Tp*
      allocate()
      { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }

      static void
      deallocate(_Tp* __p, size_t __n)
      { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }

      static void
      deallocate(_Tp* __p)
      { _Alloc::deallocate(__p, sizeof (_Tp)); }
    };
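
  // A minimal sketch (illustrative only): __simple_alloc translates object
  // counts into the byte sizes an "SGI" style allocator expects:
  //
  //   typedef __simple_alloc<int, __new_alloc> _Int_alloc;
  //   int* __p = _Int_alloc::allocate(10);   // forwards 10 * sizeof(int)
  //   _Int_alloc::deallocate(__p, 10);       // must repeat the same count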


  /**
   *  @if maint
   *  An adaptor for an underlying allocator (_Alloc) to check the size
   *  arguments for debugging.
   *
   *  "There is some evidence that this can confuse Purify." - SGI comment
   *
   *  This adaptor is "SGI" style.  The _Alloc parameter must also be "SGI".
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Alloc>
    class __debug_alloc
    {
    private:
      // Size of space used to store size.  Note that this must be
      // large enough to preserve alignment.
      enum {_S_extra = 8};

    public:
      static void*
      allocate(size_t __n)
      {
        char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
        *(size_t*)__result = __n;
        return __result + (int) _S_extra;
      }

      static void
      deallocate(void* __p, size_t __n)
      {
        char* __real_p = (char*)__p - (int) _S_extra;
        if (*(size_t*)__real_p != __n)
          abort();
        _Alloc::deallocate(__real_p, __n + (int) _S_extra);
      }

      static void*
      reallocate(void* __p, size_t __old_sz, size_t __new_sz)
      {
        char* __real_p = (char*)__p - (int) _S_extra;
        if (*(size_t*)__real_p != __old_sz)
          abort();
        char* __result = (char*) _Alloc::reallocate(__real_p,
                                                    __old_sz + (int) _S_extra,
                                                    __new_sz + (int) _S_extra);
        *(size_t*)__result = __new_sz;
        return __result + (int) _S_extra;
      }
    };
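
  // Layout sketch (illustrative): a request for __n bytes really allocates
  // __n + 8 bytes from _Alloc.  The first 8 bytes store __n itself; the
  // client sees the address 8 bytes in:
  //
  //   [ size_t __n | client data ... ]
  //   ^__real_p    ^pointer returned to client
  //
  // deallocate()/reallocate() abort() if the size passed back does not
  // match the stored one, catching mismatched size arguments early.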


  /**
   *  @if maint
   *  Default node allocator.  "SGI" style.  Uses various allocators to
   *  fulfill underlying requests (and makes as few requests as possible
   *  when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from __new_alloc
   *  1. If the client requests an object of size > _MAX_BYTES, the resulting
   *     object will be obtained directly from __new_alloc
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   *
   *  The first template parameter specifies whether more than one thread may
   *  use this allocator.  It is safe to allocate an object from one instance
   *  of a default_alloc and deallocate it with another one.  This effectively
   *  transfers its ownership to the second one.  This may have undesirable
   *  effects on reference locality.
   *
   *  The second parameter is unused and serves only to allow the creation of
   *  multiple default_alloc instances.  Note that containers built on different
   *  allocator instances have different types, limiting the utility of this
   *  approach.  If you do not wish to share the free lists with the main
   *  default_alloc instance, instantiate this with a non-zero __inst.
   *
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<bool __threads, int __inst>
    class __default_alloc_template
    {
    private:
      enum {_ALIGN = 8};
      enum {_MAX_BYTES = 128};
      enum {_NFREELISTS = _MAX_BYTES / _ALIGN};

      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_NFREELISTS];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      static _STL_mutex_lock        _S_node_allocator_lock;

      static size_t
      _S_round_up(size_t __bytes)
      { return (((__bytes) + (size_t) _ALIGN - 1) & ~((size_t) _ALIGN - 1)); }

      static size_t
      _S_freelist_index(size_t __bytes)
      { return (((__bytes) + (size_t)_ALIGN - 1)/(size_t)_ALIGN - 1); }
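
      // Worked example (illustrative): with _ALIGN == 8, a request for 13
      // bytes gives _S_round_up(13) == (13 + 7) & ~7 == 16, and
      // _S_freelist_index(13) == (13 + 7)/8 - 1 == 1, i.e. the second free
      // list, which holds 16-byte nodes.  List 0 holds 8-byte nodes; list
      // _NFREELISTS - 1 holds _MAX_BYTES (128) byte nodes.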

      // Returns an object of size __n, and optionally adds entries to the
      // size __n free list.
      static void*
      _S_refill(size_t __n);

      // Allocates a chunk for nobjs of size __size.  nobjs may be reduced
      // if it is inconvenient to allocate the requested number.
      static char*
      _S_chunk_alloc(size_t __size, int& __nobjs);

      // It would be nice to use _STL_auto_lock here.  But we need a
      // test whether threads are in use.
      struct _Lock
      {
        _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
        ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
      } __attribute__ ((__unused__));
      friend struct _Lock;

      static _Atomic_word _S_force_new;

    public:
      // __n must be > 0
      static void*
      allocate(size_t __n)
      {
        void* __ret = 0;

        // If there is a race through here, assume answer from getenv
        // will resolve in same direction.  Inspired by techniques
        // to efficiently support threading found in basic_string.h.
        if (_S_force_new == 0)
          {
            if (getenv("GLIBCPP_FORCE_NEW"))
              __atomic_add(&_S_force_new, 1);
            else
              __atomic_add(&_S_force_new, -1);
          }

        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __ret = __new_alloc::allocate(__n);
        else
          {
            _Obj* volatile* __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            // Acquire the lock here with a constructor call.  This
            // ensures that it is released on exit or during stack
            // unwinding.
            _Lock __lock_instance;
            _Obj* __restrict__ __result = *__my_free_list;
            if (__builtin_expect(__result == 0, 0))
              __ret = _S_refill(_S_round_up(__n));
            else
              {
                *__my_free_list = __result->_M_free_list_link;
                __ret = __result;
              }
            if (__builtin_expect(__ret == 0, 0))
              __throw_bad_alloc();
          }
        return __ret;
      }

      // __p may not be 0
      static void
      deallocate(void* __p, size_t __n)
      {
        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __new_alloc::deallocate(__p, __n);
        else
          {
            _Obj* volatile*  __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            _Obj* __q = (_Obj*)__p;

            // Acquire the lock here with a constructor call.  This
            // ensures that it is released on exit or during stack
            // unwinding.
            _Lock __lock_instance;
            __q->_M_free_list_link = *__my_free_list;
            *__my_free_list = __q;
          }
      }

      static void*
      reallocate(void* __p, size_t __old_sz, size_t __new_sz);
    };
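
  // A minimal sketch (illustrative only) of using the pool allocator
  // directly; sizes are in bytes, as with all "SGI" style allocators:
  //
  //   void* __p = __default_alloc_template<true,0>::allocate(20);
  //   // ... rounded up to 24 bytes internally, served from free list 2.
  //   __default_alloc_template<true,0>::deallocate(__p, 20);
  //
  // Setting the environment variable GLIBCPP_FORCE_NEW before the first
  // allocation bypasses the pools entirely and forwards to __new_alloc,
  // which is useful when hunting leaks with external tools.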

  template<bool __threads, int __inst> _Atomic_word
  __default_alloc_template<__threads, __inst>::_S_force_new = 0;

  template<bool __threads, int __inst>
    inline bool
    operator==(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return true; }

  template<bool __threads, int __inst>
    inline bool
    operator!=(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return false; }


  // We allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  We assume that __size is properly aligned.  We hold
  // the allocation lock.
  template<bool __threads, int __inst>
    char*
    __default_alloc_template<__threads, __inst>::
    _S_chunk_alloc(size_t __size, int& __nobjs)
    {
      char* __result;
      size_t __total_bytes = __size * __nobjs;
      size_t __bytes_left = _S_end_free - _S_start_free;

      if (__bytes_left >= __total_bytes)
        {
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else if (__bytes_left >= __size)
        {
          __nobjs = (int)(__bytes_left/__size);
          __total_bytes = __size * __nobjs;
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else
        {
          size_t __bytes_to_get =
            2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
          // Try to make use of the left-over piece.
          if (__bytes_left > 0)
            {
              _Obj* volatile* __my_free_list =
                _S_free_list + _S_freelist_index(__bytes_left);

              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__my_free_list;
              *__my_free_list = (_Obj*)(void*)_S_start_free;
            }
          _S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
          if (_S_start_free == 0)
            {
              size_t __i;
              _Obj* volatile* __my_free_list;
              _Obj* __p;
              // Try to make do with what we have.  That can't hurt.  We
              // do not try smaller requests, since that tends to result
              // in disaster on multi-process machines.
              __i = __size;
              for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
                {
                  __my_free_list = _S_free_list + _S_freelist_index(__i);
                  __p = *__my_free_list;
                  if (__p != 0)
                    {
                      *__my_free_list = __p->_M_free_list_link;
                      _S_start_free = (char*)__p;
                      _S_end_free = _S_start_free + __i;
                      return _S_chunk_alloc(__size, __nobjs);
                      // Any leftover piece will eventually make it to the
                      // right free list.
                    }
                }
              _S_end_free = 0;        // In case of exception.
              _S_start_free = (char*)__new_alloc::allocate(__bytes_to_get);
              // This should either throw an exception or remedy the situation.
              // Thus we assume it succeeded.
            }
          _S_heap_size += __bytes_to_get;
          _S_end_free = _S_start_free + __bytes_to_get;
          return _S_chunk_alloc(__size, __nobjs);
        }
    }
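
  // Worked numbers (illustrative): on the first 16-byte refill, __nobjs is
  // 20, so __total_bytes = 320 and, with _S_heap_size still 0,
  // __bytes_to_get = 2 * 320 + 0 = 640.  Twenty nodes are handed back and
  // the remaining 320 bytes stay in [_S_start_free, _S_end_free) to serve
  // later requests; as _S_heap_size grows, each new chunk also grows by an
  // extra 1/16 of the heap already taken.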


  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  template<bool __threads, int __inst>
    void*
    __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
    {
      int __nobjs = 20;
      char* __chunk = _S_chunk_alloc(__n, __nobjs);
      _Obj* volatile* __my_free_list;
      _Obj* __result;
      _Obj* __current_obj;
      _Obj* __next_obj;
      int __i;

      if (1 == __nobjs)
        return __chunk;
      __my_free_list = _S_free_list + _S_freelist_index(__n);

      // Build free list in chunk.
      __result = (_Obj*)(void*)__chunk;
      *__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
      for (__i = 1; ; __i++)
        {
          __current_obj = __next_obj;
          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
          if (__nobjs - 1 == __i)
            {
              __current_obj->_M_free_list_link = 0;
              break;
            }
          else
            __current_obj->_M_free_list_link = __next_obj;
        }
      return __result;
    }
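
  // Sketch of the result (illustrative): after _S_refill(16) obtains 20
  // objects, the first is returned to the caller and the other 19 are
  // threaded through their _M_free_list_link members onto free list 1:
  //
  //   chunk:  [obj0 -> caller][obj1]->[obj2]-> ... ->[obj19]->0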


  template<bool __threads, int __inst>
    void*
    __default_alloc_template<__threads, __inst>::
    reallocate(void* __p, size_t __old_sz, size_t __new_sz)
    {
      void* __result;
      size_t __copy_sz;

      if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
        return realloc(__p, __new_sz);
      if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
        return __p;
      __result = allocate(__new_sz);
      __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
      memcpy(__result, __p, __copy_sz);
      deallocate(__p, __old_sz);
      return __result;
    }

  template<bool __threads, int __inst>
    _STL_mutex_lock
    __default_alloc_template<__threads,__inst>::_S_node_allocator_lock
    __STL_MUTEX_INITIALIZER;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_start_free = 0;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_end_free = 0;

  template<bool __threads, int __inst>
    size_t __default_alloc_template<__threads,__inst>::_S_heap_size = 0;

  template<bool __threads, int __inst>
    typename __default_alloc_template<__threads,__inst>::_Obj* volatile
    __default_alloc_template<__threads,__inst>::_S_free_list[_NFREELISTS];

  typedef __default_alloc_template<true,0>    __alloc;
  typedef __default_alloc_template<false,0>   __single_client_alloc;
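
  // Note (illustrative): __alloc takes the lock around free-list updates
  // and is the underlying allocator used by std::allocator below;
  // __single_client_alloc skips locking and is only safe when a container
  // is provably confined to one thread.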


  /**
   *  @brief  The "standard" allocator, as per [20.4].
   *
   *  The private _Alloc is "SGI" style.  (See comments at the top
   *  of stl_alloc.h.)
   *
   *  The underlying allocator behaves as follows.
   *    - __default_alloc_template is used via two typedefs
   *    - "__single_client_alloc" typedef does no locking for threads
   *    - "__alloc" typedef is threadsafe via the locks
   *    - __new_alloc is used for memory requests
   *
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp>
    class allocator
    {
      typedef __alloc _Alloc;          // The underlying allocator.
    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef allocator<_Tp1> other; };

      allocator() throw() {}
      allocator(const allocator&) throw() {}
      template<typename _Tp1>
        allocator(const allocator<_Tp1>&) throw() {}
      ~allocator() throw() {}

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      // NB: __n is permitted to be 0.  The C++ standard says nothing
      // about what the return value is when __n == 0.
      _Tp*
      allocate(size_type __n, const void* = 0)
      {
        _Tp* __ret = 0;
        if (__n)
          {
            if (__n <= this->max_size())
              __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
            else
              __throw_bad_alloc();
          }
        return __ret;
      }

      // __p is not permitted to be a null pointer.
      void
      deallocate(pointer __p, size_type __n)
      { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }

      size_type
      max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

      void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
      void destroy(pointer __p) { __p->~_Tp(); }
    };
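
  // A minimal sketch (illustrative only) of the standard interface,
  // including rebind and explicit construct/destroy:
  //
  //   allocator<int> __a;
  //   int* __p = __a.allocate(1);        // raw, uninitialized storage
  //   __a.construct(__p, 42);            // placement-new an int there
  //   __a.destroy(__p);                  // run the (trivial) destructor
  //   __a.deallocate(__p, 1);
  //
  //   // rebind obtains an allocator for another type:
  //   typedef allocator<int>::rebind<double>::other __dalloc_type;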

  template<>
    class allocator<void>
    {
    public:
      typedef size_t      size_type;
      typedef ptrdiff_t   difference_type;
      typedef void*       pointer;
      typedef const void* const_pointer;
      typedef void        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef allocator<_Tp1> other; };
    };


  template<typename _T1, typename _T2>
    inline bool
    operator==(const allocator<_T1>&, const allocator<_T2>&)
    { return true; }

  template<typename _T1, typename _T2>
    inline bool
    operator!=(const allocator<_T1>&, const allocator<_T2>&)
    { return false; }


  /**
   *  @if maint
   *  Allocator adaptor to turn an "SGI" style allocator (e.g.,
   *  __alloc, __malloc_alloc_template) into a "standard" conforming
   *  allocator.  Note that this adaptor does *not* assume that all
   *  objects of the underlying alloc class are identical, nor does it
   *  assume that all of the underlying alloc's member functions are
   *  static member functions.  Note, also, that __allocator<_Tp,
   *  __alloc> is essentially the same thing as allocator<_Tp>.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp, typename _Alloc>
    struct __allocator
    {
      _Alloc __underlying_alloc;

      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __allocator<_Tp1, _Alloc> other; };

      __allocator() throw() {}
      __allocator(const __allocator& __a) throw()
      : __underlying_alloc(__a.__underlying_alloc) {}

      template<typename _Tp1>
        __allocator(const __allocator<_Tp1, _Alloc>& __a) throw()
        : __underlying_alloc(__a.__underlying_alloc) {}

      ~__allocator() throw() {}

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      // NB: __n is permitted to be 0.  The C++ standard says nothing
      // about what the return value is when __n == 0.
      _Tp*
      allocate(size_type __n, const void* = 0)
      {
        _Tp* __ret = 0;
        if (__n)
          __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
        return __ret;
      }

      // __p is not permitted to be a null pointer.
      void
      deallocate(pointer __p, size_type __n)
      { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }

      size_type
      max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

      void
      construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };
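
  // A minimal sketch (illustrative only): wrapping an "SGI" style allocator
  // gives a container-ready "standard" one, e.g. a malloc-backed allocator:
  //
  //   typedef __allocator<int, __malloc_alloc_template<0> > __int_alloc;
  //   __int_alloc __a;
  //   int* __p = __a.allocate(4);   // counts objects; bytes computed inside
  //   __a.deallocate(__p, 4);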

  template<typename _Alloc>
    struct __allocator<void, _Alloc>
    {
      typedef size_t      size_type;
      typedef ptrdiff_t   difference_type;
      typedef void*       pointer;
      typedef const void* const_pointer;
      typedef void        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __allocator<_Tp1, _Alloc> other; };
    };

  template<typename _Tp, typename _Alloc>
    inline bool
    operator==(const __allocator<_Tp,_Alloc>& __a1,
               const __allocator<_Tp,_Alloc>& __a2)
    { return __a1.__underlying_alloc == __a2.__underlying_alloc; }

  template<typename _Tp, typename _Alloc>
    inline bool
    operator!=(const __allocator<_Tp, _Alloc>& __a1,
               const __allocator<_Tp, _Alloc>& __a2)
    { return __a1.__underlying_alloc != __a2.__underlying_alloc; }


  //@{
  /** Comparison operators for all of the predefined SGI-style allocators.
   *  This ensures that __allocator<malloc_alloc> (for example) will work
   *  correctly.  As required, all allocators compare equal.
   */
  template<int __inst>
    inline bool
    operator==(const __malloc_alloc_template<__inst>&,
               const __malloc_alloc_template<__inst>&)
    { return true; }

  template<int __inst>
    inline bool
    operator!=(const __malloc_alloc_template<__inst>&,
               const __malloc_alloc_template<__inst>&)
    { return false; }

  template<typename _Alloc>
    inline bool
    operator==(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
    { return true; }

  template<typename _Alloc>
    inline bool
    operator!=(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
    { return false; }
  //@}


  /**
   *  @if maint
   *  Another allocator adaptor:  _Alloc_traits.  This serves two purposes.
   *  First, make it possible to write containers that can use either "SGI"
   *  style allocators or "standard" allocators.  Second, provide a mechanism
   *  so that containers can query whether or not the allocator has distinct
   *  instances.  If not, the container can avoid wasting a word of memory to
   *  store an empty object.  For examples of use, see stl_vector.h, etc, or
   *  any of the other classes derived from this one.
   *
   *  This adaptor uses partial specialization.  The general case of
   *  _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
   *  standard-conforming allocator, possibly with non-equal instances and
   *  non-static members.  (It still behaves correctly even if _Alloc has
   *  static members and all instances are equal.  Refinements affect
   *  performance, not correctness.)
   *
   *  There are always two members:  allocator_type, which is a standard-
   *  conforming allocator type for allocating objects of type _Tp, and
   *  _S_instanceless, a static const member of type bool.  If
   *  _S_instanceless is true, this means that there is no difference
   *  between any two instances of type allocator_type.  Furthermore, if
   *  _S_instanceless is true, then _Alloc_traits has one additional
   *  member:  _Alloc_type.  This type encapsulates allocation and
   *  deallocation of objects of type _Tp through a static interface; it
   *  has two member functions, whose signatures are
   *
   *  -  static _Tp* allocate(size_t)
   *  -  static void deallocate(_Tp*, size_t)
   *
   *  The size_t parameters are "standard" style (see top of stl_alloc.h) in
   *  that they take counts, not sizes.
   *
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  //@{
  // The fully general version.
  template<typename _Tp, typename _Allocator>
    struct _Alloc_traits
    {
      static const bool _S_instanceless = false;
      typedef typename _Allocator::template rebind<_Tp>::other allocator_type;
    };

  template<typename _Tp, typename _Allocator>
    const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;

  /// The version for the default allocator.
  template<typename _Tp, typename _Tp1>
    struct _Alloc_traits<_Tp, allocator<_Tp1> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __alloc> _Alloc_type;
      typedef allocator<_Tp> allocator_type;
    };
  //@}
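
  // A minimal sketch (illustrative, not taken verbatim from stl_vector.h)
  // of how a container exploits these traits:
  //
  //   typedef _Alloc_traits<int, allocator<int> > _Traits;
  //   // _Traits::_S_instanceless is true here, so the container can omit
  //   // the stored allocator and use the static interface, which takes
  //   // counts ("standard" style), not byte sizes:
  //   int* __p = _Traits::_Alloc_type::allocate(10);   // room for 10 ints
  //   _Traits::_Alloc_type::deallocate(__p, 10);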

  //@{
  /// Versions for the predefined "SGI" style allocators.
  template<typename _Tp, int __inst>
    struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
      typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
    };

  template<typename _Tp, bool __threads, int __inst>
    struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
      _Alloc_type;
      typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
      allocator_type;
    };

  template<typename _Tp, typename _Alloc>
    struct _Alloc_traits<_Tp, __debug_alloc<_Alloc> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
      typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
    };
  //@}

  //@{
  /// Versions for the __allocator adaptor used with the predefined
  /// "SGI" style allocators.
  template<typename _Tp, typename _Tp1, int __inst>
    struct _Alloc_traits<_Tp,
                         __allocator<_Tp1, __malloc_alloc_template<__inst> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
      typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
    };

  template<typename _Tp, typename _Tp1, bool __thr, int __inst>
    struct _Alloc_traits<_Tp,
                         __allocator<_Tp1,
                                     __default_alloc_template<__thr, __inst> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
      _Alloc_type;
      typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
      allocator_type;
    };

  template<typename _Tp, typename _Tp1, typename _Alloc>
    struct _Alloc_traits<_Tp, __allocator<_Tp1, __debug_alloc<_Alloc> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
      typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
    };
  //@}

  // Inhibit implicit instantiations for required instantiations,
  // which are defined via explicit instantiations elsewhere.
  // NB: This syntax is a GNU extension.
#if _GLIBCPP_EXTERN_TEMPLATE
  extern template class allocator<char>;
  extern template class allocator<wchar_t>;
  extern template class __default_alloc_template<true,0>;
#endif
} // namespace std

#endif