/*
  $Id: malloc-private.h,v 1.4 2006/03/31 12:56:52 wg Exp $
  Private header file for ptmalloc3, created by Wolfram Gloger
  and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.
*/

/* The following definitions are replicated from malloc.c */

#ifndef MALLOC_PRIVATE_H
#define MALLOC_PRIVATE_H

#ifndef MALLOC_ALIGNMENT
# define MALLOC_ALIGNMENT  ((size_t)8U)
#endif
#ifndef USE_LOCKS
# define USE_LOCKS 0
#endif

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* The number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
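
/*
  Illustration (a sketch, assuming the default MALLOC_ALIGNMENT of 8):
  for an address A ending in ...0x13, (size_t)A & CHUNK_ALIGN_MASK is 3,
  so align_offset(A) yields 8 - 3 == 5; for an already aligned address
  it yields 0.
*/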

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS        (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s)      mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
   is unlikely to be needed, but is supplied just in case.
*/
#include <fcntl.h> /* for O_RDWR */
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), PROT_READ|PROT_WRITE, MMAP_FLAGS, dev_zero_fd, 0))
#endif /* MAP_ANONYMOUS */
#define CALL_MUNMAP(a, s) munmap((a), (s))
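
/*
  Usage sketch (illustrative only; the names below are hypothetical and
  not part of this header):

    size_t len = 16 * 4096;
    void*  mem = CALL_MMAP(len);
    if (mem != MAP_FAILED) {
      ... use the region ...
      CALL_MUNMAP(mem, len);
    }

  On the /dev/zero fallback path the file descriptor is cached in
  dev_zero_fd and never closed by these macros.
*/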

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};
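
/*
  Layout note (as in malloc.c): prev_foot and head are the boundary
  tags; fd and bk are meaningful only while the chunk sits on a free
  list and overlap user data once the chunk is in use.  The low bits of
  head carry the PINUSE/CINUSE flag bits defined further below.
*/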

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;

typedef unsigned int binmap_t;   /* Bitmap of non-empty bins */
typedef unsigned int flag_t;     /* The type of various bit flag sets */

struct malloc_tree_chunk;
typedef struct malloc_tree_chunk* tbinptr;

struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t       sflags;           /* mmap and extern flag */
};

typedef struct malloc_segment  msegment;

#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)

struct malloc_state {
  binmap_t   smallmap;           /* bitmap of non-empty smallbins */
  binmap_t   treemap;            /* bitmap of non-empty treebins */
  size_t     dvsize;             /* size of the designated victim chunk */
  size_t     topsize;            /* size of the top chunk */
  char*      least_addr;         /* least address obtained from the system */
  mchunkptr  dv;                 /* designated victim chunk */
  mchunkptr  top;                /* topmost chunk */
  size_t     trim_check;         /* threshold for trimming top */
  size_t     release_checks;     /* countdown to next segment-release scan */
  size_t     magic;              /* integrity-check value */
  mchunkptr  smallbins[(NSMALLBINS+1)*2];  /* small bin heads (fd/bk pairs) */
  tbinptr    treebins[NTREEBINS];          /* tree bin roots */
  size_t     footprint;          /* bytes currently obtained from the system */
  size_t     max_footprint;      /* high-water mark of footprint */
  flag_t     mflags;             /* per-arena option flags */
#if USE_LOCKS
  MLOCK_T    mutex;              /* serializes access to this arena */
#endif /* USE_LOCKS */
  msegment   seg;                /* embedded record for the first segment */
  void*      extp;               /* extension pointer (unused by malloc.c) */
  size_t     exts;               /* extension size */
};
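
/*
  Note: smallbins is dimensioned (NSMALLBINS+1)*2 because, as in
  malloc.c, each small bin is represented only by the fd/bk pointer
  pair of a dummy chunk header embedded in this array, not by a full
  malloc_chunk.
*/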

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

/* -------------------- Chunk sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

#define CHUNK_OVERHEAD      (SIZE_T_SIZE)

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
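
/*
  For example (assuming 4-byte size_t and pointers with the default
  8-byte alignment), MCHUNK_SIZE is 16 and MIN_CHUNK_SIZE is therefore
  16; with 8-byte size_t and pointers both come out to 32.
*/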

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
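
/*
  chunk2mem/mem2chunk step over the prev_foot and head words, so the
  pointer handed back to the user begins TWO_SIZE_T_SIZES bytes past
  the chunk base.  align_as_chunk(A) names the chunk, placed at or just
  above A, whose user pointer would satisfy MALLOC_ALIGNMENT.
*/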

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
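
/*
  Worked example (assuming 4-byte size_t and 8-byte alignment):
  pad_request(13) == (13 + 4 + 7) & ~7 == 24, i.e. a 13-byte request is
  rounded up to a 24-byte chunk that still has room for the size/flags
  word.
*/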

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

#define IS_MMAPPED_BIT      (SIZE_T_ONE)
#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|SIZE_T_FOUR)

/* The head field is OR'ed with NON_MAIN_ARENA if the chunk was
   obtained from a non-main arena.  This is only set immediately before
   handing the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA      (SIZE_T_FOUR)
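
/*
  Because chunk sizes are always multiples of MALLOC_ALIGNMENT (at
  least 8), the three low bits of head are free to carry PINUSE_BIT,
  CINUSE_BIT and NON_MAIN_ARENA; chunksize() below masks them off via
  FLAG_BITS.
*/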

#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))

#define is_mmapped(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
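
/*
  A note on the test above: an mmapped chunk stands alone, so malloc.c
  never sets PINUSE_BIT on it and stores IS_MMAPPED_BIT in its
  prev_foot; is_mmapped() therefore checks both conditions.
*/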

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#endif /* MALLOC_PRIVATE_H */