/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

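/* Some systems (notably the BSDs) spell MAP_ANONYMOUS as MAP_ANON;
   map the name we use onto whichever one exists.  */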
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
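
/* The list is intrusive: a freed block is reused in place as its own
   list node.  Freeing SIZE bytes at ADDR amounts to storing
   { next = old list head, size = SIZE } at ADDR, as
   backtrace_free_locked does below; the rest of the block is unused
   until it is handed out again.  */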

/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
	    ppsmall = pp;
	  ++c;
	}
      if (c >= 16)
	{
	  if (size <= (*ppsmall)->size)
	    return;
	  *ppsmall = (*ppsmall)->next;
	}

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
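	      /* E.g. a 13-byte request rounds up to 16 bytes:
		 (13 + 7) & ~7 == 16.  */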
	      size = (size + 7) & ~ (size_t) 7;
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
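      /* Round the request up to a whole number of pages; e.g. with
	 4096-byte pages, a 5000-byte request maps 8192 bytes, and the
	 3192-byte tail goes onto the free list below.  */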
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
		void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
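  /* 16 * 4096 == 64K: only blocks of at least sixteen 4K pages, and
     page aligned, are candidates for munmap.  */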
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
	  && (size & (pagesize - 1)) == 0)
	{
	  /* If munmap fails for some reason, just add the block to
	     the freelist.  */
	  if (munmap (addr, size) == 0)
	    return;
	}
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }
}
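
/* A minimal usage sketch for the pair above, assuming the caller
   already has a BACKTRACE_STATE; ERROR_CB and DATA stand in for the
   caller's own error callback and its argument:

     void *buf = backtrace_alloc (state, 128, error_cb, data);
     if (buf != NULL)
       {
	 ... use the 128 bytes ...
	 backtrace_free (state, buf, 128, error_cb, data);
       }
*/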

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
		       backtrace_error_callback error_callback,
		       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
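      /* Growth policy: a fresh vector gets room for 16 entries of
	 SIZE bytes; otherwise the needed total doubles, capped at one
	 page while it was under a page, and rounded up to whole pages
	 once it was not.  */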
      alc = vec->size + size;
      if (vec->size == 0)
	alc = 16 * size;
      else if (alc < pagesize)
	{
	  alc *= 2;
	  if (alc > pagesize)
	    alc = pagesize;
	}
      else
	{
	  alc *= 2;
	  alc = (alc + pagesize - 1) & ~ (pagesize - 1);
	}
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
	return NULL;
      if (vec->base != NULL)
	{
	  memcpy (base, vec->base, vec->size);
	  backtrace_free (state, vec->base, vec->size + vec->alc,
			  error_callback, data);
	}
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
			 struct backtrace_vector *vec,
			 backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
			 void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
			  struct backtrace_vector *vec,
			  backtrace_error_callback error_callback,
			  void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;
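  /* E.g. with SIZE == 13 and ALC == 11, ALIGNED is 16 and ALC drops
     to 8: we free the 8 bytes starting at BASE + 16, leaking the 3
     padding bytes between offsets 13 and 16.  */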

  backtrace_free (state, (char *) vec->base + aligned, alc,
		  error_callback, data);
  vec->alc = 0;
  return 1;
}
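
/* Vector usage sketch, with a hypothetical element type "struct elt"
   and caller-supplied ERROR_CB and DATA:

     struct backtrace_vector vec;
     struct elt *p;
     struct elt *elts;

     memset (&vec, 0, sizeof vec);
     p = (struct elt *) backtrace_vector_grow (state, sizeof (struct elt),
					       error_cb, data, &vec);
     if (p != NULL)
       *p = ...;
     ... one grow call per element ...
     elts = (struct elt *) backtrace_vector_finish (state, &vec,
						    error_cb, data);
*/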