1 /*
2    Samba Unix SMB/CIFS implementation.
3 
4    Samba trivial allocation library - new interface
5 
6    NOTE: Please read talloc_guide.txt for full documentation
7 
8    Copyright (C) Andrew Tridgell 2004
9    Copyright (C) Stefan Metzmacher 2006
10 
11      ** NOTE! The following LGPL license applies to the talloc
12      ** library. This does NOT imply that all of Samba is released
13      ** under the LGPL
14 
15    This library is free software; you can redistribute it and/or
16    modify it under the terms of the GNU Lesser General Public
17    License as published by the Free Software Foundation; either
18    version 3 of the License, or (at your option) any later version.
19 
20    This library is distributed in the hope that it will be useful,
21    but WITHOUT ANY WARRANTY; without even the implied warranty of
22    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23    Lesser General Public License for more details.
24 
25    You should have received a copy of the GNU Lesser General Public
26    License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 */
28 
29 /*
30   inspired by http://swapped.cc/halloc/
31 */
32 
33 #include "replace.h"
34 #include "talloc.h"
35 
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
39 
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
43 
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
47 
48 /* Special macros that are no-ops except when run under Valgrind on
49  * x86.  They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51         /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
56 
57 /* use this to force every realloc to change the pointer, to stress test
58    code that might not cope */
59 #define ALWAYS_REALLOC 0
60 
61 
62 #define MAX_TALLOC_SIZE 0x10000000
63 
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04		/* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08	/* This is allocated in a pool */
68 
69 /*
70  * Bits above this are random, used to make it harder to fake talloc
71  * headers during an attack.  Try not to change this without good reason.
72  */
73 #define TALLOC_FLAG_MASK 0x0F
74 
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
76 
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 	~TALLOC_FLAG_MASK & ( \
80 		TALLOC_MAGIC_BASE + \
81 		(TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 		(TALLOC_BUILD_VERSION_MINOR << 16) + \
83 		(TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
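/*
 * Added note: a chunk's "flags" word packs the magic and the flag bits
 * together.  For example, for a chunk allocated inside a pool:
 *
 *   tc->flags  = talloc_magic | TALLOC_FLAG_POOLMEM;
 *   magic bits = tc->flags & ~TALLOC_FLAG_MASK;
 *   flag bits  = tc->flags &  TALLOC_FLAG_MASK;
 *
 * talloc_magic starts out as the deterministic TALLOC_MAGIC_NON_RANDOM and
 * is replaced with a randomised value in talloc_lib_init() below when
 * constructor support is available.
 */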
85 
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc()) */
88 #ifndef TALLOC_ABORT
89 #define TALLOC_ABORT(reason) abort()
90 #endif
91 
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
95 #else
96 # define discard_const_p(type, ptr) ((type *)(ptr))
97 #endif
98 #endif
99 
100 /* these macros gain us a few percent of speed on gcc */
101 #if (__GNUC__ >= 3)
102 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
103    as its first argument */
104 #ifndef likely
105 #define likely(x)   __builtin_expect(!!(x), 1)
106 #endif
107 #ifndef unlikely
108 #define unlikely(x) __builtin_expect(!!(x), 0)
109 #endif
110 #else
111 #ifndef likely
112 #define likely(x) (x)
113 #endif
114 #ifndef unlikely
115 #define unlikely(x) (x)
116 #endif
117 #endif
118 
119 /* this null_context is only used if talloc_enable_leak_report() or
120    talloc_enable_leak_report_full() is called, otherwise it remains
121    NULL
122 */
123 static void *null_context;
124 static bool talloc_report_null;
125 static bool talloc_report_null_full;
126 static void *autofree_context;
127 
128 static void talloc_setup_atexit(void);
129 
130 /* used to enable fill of memory on free, which can be useful for
131  * catching use after free errors when valgrind is too slow
132  */
133 static struct {
134 	bool initialised;
135 	bool enabled;
136 	uint8_t fill_value;
137 } talloc_fill;
138 
139 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
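/*
 * Usage sketch (added; the program name is just an example): exporting
 *
 *   TALLOC_FREE_FILL=0xfd ./myprog
 *
 * makes _talloc_free_internal() below overwrite freed talloc memory with
 * 0xfd, which helps catch use-after-free bugs when valgrind is too slow.
 * The value is parsed with strtoul(..., 0), so decimal, octal and hex forms
 * all work.
 */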
140 
141 /*
142  * do not wipe the header, to allow the
143  * double-free logic to still work
144  */
145 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
146 	if (unlikely(talloc_fill.enabled)) { \
147 		size_t _flen = (_tc)->size; \
148 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
149 		memset(_fptr, talloc_fill.fill_value, _flen); \
150 	} \
151 } while (0)
152 
153 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessible */
155 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
156 	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
157 	char *_fptr = (char *)(_tc); \
158 	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
159 } while(0)
160 #else
161 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
162 #endif
163 
164 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
165 	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
166 	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
167 } while (0)
168 
169 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
170 	if (unlikely(talloc_fill.enabled)) { \
171 		size_t _flen = (_tc)->size - (_new_size); \
172 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
173 		_fptr += (_new_size); \
174 		memset(_fptr, talloc_fill.fill_value, _flen); \
175 	} \
176 } while (0)
177 
178 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes as not accessible */
180 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
181 	size_t _flen = (_tc)->size - (_new_size); \
182 	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
183 	_fptr += (_new_size); \
184 	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
185 } while (0)
186 #else
187 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
188 #endif
189 
190 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
191 	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
192 	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
193 } while (0)
194 
195 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
196 	if (unlikely(talloc_fill.enabled)) { \
197 		size_t _flen = (_tc)->size - (_new_size); \
198 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
199 		_fptr += (_new_size); \
200 		memset(_fptr, talloc_fill.fill_value, _flen); \
201 	} \
202 } while (0)
203 
204 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
205 /* Mark the unused bytes as undefined */
206 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
207 	size_t _flen = (_tc)->size - (_new_size); \
208 	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
209 	_fptr += (_new_size); \
210 	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
211 } while (0)
212 #else
213 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
214 #endif
215 
216 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
217 	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
218 	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
219 } while (0)
220 
221 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
222 /* Mark the new bytes as undefined */
223 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
224 	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
225 	size_t _new_used = TC_HDR_SIZE + (_new_size); \
226 	size_t _flen = _new_used - _old_used; \
227 	char *_fptr = _old_used + (char *)(_tc); \
228 	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
229 } while (0)
230 #else
231 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
232 #endif
233 
234 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
235 	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
236 } while (0)
237 
238 struct talloc_reference_handle {
239 	struct talloc_reference_handle *next, *prev;
240 	void *ptr;
241 	const char *location;
242 };
243 
244 struct talloc_memlimit {
245 	struct talloc_chunk *parent;
246 	struct talloc_memlimit *upper;
247 	size_t max_size;
248 	size_t cur_size;
249 };
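/*
 * Added note (my reading of the memlimit helpers declared below): "parent"
 * is the chunk the limit was attached to, "upper" points at the next limit
 * further up the tree, and cur_size is kept within max_size by
 * talloc_memlimit_check() before each real allocation and adjusted by
 * talloc_memlimit_grow()/talloc_memlimit_shrink().
 */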
250 
251 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
252 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
253 				size_t size);
254 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
255 				size_t size);
256 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
257 
258 static inline void _tc_set_name_const(struct talloc_chunk *tc,
259 				const char *name);
260 static struct talloc_chunk *_vasprintf_tc(const void *t,
261 				const char *fmt,
262 				va_list ap);
263 
264 typedef int (*talloc_destructor_t)(void *);
265 
266 struct talloc_pool_hdr;
267 
268 struct talloc_chunk {
269 	/*
270 	 * flags includes the talloc magic, which is randomised to
271 	 * make overwrite attacks harder
272 	 */
273 	unsigned flags;
274 
275 	/*
276 	 * If you have a logical tree like:
277 	 *
278 	 *           <parent>
279 	 *           /   |   \
280 	 *          /    |    \
281 	 *         /     |     \
282 	 * <child 1> <child 2> <child 3>
283 	 *
284 	 * The actual talloc tree is:
285 	 *
286 	 *  <parent>
287 	 *     |
288 	 *  <child 1> - <child 2> - <child 3>
289 	 *
290 	 * The children are linked with next/prev pointers, and
291 	 * child 1 is linked to the parent with parent/child
292 	 * pointers.
293 	 */
294 
295 	struct talloc_chunk *next, *prev;
296 	struct talloc_chunk *parent, *child;
297 	struct talloc_reference_handle *refs;
298 	talloc_destructor_t destructor;
299 	const char *name;
300 	size_t size;
301 
302 	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current usage
308 	 */
309 	struct talloc_memlimit *limit;
310 
311 	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * points to the struct talloc_pool_hdr of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
316 	 */
317 	struct talloc_pool_hdr *pool;
318 };
319 
320 /* 16 byte alignment seems to keep everyone happy */
321 #define TC_ALIGN16(s) (((s)+15)&~15)
322 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
323 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
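/*
 * Layout sketch (added for clarity): a plain talloc chunk looks like
 *
 *   [ struct talloc_chunk, padded to TC_HDR_SIZE ][ tc->size bytes of user data ]
 *
 * TC_PTR_FROM_CHUNK() steps forward over the 16-byte aligned header to get
 * the pointer handed to callers, and talloc_chunk_from_ptr() below steps
 * back over it.
 */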
324 
_PUBLIC_ int talloc_version_major(void)
326 {
327 	return TALLOC_VERSION_MAJOR;
328 }
329 
_PUBLIC_ int talloc_version_minor(void)
331 {
332 	return TALLOC_VERSION_MINOR;
333 }
334 
_PUBLIC_ int talloc_test_get_magic(void)
336 {
337 	return talloc_magic;
338 }
339 
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
341 			      const char *location)
342 {
343 	/*
344 	 * Mark this memory as free, and also over-stamp the talloc
345 	 * magic with the old-style magic.
346 	 *
347 	 * Why?  This tries to avoid a memory read use-after-free from
348 	 * disclosing our talloc magic, which would then allow an
349 	 * attacker to prepare a valid header and so run a destructor.
350 	 *
351 	 */
352 	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
353 		| (tc->flags & TALLOC_FLAG_MASK);
354 
355 	/* we mark the freed memory with where we called the free
356 	 * from. This means on a double free error we can report where
357 	 * the first free came from
358 	 */
359 	if (location) {
360 		tc->name = location;
361 	}
362 }
363 
static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
365 {
366 	/*
367 	 * Mark this memory as not free.
368 	 *
	 * Why? This is memory that is either in a pool (and so available
	 * for talloc's re-use) or has just been returned by realloc().  We
	 * have to mark the memory as free before any realloc() call, as we
	 * can't write to the memory after that.
373 	 *
374 	 * We put back the normal magic instead of the 'not random'
375 	 * magic.
376 	 */
377 
378 	tc->flags = talloc_magic |
379 		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
380 }
381 
382 static void (*talloc_log_fn)(const char *message);
383 
_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
385 {
386 	talloc_log_fn = log_fn;
387 }
388 
389 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
390 void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
392 {
393 	uint32_t random_value;
394 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
395 	uint8_t *p;
396 	/*
397 	 * Use the kernel-provided random values used for
398 	 * ASLR.  This won't change per-exec, which is ideal for us
399 	 */
400 	p = (uint8_t *) getauxval(AT_RANDOM);
401 	if (p) {
402 		/*
403 		 * We get 16 bytes from getauxval.  By calling rand(),
404 		 * a totally insecure PRNG, but one that will
405 		 * deterministically have a different value when called
406 		 * twice, we ensure that if two talloc-like libraries
407 		 * are somehow loaded in the same address space, that
408 		 * because we choose different bytes, we will keep the
409 		 * protection against collision of multiple talloc
410 		 * libs.
411 		 *
412 		 * This protection is important because the effects of
413 		 * passing a talloc pointer from one to the other may
414 		 * be very hard to determine.
415 		 */
416 		int offset = rand() % (16 - sizeof(random_value));
417 		memcpy(&random_value, p + offset, sizeof(random_value));
418 	} else
419 #endif
420 	{
421 		/*
422 		 * Otherwise, hope the location we are loaded in
423 		 * memory is randomised by someone else
424 		 */
425 		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
426 	}
427 	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
428 }
429 #else
430 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
431 #endif
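/*
 * Added note: without constructor support talloc_magic keeps its
 * compile-time TALLOC_MAGIC_NON_RANDOM value, so the free/allocated checks
 * still work, just without per-process randomisation of the magic.
 */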
432 
static void talloc_lib_atexit(void)
434 {
435 	TALLOC_FREE(autofree_context);
436 
437 	if (talloc_total_size(null_context) == 0) {
438 		return;
439 	}
440 
441 	if (talloc_report_null_full) {
442 		talloc_report_full(null_context, stderr);
443 	} else if (talloc_report_null) {
444 		talloc_report(null_context, stderr);
445 	}
446 }
447 
static void talloc_setup_atexit(void)
449 {
450 	static bool done;
451 
452 	if (done) {
453 		return;
454 	}
455 
456 	atexit(talloc_lib_atexit);
457 	done = true;
458 }
459 
460 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
static void talloc_log(const char *fmt, ...)
462 {
463 	va_list ap;
464 	char *message;
465 
466 	if (!talloc_log_fn) {
467 		return;
468 	}
469 
470 	va_start(ap, fmt);
471 	message = talloc_vasprintf(NULL, fmt, ap);
472 	va_end(ap);
473 
474 	talloc_log_fn(message);
475 	talloc_free(message);
476 }
477 
static void talloc_log_stderr(const char *message)
479 {
480 	fprintf(stderr, "%s", message);
481 }
482 
_PUBLIC_ void talloc_set_log_stderr(void)
484 {
485 	talloc_set_log_fn(talloc_log_stderr);
486 }
487 
488 static void (*talloc_abort_fn)(const char *reason);
489 
_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
491 {
492 	talloc_abort_fn = abort_fn;
493 }
494 
static void talloc_abort(const char *reason)
496 {
497 	talloc_log("%s\n", reason);
498 
499 	if (!talloc_abort_fn) {
500 		TALLOC_ABORT(reason);
501 	}
502 
503 	talloc_abort_fn(reason);
504 }
505 
static void talloc_abort_access_after_free(void)
507 {
508 	talloc_abort("Bad talloc magic value - access after free");
509 }
510 
static void talloc_abort_unknown_value(void)
512 {
513 	talloc_abort("Bad talloc magic value - unknown value");
514 }
515 
516 /* panic if we get a bad magic value */
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
518 {
519 	const char *pp = (const char *)ptr;
520 	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
521 	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
522 		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
523 		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
524 			talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
525 			talloc_abort_access_after_free();
526 			return NULL;
527 		}
528 
529 		talloc_abort_unknown_value();
530 		return NULL;
531 	}
532 	return tc;
533 }
534 
535 /* hook into the front of the list */
536 #define _TLIST_ADD(list, p) \
537 do { \
538         if (!(list)) { \
539 		(list) = (p); \
540 		(p)->next = (p)->prev = NULL; \
541 	} else { \
542 		(list)->prev = (p); \
543 		(p)->next = (list); \
544 		(p)->prev = NULL; \
545 		(list) = (p); \
546 	}\
547 } while (0)
548 
549 /* remove an element from a list - element doesn't have to be in list. */
550 #define _TLIST_REMOVE(list, p) \
551 do { \
552 	if ((p) == (list)) { \
553 		(list) = (p)->next; \
554 		if (list) (list)->prev = NULL; \
555 	} else { \
556 		if ((p)->prev) (p)->prev->next = (p)->next; \
557 		if ((p)->next) (p)->next->prev = (p)->prev; \
558 	} \
559 	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
560 } while (0)
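/*
 * Added illustration: _TLIST_ADD pushes at the head of the list, so after
 *
 *   _TLIST_ADD(parent->child, tc);
 *
 * the newest child is parent->child and older children are reached via
 * ->next, which is why the code below always walks tc->child and then
 * ->next.
 */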
561 
562 
563 /*
564   return the parent chunk of a pointer
565 */
static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
567 {
568 	struct talloc_chunk *tc;
569 
570 	if (unlikely(ptr == NULL)) {
571 		return NULL;
572 	}
573 
574 	tc = talloc_chunk_from_ptr(ptr);
575 	while (tc->prev) tc=tc->prev;
576 
577 	return tc->parent;
578 }
579 
_PUBLIC_ void *talloc_parent(const void *ptr)
581 {
582 	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
583 	return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
584 }
585 
586 /*
  find the parent's name
588 */
_PUBLIC_ const char *talloc_parent_name(const void *ptr)
590 {
591 	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
592 	return tc? tc->name : NULL;
593 }
594 
595 /*
  A pool carries an in-pool object count in the first 16 bytes. This is
  done to support talloc_steal() to a parent outside of the
598   pool. The count includes the pool itself, so a talloc_free() on a pool will
599   only destroy the pool if the count has dropped to zero. A talloc_free() of a
600   pool member will reduce the count, and eventually also call free(3) on the
601   pool memory.
602 
603   The object count is not put into "struct talloc_chunk" because it is only
604   relevant for talloc pools and the alignment to 16 bytes would increase the
605   memory footprint of each talloc chunk by those 16 bytes.
606 */
607 
608 struct talloc_pool_hdr {
609 	void *end;
610 	unsigned int object_count;
611 	size_t poolsize;
612 };
613 
614 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
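/*
 * Pool layout sketch (added, derived from the helpers below):
 *
 *   [ struct talloc_pool_hdr ][ struct talloc_chunk of the pool ][ poolsize bytes ... ]
 *         TP_HDR_SIZE                  TC_HDR_SIZE
 *
 * pool_hdr->end points at the first unused byte of the pool memory;
 * tc_alloc_pool() bumps it forward, and the free/realloc paths below pull it
 * back when the most recent chunk is freed or shrunk, or reset it when the
 * pool becomes empty again.
 */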
615 
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
617 {
618 	return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
619 }
620 
static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
622 {
623 	return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
624 }
625 
static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
627 {
628 	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
629 	return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
630 }
631 
static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
633 {
634 	return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
635 }
636 
637 /* If tc is inside a pool, this gives the next neighbour. */
static inline void *tc_next_chunk(struct talloc_chunk *tc)
639 {
640 	return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
641 }
642 
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
644 {
645 	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
646 	return tc_next_chunk(tc);
647 }
648 
/* Mark the whole remaining pool as not accessible */
static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
651 {
652 	size_t flen = tc_pool_space_left(pool_hdr);
653 
654 	if (unlikely(talloc_fill.enabled)) {
655 		memset(pool_hdr->end, talloc_fill.fill_value, flen);
656 	}
657 
658 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
659 	VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
660 #endif
661 }
662 
663 /*
664   Allocate from a pool
665 */
666 
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
668 						     size_t size, size_t prefix_len)
669 {
670 	struct talloc_pool_hdr *pool_hdr = NULL;
671 	size_t space_left;
672 	struct talloc_chunk *result;
673 	size_t chunk_size;
674 
675 	if (parent == NULL) {
676 		return NULL;
677 	}
678 
679 	if (parent->flags & TALLOC_FLAG_POOL) {
680 		pool_hdr = talloc_pool_from_chunk(parent);
681 	}
682 	else if (parent->flags & TALLOC_FLAG_POOLMEM) {
683 		pool_hdr = parent->pool;
684 	}
685 
686 	if (pool_hdr == NULL) {
687 		return NULL;
688 	}
689 
690 	space_left = tc_pool_space_left(pool_hdr);
691 
692 	/*
693 	 * Align size to 16 bytes
694 	 */
695 	chunk_size = TC_ALIGN16(size + prefix_len);
696 
697 	if (space_left < chunk_size) {
698 		return NULL;
699 	}
700 
701 	result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
702 
703 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
704 	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
705 #endif
706 
707 	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
708 
709 	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
710 	result->pool = pool_hdr;
711 
712 	pool_hdr->object_count++;
713 
714 	return result;
715 }
716 
717 /*
718    Allocate a bit of memory as a child of an existing pointer
719 */
static inline void *__talloc_with_prefix(const void *context,
721 					size_t size,
722 					size_t prefix_len,
723 					struct talloc_chunk **tc_ret)
724 {
725 	struct talloc_chunk *tc = NULL;
726 	struct talloc_memlimit *limit = NULL;
727 	size_t total_len = TC_HDR_SIZE + size + prefix_len;
728 	struct talloc_chunk *parent = NULL;
729 
730 	if (unlikely(context == NULL)) {
731 		context = null_context;
732 	}
733 
734 	if (unlikely(size >= MAX_TALLOC_SIZE)) {
735 		return NULL;
736 	}
737 
738 	if (unlikely(total_len < TC_HDR_SIZE)) {
739 		return NULL;
740 	}
741 
742 	if (likely(context != NULL)) {
743 		parent = talloc_chunk_from_ptr(context);
744 
745 		if (parent->limit != NULL) {
746 			limit = parent->limit;
747 		}
748 
749 		tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
750 	}
751 
752 	if (tc == NULL) {
753 		char *ptr;
754 
755 		/*
756 		 * Only do the memlimit check/update on actual allocation.
757 		 */
758 		if (!talloc_memlimit_check(limit, total_len)) {
759 			errno = ENOMEM;
760 			return NULL;
761 		}
762 
763 		ptr = malloc(total_len);
764 		if (unlikely(ptr == NULL)) {
765 			return NULL;
766 		}
767 		tc = (struct talloc_chunk *)(ptr + prefix_len);
768 		tc->flags = talloc_magic;
769 		tc->pool  = NULL;
770 
771 		talloc_memlimit_grow(limit, total_len);
772 	}
773 
774 	tc->limit = limit;
775 	tc->size = size;
776 	tc->destructor = NULL;
777 	tc->child = NULL;
778 	tc->name = NULL;
779 	tc->refs = NULL;
780 
781 	if (likely(context != NULL)) {
782 		if (parent->child) {
783 			parent->child->parent = NULL;
784 			tc->next = parent->child;
785 			tc->next->prev = tc;
786 		} else {
787 			tc->next = NULL;
788 		}
789 		tc->parent = parent;
790 		tc->prev = NULL;
791 		parent->child = tc;
792 	} else {
793 		tc->next = tc->prev = tc->parent = NULL;
794 	}
795 
796 	*tc_ret = tc;
797 	return TC_PTR_FROM_CHUNK(tc);
798 }
799 
static inline void *__talloc(const void *context,
801 			size_t size,
802 			struct talloc_chunk **tc)
803 {
804 	return __talloc_with_prefix(context, size, 0, tc);
805 }
806 
807 /*
808  * Create a talloc pool
809  */
810 
static inline void *_talloc_pool(const void *context, size_t size)
812 {
813 	struct talloc_chunk *tc;
814 	struct talloc_pool_hdr *pool_hdr;
815 	void *result;
816 
817 	result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
818 
819 	if (unlikely(result == NULL)) {
820 		return NULL;
821 	}
822 
823 	pool_hdr = talloc_pool_from_chunk(tc);
824 
825 	tc->flags |= TALLOC_FLAG_POOL;
826 	tc->size = 0;
827 
828 	pool_hdr->object_count = 1;
829 	pool_hdr->end = result;
830 	pool_hdr->poolsize = size;
831 
832 	tc_invalidate_pool(pool_hdr);
833 
834 	return result;
835 }
836 
_PUBLIC_ void *talloc_pool(const void *context, size_t size)
838 {
839 	return _talloc_pool(context, size);
840 }
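/*
 * Usage sketch (illustrative only; "struct foo" is a made-up example type):
 *
 *   TALLOC_CTX *pool = talloc_pool(NULL, 8192);
 *   struct foo *x = talloc(pool, struct foo);   // carved out of the pool
 *   char *s = talloc_strdup(pool, "hello");     // ditto, while space lasts
 *   talloc_free(pool);                          // releases the whole pool
 *
 * Children that no longer fit in the remaining pool space quietly fall back
 * to an ordinary malloc()-backed chunk via __talloc_with_prefix().
 */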
841 
842 /*
843  * Create a talloc pool correctly sized for a basic size plus
844  * a number of subobjects whose total size is given. Essentially
845  * a custom allocator for talloc to reduce fragmentation.
846  */
847 
_PUBLIC_ void *_talloc_pooled_object(const void *ctx,
849 				     size_t type_size,
850 				     const char *type_name,
851 				     unsigned num_subobjects,
852 				     size_t total_subobjects_size)
853 {
854 	size_t poolsize, subobjects_slack, tmp;
855 	struct talloc_chunk *tc;
856 	struct talloc_pool_hdr *pool_hdr;
857 	void *ret;
858 
859 	poolsize = type_size + total_subobjects_size;
860 
861 	if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
862 		goto overflow;
863 	}
864 
865 	if (num_subobjects == UINT_MAX) {
866 		goto overflow;
867 	}
868 	num_subobjects += 1;       /* the object body itself */
869 
870 	/*
871 	 * Alignment can increase the pool size by at most 15 bytes per object
872 	 * plus alignment for the object itself
873 	 */
874 	subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
875 	if (subobjects_slack < num_subobjects) {
876 		goto overflow;
877 	}
878 
879 	tmp = poolsize + subobjects_slack;
880 	if ((tmp < poolsize) || (tmp < subobjects_slack)) {
881 		goto overflow;
882 	}
883 	poolsize = tmp;
884 
885 	ret = _talloc_pool(ctx, poolsize);
886 	if (ret == NULL) {
887 		return NULL;
888 	}
889 
890 	tc = talloc_chunk_from_ptr(ret);
891 	tc->size = type_size;
892 
893 	pool_hdr = talloc_pool_from_chunk(tc);
894 
895 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
896 	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
897 #endif
898 
899 	pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
900 
901 	_tc_set_name_const(tc, type_name);
902 	return ret;
903 
904 overflow:
905 	return NULL;
906 }
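/*
 * Worked example (added): for a 64 byte type with 3 subobjects totalling
 * 200 bytes, the code above requests
 *
 *   poolsize = 64 + 200 + (TC_HDR_SIZE + TP_HDR_SIZE + 15) * 4
 *
 * i.e. the payload plus per-object header and alignment slack, so every
 * subobject is guaranteed to fit inside the pool.  Callers normally reach
 * this through the talloc_pooled_object() macro in talloc.h.
 */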
907 
908 /*
  set up a destructor to be called on free of a pointer.
  the destructor should return 0 on success, or -1 on failure.
  if the destructor fails then the free fails, and the memory can
  continue to be used
913 */
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
915 {
916 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
917 	tc->destructor = destructor;
918 }
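/*
 * Usage sketch (illustrative; the type and function names are invented):
 *
 *   static int my_handle_destructor(struct my_handle *h)
 *   {
 *           close(h->fd);
 *           return 0;        // 0 lets the free proceed, -1 aborts it
 *   }
 *   ...
 *   talloc_set_destructor(h, my_handle_destructor);
 *
 * The public talloc_set_destructor() macro in talloc.h type-checks the
 * destructor against the pointer's type and then calls down into
 * _talloc_set_destructor() with the generic int (*)(void *) signature.
 */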
919 
920 /*
921   increase the reference count on a piece of memory.
922 */
_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
924 {
925 	if (unlikely(!talloc_reference(null_context, ptr))) {
926 		return -1;
927 	}
928 	return 0;
929 }
930 
931 /*
932   helper for talloc_reference()
933 
934   this is referenced by a function pointer and should not be inline
935 */
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
937 {
938 	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
939 	_TLIST_REMOVE(ptr_tc->refs, handle);
940 	return 0;
941 }
942 
943 /*
944    more efficient way to add a name to a pointer - the name must point to a
945    true string constant
946 */
static inline void _tc_set_name_const(struct talloc_chunk *tc,
948 					const char *name)
949 {
950 	tc->name = name;
951 }
952 
953 /*
954   internal talloc_named_const()
955 */
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
957 {
958 	void *ptr;
959 	struct talloc_chunk *tc;
960 
961 	ptr = __talloc(context, size, &tc);
962 	if (unlikely(ptr == NULL)) {
963 		return NULL;
964 	}
965 
966 	_tc_set_name_const(tc, name);
967 
968 	return ptr;
969 }
970 
971 /*
972   make a secondary reference to a pointer, hanging off the given context.
973   the pointer remains valid until both the original caller and this given
974   context are freed.
975 
976   the major use for this is when two different structures need to reference the
977   same underlying data, and you want to be able to free the two instances separately,
978   and in either order
979 */
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
981 {
982 	struct talloc_chunk *tc;
983 	struct talloc_reference_handle *handle;
984 	if (unlikely(ptr == NULL)) return NULL;
985 
986 	tc = talloc_chunk_from_ptr(ptr);
987 	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
988 						   sizeof(struct talloc_reference_handle),
989 						   TALLOC_MAGIC_REFERENCE);
990 	if (unlikely(handle == NULL)) return NULL;
991 
992 	/* note that we hang the destructor off the handle, not the
993 	   main context as that allows the caller to still setup their
994 	   own destructor on the context if they want to */
995 	talloc_set_destructor(handle, talloc_reference_destructor);
996 	handle->ptr = discard_const_p(void, ptr);
997 	handle->location = location;
998 	_TLIST_ADD(tc->refs, handle);
999 	return handle->ptr;
1000 }
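/*
 * Usage sketch (added): the talloc_reference() macro in talloc.h ends up
 * here.  A typical pattern is
 *
 *   ptr = talloc_reference(ctx2, ptr);   // ptr is now also kept alive by ctx2
 *   ...
 *   talloc_unlink(ctx2, ptr);            // drop that extra reference again
 *
 * Internally each reference is just a small child chunk of the referencing
 * context whose destructor (talloc_reference_destructor above) unhooks it
 * from tc->refs.
 */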
1001 
1002 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1003 
static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1005 					const char *location)
1006 {
1007 	struct talloc_pool_hdr *pool;
1008 	struct talloc_chunk *pool_tc;
1009 	void *next_tc;
1010 
1011 	pool = tc->pool;
1012 	pool_tc = talloc_chunk_from_pool(pool);
1013 	next_tc = tc_next_chunk(tc);
1014 
1015 	_talloc_chunk_set_free(tc, location);
1016 
1017 	TC_INVALIDATE_FULL_CHUNK(tc);
1018 
1019 	if (unlikely(pool->object_count == 0)) {
1020 		talloc_abort("Pool object count zero!");
1021 		return;
1022 	}
1023 
1024 	pool->object_count--;
1025 
1026 	if (unlikely(pool->object_count == 1
1027 		     && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1028 		/*
1029 		 * if there is just one object left in the pool
		 * and pool_tc->flags does not have TALLOC_FLAG_FREE,
1031 		 * it means this is the pool itself and
1032 		 * the rest is available for new objects
1033 		 * again.
1034 		 */
1035 		pool->end = tc_pool_first_chunk(pool);
1036 		tc_invalidate_pool(pool);
1037 		return;
1038 	}
1039 
1040 	if (unlikely(pool->object_count == 0)) {
1041 		/*
1042 		 * we mark the freed memory with where we called the free
1043 		 * from. This means on a double free error we can report where
1044 		 * the first free came from
1045 		 */
1046 		pool_tc->name = location;
1047 
1048 		if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1049 			_tc_free_poolmem(pool_tc, location);
1050 		} else {
1051 			/*
1052 			 * The tc_memlimit_update_on_free()
1053 			 * call takes into account the
1054 			 * prefix TP_HDR_SIZE allocated before
1055 			 * the pool talloc_chunk.
1056 			 */
1057 			tc_memlimit_update_on_free(pool_tc);
1058 			TC_INVALIDATE_FULL_CHUNK(pool_tc);
1059 			free(pool);
1060 		}
1061 		return;
1062 	}
1063 
1064 	if (pool->end == next_tc) {
1065 		/*
		 * if pool->end still points to the end of
		 * 'tc' (which is stored in the 'next_tc' variable),
1068 		 * we can reclaim the memory of 'tc'.
1069 		 */
1070 		pool->end = tc;
1071 		return;
1072 	}
1073 
1074 	/*
1075 	 * Do nothing. The memory is just "wasted", waiting for the pool
1076 	 * itself to be freed.
1077 	 */
1078 }
1079 
1080 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1081 						  void *ptr,
1082 						  const char *location);
1083 
1084 static inline int _talloc_free_internal(void *ptr, const char *location);
1085 
1086 /*
1087    internal free call that takes a struct talloc_chunk *.
1088 */
static inline int _tc_free_internal(struct talloc_chunk *tc,
1090 				const char *location)
1091 {
1092 	void *ptr_to_free;
1093 	void *ptr = TC_PTR_FROM_CHUNK(tc);
1094 
1095 	if (unlikely(tc->refs)) {
1096 		int is_child;
1097 		/* check if this is a reference from a child or
		 * grandchild back to its parent or grandparent
1099 		 *
1100 		 * in that case we need to remove the reference and
1101 		 * call another instance of talloc_free() on the current
1102 		 * pointer.
1103 		 */
1104 		is_child = talloc_is_parent(tc->refs, ptr);
1105 		_talloc_free_internal(tc->refs, location);
1106 		if (is_child) {
1107 			return _talloc_free_internal(ptr, location);
1108 		}
1109 		return -1;
1110 	}
1111 
1112 	if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1113 		/* we have a free loop - stop looping */
1114 		return 0;
1115 	}
1116 
1117 	if (unlikely(tc->destructor)) {
1118 		talloc_destructor_t d = tc->destructor;
1119 
1120 		/*
1121 		 * Protect the destructor against some overwrite
1122 		 * attacks, by explicitly checking it has the right
1123 		 * magic here.
1124 		 */
1125 		if (talloc_chunk_from_ptr(ptr) != tc) {
1126 			/*
1127 			 * This can't actually happen, the
1128 			 * call itself will panic.
1129 			 */
1130 			TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1131 		}
1132 
1133 		if (d == (talloc_destructor_t)-1) {
1134 			return -1;
1135 		}
1136 		tc->destructor = (talloc_destructor_t)-1;
1137 		if (d(ptr) == -1) {
1138 			/*
1139 			 * Only replace the destructor pointer if
1140 			 * calling the destructor didn't modify it.
1141 			 */
1142 			if (tc->destructor == (talloc_destructor_t)-1) {
1143 				tc->destructor = d;
1144 			}
1145 			return -1;
1146 		}
1147 		tc->destructor = NULL;
1148 	}
1149 
1150 	if (tc->parent) {
1151 		_TLIST_REMOVE(tc->parent->child, tc);
1152 		if (tc->parent->child) {
1153 			tc->parent->child->parent = tc->parent;
1154 		}
1155 	} else {
1156 		if (tc->prev) tc->prev->next = tc->next;
1157 		if (tc->next) tc->next->prev = tc->prev;
1158 		tc->prev = tc->next = NULL;
1159 	}
1160 
1161 	tc->flags |= TALLOC_FLAG_LOOP;
1162 
1163 	_tc_free_children_internal(tc, ptr, location);
1164 
1165 	_talloc_chunk_set_free(tc, location);
1166 
1167 	if (tc->flags & TALLOC_FLAG_POOL) {
1168 		struct talloc_pool_hdr *pool;
1169 
1170 		pool = talloc_pool_from_chunk(tc);
1171 
1172 		if (unlikely(pool->object_count == 0)) {
1173 			talloc_abort("Pool object count zero!");
1174 			return 0;
1175 		}
1176 
1177 		pool->object_count--;
1178 
1179 		if (likely(pool->object_count != 0)) {
1180 			return 0;
1181 		}
1182 
1183 		/*
1184 		 * With object_count==0, a pool becomes a normal piece of
1185 		 * memory to free. If it's allocated inside a pool, it needs
1186 		 * to be freed as poolmem, else it needs to be just freed.
1187 		*/
1188 		ptr_to_free = pool;
1189 	} else {
1190 		ptr_to_free = tc;
1191 	}
1192 
1193 	if (tc->flags & TALLOC_FLAG_POOLMEM) {
1194 		_tc_free_poolmem(tc, location);
1195 		return 0;
1196 	}
1197 
1198 	tc_memlimit_update_on_free(tc);
1199 
1200 	TC_INVALIDATE_FULL_CHUNK(tc);
1201 	free(ptr_to_free);
1202 	return 0;
1203 }
1204 
1205 /*
1206    internal talloc_free call
1207 */
static inline int _talloc_free_internal(void *ptr, const char *location)
1209 {
1210 	struct talloc_chunk *tc;
1211 
1212 	if (unlikely(ptr == NULL)) {
1213 		return -1;
1214 	}
1215 
	/* possibly initialise the talloc fill value */
1217 	if (unlikely(!talloc_fill.initialised)) {
1218 		const char *fill = getenv(TALLOC_FILL_ENV);
1219 		if (fill != NULL) {
1220 			talloc_fill.enabled = true;
1221 			talloc_fill.fill_value = strtoul(fill, NULL, 0);
1222 		}
1223 		talloc_fill.initialised = true;
1224 	}
1225 
1226 	tc = talloc_chunk_from_ptr(ptr);
1227 	return _tc_free_internal(tc, location);
1228 }
1229 
1230 static inline size_t _talloc_total_limit_size(const void *ptr,
1231 					struct talloc_memlimit *old_limit,
1232 					struct talloc_memlimit *new_limit);
1233 
1234 /*
   move a lump of memory from one talloc context to another. return the
   ptr on success, or NULL if it could not be transferred.
1237    passing NULL as ptr will always return NULL with no side effects.
1238 */
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1240 {
1241 	struct talloc_chunk *tc, *new_tc;
1242 	size_t ctx_size = 0;
1243 
1244 	if (unlikely(!ptr)) {
1245 		return NULL;
1246 	}
1247 
1248 	if (unlikely(new_ctx == NULL)) {
1249 		new_ctx = null_context;
1250 	}
1251 
1252 	tc = talloc_chunk_from_ptr(ptr);
1253 
1254 	if (tc->limit != NULL) {
1255 
1256 		ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1257 
1258 		/* Decrement the memory limit from the source .. */
1259 		talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1260 
1261 		if (tc->limit->parent == tc) {
1262 			tc->limit->upper = NULL;
1263 		} else {
1264 			tc->limit = NULL;
1265 		}
1266 	}
1267 
1268 	if (unlikely(new_ctx == NULL)) {
1269 		if (tc->parent) {
1270 			_TLIST_REMOVE(tc->parent->child, tc);
1271 			if (tc->parent->child) {
1272 				tc->parent->child->parent = tc->parent;
1273 			}
1274 		} else {
1275 			if (tc->prev) tc->prev->next = tc->next;
1276 			if (tc->next) tc->next->prev = tc->prev;
1277 		}
1278 
1279 		tc->parent = tc->next = tc->prev = NULL;
1280 		return discard_const_p(void, ptr);
1281 	}
1282 
1283 	new_tc = talloc_chunk_from_ptr(new_ctx);
1284 
1285 	if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1286 		return discard_const_p(void, ptr);
1287 	}
1288 
1289 	if (tc->parent) {
1290 		_TLIST_REMOVE(tc->parent->child, tc);
1291 		if (tc->parent->child) {
1292 			tc->parent->child->parent = tc->parent;
1293 		}
1294 	} else {
1295 		if (tc->prev) tc->prev->next = tc->next;
1296 		if (tc->next) tc->next->prev = tc->prev;
1297 		tc->prev = tc->next = NULL;
1298 	}
1299 
1300 	tc->parent = new_tc;
1301 	if (new_tc->child) new_tc->child->parent = NULL;
1302 	_TLIST_ADD(new_tc->child, tc);
1303 
1304 	if (tc->limit || new_tc->limit) {
1305 		ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1306 						    new_tc->limit);
1307 		/* .. and increment it in the destination. */
1308 		if (new_tc->limit) {
1309 			talloc_memlimit_grow(new_tc->limit, ctx_size);
1310 		}
1311 	}
1312 
1313 	return discard_const_p(void, ptr);
1314 }
1315 
1316 /*
   move a lump of memory from one talloc context to another. return the
   ptr on success, or NULL if it could not be transferred.
1319    passing NULL as ptr will always return NULL with no side effects.
1320 */
_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1322 {
1323 	struct talloc_chunk *tc;
1324 
1325 	if (unlikely(ptr == NULL)) {
1326 		return NULL;
1327 	}
1328 
1329 	tc = talloc_chunk_from_ptr(ptr);
1330 
1331 	if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1332 		struct talloc_reference_handle *h;
1333 
1334 		talloc_log("WARNING: talloc_steal with references at %s\n",
1335 			   location);
1336 
1337 		for (h=tc->refs; h; h=h->next) {
1338 			talloc_log("\treference at %s\n",
1339 				   h->location);
1340 		}
1341 	}
1342 
1343 #if 0
1344 	/* this test is probably too expensive to have on in the
	   normal build, but it is useful for debugging */
1346 	if (talloc_is_parent(new_ctx, ptr)) {
1347 		talloc_log("WARNING: stealing into talloc child at %s\n", location);
1348 	}
1349 #endif
1350 
1351 	return _talloc_steal_internal(new_ctx, ptr);
1352 }
1353 
1354 /*
1355    this is like a talloc_steal(), but you must supply the old
1356    parent. This resolves the ambiguity in a talloc_steal() which is
1357    called on a context that has more than one parent (via references)
1358 
1359    The old parent can be either a reference or a parent
1360 */
_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1362 {
1363 	struct talloc_chunk *tc;
1364 	struct talloc_reference_handle *h;
1365 
1366 	if (unlikely(ptr == NULL)) {
1367 		return NULL;
1368 	}
1369 
1370 	if (old_parent == talloc_parent(ptr)) {
1371 		return _talloc_steal_internal(new_parent, ptr);
1372 	}
1373 
1374 	tc = talloc_chunk_from_ptr(ptr);
1375 	for (h=tc->refs;h;h=h->next) {
1376 		if (talloc_parent(h) == old_parent) {
1377 			if (_talloc_steal_internal(new_parent, h) != h) {
1378 				return NULL;
1379 			}
1380 			return discard_const_p(void, ptr);
1381 		}
1382 	}
1383 
1384 	/* it wasn't a parent */
1385 	return NULL;
1386 }
1387 
1388 /*
  remove a secondary reference to a pointer. This undoes what
1390   talloc_reference() has done. The context and pointer arguments
1391   must match those given to a talloc_reference()
1392 */
static inline int talloc_unreference(const void *context, const void *ptr)
1394 {
1395 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1396 	struct talloc_reference_handle *h;
1397 
1398 	if (unlikely(context == NULL)) {
1399 		context = null_context;
1400 	}
1401 
1402 	for (h=tc->refs;h;h=h->next) {
1403 		struct talloc_chunk *p = talloc_parent_chunk(h);
1404 		if (p == NULL) {
1405 			if (context == NULL) break;
1406 		} else if (TC_PTR_FROM_CHUNK(p) == context) {
1407 			break;
1408 		}
1409 	}
1410 	if (h == NULL) {
1411 		return -1;
1412 	}
1413 
1414 	return _talloc_free_internal(h, __location__);
1415 }
1416 
1417 /*
1418   remove a specific parent context from a pointer. This is a more
1419   controlled variant of talloc_free()
1420 */
_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1422 {
1423 	struct talloc_chunk *tc_p, *new_p, *tc_c;
1424 	void *new_parent;
1425 
1426 	if (ptr == NULL) {
1427 		return -1;
1428 	}
1429 
1430 	if (context == NULL) {
1431 		context = null_context;
1432 	}
1433 
1434 	if (talloc_unreference(context, ptr) == 0) {
1435 		return 0;
1436 	}
1437 
1438 	if (context != NULL) {
1439 		tc_c = talloc_chunk_from_ptr(context);
1440 	} else {
1441 		tc_c = NULL;
1442 	}
1443 	if (tc_c != talloc_parent_chunk(ptr)) {
1444 		return -1;
1445 	}
1446 
1447 	tc_p = talloc_chunk_from_ptr(ptr);
1448 
1449 	if (tc_p->refs == NULL) {
1450 		return _talloc_free_internal(ptr, __location__);
1451 	}
1452 
1453 	new_p = talloc_parent_chunk(tc_p->refs);
1454 	if (new_p) {
1455 		new_parent = TC_PTR_FROM_CHUNK(new_p);
1456 	} else {
1457 		new_parent = NULL;
1458 	}
1459 
1460 	if (talloc_unreference(new_parent, ptr) != 0) {
1461 		return -1;
1462 	}
1463 
1464 	_talloc_steal_internal(new_parent, ptr);
1465 
1466 	return 0;
1467 }
1468 
1469 /*
1470   add a name to an existing pointer - va_list version
1471 */
1472 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1473 				const char *fmt,
1474 				va_list ap) PRINTF_ATTRIBUTE(2,0);
1475 
static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1477 				const char *fmt,
1478 				va_list ap)
1479 {
1480 	struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1481 							fmt,
1482 							ap);
1483 	if (likely(name_tc)) {
1484 		tc->name = TC_PTR_FROM_CHUNK(name_tc);
1485 		_tc_set_name_const(name_tc, ".name");
1486 	} else {
1487 		tc->name = NULL;
1488 	}
1489 	return tc->name;
1490 }
1491 
1492 /*
1493   add a name to an existing pointer
1494 */
_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1496 {
1497 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1498 	const char *name;
1499 	va_list ap;
1500 	va_start(ap, fmt);
1501 	name = tc_set_name_v(tc, fmt, ap);
1502 	va_end(ap);
1503 	return name;
1504 }
1505 
1506 
1507 /*
1508   create a named talloc pointer. Any talloc pointer can be named, and
1509   talloc_named() operates just like talloc() except that it allows you
1510   to name the pointer.
1511 */
_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1513 {
1514 	va_list ap;
1515 	void *ptr;
1516 	const char *name;
1517 	struct talloc_chunk *tc;
1518 
1519 	ptr = __talloc(context, size, &tc);
1520 	if (unlikely(ptr == NULL)) return NULL;
1521 
1522 	va_start(ap, fmt);
1523 	name = tc_set_name_v(tc, fmt, ap);
1524 	va_end(ap);
1525 
1526 	if (unlikely(name == NULL)) {
1527 		_talloc_free_internal(ptr, __location__);
1528 		return NULL;
1529 	}
1530 
1531 	return ptr;
1532 }
1533 
1534 /*
1535   return the name of a talloc ptr, or "UNNAMED"
1536 */
static inline const char *__talloc_get_name(const void *ptr)
1538 {
1539 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1540 	if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1541 		return ".reference";
1542 	}
1543 	if (likely(tc->name)) {
1544 		return tc->name;
1545 	}
1546 	return "UNNAMED";
1547 }
1548 
_PUBLIC_ const char *talloc_get_name(const void *ptr)
1550 {
1551 	return __talloc_get_name(ptr);
1552 }
1553 
1554 /*
1555   check if a pointer has the given name. If it does, return the pointer,
1556   otherwise return NULL
1557 */
_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1559 {
1560 	const char *pname;
1561 	if (unlikely(ptr == NULL)) return NULL;
1562 	pname = __talloc_get_name(ptr);
1563 	if (likely(pname == name || strcmp(pname, name) == 0)) {
1564 		return discard_const_p(void, ptr);
1565 	}
1566 	return NULL;
1567 }
1568 
static void talloc_abort_type_mismatch(const char *location,
1570 					const char *name,
1571 					const char *expected)
1572 {
1573 	const char *reason;
1574 
1575 	reason = talloc_asprintf(NULL,
1576 				 "%s: Type mismatch: name[%s] expected[%s]",
1577 				 location,
1578 				 name?name:"NULL",
1579 				 expected);
1580 	if (!reason) {
1581 		reason = "Type mismatch";
1582 	}
1583 
1584 	talloc_abort(reason);
1585 }
1586 
_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1588 {
1589 	const char *pname;
1590 
1591 	if (unlikely(ptr == NULL)) {
1592 		talloc_abort_type_mismatch(location, NULL, name);
1593 		return NULL;
1594 	}
1595 
1596 	pname = __talloc_get_name(ptr);
1597 	if (likely(pname == name || strcmp(pname, name) == 0)) {
1598 		return discard_const_p(void, ptr);
1599 	}
1600 
1601 	talloc_abort_type_mismatch(location, pname, name);
1602 	return NULL;
1603 }
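/*
 * Added note: this is the backend of the talloc_get_type_abort() macro.
 * The name it compares against is the chunk name, which talloc(),
 * talloc_zero() and friends set to the stringified type (e.g. "struct foo"),
 * so a mismatch here almost always means a pointer of the wrong type was
 * passed around.
 */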
1604 
1605 /*
1606   this is for compatibility with older versions of talloc
1607 */
_PUBLIC_ void *talloc_init(const char *fmt, ...)
1609 {
1610 	va_list ap;
1611 	void *ptr;
1612 	const char *name;
1613 	struct talloc_chunk *tc;
1614 
1615 	ptr = __talloc(NULL, 0, &tc);
1616 	if (unlikely(ptr == NULL)) return NULL;
1617 
1618 	va_start(ap, fmt);
1619 	name = tc_set_name_v(tc, fmt, ap);
1620 	va_end(ap);
1621 
1622 	if (unlikely(name == NULL)) {
1623 		_talloc_free_internal(ptr, __location__);
1624 		return NULL;
1625 	}
1626 
1627 	return ptr;
1628 }
1629 
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1631 						  void *ptr,
1632 						  const char *location)
1633 {
1634 	while (tc->child) {
1635 		/* we need to work out who will own an abandoned child
1636 		   if it cannot be freed. In priority order, the first
1637 		   choice is owner of any remaining reference to this
1638 		   pointer, the second choice is our parent, and the
1639 		   final choice is the null context. */
1640 		void *child = TC_PTR_FROM_CHUNK(tc->child);
1641 		const void *new_parent = null_context;
1642 		if (unlikely(tc->child->refs)) {
1643 			struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1644 			if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1645 		}
1646 		if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1647 			if (talloc_parent_chunk(child) != tc) {
1648 				/*
1649 				 * Destructor already reparented this child.
1650 				 * No further reparenting needed.
1651 				 */
1652 				continue;
1653 			}
1654 			if (new_parent == null_context) {
1655 				struct talloc_chunk *p = talloc_parent_chunk(ptr);
1656 				if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1657 			}
1658 			_talloc_steal_internal(new_parent, child);
1659 		}
1660 	}
1661 }
1662 
1663 /*
1664   this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1665   should probably not be used in new code. It's in here to keep the talloc
1666   code consistent across Samba 3 and 4.
1667 */
_PUBLIC_ void talloc_free_children(void *ptr)
1669 {
1670 	struct talloc_chunk *tc_name = NULL;
1671 	struct talloc_chunk *tc;
1672 
1673 	if (unlikely(ptr == NULL)) {
1674 		return;
1675 	}
1676 
1677 	tc = talloc_chunk_from_ptr(ptr);
1678 
1679 	/* we do not want to free the context name if it is a child .. */
1680 	if (likely(tc->child)) {
1681 		for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1682 			if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1683 		}
1684 		if (tc_name) {
1685 			_TLIST_REMOVE(tc->child, tc_name);
1686 			if (tc->child) {
1687 				tc->child->parent = tc;
1688 			}
1689 		}
1690 	}
1691 
1692 	_tc_free_children_internal(tc, ptr, __location__);
1693 
1694 	/* .. so we put it back after all other children have been freed */
1695 	if (tc_name) {
1696 		if (tc->child) {
1697 			tc->child->parent = NULL;
1698 		}
1699 		tc_name->parent = tc;
1700 		_TLIST_ADD(tc->child, tc_name);
1701 	}
1702 }
1703 
1704 /*
1705    Allocate a bit of memory as a child of an existing pointer
1706 */
_PUBLIC_ void *_talloc(const void *context, size_t size)
1708 {
1709 	struct talloc_chunk *tc;
1710 	return __talloc(context, size, &tc);
1711 }
1712 
1713 /*
1714   externally callable talloc_set_name_const()
1715 */
_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1717 {
1718 	_tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1719 }
1720 
1721 /*
1722   create a named talloc pointer. Any talloc pointer can be named, and
1723   talloc_named() operates just like talloc() except that it allows you
1724   to name the pointer.
1725 */
_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1727 {
1728 	return _talloc_named_const(context, size, name);
1729 }
1730 
1731 /*
1732    free a talloc pointer. This also frees all child pointers of this
1733    pointer recursively
1734 
1735    return 0 if the memory is actually freed, otherwise -1. The memory
1736    will not be freed if the ref_count is > 1 or the destructor (if
1737    any) returns non-zero
1738 */
_PUBLIC_ int _talloc_free(void *ptr, const char *location)
1740 {
1741 	struct talloc_chunk *tc;
1742 
1743 	if (unlikely(ptr == NULL)) {
1744 		return -1;
1745 	}
1746 
1747 	tc = talloc_chunk_from_ptr(ptr);
1748 
1749 	if (unlikely(tc->refs != NULL)) {
1750 		struct talloc_reference_handle *h;
1751 
1752 		if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1753 			/* in this case we do know which parent should
1754 			   get this pointer, as there is really only
1755 			   one parent */
1756 			return talloc_unlink(null_context, ptr);
1757 		}
1758 
1759 		talloc_log("ERROR: talloc_free with references at %s\n",
1760 			   location);
1761 
1762 		for (h=tc->refs; h; h=h->next) {
1763 			talloc_log("\treference at %s\n",
1764 				   h->location);
1765 		}
1766 		return -1;
1767 	}
1768 
1769 	return _talloc_free_internal(ptr, location);
1770 }
1771 
1772 
1773 
1774 /*
1775   A talloc version of realloc. The context argument is only used if
1776   ptr is NULL
1777 */
_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1779 {
1780 	struct talloc_chunk *tc;
1781 	void *new_ptr;
1782 	bool malloced = false;
1783 	struct talloc_pool_hdr *pool_hdr = NULL;
1784 	size_t old_size = 0;
1785 	size_t new_size = 0;
1786 
1787 	/* size zero is equivalent to free() */
1788 	if (unlikely(size == 0)) {
1789 		talloc_unlink(context, ptr);
1790 		return NULL;
1791 	}
1792 
1793 	if (unlikely(size >= MAX_TALLOC_SIZE)) {
1794 		return NULL;
1795 	}
1796 
1797 	/* realloc(NULL) is equivalent to malloc() */
1798 	if (ptr == NULL) {
1799 		return _talloc_named_const(context, size, name);
1800 	}
1801 
1802 	tc = talloc_chunk_from_ptr(ptr);
1803 
1804 	/* don't allow realloc on referenced pointers */
1805 	if (unlikely(tc->refs)) {
1806 		return NULL;
1807 	}
1808 
1809 	/* don't let anybody try to realloc a talloc_pool */
1810 	if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1811 		return NULL;
1812 	}
1813 
1814 	if (tc->limit && (size > tc->size)) {
1815 		if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1816 			errno = ENOMEM;
1817 			return NULL;
1818 		}
1819 	}
1820 
1821 	/* handle realloc inside a talloc_pool */
1822 	if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1823 		pool_hdr = tc->pool;
1824 	}
1825 
1826 #if (ALWAYS_REALLOC == 0)
1827 	/* don't shrink if we have less than 1k to gain */
1828 	if (size < tc->size && tc->limit == NULL) {
1829 		if (pool_hdr) {
1830 			void *next_tc = tc_next_chunk(tc);
1831 			TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1832 			tc->size = size;
1833 			if (next_tc == pool_hdr->end) {
1834 				/* note: tc->size has changed, so this works */
1835 				pool_hdr->end = tc_next_chunk(tc);
1836 			}
1837 			return ptr;
1838 		} else if ((tc->size - size) < 1024) {
1839 			/*
1840 			 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1841 			 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1842 			 * after each realloc call, which slows down
1843 			 * testing a lot :-(.
1844 			 *
1845 			 * That is why we only mark memory as undefined here.
1846 			 */
1847 			TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1848 
1849 			/* do not shrink if we have less than 1k to gain */
1850 			tc->size = size;
1851 			return ptr;
1852 		}
1853 	} else if (tc->size == size) {
1854 		/*
1855 		 * do not change the pointer if it is exactly
1856 		 * the same size.
1857 		 */
1858 		return ptr;
1859 	}
1860 #endif
1861 
1862 	/*
1863 	 * by resetting magic we catch users of the old memory
1864 	 *
1865 	 * We mark this memory as free, and also over-stamp the talloc
1866 	 * magic with the old-style magic.
1867 	 *
1868 	 * Why?  This tries to avoid a memory read use-after-free from
1869 	 * disclosing our talloc magic, which would then allow an
1870 	 * attacker to prepare a valid header and so run a destructor.
1871 	 *
1872 	 * What else?  We have to re-stamp back a valid normal magic
1873 	 * on this memory once realloc() is done, as it will have done
1874 	 * a memcpy() into the new valid memory.  We can't do this in
1875 	 * reverse as that would be a real use-after-free.
1876 	 */
1877 	_talloc_chunk_set_free(tc, NULL);
1878 
1879 #if ALWAYS_REALLOC
1880 	if (pool_hdr) {
1881 		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1882 		pool_hdr->object_count--;
1883 
1884 		if (new_ptr == NULL) {
1885 			new_ptr = malloc(TC_HDR_SIZE+size);
1886 			malloced = true;
1887 			new_size = size;
1888 		}
1889 
1890 		if (new_ptr) {
1891 			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1892 			TC_INVALIDATE_FULL_CHUNK(tc);
1893 		}
1894 	} else {
1895 		/* We're doing malloc then free here, so record the difference. */
1896 		old_size = tc->size;
1897 		new_size = size;
1898 		new_ptr = malloc(size + TC_HDR_SIZE);
1899 		if (new_ptr) {
1900 			memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1901 			free(tc);
1902 		}
1903 	}
1904 #else
1905 	if (pool_hdr) {
1906 		struct talloc_chunk *pool_tc;
1907 		void *next_tc = tc_next_chunk(tc);
1908 		size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1909 		size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1910 		size_t space_needed;
1911 		size_t space_left;
1912 		unsigned int chunk_count = pool_hdr->object_count;
1913 
1914 		pool_tc = talloc_chunk_from_pool(pool_hdr);
1915 		if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1916 			chunk_count -= 1;
1917 		}
1918 
1919 		if (chunk_count == 1) {
1920 			/*
1921 			 * optimize for the case where 'tc' is the only
1922 			 * chunk in the pool.
1923 			 */
1924 			char *start = tc_pool_first_chunk(pool_hdr);
1925 			space_needed = new_chunk_size;
1926 			space_left = (char *)tc_pool_end(pool_hdr) - start;
1927 
1928 			if (space_left >= space_needed) {
1929 				size_t old_used = TC_HDR_SIZE + tc->size;
1930 				size_t new_used = TC_HDR_SIZE + size;
1931 				new_ptr = start;
1932 
1933 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1934 				{
1935 					/*
1936 					 * The area from
1937 					 * start -> tc may have
1938 					 * been freed and thus been marked as
1939 					 * VALGRIND_MEM_NOACCESS. Set it to
1940 					 * VALGRIND_MEM_UNDEFINED so we can
1941 					 * copy into it without valgrind errors.
1942 					 * We can't just mark
1943 					 * new_ptr -> new_ptr + old_used
1944 					 * as this may overlap on top of tc,
1945 					 * (which is why we use memmove, not
1946 					 * memcpy below) hence the MIN.
1947 					 */
1948 					size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1949 					VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1950 				}
1951 #endif
1952 
1953 				memmove(new_ptr, tc, old_used);
1954 
1955 				tc = (struct talloc_chunk *)new_ptr;
1956 				TC_UNDEFINE_GROW_CHUNK(tc, size);
1957 
1958 				/*
1959 				 * first we do not align the pool pointer
1960 				 * because we want to invalidate the padding
1961 				 * too.
1962 				 */
1963 				pool_hdr->end = new_used + (char *)new_ptr;
1964 				tc_invalidate_pool(pool_hdr);
1965 
1966 				/* now the aligned pointer */
1967 				pool_hdr->end = new_chunk_size + (char *)new_ptr;
1968 				goto got_new_ptr;
1969 			}
1970 
1971 			next_tc = NULL;
1972 		}
1973 
1974 		if (new_chunk_size == old_chunk_size) {
1975 			TC_UNDEFINE_GROW_CHUNK(tc, size);
1976 			_talloc_chunk_set_not_free(tc);
1977 			tc->size = size;
1978 			return ptr;
1979 		}
1980 
1981 		if (next_tc == pool_hdr->end) {
1982 			/*
1983 			 * optimize for the case where 'tc' is the last
1984 			 * chunk in the pool.
1985 			 */
1986 			space_needed = new_chunk_size - old_chunk_size;
1987 			space_left = tc_pool_space_left(pool_hdr);
1988 
1989 			if (space_left >= space_needed) {
1990 				TC_UNDEFINE_GROW_CHUNK(tc, size);
1991 				_talloc_chunk_set_not_free(tc);
1992 				tc->size = size;
1993 				pool_hdr->end = tc_next_chunk(tc);
1994 				return ptr;
1995 			}
1996 		}
1997 
1998 		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1999 
2000 		if (new_ptr == NULL) {
2001 			new_ptr = malloc(TC_HDR_SIZE+size);
2002 			malloced = true;
2003 			new_size = size;
2004 		}
2005 
2006 		if (new_ptr) {
2007 			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2008 
2009 			_tc_free_poolmem(tc, __location__ "_talloc_realloc");
2010 		}
2011 	}
2012 	else {
2013 		/* We're doing realloc here, so record the difference. */
2014 		old_size = tc->size;
2015 		new_size = size;
2016 		new_ptr = realloc(tc, size + TC_HDR_SIZE);
2017 	}
2018 got_new_ptr:
2019 #endif
2020 	if (unlikely(!new_ptr)) {
2021 		/*
2022 		 * Ok, this is a strange spot.  We have to put back
2023 		 * the old talloc_magic and any flags, except the
2024 		 * TALLOC_FLAG_FREE as this was not freed by the
2025 		 * realloc() call after all
2026 		 */
2027 		_talloc_chunk_set_not_free(tc);
2028 		return NULL;
2029 	}
2030 
2031 	/*
2032 	 * tc is now the new value from realloc(), the old memory we
2033 	 * can't access any more and was preemptively marked as
2034 	 * TALLOC_FLAG_FREE before the call.  Now we mark it as not
2035 	 * free again
2036 	 */
2037 	tc = (struct talloc_chunk *)new_ptr;
2038 	_talloc_chunk_set_not_free(tc);
2039 	if (malloced) {
2040 		tc->flags &= ~TALLOC_FLAG_POOLMEM;
2041 	}
2042 	if (tc->parent) {
2043 		tc->parent->child = tc;
2044 	}
2045 	if (tc->child) {
2046 		tc->child->parent = tc;
2047 	}
2048 
2049 	if (tc->prev) {
2050 		tc->prev->next = tc;
2051 	}
2052 	if (tc->next) {
2053 		tc->next->prev = tc;
2054 	}
2055 
2056 	if (new_size > old_size) {
2057 		talloc_memlimit_grow(tc->limit, new_size - old_size);
2058 	} else if (new_size < old_size) {
2059 		talloc_memlimit_shrink(tc->limit, old_size - new_size);
2060 	}
2061 
2062 	tc->size = size;
2063 	_tc_set_name_const(tc, name);
2064 
2065 	return TC_PTR_FROM_CHUNK(tc);
2066 }
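/*
  Illustrative sketch, not part of the library: callers normally use the
  type-safe talloc_realloc() macro rather than _talloc_realloc() directly.
  Assuming an existing TALLOC_CTX *mem_ctx, growing an array looks like
  this; on failure the old allocation is left untouched, so keep it in a
  temporary:

	int *nums = talloc_array(mem_ctx, int, 10);
	int *tmp;

	tmp = talloc_realloc(mem_ctx, nums, int, 20);
	if (tmp == NULL) {
		// "nums" is still valid here
	} else {
		nums = tmp;
	}
*/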
2067 
2068 /*
2069   a wrapper around talloc_steal() for situations where you are moving a pointer
2070   between two structures, and want the old pointer to be set to NULL
2071 */
2072 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2073 {
2074 	const void **pptr = discard_const_p(const void *,_pptr);
2075 	void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2076 	(*pptr) = NULL;
2077 	return ret;
2078 }
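/*
  Sketch of the intended talloc_move() pattern (illustrative only; the
  struct and field names are hypothetical).  The macro wraps _talloc_move()
  and NULLs the source pointer, so ownership is never ambiguous:

	struct request { char *name; };
	struct reply   { char *name; };

	static void take_name(struct reply *out, struct request *in)
	{
		out->name = talloc_move(out, &in->name);
		// in->name is now NULL; out->name is parented on "out"
	}
*/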
2079 
2080 enum talloc_mem_count_type {
2081 	TOTAL_MEM_SIZE,
2082 	TOTAL_MEM_BLOCKS,
2083 	TOTAL_MEM_LIMIT,
2084 };
2085 
2086 static inline size_t _talloc_total_mem_internal(const void *ptr,
2087 					 enum talloc_mem_count_type type,
2088 					 struct talloc_memlimit *old_limit,
2089 					 struct talloc_memlimit *new_limit)
2090 {
2091 	size_t total = 0;
2092 	struct talloc_chunk *c, *tc;
2093 
2094 	if (ptr == NULL) {
2095 		ptr = null_context;
2096 	}
2097 	if (ptr == NULL) {
2098 		return 0;
2099 	}
2100 
2101 	tc = talloc_chunk_from_ptr(ptr);
2102 
2103 	if (old_limit || new_limit) {
2104 		if (tc->limit && tc->limit->upper == old_limit) {
2105 			tc->limit->upper = new_limit;
2106 		}
2107 	}
2108 
2109 	/* optimize in the memlimits case */
2110 	if (type == TOTAL_MEM_LIMIT &&
2111 	    tc->limit != NULL &&
2112 	    tc->limit != old_limit &&
2113 	    tc->limit->parent == tc) {
2114 		return tc->limit->cur_size;
2115 	}
2116 
2117 	if (tc->flags & TALLOC_FLAG_LOOP) {
2118 		return 0;
2119 	}
2120 
2121 	tc->flags |= TALLOC_FLAG_LOOP;
2122 
2123 	if (old_limit || new_limit) {
2124 		if (old_limit == tc->limit) {
2125 			tc->limit = new_limit;
2126 		}
2127 	}
2128 
2129 	switch (type) {
2130 	case TOTAL_MEM_SIZE:
2131 		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2132 			total = tc->size;
2133 		}
2134 		break;
2135 	case TOTAL_MEM_BLOCKS:
2136 		total++;
2137 		break;
2138 	case TOTAL_MEM_LIMIT:
2139 		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2140 			/*
2141 			 * Don't count memory allocated from a pool
2142 			 * when calculating limits. Only count the
2143 			 * pool itself.
2144 			 */
2145 			if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2146 				if (tc->flags & TALLOC_FLAG_POOL) {
2147 					/*
2148 					 * If this is a pool, the allocated
2149 					 * size is in the pool header, and
2150 					 * remember to add in the prefix
2151 					 * length.
2152 					 */
2153 					struct talloc_pool_hdr *pool_hdr
2154 							= talloc_pool_from_chunk(tc);
2155 					total = pool_hdr->poolsize +
2156 							TC_HDR_SIZE +
2157 							TP_HDR_SIZE;
2158 				} else {
2159 					total = tc->size + TC_HDR_SIZE;
2160 				}
2161 			}
2162 		}
2163 		break;
2164 	}
2165 	for (c = tc->child; c; c = c->next) {
2166 		total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2167 						    old_limit, new_limit);
2168 	}
2169 
2170 	tc->flags &= ~TALLOC_FLAG_LOOP;
2171 
2172 	return total;
2173 }
2174 
2175 /*
2176   return the total size of a talloc pool (subtree)
2177 */
2178 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2179 {
2180 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2181 }
2182 
2183 /*
2184   return the total number of blocks in a talloc pool (subtree)
2185 */
2186 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2187 {
2188 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2189 }
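/*
  Illustrative sketch (names made up): both helpers walk the whole
  subtree, so they can be used for rough accounting of a context
  hierarchy:

	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	char *a = talloc_strdup(mem_ctx, "abc");
	int *b = talloc_array(mem_ctx, int, 4);

	size_t bytes  = talloc_total_size(mem_ctx);   // payload bytes, headers not included
	size_t blocks = talloc_total_blocks(mem_ctx); // 3: mem_ctx, a and b
*/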
2190 
2191 /*
2192   return the number of external references to a pointer
2193 */
2194 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2195 {
2196 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2197 	struct talloc_reference_handle *h;
2198 	size_t ret = 0;
2199 
2200 	for (h=tc->refs;h;h=h->next) {
2201 		ret++;
2202 	}
2203 	return ret;
2204 }
2205 
2206 /*
2207   report on memory usage by all children of a pointer, giving a full tree view
2208 */
2209 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2210 			    void (*callback)(const void *ptr,
2211 			  		     int depth, int max_depth,
2212 					     int is_ref,
2213 					     void *private_data),
2214 			    void *private_data)
2215 {
2216 	struct talloc_chunk *c, *tc;
2217 
2218 	if (ptr == NULL) {
2219 		ptr = null_context;
2220 	}
2221 	if (ptr == NULL) return;
2222 
2223 	tc = talloc_chunk_from_ptr(ptr);
2224 
2225 	if (tc->flags & TALLOC_FLAG_LOOP) {
2226 		return;
2227 	}
2228 
2229 	callback(ptr, depth, max_depth, 0, private_data);
2230 
2231 	if (max_depth >= 0 && depth >= max_depth) {
2232 		return;
2233 	}
2234 
2235 	tc->flags |= TALLOC_FLAG_LOOP;
2236 	for (c=tc->child;c;c=c->next) {
2237 		if (c->name == TALLOC_MAGIC_REFERENCE) {
2238 			struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2239 			callback(h->ptr, depth + 1, max_depth, 1, private_data);
2240 		} else {
2241 			talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2242 		}
2243 	}
2244 	tc->flags &= ~TALLOC_FLAG_LOOP;
2245 }
2246 
2247 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2248 {
2249 	const char *name = __talloc_get_name(ptr);
2250 	struct talloc_chunk *tc;
2251 	FILE *f = (FILE *)_f;
2252 
2253 	if (is_ref) {
2254 		fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2255 		return;
2256 	}
2257 
2258 	tc = talloc_chunk_from_ptr(ptr);
2259 	if (tc->limit && tc->limit->parent == tc) {
2260 		fprintf(f, "%*s%-30s is a memlimit context"
2261 			" (max_size = %lu bytes, cur_size = %lu bytes)\n",
2262 			depth*4, "",
2263 			name,
2264 			(unsigned long)tc->limit->max_size,
2265 			(unsigned long)tc->limit->cur_size);
2266 	}
2267 
2268 	if (depth == 0) {
2269 		fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2270 			(max_depth < 0 ? "full " :""), name,
2271 			(unsigned long)talloc_total_size(ptr),
2272 			(unsigned long)talloc_total_blocks(ptr));
2273 		return;
2274 	}
2275 
2276 	fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2277 		depth*4, "",
2278 		name,
2279 		(unsigned long)talloc_total_size(ptr),
2280 		(unsigned long)talloc_total_blocks(ptr),
2281 		(int)talloc_reference_count(ptr), ptr);
2282 
2283 #if 0
2284 	fprintf(f, "content: ");
2285 	if (talloc_total_size(ptr)) {
2286 		int tot = talloc_total_size(ptr);
2287 		int i;
2288 
2289 		for (i = 0; i < tot; i++) {
2290 			if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2291 				fprintf(f, "%c", ((char *)ptr)[i]);
2292 			} else {
2293 				fprintf(f, "~%02x", ((char *)ptr)[i]);
2294 			}
2295 		}
2296 	}
2297 	fprintf(f, "\n");
2298 #endif
2299 }
2300 
2301 /*
2302   report on memory usage by all children of a pointer, giving a full tree view
2303 */
2304 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2305 {
2306 	if (f) {
2307 		talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2308 		fflush(f);
2309 	}
2310 }
2311 
2312 /*
2313   report on memory usage by all children of a pointer, giving a full tree view
2314 */
2315 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2316 {
2317 	talloc_report_depth_file(ptr, 0, -1, f);
2318 }
2319 
2320 /*
2321   report on memory usage by all children of a pointer
2322 */
2323 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2324 {
2325 	talloc_report_depth_file(ptr, 0, 1, f);
2326 }
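/*
  Illustrative sketch (names made up): dumping the tree under a context.
  talloc_report() shows only the immediate children, while
  talloc_report_full() recurses through the whole hierarchy:

	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	// ... allocate things under mem_ctx ...
	talloc_report(mem_ctx, stderr);       // one level
	talloc_report_full(mem_ctx, stderr);  // full tree
*/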
2327 
2328 /*
2329   enable tracking of the NULL context
2330 */
2331 _PUBLIC_ void talloc_enable_null_tracking(void)
2332 {
2333 	if (null_context == NULL) {
2334 		null_context = _talloc_named_const(NULL, 0, "null_context");
2335 		if (autofree_context != NULL) {
2336 			talloc_reparent(NULL, null_context, autofree_context);
2337 		}
2338 	}
2339 }
2340 
2341 /*
2342   enable tracking of the NULL context, not moving the autofree context
2343   into the NULL context. This is needed for the talloc testsuite
2344 */
2345 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2346 {
2347 	if (null_context == NULL) {
2348 		null_context = _talloc_named_const(NULL, 0, "null_context");
2349 	}
2350 }
2351 
2352 /*
2353   disable tracking of the NULL context
2354 */
2355 _PUBLIC_ void talloc_disable_null_tracking(void)
2356 {
2357 	if (null_context != NULL) {
2358 		/* we have to move any children onto the real NULL
2359 		   context */
2360 		struct talloc_chunk *tc, *tc2;
2361 		tc = talloc_chunk_from_ptr(null_context);
2362 		for (tc2 = tc->child; tc2; tc2=tc2->next) {
2363 			if (tc2->parent == tc) tc2->parent = NULL;
2364 			if (tc2->prev == tc) tc2->prev = NULL;
2365 		}
2366 		for (tc2 = tc->next; tc2; tc2=tc2->next) {
2367 			if (tc2->parent == tc) tc2->parent = NULL;
2368 			if (tc2->prev == tc) tc2->prev = NULL;
2369 		}
2370 		tc->child = NULL;
2371 		tc->next = NULL;
2372 	}
2373 	talloc_free(null_context);
2374 	null_context = NULL;
2375 }
2376 
2377 /*
2378   enable leak reporting on exit
2379 */
2380 _PUBLIC_ void talloc_enable_leak_report(void)
2381 {
2382 	talloc_enable_null_tracking();
2383 	talloc_report_null = true;
2384 	talloc_setup_atexit();
2385 }
2386 
2387 /*
2388   enable full leak reporting on exit
2389 */
2390 _PUBLIC_ void talloc_enable_leak_report_full(void)
2391 {
2392 	talloc_enable_null_tracking();
2393 	talloc_report_null_full = true;
2394 	talloc_setup_atexit();
2395 }
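/*
  Usage note, not library code: a program that wants an automatic leak
  report enables it early in main(), before the allocations it wants
  tracked:

	int main(void)
	{
		talloc_enable_leak_report_full();
		// ... program body using talloc ...
		return 0;   // anything still parented on NULL is reported at exit
	}
*/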
2396 
2397 /*
2398    talloc and zero memory.
2399 */
2400 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2401 {
2402 	void *p = _talloc_named_const(ctx, size, name);
2403 
2404 	if (p) {
2405 		memset(p, '\0', size);
2406 	}
2407 
2408 	return p;
2409 }
2410 
2411 /*
2412   memdup with a talloc.
2413 */
2414 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2415 {
2416 	void *newp = NULL;
2417 
2418 	if (likely(size > 0) && unlikely(p == NULL)) {
2419 		return NULL;
2420 	}
2421 
2422 	newp = _talloc_named_const(t, size, name);
2423 	if (likely(newp != NULL) && likely(size > 0)) {
2424 		memcpy(newp, p, size);
2425 	}
2426 
2427 	return newp;
2428 }
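/*
  Illustrative sketch (the struct is hypothetical): talloc_memdup() takes
  a talloc-owned copy of a plain buffer:

	struct blob { unsigned char *data; size_t length; };

	static struct blob copy_blob(TALLOC_CTX *mem_ctx, struct blob in)
	{
		struct blob out = { .length = in.length };
		out.data = talloc_memdup(mem_ctx, in.data, in.length);
		return out;
	}
*/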
2429 
2430 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2431 {
2432 	char *ret;
2433 	struct talloc_chunk *tc;
2434 
2435 	ret = (char *)__talloc(t, len + 1, &tc);
2436 	if (unlikely(!ret)) return NULL;
2437 
2438 	memcpy(ret, p, len);
2439 	ret[len] = 0;
2440 
2441 	_tc_set_name_const(tc, ret);
2442 	return ret;
2443 }
2444 
2445 /*
2446   strdup with a talloc
2447 */
2448 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2449 {
2450 	if (unlikely(!p)) return NULL;
2451 	return __talloc_strlendup(t, p, strlen(p));
2452 }
2453 
2454 /*
2455   strndup with a talloc
2456 */
2457 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2458 {
2459 	if (unlikely(!p)) return NULL;
2460 	return __talloc_strlendup(t, p, strnlen(p, n));
2461 }
2462 
2463 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2464 					      const char *a, size_t alen)
2465 {
2466 	char *ret;
2467 
2468 	ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2469 	if (unlikely(!ret)) return NULL;
2470 
2471 	/* append the string and the trailing \0 */
2472 	memcpy(&ret[slen], a, alen);
2473 	ret[slen+alen] = 0;
2474 
2475 	_tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2476 	return ret;
2477 }
2478 
2479 /*
2480  * Appends at the end of the string.
2481  */
2482 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2483 {
2484 	if (unlikely(!s)) {
2485 		return talloc_strdup(NULL, a);
2486 	}
2487 
2488 	if (unlikely(!a)) {
2489 		return s;
2490 	}
2491 
2492 	return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2493 }
2494 
2495 /*
2496  * Appends at the end of the talloc'ed buffer,
2497  * not the end of the string.
2498  */
2499 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2500 {
2501 	size_t slen;
2502 
2503 	if (unlikely(!s)) {
2504 		return talloc_strdup(NULL, a);
2505 	}
2506 
2507 	if (unlikely(!a)) {
2508 		return s;
2509 	}
2510 
2511 	slen = talloc_get_size(s);
2512 	if (likely(slen > 0)) {
2513 		slen--;
2514 	}
2515 
2516 	return __talloc_strlendup_append(s, slen, a, strlen(a));
2517 }
2518 
2519 /*
2520  * Appends at the end of the string.
2521  */
2522 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2523 {
2524 	if (unlikely(!s)) {
2525 		return talloc_strndup(NULL, a, n);
2526 	}
2527 
2528 	if (unlikely(!a)) {
2529 		return s;
2530 	}
2531 
2532 	return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2533 }
2534 
2535 /*
2536  * Appends at the end of the talloc'ed buffer,
2537  * not the end of the string.
2538  */
2539 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2540 {
2541 	size_t slen;
2542 
2543 	if (unlikely(!s)) {
2544 		return talloc_strndup(NULL, a, n);
2545 	}
2546 
2547 	if (unlikely(!a)) {
2548 		return s;
2549 	}
2550 
2551 	slen = talloc_get_size(s);
2552 	if (likely(slen > 0)) {
2553 		slen--;
2554 	}
2555 
2556 	return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2557 }
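/*
  Illustrative sketch (assuming an existing TALLOC_CTX *mem_ctx): the
  plain _append variants append at the first NUL, while the _append_buffer
  variants append at talloc_get_size(s) - 1, which avoids a strlen() and
  gives the same result for ordinary NUL-terminated talloc strings:

	char *s = talloc_strdup(mem_ctx, "key=");
	s = talloc_strdup_append(s, "value");        // "key=value"
	s = talloc_strdup_append_buffer(s, ";");     // "key=value;", no strlen()
*/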
2558 
2559 #ifndef HAVE_VA_COPY
2560 #ifdef HAVE___VA_COPY
2561 #define va_copy(dest, src) __va_copy(dest, src)
2562 #else
2563 #define va_copy(dest, src) (dest) = (src)
2564 #endif
2565 #endif
2566 
2567 static struct talloc_chunk *_vasprintf_tc(const void *t,
2568 					  const char *fmt,
2569 					  va_list ap) PRINTF_ATTRIBUTE(2,0);
2570 
2571 static struct talloc_chunk *_vasprintf_tc(const void *t,
2572 					  const char *fmt,
2573 					  va_list ap)
2574 {
2575 	int vlen;
2576 	size_t len;
2577 	char *ret;
2578 	va_list ap2;
2579 	struct talloc_chunk *tc;
2580 	char buf[1024];
2581 
2582 	/* this call looks strange, but it makes it work on older solaris boxes */
2583 	va_copy(ap2, ap);
2584 	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2585 	va_end(ap2);
2586 	if (unlikely(vlen < 0)) {
2587 		return NULL;
2588 	}
2589 	len = vlen;
2590 	if (unlikely(len + 1 < len)) {
2591 		return NULL;
2592 	}
2593 
2594 	ret = (char *)__talloc(t, len+1, &tc);
2595 	if (unlikely(!ret)) return NULL;
2596 
2597 	if (len < sizeof(buf)) {
2598 		memcpy(ret, buf, len+1);
2599 	} else {
2600 		va_copy(ap2, ap);
2601 		vsnprintf(ret, len+1, fmt, ap2);
2602 		va_end(ap2);
2603 	}
2604 
2605 	_tc_set_name_const(tc, ret);
2606 	return tc;
2607 }
2608 
2609 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2610 {
2611 	struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2612 	if (tc == NULL) {
2613 		return NULL;
2614 	}
2615 	return TC_PTR_FROM_CHUNK(tc);
2616 }
2617 
2618 
2619 /*
2620   Perform string formatting, and return a pointer to newly allocated
2621   memory holding the result, inside a memory pool.
2622  */
2623 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2624 {
2625 	va_list ap;
2626 	char *ret;
2627 
2628 	va_start(ap, fmt);
2629 	ret = talloc_vasprintf(t, fmt, ap);
2630 	va_end(ap);
2631 	return ret;
2632 }
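/*
  Illustrative sketch (assuming an existing TALLOC_CTX *mem_ctx and the
  made-up variables name and count): the result is a child of the given
  context and is freed with it:

	char *msg = talloc_asprintf(mem_ctx, "user %s has %d items", name, count);
	if (msg == NULL) {
		// allocation failed
	}
*/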
2633 
2634 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2635 						 const char *fmt, va_list ap)
2636 						 PRINTF_ATTRIBUTE(3,0);
2637 
2638 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2639 						 const char *fmt, va_list ap)
2640 {
2641 	ssize_t alen;
2642 	va_list ap2;
2643 	char c;
2644 
2645 	va_copy(ap2, ap);
2646 	alen = vsnprintf(&c, 1, fmt, ap2);
2647 	va_end(ap2);
2648 
2649 	if (alen <= 0) {
2650 		/* Either the vsnprintf failed or the format resulted in
2651 		 * no characters being formatted. In the former case, we
2652 		 * ought to return NULL, in the latter we ought to return
2653 		 * the original string. Most current callers of this
2654 		 * function expect it to never return NULL.
2655 		 */
2656 		return s;
2657 	}
2658 
2659 	s = talloc_realloc(NULL, s, char, slen + alen + 1);
2660 	if (!s) return NULL;
2661 
2662 	va_copy(ap2, ap);
2663 	vsnprintf(s + slen, alen + 1, fmt, ap2);
2664 	va_end(ap2);
2665 
2666 	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
2667 	return s;
2668 }
2669 
2670 /**
2671  * Realloc @p s to append the formatted result of @p fmt and @p ap,
2672  * and return @p s, which may have moved.  Good for gradually
2673  * accumulating output into a string buffer. Appends at the end
2674  * of the string.
2675  **/
2676 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2677 {
2678 	if (unlikely(!s)) {
2679 		return talloc_vasprintf(NULL, fmt, ap);
2680 	}
2681 
2682 	return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2683 }
2684 
2685 /**
2686  * Realloc @p s to append the formatted result of @p fmt and @p ap,
2687  * and return @p s, which may have moved. Always appends at the
2688  * end of the talloc'ed buffer, not the end of the string.
2689  **/
2690 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2691 {
2692 	size_t slen;
2693 
2694 	if (unlikely(!s)) {
2695 		return talloc_vasprintf(NULL, fmt, ap);
2696 	}
2697 
2698 	slen = talloc_get_size(s);
2699 	if (likely(slen > 0)) {
2700 		slen--;
2701 	}
2702 
2703 	return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2704 }
2705 
2706 /*
2707   Realloc @p s to append the formatted result of @p fmt and return @p
2708   s, which may have moved.  Good for gradually accumulating output
2709   into a string buffer.
2710  */
2711 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2712 {
2713 	va_list ap;
2714 
2715 	va_start(ap, fmt);
2716 	s = talloc_vasprintf_append(s, fmt, ap);
2717 	va_end(ap);
2718 	return s;
2719 }
2720 
2721 /*
2722   Realloc @p s to append the formatted result of @p fmt and return @p
2723   s, which may have moved.  Good for gradually accumulating output
2724   into a buffer.
2725  */
2726 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2727 {
2728 	va_list ap;
2729 
2730 	va_start(ap, fmt);
2731 	s = talloc_vasprintf_append_buffer(s, fmt, ap);
2732 	va_end(ap);
2733 	return s;
2734 }
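/*
  Illustrative sketch (names made up): building a string incrementally.
  The _buffer variant is the usual choice in loops, since it appends at
  the end of the allocation instead of re-running strlen() every call:

	char *out = talloc_strdup(mem_ctx, "");
	size_t i;

	for (i = 0; i < n; i++) {
		out = talloc_asprintf_append_buffer(out, "%s\n", lines[i]);
		if (out == NULL) {
			break;   // allocation failure
		}
	}
*/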
2735 
2736 /*
2737   alloc an array, checking for integer overflow in the array size
2738 */
2739 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2740 {
2741 	if (count >= MAX_TALLOC_SIZE/el_size) {
2742 		return NULL;
2743 	}
2744 	return _talloc_named_const(ctx, el_size * count, name);
2745 }
2746 
2747 /*
2748   alloc a zeroed array, checking for integer overflow in the array size
2749 */
2750 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2751 {
2752 	if (count >= MAX_TALLOC_SIZE/el_size) {
2753 		return NULL;
2754 	}
2755 	return _talloc_zero(ctx, el_size * count, name);
2756 }
2757 
2758 /*
2759   realloc an array, checking for integer overflow in the array size
2760 */
2761 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2762 {
2763 	if (count >= MAX_TALLOC_SIZE/el_size) {
2764 		return NULL;
2765 	}
2766 	return _talloc_realloc(ctx, ptr, el_size * count, name);
2767 }
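/*
  Illustrative sketch ("struct item" and mem_ctx are made up): the
  type-safe array macros map onto these helpers, which guard against
  overflow in el_size * count:

	struct item *items  = talloc_array(mem_ctx, struct item, 16);
	struct item *zeroed = talloc_zero_array(mem_ctx, struct item, 16);

	items = talloc_realloc(mem_ctx, items, struct item, 32);  // grow later
*/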
2768 
2769 /*
2770   a function version of talloc_realloc(), so it can be passed as a function pointer
2771   to libraries that want a realloc function (a realloc function encapsulates
2772   all the basic capabilities of an allocation library, which is why this is useful)
2773 */
2774 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2775 {
2776 	return _talloc_realloc(context, ptr, size, NULL);
2777 }
2778 
2779 
2780 static int talloc_autofree_destructor(void *ptr)
2781 {
2782 	autofree_context = NULL;
2783 	return 0;
2784 }
2785 
2786 /*
2787   return a context which will be auto-freed on exit
2788   this is useful for reducing the noise in leak reports
2789 */
2790 _PUBLIC_ void *talloc_autofree_context(void)
2791 {
2792 	if (autofree_context == NULL) {
2793 		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2794 		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2795 		talloc_setup_atexit();
2796 	}
2797 	return autofree_context;
2798 }
2799 
2800 _PUBLIC_ size_t talloc_get_size(const void *context)
2801 {
2802 	struct talloc_chunk *tc;
2803 
2804 	if (context == NULL) {
2805 		return 0;
2806 	}
2807 
2808 	tc = talloc_chunk_from_ptr(context);
2809 
2810 	return tc->size;
2811 }
2812 
2813 /*
2814   find a parent of this context that has the given name, if any
2815 */
2816 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2817 {
2818 	struct talloc_chunk *tc;
2819 
2820 	if (context == NULL) {
2821 		return NULL;
2822 	}
2823 
2824 	tc = talloc_chunk_from_ptr(context);
2825 	while (tc) {
2826 		if (tc->name && strcmp(tc->name, name) == 0) {
2827 			return TC_PTR_FROM_CHUNK(tc);
2828 		}
2829 		while (tc && tc->prev) tc = tc->prev;
2830 		if (tc) {
2831 			tc = tc->parent;
2832 		}
2833 	}
2834 	return NULL;
2835 }
2836 
2837 /*
2838   show the parentage of a context
2839 */
2840 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2841 {
2842 	struct talloc_chunk *tc;
2843 
2844 	if (context == NULL) {
2845 		fprintf(file, "talloc no parents for NULL\n");
2846 		return;
2847 	}
2848 
2849 	tc = talloc_chunk_from_ptr(context);
2850 	fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2851 	while (tc) {
2852 		fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2853 		while (tc && tc->prev) tc = tc->prev;
2854 		if (tc) {
2855 			tc = tc->parent;
2856 		}
2857 	}
2858 	fflush(file);
2859 }
2860 
2861 /*
2862   return 1 if ptr is a parent of context
2863 */
2864 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2865 {
2866 	struct talloc_chunk *tc;
2867 
2868 	if (context == NULL) {
2869 		return 0;
2870 	}
2871 
2872 	tc = talloc_chunk_from_ptr(context);
2873 	while (tc) {
2874 		if (depth <= 0) {
2875 			return 0;
2876 		}
2877 		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2878 		while (tc && tc->prev) tc = tc->prev;
2879 		if (tc) {
2880 			tc = tc->parent;
2881 			depth--;
2882 		}
2883 	}
2884 	return 0;
2885 }
2886 
2887 /*
2888   return 1 if ptr is a parent of context
2889 */
2890 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2891 {
2892 	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2893 }
2894 
2895 /*
2896   return the total size of memory used by this context and all children
2897 */
2898 static inline size_t _talloc_total_limit_size(const void *ptr,
2899 					struct talloc_memlimit *old_limit,
2900 					struct talloc_memlimit *new_limit)
2901 {
2902 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2903 					  old_limit, new_limit);
2904 }
2905 
2906 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2907 {
2908 	struct talloc_memlimit *l;
2909 
2910 	for (l = limit; l != NULL; l = l->upper) {
2911 		if (l->max_size != 0 &&
2912 		    ((l->max_size <= l->cur_size) ||
2913 		     (l->max_size - l->cur_size < size))) {
2914 			return false;
2915 		}
2916 	}
2917 
2918 	return true;
2919 }
2920 
2921 /*
2922   Update memory limits when freeing a talloc_chunk.
2923 */
2924 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2925 {
2926 	size_t limit_shrink_size;
2927 
2928 	if (!tc->limit) {
2929 		return;
2930 	}
2931 
2932 	/*
2933 	 * Pool entries don't count. Only the pools
2934 	 * themselves are counted as part of the memory
2935 	 * limits. Note that this also takes care of
2936 	 * nested pools which have both flags
2937 	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2938 	 */
2939 	if (tc->flags & TALLOC_FLAG_POOLMEM) {
2940 		return;
2941 	}
2942 
2943 	/*
2944 	 * If we are part of a memory limited context hierarchy
2945 	 * we need to subtract the memory used from the counters
2946 	 */
2947 
2948 	limit_shrink_size = tc->size+TC_HDR_SIZE;
2949 
2950 	/*
2951 	 * If we're deallocating a pool, take into
2952 	 * account the prefix size added for the pool.
2953 	 */
2954 
2955 	if (tc->flags & TALLOC_FLAG_POOL) {
2956 		limit_shrink_size += TP_HDR_SIZE;
2957 	}
2958 
2959 	talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2960 
2961 	if (tc->limit->parent == tc) {
2962 		free(tc->limit);
2963 	}
2964 
2965 	tc->limit = NULL;
2966 }
2967 
2968 /*
2969   Increase memory limit accounting after a malloc/realloc.
2970 */
2971 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2972 				size_t size)
2973 {
2974 	struct talloc_memlimit *l;
2975 
2976 	for (l = limit; l != NULL; l = l->upper) {
2977 		size_t new_cur_size = l->cur_size + size;
2978 		if (new_cur_size < l->cur_size) {
2979 			talloc_abort("logic error in talloc_memlimit_grow\n");
2980 			return;
2981 		}
2982 		l->cur_size = new_cur_size;
2983 	}
2984 }
2985 
2986 /*
2987   Decrease memory limit accounting after a free/realloc.
2988 */
2989 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2990 				size_t size)
2991 {
2992 	struct talloc_memlimit *l;
2993 
2994 	for (l = limit; l != NULL; l = l->upper) {
2995 		if (l->cur_size < size) {
2996 			talloc_abort("logic error in talloc_memlimit_shrink\n");
2997 			return;
2998 		}
2999 		l->cur_size = l->cur_size - size;
3000 	}
3001 }
3002 
3003 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3004 {
3005 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3006 	struct talloc_memlimit *orig_limit;
3007 	struct talloc_memlimit *limit = NULL;
3008 
3009 	if (tc->limit && tc->limit->parent == tc) {
3010 		tc->limit->max_size = max_size;
3011 		return 0;
3012 	}
3013 	orig_limit = tc->limit;
3014 
3015 	limit = malloc(sizeof(struct talloc_memlimit));
3016 	if (limit == NULL) {
3017 		return 1;
3018 	}
3019 	limit->parent = tc;
3020 	limit->max_size = max_size;
3021 	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3022 
3023 	if (orig_limit) {
3024 		limit->upper = orig_limit;
3025 	} else {
3026 		limit->upper = NULL;
3027 	}
3028 
3029 	return 0;
3030 }
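/*
  Illustrative sketch (names made up): capping the memory used by a whole
  context hierarchy.  Once the limit is installed, allocations under it
  that would exceed the limit fail with NULL and errno set to ENOMEM:

	TALLOC_CTX *limited_ctx = talloc_new(NULL);

	if (talloc_set_memlimit(limited_ctx, 1024 * 1024) != 0) {
		// out of memory while installing the limit
	}

	void *big = talloc_size(limited_ctx, 2 * 1024 * 1024);
	// big == NULL here, errno == ENOMEM
*/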
3031