1 /*
2    Samba Unix SMB/CIFS implementation.
3 
4    Samba trivial allocation library - new interface
5 
6    NOTE: Please read talloc_guide.txt for full documentation
7 
8    Copyright (C) Andrew Tridgell 2004
9    Copyright (C) Stefan Metzmacher 2006
10 
11      ** NOTE! The following LGPL license applies to the talloc
12      ** library. This does NOT imply that all of Samba is released
13      ** under the LGPL
14 
15    This library is free software; you can redistribute it and/or
16    modify it under the terms of the GNU Lesser General Public
17    License as published by the Free Software Foundation; either
18    version 3 of the License, or (at your option) any later version.
19 
20    This library is distributed in the hope that it will be useful,
21    but WITHOUT ANY WARRANTY; without even the implied warranty of
22    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23    Lesser General Public License for more details.
24 
25    You should have received a copy of the GNU Lesser General Public
26    License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 */
28 
29 /*
30   inspired by http://swapped.cc/halloc/
31 */
32 
33 #include "replace.h"
34 #include "talloc.h"
35 
36 #ifdef HAVE_SYS_AUXV_H
37 #include <sys/auxv.h>
38 #endif
39 
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42 #endif
43 
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46 #endif
47 
48 /* Special macros that are no-ops except when run under Valgrind on
49  * x86.  They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51         /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
54 #include <valgrind.h>
55 #endif
56 
57 /* use this to force every realloc to change the pointer, to stress test
58    code that might not cope */
59 #define ALWAYS_REALLOC 0
60 
61 
62 #define MAX_TALLOC_SIZE 0x10000000
63 
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04		/* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08	/* This is allocated in a pool */
68 
69 /*
70  * Bits above this are random, used to make it harder to fake talloc
71  * headers during an attack.  Try not to change this without good reason.
72  */
73 #define TALLOC_FLAG_MASK 0x0F
74 
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
76 
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 	~TALLOC_FLAG_MASK & ( \
80 		TALLOC_MAGIC_BASE + \
81 		(TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 		(TALLOC_BUILD_VERSION_MINOR << 16) + \
83 		(TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
85 
86 /* by default we abort when given a bad pointer (such as when talloc_free() is called
87    on a pointer that came from malloc()) */
88 #ifndef TALLOC_ABORT
89 #define TALLOC_ABORT(reason) abort()
90 #endif
91 
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
95 #else
96 # define discard_const_p(type, ptr) ((type *)(ptr))
97 #endif
98 #endif
99 
100 /* these macros gain us a few percent of speed on gcc */
101 #if (__GNUC__ >= 3)
102 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
103    as its first argument */
104 #ifndef likely
105 #define likely(x)   __builtin_expect(!!(x), 1)
106 #endif
107 #ifndef unlikely
108 #define unlikely(x) __builtin_expect(!!(x), 0)
109 #endif
110 #else
111 #ifndef likely
112 #define likely(x) (x)
113 #endif
114 #ifndef unlikely
115 #define unlikely(x) (x)
116 #endif
117 #endif
118 
119 /* this null_context is only used if talloc_enable_leak_report() or
120    talloc_enable_leak_report_full() is called, otherwise it remains
121    NULL
122 */
123 static void *null_context;
124 static bool talloc_report_null;
125 static bool talloc_report_null_full;
126 static void *autofree_context;
127 
128 static void talloc_setup_atexit(void);
129 
130 /* used to enable fill of memory on free, which can be useful for
131  * catching use-after-free errors when valgrind is too slow
132  */
133 static struct {
134 	bool initialised;
135 	bool enabled;
136 	uint8_t fill_value;
137 } talloc_fill;
138 
139 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
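/*
 * Usage sketch (illustrative, not part of the library): the fill value is
 * read from the TALLOC_FREE_FILL environment variable the first time
 * talloc_free() runs, so a test run can be started as, e.g.
 *
 *     TALLOC_FREE_FILL=0xfe ./mytest
 *
 * after which the payload of every freed chunk is memset() to 0xfe, making
 * use-after-free reads stand out even without valgrind.
 */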
140 
141 /*
142  * do not wipe the header, to allow the
143  * double-free logic to still work
144  */
145 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
146 	if (unlikely(talloc_fill.enabled)) { \
147 		size_t _flen = (_tc)->size; \
148 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
149 		memset(_fptr, talloc_fill.fill_value, _flen); \
150 	} \
151 } while (0)
152 
153 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
154 /* Mark the whole chunk as not accessible */
155 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
156 	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
157 	char *_fptr = (char *)(_tc); \
158 	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
159 } while(0)
160 #else
161 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
162 #endif
163 
164 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
165 	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
166 	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
167 } while (0)
168 
169 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
170 	if (unlikely(talloc_fill.enabled)) { \
171 		size_t _flen = (_tc)->size - (_new_size); \
172 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
173 		_fptr += (_new_size); \
174 		memset(_fptr, talloc_fill.fill_value, _flen); \
175 	} \
176 } while (0)
177 
178 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
179 /* Mark the unused bytes as not accessible */
180 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
181 	size_t _flen = (_tc)->size - (_new_size); \
182 	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
183 	_fptr += (_new_size); \
184 	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
185 } while (0)
186 #else
187 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
188 #endif
189 
190 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
191 	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
192 	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
193 } while (0)
194 
195 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
196 	if (unlikely(talloc_fill.enabled)) { \
197 		size_t _flen = (_tc)->size - (_new_size); \
198 		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
199 		_fptr += (_new_size); \
200 		memset(_fptr, talloc_fill.fill_value, _flen); \
201 	} \
202 } while (0)
203 
204 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
205 /* Mark the unused bytes as undefined */
206 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
207 	size_t _flen = (_tc)->size - (_new_size); \
208 	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
209 	_fptr += (_new_size); \
210 	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
211 } while (0)
212 #else
213 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
214 #endif
215 
216 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
217 	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
218 	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
219 } while (0)
220 
221 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
222 /* Mark the new bytes as undefined */
223 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
224 	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
225 	size_t _new_used = TC_HDR_SIZE + (_new_size); \
226 	size_t _flen = _new_used - _old_used; \
227 	char *_fptr = _old_used + (char *)(_tc); \
228 	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
229 } while (0)
230 #else
231 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
232 #endif
233 
234 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
235 	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
236 } while (0)
237 
238 struct talloc_reference_handle {
239 	struct talloc_reference_handle *next, *prev;
240 	void *ptr;
241 	const char *location;
242 };
243 
244 struct talloc_memlimit {
245 	struct talloc_chunk *parent;
246 	struct talloc_memlimit *upper;
247 	size_t max_size;
248 	size_t cur_size;
249 };
250 
251 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
252 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
253 				size_t size);
254 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
255 				size_t size);
256 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
257 
258 static inline void _tc_set_name_const(struct talloc_chunk *tc,
259 				const char *name);
260 static struct talloc_chunk *_vasprintf_tc(const void *t,
261 				const char *fmt,
262 				va_list ap);
263 
264 typedef int (*talloc_destructor_t)(void *);
265 
266 struct talloc_pool_hdr;
267 
268 struct talloc_chunk {
269 	/*
270 	 * flags includes the talloc magic, which is randomised to
271 	 * make overwrite attacks harder
272 	 */
273 	unsigned flags;
274 
275 	/*
276 	 * If you have a logical tree like:
277 	 *
278 	 *           <parent>
279 	 *           /   |   \
280 	 *          /    |    \
281 	 *         /     |     \
282 	 * <child 1> <child 2> <child 3>
283 	 *
284 	 * The actual talloc tree is:
285 	 *
286 	 *  <parent>
287 	 *     |
288 	 *  <child 1> - <child 2> - <child 3>
289 	 *
290 	 * The children are linked with next/prev pointers, and
291 	 * child 1 is linked to the parent with parent/child
292 	 * pointers.
293 	 */
294 
295 	struct talloc_chunk *next, *prev;
296 	struct talloc_chunk *parent, *child;
297 	struct talloc_reference_handle *refs;
298 	talloc_destructor_t destructor;
299 	const char *name;
300 	size_t size;
301 
302 	/*
303 	 * limit semantics:
304 	 * if 'limit' is set it means all *new* children of the context will
305 	 * be limited to a total aggregate size of max_size for memory
306 	 * allocations.
307 	 * cur_size is used to keep track of the current use
308 	 */
309 	struct talloc_memlimit *limit;
310 
311 	/*
312 	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
313 	 * is a pointer to the struct talloc_chunk of the pool that it was
314 	 * allocated from. This way children can quickly find the pool to chew
315 	 * from.
316 	 */
317 	struct talloc_pool_hdr *pool;
318 };
319 
320 union talloc_chunk_cast_u {
321 	uint8_t *ptr;
322 	struct talloc_chunk *chunk;
323 };
324 
325 /* 16 byte alignment seems to keep everyone happy */
326 #define TC_ALIGN16(s) (((s)+15)&~15)
327 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
328 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
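/*
 * Layout sketch (illustrative): a talloc pointer always sits directly after
 * its 16-byte-aligned header, so the macros above and talloc_chunk_from_ptr()
 * below are plain offset calculations:
 *
 *     [struct talloc_chunk .. padded to TC_HDR_SIZE][user data, tc->size bytes]
 *      ^-- tc                                        ^-- TC_PTR_FROM_CHUNK(tc)
 */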
329 
330 _PUBLIC_ int talloc_version_major(void)
331 {
332 	return TALLOC_VERSION_MAJOR;
333 }
334 
335 _PUBLIC_ int talloc_version_minor(void)
336 {
337 	return TALLOC_VERSION_MINOR;
338 }
339 
340 _PUBLIC_ int talloc_test_get_magic(void)
341 {
342 	return talloc_magic;
343 }
344 
345 static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
346 			      const char *location)
347 {
348 	/*
349 	 * Mark this memory as free, and also over-stamp the talloc
350 	 * magic with the old-style magic.
351 	 *
352 	 * Why?  This tries to avoid a memory read use-after-free from
353 	 * disclosing our talloc magic, which would then allow an
354 	 * attacker to prepare a valid header and so run a destructor.
355 	 *
356 	 */
357 	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
358 		| (tc->flags & TALLOC_FLAG_MASK);
359 
360 	/* we mark the freed memory with where we called the free
361 	 * from. This means on a double free error we can report where
362 	 * the first free came from
363 	 */
364 	if (location) {
365 		tc->name = location;
366 	}
367 }
368 
369 static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
370 {
371 	/*
372 	 * Mark this memory as not free.
373 	 *
374 	 * Why? This is memory either in a pool (and so available for
375 	 * talloc's re-use) or memory just returned by realloc().  We had
376 	 * to mark the memory as free before any realloc() call, as we
377 	 * can't write to the memory after that.
378 	 *
379 	 * We put back the normal magic instead of the 'not random'
380 	 * magic.
381 	 */
382 
383 	tc->flags = talloc_magic |
384 		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
385 }
386 
387 static void (*talloc_log_fn)(const char *message);
388 
389 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
390 {
391 	talloc_log_fn = log_fn;
392 }
393 
394 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
395 void talloc_lib_init(void) __attribute__((constructor));
396 void talloc_lib_init(void)
397 {
398 	uint32_t random_value;
399 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
400 	uint8_t *p;
401 	/*
402 	 * Use the kernel-provided random values used for
403 	 * ASLR.  This won't change per-exec, which is ideal for us
404 	 */
405 	p = (uint8_t *) getauxval(AT_RANDOM);
406 	if (p) {
407 		/*
408 		 * We get 16 bytes from getauxval.  By calling rand(),
409 		 * a totally insecure PRNG, but one that will
410 		 * deterministically have a different value when called
411 		 * twice, we ensure that if two talloc-like libraries
412 		 * are somehow loaded in the same address space, that
413 		 * because we choose different bytes, we will keep the
414 		 * protection against collision of multiple talloc
415 		 * libs.
416 		 *
417 		 * This protection is important because the effects of
418 		 * passing a talloc pointer from one to the other may
419 		 * be very hard to determine.
420 		 */
421 		int offset = rand() % (16 - sizeof(random_value));
422 		memcpy(&random_value, p + offset, sizeof(random_value));
423 	} else
424 #endif
425 	{
426 		/*
427 		 * Otherwise, hope the location we are loaded in
428 		 * memory is randomised by someone else
429 		 */
430 		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
431 	}
432 	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
433 }
434 #else
435 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
436 #endif
437 
438 static void talloc_lib_atexit(void)
439 {
440 	TALLOC_FREE(autofree_context);
441 
442 	if (talloc_total_size(null_context) == 0) {
443 		return;
444 	}
445 
446 	if (talloc_report_null_full) {
447 		talloc_report_full(null_context, stderr);
448 	} else if (talloc_report_null) {
449 		talloc_report(null_context, stderr);
450 	}
451 }
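/*
 * Illustrative usage (assumes the public talloc_enable_leak_report_full()
 * declared in talloc.h, which sets the flags checked above): a program that
 * wants this report printed on exit enables it early in main():
 *
 *     int main(void)
 *     {
 *             talloc_enable_leak_report_full();
 *             ...
 *     }
 */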
452 
453 static void talloc_setup_atexit(void)
454 {
455 	static bool done;
456 
457 	if (done) {
458 		return;
459 	}
460 
461 	atexit(talloc_lib_atexit);
462 	done = true;
463 }
464 
465 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
466 static void talloc_log(const char *fmt, ...)
467 {
468 	va_list ap;
469 	char *message;
470 
471 	if (!talloc_log_fn) {
472 		return;
473 	}
474 
475 	va_start(ap, fmt);
476 	message = talloc_vasprintf(NULL, fmt, ap);
477 	va_end(ap);
478 
479 	talloc_log_fn(message);
480 	talloc_free(message);
481 }
482 
483 static void talloc_log_stderr(const char *message)
484 {
485 	fprintf(stderr, "%s", message);
486 }
487 
488 _PUBLIC_ void talloc_set_log_stderr(void)
489 {
490 	talloc_set_log_fn(talloc_log_stderr);
491 }
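/*
 * A minimal sketch of installing a custom logger (the callback type matches
 * talloc_set_log_fn() above; the syslog() sink is only an example):
 *
 *     static void my_talloc_log(const char *message)
 *     {
 *             syslog(LOG_ERR, "talloc: %s", message);
 *     }
 *
 *     ...
 *     talloc_set_log_fn(my_talloc_log);
 */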
492 
493 static void (*talloc_abort_fn)(const char *reason);
494 
495 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
496 {
497 	talloc_abort_fn = abort_fn;
498 }
499 
500 static void talloc_abort(const char *reason)
501 {
502 	talloc_log("%s\n", reason);
503 
504 	if (!talloc_abort_fn) {
505 		TALLOC_ABORT(reason);
506 	}
507 
508 	talloc_abort_fn(reason);
509 }
510 
511 static void talloc_abort_access_after_free(void)
512 {
513 	talloc_abort("Bad talloc magic value - access after free");
514 }
515 
516 static void talloc_abort_unknown_value(void)
517 {
518 	talloc_abort("Bad talloc magic value - unknown value");
519 }
520 
521 /* panic if we get a bad magic value */
522 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
523 {
524 	const char *pp = (const char *)ptr;
525 	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
526 	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
527 		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
528 		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
529 			talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
530 			talloc_abort_access_after_free();
531 			return NULL;
532 		}
533 
534 		talloc_abort_unknown_value();
535 		return NULL;
536 	}
537 	return tc;
538 }
539 
540 /* hook into the front of the list */
541 #define _TLIST_ADD(list, p) \
542 do { \
543         if (!(list)) { \
544 		(list) = (p); \
545 		(p)->next = (p)->prev = NULL; \
546 	} else { \
547 		(list)->prev = (p); \
548 		(p)->next = (list); \
549 		(p)->prev = NULL; \
550 		(list) = (p); \
551 	}\
552 } while (0)
553 
554 /* remove an element from a list - element doesn't have to be in list. */
555 #define _TLIST_REMOVE(list, p) \
556 do { \
557 	if ((p) == (list)) { \
558 		(list) = (p)->next; \
559 		if (list) (list)->prev = NULL; \
560 	} else { \
561 		if ((p)->prev) (p)->prev->next = (p)->next; \
562 		if ((p)->next) (p)->next->prev = (p)->prev; \
563 	} \
564 	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
565 } while (0)
566 
567 
568 /*
569   return the parent chunk of a pointer
570 */
571 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
572 {
573 	struct talloc_chunk *tc;
574 
575 	if (unlikely(ptr == NULL)) {
576 		return NULL;
577 	}
578 
579 	tc = talloc_chunk_from_ptr(ptr);
580 	while (tc->prev) tc=tc->prev;
581 
582 	return tc->parent;
583 }
584 
585 _PUBLIC_ void *talloc_parent(const void *ptr)
586 {
587 	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
588 	return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
589 }
590 
591 /*
592   find the parent's name
593 */
594 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
595 {
596 	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
597 	return tc? tc->name : NULL;
598 }
599 
600 /*
601   A pool carries an in-pool object count in the first 16
602   bytes. This is done to support talloc_steal() to a parent outside of the
603   pool. The count includes the pool itself, so a talloc_free() on a pool will
604   only destroy the pool if the count has dropped to zero. A talloc_free() of a
605   pool member will reduce the count, and eventually also call free(3) on the
606   pool memory.
607 
608   The object count is not put into "struct talloc_chunk" because it is only
609   relevant for talloc pools and the alignment to 16 bytes would increase the
610   memory footprint of each talloc chunk by those 16 bytes.
611 */
612 
613 struct talloc_pool_hdr {
614 	void *end;
615 	unsigned int object_count;
616 	size_t poolsize;
617 };
618 
619 union talloc_pool_hdr_cast_u {
620 	uint8_t *ptr;
621 	struct talloc_pool_hdr *hdr;
622 };
623 
624 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
625 
626 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
627 {
628 	union talloc_chunk_cast_u tcc = { .chunk = c };
629 	union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE };
630 	return tphc.hdr;
631 }
632 
633 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
634 {
635 	union talloc_pool_hdr_cast_u tphc = { .hdr = h };
636 	union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE };
637 	return tcc.chunk;
638 }
639 
640 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
641 {
642 	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
643 	return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
644 }
645 
646 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
647 {
648 	return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
649 }
650 
651 /* If tc is inside a pool, this gives the next neighbour. */
652 static inline void *tc_next_chunk(struct talloc_chunk *tc)
653 {
654 	return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
655 }
656 
657 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
658 {
659 	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
660 	return tc_next_chunk(tc);
661 }
662 
663 /* Mark the whole remaining pool as not accessible */
664 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
665 {
666 	size_t flen = tc_pool_space_left(pool_hdr);
667 
668 	if (unlikely(talloc_fill.enabled)) {
669 		memset(pool_hdr->end, talloc_fill.fill_value, flen);
670 	}
671 
672 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
673 	VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
674 #endif
675 }
676 
677 /*
678   Allocate from a pool
679 */
680 
681 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
682 						     size_t size, size_t prefix_len)
683 {
684 	struct talloc_pool_hdr *pool_hdr = NULL;
685 	union talloc_chunk_cast_u tcc;
686 	size_t space_left;
687 	struct talloc_chunk *result;
688 	size_t chunk_size;
689 
690 	if (parent == NULL) {
691 		return NULL;
692 	}
693 
694 	if (parent->flags & TALLOC_FLAG_POOL) {
695 		pool_hdr = talloc_pool_from_chunk(parent);
696 	}
697 	else if (parent->flags & TALLOC_FLAG_POOLMEM) {
698 		pool_hdr = parent->pool;
699 	}
700 
701 	if (pool_hdr == NULL) {
702 		return NULL;
703 	}
704 
705 	space_left = tc_pool_space_left(pool_hdr);
706 
707 	/*
708 	 * Align size to 16 bytes
709 	 */
710 	chunk_size = TC_ALIGN16(size + prefix_len);
711 
712 	if (space_left < chunk_size) {
713 		return NULL;
714 	}
715 
716 	tcc = (union talloc_chunk_cast_u) {
717 		.ptr = ((uint8_t *)pool_hdr->end) + prefix_len
718 	};
719 	result = tcc.chunk;
720 
721 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
722 	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
723 #endif
724 
725 	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
726 
727 	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
728 	result->pool = pool_hdr;
729 
730 	pool_hdr->object_count++;
731 
732 	return result;
733 }
734 
735 /*
736    Allocate a bit of memory as a child of an existing pointer
737 */
738 static inline void *__talloc_with_prefix(const void *context,
739 					size_t size,
740 					size_t prefix_len,
741 					struct talloc_chunk **tc_ret)
742 {
743 	struct talloc_chunk *tc = NULL;
744 	struct talloc_memlimit *limit = NULL;
745 	size_t total_len = TC_HDR_SIZE + size + prefix_len;
746 	struct talloc_chunk *parent = NULL;
747 
748 	if (unlikely(context == NULL)) {
749 		context = null_context;
750 	}
751 
752 	if (unlikely(size >= MAX_TALLOC_SIZE)) {
753 		return NULL;
754 	}
755 
756 	if (unlikely(total_len < TC_HDR_SIZE)) {
757 		return NULL;
758 	}
759 
760 	if (likely(context != NULL)) {
761 		parent = talloc_chunk_from_ptr(context);
762 
763 		if (parent->limit != NULL) {
764 			limit = parent->limit;
765 		}
766 
767 		tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
768 	}
769 
770 	if (tc == NULL) {
771 		uint8_t *ptr = NULL;
772 		union talloc_chunk_cast_u tcc;
773 
774 		/*
775 		 * Only do the memlimit check/update on actual allocation.
776 		 */
777 		if (!talloc_memlimit_check(limit, total_len)) {
778 			errno = ENOMEM;
779 			return NULL;
780 		}
781 
782 		ptr = malloc(total_len);
783 		if (unlikely(ptr == NULL)) {
784 			return NULL;
785 		}
786 		tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len };
787 		tc = tcc.chunk;
788 		tc->flags = talloc_magic;
789 		tc->pool  = NULL;
790 
791 		talloc_memlimit_grow(limit, total_len);
792 	}
793 
794 	tc->limit = limit;
795 	tc->size = size;
796 	tc->destructor = NULL;
797 	tc->child = NULL;
798 	tc->name = NULL;
799 	tc->refs = NULL;
800 
801 	if (likely(context != NULL)) {
802 		if (parent->child) {
803 			parent->child->parent = NULL;
804 			tc->next = parent->child;
805 			tc->next->prev = tc;
806 		} else {
807 			tc->next = NULL;
808 		}
809 		tc->parent = parent;
810 		tc->prev = NULL;
811 		parent->child = tc;
812 	} else {
813 		tc->next = tc->prev = tc->parent = NULL;
814 	}
815 
816 	*tc_ret = tc;
817 	return TC_PTR_FROM_CHUNK(tc);
818 }
819 
820 static inline void *__talloc(const void *context,
821 			size_t size,
822 			struct talloc_chunk **tc)
823 {
824 	return __talloc_with_prefix(context, size, 0, tc);
825 }
826 
827 /*
828  * Create a talloc pool
829  */
830 
831 static inline void *_talloc_pool(const void *context, size_t size)
832 {
833 	struct talloc_chunk *tc;
834 	struct talloc_pool_hdr *pool_hdr;
835 	void *result;
836 
837 	result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
838 
839 	if (unlikely(result == NULL)) {
840 		return NULL;
841 	}
842 
843 	pool_hdr = talloc_pool_from_chunk(tc);
844 
845 	tc->flags |= TALLOC_FLAG_POOL;
846 	tc->size = 0;
847 
848 	pool_hdr->object_count = 1;
849 	pool_hdr->end = result;
850 	pool_hdr->poolsize = size;
851 
852 	tc_invalidate_pool(pool_hdr);
853 
854 	return result;
855 }
856 
857 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
858 {
859 	return _talloc_pool(context, size);
860 }
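/*
 * Illustrative pool usage (a sketch; struct foo is hypothetical): allocations
 * parented anywhere under the pool are carved out of the one pre-allocated
 * buffer while space remains, and freeing the pool releases everything at
 * once:
 *
 *     TALLOC_CTX *pool = talloc_pool(NULL, 16 * 1024);
 *     struct foo *f = talloc_zero(pool, struct foo);  // served from the pool
 *     char *s = talloc_strdup(f, "hello");            // also from the pool
 *     talloc_free(pool);                              // one free(3) underneath
 */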
861 
862 /*
863  * Create a talloc pool correctly sized for a basic size plus
864  * a number of subobjects whose total size is given. Essentially
865  * a custom allocator for talloc to reduce fragmentation.
866  */
867 
868 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
869 				     size_t type_size,
870 				     const char *type_name,
871 				     unsigned num_subobjects,
872 				     size_t total_subobjects_size)
873 {
874 	size_t poolsize, subobjects_slack, tmp;
875 	struct talloc_chunk *tc;
876 	struct talloc_pool_hdr *pool_hdr;
877 	void *ret;
878 
879 	poolsize = type_size + total_subobjects_size;
880 
881 	if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
882 		goto overflow;
883 	}
884 
885 	if (num_subobjects == UINT_MAX) {
886 		goto overflow;
887 	}
888 	num_subobjects += 1;       /* the object body itself */
889 
890 	/*
891 	 * Alignment can increase the pool size by at most 15 bytes per object
892 	 * plus alignment for the object itself
893 	 */
894 	subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
895 	if (subobjects_slack < num_subobjects) {
896 		goto overflow;
897 	}
898 
899 	tmp = poolsize + subobjects_slack;
900 	if ((tmp < poolsize) || (tmp < subobjects_slack)) {
901 		goto overflow;
902 	}
903 	poolsize = tmp;
904 
905 	ret = _talloc_pool(ctx, poolsize);
906 	if (ret == NULL) {
907 		return NULL;
908 	}
909 
910 	tc = talloc_chunk_from_ptr(ret);
911 	tc->size = type_size;
912 
913 	pool_hdr = talloc_pool_from_chunk(tc);
914 
915 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
916 	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
917 #endif
918 
919 	pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
920 
921 	_tc_set_name_const(tc, type_name);
922 	return ret;
923 
924 overflow:
925 	return NULL;
926 }
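/*
 * Hedged example of the intended call pattern, via the talloc_pooled_object()
 * macro from talloc.h that wraps this function (struct foo and its two string
 * members are hypothetical):
 *
 *     struct foo *f = talloc_pooled_object(ctx, struct foo,
 *                                          2,  // number of sub-allocations
 *                                          strlen(a) + strlen(b) + 2);
 *     f->name = talloc_strdup(f, a);   // carved from the same pool,
 *     f->addr = talloc_strdup(f, b);   // no further malloc() needed
 */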
927 
928 /*
929   set up a destructor to be called on free of a pointer.
930   the destructor should return 0 on success, or -1 on failure.
931   if the destructor fails then the free fails as well, and the memory
932   can continue to be used
933 */
934 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
935 {
936 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
937 	tc->destructor = destructor;
938 }
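/*
 * A short sketch of the destructor contract described above (the
 * talloc_set_destructor() macro in talloc.h accepts the typed function;
 * struct connection and its fd member are hypothetical):
 *
 *     static int connection_destructor(struct connection *c)
 *     {
 *             close(c->fd);
 *             return 0;    // 0 lets the free proceed, -1 would veto it
 *     }
 *
 *     struct connection *c = talloc(ctx, struct connection);
 *     talloc_set_destructor(c, connection_destructor);
 */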
939 
940 /*
941   increase the reference count on a piece of memory.
942 */
943 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
944 {
945 	if (unlikely(!talloc_reference(null_context, ptr))) {
946 		return -1;
947 	}
948 	return 0;
949 }
950 
951 /*
952   helper for talloc_reference()
953 
954   this is referenced by a function pointer and should not be inline
955 */
956 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
957 {
958 	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
959 	_TLIST_REMOVE(ptr_tc->refs, handle);
960 	return 0;
961 }
962 
963 /*
964    more efficient way to add a name to a pointer - the name must point to a
965    true string constant
966 */
967 static inline void _tc_set_name_const(struct talloc_chunk *tc,
968 					const char *name)
969 {
970 	tc->name = name;
971 }
972 
973 /*
974   internal talloc_named_const()
975 */
976 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
977 {
978 	void *ptr;
979 	struct talloc_chunk *tc;
980 
981 	ptr = __talloc(context, size, &tc);
982 	if (unlikely(ptr == NULL)) {
983 		return NULL;
984 	}
985 
986 	_tc_set_name_const(tc, name);
987 
988 	return ptr;
989 }
990 
991 /*
992   make a secondary reference to a pointer, hanging off the given context.
993   the pointer remains valid until both the original caller and this given
994   context are freed.
995 
996   the major use for this is when two different structures need to reference the
997   same underlying data, and you want to be able to free the two instances separately,
998   and in either order
999 */
1000 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
1001 {
1002 	struct talloc_chunk *tc;
1003 	struct talloc_reference_handle *handle;
1004 	if (unlikely(ptr == NULL)) return NULL;
1005 
1006 	tc = talloc_chunk_from_ptr(ptr);
1007 	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
1008 						   sizeof(struct talloc_reference_handle),
1009 						   TALLOC_MAGIC_REFERENCE);
1010 	if (unlikely(handle == NULL)) return NULL;
1011 
1012 	/* note that we hang the destructor off the handle, not the
1013 	   main context, as that allows the caller to still set up their
1014 	   own destructor on the context if they want to */
1015 	talloc_set_destructor(handle, talloc_reference_destructor);
1016 	handle->ptr = discard_const_p(void, ptr);
1017 	handle->location = location;
1018 	_TLIST_ADD(tc->refs, handle);
1019 	return handle->ptr;
1020 }
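/*
 * Illustrative reference lifetime (a sketch using the public
 * talloc_reference() macro; ctx1 and ctx2 are hypothetical contexts):
 *
 *     char *data = talloc_strdup(ctx1, "shared");
 *     talloc_reference(ctx2, data);  // now valid until BOTH owners are gone
 *     talloc_free(ctx1);             // data survives, reparented under ctx2
 *     talloc_free(ctx2);             // last owner gone: data is freed
 */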
1021 
1022 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1023 
1024 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1025 					const char *location)
1026 {
1027 	struct talloc_pool_hdr *pool;
1028 	struct talloc_chunk *pool_tc;
1029 	void *next_tc;
1030 
1031 	pool = tc->pool;
1032 	pool_tc = talloc_chunk_from_pool(pool);
1033 	next_tc = tc_next_chunk(tc);
1034 
1035 	_talloc_chunk_set_free(tc, location);
1036 
1037 	TC_INVALIDATE_FULL_CHUNK(tc);
1038 
1039 	if (unlikely(pool->object_count == 0)) {
1040 		talloc_abort("Pool object count zero!");
1041 		return;
1042 	}
1043 
1044 	pool->object_count--;
1045 
1046 	if (unlikely(pool->object_count == 1
1047 		     && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1048 		/*
1049 		 * if there is just one object left in the pool
1050 		 * and pool_tc->flags does not have TALLOC_FLAG_FREE,
1051 		 * it means this remaining object is the pool itself and
1052 		 * the rest is available for new objects
1053 		 * again.
1054 		 */
1055 		pool->end = tc_pool_first_chunk(pool);
1056 		tc_invalidate_pool(pool);
1057 		return;
1058 	}
1059 
1060 	if (unlikely(pool->object_count == 0)) {
1061 		/*
1062 		 * we mark the freed memory with where we called the free
1063 		 * from. This means on a double free error we can report where
1064 		 * the first free came from
1065 		 */
1066 		pool_tc->name = location;
1067 
1068 		if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1069 			_tc_free_poolmem(pool_tc, location);
1070 		} else {
1071 			/*
1072 			 * The tc_memlimit_update_on_free()
1073 			 * call takes into account the
1074 			 * prefix TP_HDR_SIZE allocated before
1075 			 * the pool talloc_chunk.
1076 			 */
1077 			tc_memlimit_update_on_free(pool_tc);
1078 			TC_INVALIDATE_FULL_CHUNK(pool_tc);
1079 			free(pool);
1080 		}
1081 		return;
1082 	}
1083 
1084 	if (pool->end == next_tc) {
1085 		/*
1086 		 * if pool->end still points to the end of
1087 		 * 'tc' (which is stored in the 'next_tc' variable),
1088 		 * we can reclaim the memory of 'tc'.
1089 		 */
1090 		pool->end = tc;
1091 		return;
1092 	}
1093 
1094 	/*
1095 	 * Do nothing. The memory is just "wasted", waiting for the pool
1096 	 * itself to be freed.
1097 	 */
1098 }
1099 
1100 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1101 						  void *ptr,
1102 						  const char *location);
1103 
1104 static inline int _talloc_free_internal(void *ptr, const char *location);
1105 
1106 /*
1107    internal free call that takes a struct talloc_chunk *.
1108 */
1109 static inline int _tc_free_internal(struct talloc_chunk *tc,
1110 				const char *location)
1111 {
1112 	void *ptr_to_free;
1113 	void *ptr = TC_PTR_FROM_CHUNK(tc);
1114 
1115 	if (unlikely(tc->refs)) {
1116 		int is_child;
1117 		/* check if this is a reference from a child or
1118 		 * grandchild back to its parent or grandparent
1119 		 *
1120 		 * in that case we need to remove the reference and
1121 		 * call another instance of talloc_free() on the current
1122 		 * pointer.
1123 		 */
1124 		is_child = talloc_is_parent(tc->refs, ptr);
1125 		_talloc_free_internal(tc->refs, location);
1126 		if (is_child) {
1127 			return _talloc_free_internal(ptr, location);
1128 		}
1129 		return -1;
1130 	}
1131 
1132 	if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1133 		/* we have a free loop - stop looping */
1134 		return 0;
1135 	}
1136 
1137 	if (unlikely(tc->destructor)) {
1138 		talloc_destructor_t d = tc->destructor;
1139 
1140 		/*
1141 		 * Protect the destructor against some overwrite
1142 		 * attacks, by explicitly checking it has the right
1143 		 * magic here.
1144 		 */
1145 		if (talloc_chunk_from_ptr(ptr) != tc) {
1146 			/*
1147 			 * This can't actually happen, the
1148 			 * call itself will panic.
1149 			 */
1150 			TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1151 		}
1152 
1153 		if (d == (talloc_destructor_t)-1) {
1154 			return -1;
1155 		}
1156 		tc->destructor = (talloc_destructor_t)-1;
1157 		if (d(ptr) == -1) {
1158 			/*
1159 			 * Only replace the destructor pointer if
1160 			 * calling the destructor didn't modify it.
1161 			 */
1162 			if (tc->destructor == (talloc_destructor_t)-1) {
1163 				tc->destructor = d;
1164 			}
1165 			return -1;
1166 		}
1167 		tc->destructor = NULL;
1168 	}
1169 
1170 	if (tc->parent) {
1171 		_TLIST_REMOVE(tc->parent->child, tc);
1172 		if (tc->parent->child) {
1173 			tc->parent->child->parent = tc->parent;
1174 		}
1175 	} else {
1176 		if (tc->prev) tc->prev->next = tc->next;
1177 		if (tc->next) tc->next->prev = tc->prev;
1178 		tc->prev = tc->next = NULL;
1179 	}
1180 
1181 	tc->flags |= TALLOC_FLAG_LOOP;
1182 
1183 	_tc_free_children_internal(tc, ptr, location);
1184 
1185 	_talloc_chunk_set_free(tc, location);
1186 
1187 	if (tc->flags & TALLOC_FLAG_POOL) {
1188 		struct talloc_pool_hdr *pool;
1189 
1190 		pool = talloc_pool_from_chunk(tc);
1191 
1192 		if (unlikely(pool->object_count == 0)) {
1193 			talloc_abort("Pool object count zero!");
1194 			return 0;
1195 		}
1196 
1197 		pool->object_count--;
1198 
1199 		if (likely(pool->object_count != 0)) {
1200 			return 0;
1201 		}
1202 
1203 		/*
1204 		 * With object_count==0, a pool becomes a normal piece of
1205 		 * memory to free. If it's allocated inside a pool, it needs
1206 		 * to be freed as poolmem, else it needs to be just freed.
1207 		*/
1208 		ptr_to_free = pool;
1209 	} else {
1210 		ptr_to_free = tc;
1211 	}
1212 
1213 	if (tc->flags & TALLOC_FLAG_POOLMEM) {
1214 		_tc_free_poolmem(tc, location);
1215 		return 0;
1216 	}
1217 
1218 	tc_memlimit_update_on_free(tc);
1219 
1220 	TC_INVALIDATE_FULL_CHUNK(tc);
1221 	free(ptr_to_free);
1222 	return 0;
1223 }
1224 
1225 /*
1226    internal talloc_free call
1227 */
1228 static inline int _talloc_free_internal(void *ptr, const char *location)
1229 {
1230 	struct talloc_chunk *tc;
1231 
1232 	if (unlikely(ptr == NULL)) {
1233 		return -1;
1234 	}
1235 
1236 	/* possibly initialise the talloc fill value */
1237 	if (unlikely(!talloc_fill.initialised)) {
1238 		const char *fill = getenv(TALLOC_FILL_ENV);
1239 		if (fill != NULL) {
1240 			talloc_fill.enabled = true;
1241 			talloc_fill.fill_value = strtoul(fill, NULL, 0);
1242 		}
1243 		talloc_fill.initialised = true;
1244 	}
1245 
1246 	tc = talloc_chunk_from_ptr(ptr);
1247 	return _tc_free_internal(tc, location);
1248 }
1249 
1250 static inline size_t _talloc_total_limit_size(const void *ptr,
1251 					struct talloc_memlimit *old_limit,
1252 					struct talloc_memlimit *new_limit);
1253 
1254 /*
1255    move a lump of memory from one talloc context to another. Return the
1256    ptr on success, or NULL if it could not be transferred.
1257    passing NULL as ptr will always return NULL with no side effects.
1258 */
1259 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1260 {
1261 	struct talloc_chunk *tc, *new_tc;
1262 	size_t ctx_size = 0;
1263 
1264 	if (unlikely(!ptr)) {
1265 		return NULL;
1266 	}
1267 
1268 	if (unlikely(new_ctx == NULL)) {
1269 		new_ctx = null_context;
1270 	}
1271 
1272 	tc = talloc_chunk_from_ptr(ptr);
1273 
1274 	if (tc->limit != NULL) {
1275 
1276 		ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1277 
1278 		/* Decrement the memory limit from the source .. */
1279 		talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1280 
1281 		if (tc->limit->parent == tc) {
1282 			tc->limit->upper = NULL;
1283 		} else {
1284 			tc->limit = NULL;
1285 		}
1286 	}
1287 
1288 	if (unlikely(new_ctx == NULL)) {
1289 		if (tc->parent) {
1290 			_TLIST_REMOVE(tc->parent->child, tc);
1291 			if (tc->parent->child) {
1292 				tc->parent->child->parent = tc->parent;
1293 			}
1294 		} else {
1295 			if (tc->prev) tc->prev->next = tc->next;
1296 			if (tc->next) tc->next->prev = tc->prev;
1297 		}
1298 
1299 		tc->parent = tc->next = tc->prev = NULL;
1300 		return discard_const_p(void, ptr);
1301 	}
1302 
1303 	new_tc = talloc_chunk_from_ptr(new_ctx);
1304 
1305 	if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1306 		return discard_const_p(void, ptr);
1307 	}
1308 
1309 	if (tc->parent) {
1310 		_TLIST_REMOVE(tc->parent->child, tc);
1311 		if (tc->parent->child) {
1312 			tc->parent->child->parent = tc->parent;
1313 		}
1314 	} else {
1315 		if (tc->prev) tc->prev->next = tc->next;
1316 		if (tc->next) tc->next->prev = tc->prev;
1317 		tc->prev = tc->next = NULL;
1318 	}
1319 
1320 	tc->parent = new_tc;
1321 	if (new_tc->child) new_tc->child->parent = NULL;
1322 	_TLIST_ADD(new_tc->child, tc);
1323 
1324 	if (tc->limit || new_tc->limit) {
1325 		ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1326 						    new_tc->limit);
1327 		/* .. and increment it in the destination. */
1328 		if (new_tc->limit) {
1329 			talloc_memlimit_grow(new_tc->limit, ctx_size);
1330 		}
1331 	}
1332 
1333 	return discard_const_p(void, ptr);
1334 }
1335 
1336 /*
1337    move a lump of memory from one talloc context to another. Return the
1338    ptr on success, or NULL if it could not be transferred.
1339    passing NULL as ptr will always return NULL with no side effects.
1340 */
1341 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1342 {
1343 	struct talloc_chunk *tc;
1344 
1345 	if (unlikely(ptr == NULL)) {
1346 		return NULL;
1347 	}
1348 
1349 	tc = talloc_chunk_from_ptr(ptr);
1350 
1351 	if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1352 		struct talloc_reference_handle *h;
1353 
1354 		talloc_log("WARNING: talloc_steal with references at %s\n",
1355 			   location);
1356 
1357 		for (h=tc->refs; h; h=h->next) {
1358 			talloc_log("\treference at %s\n",
1359 				   h->location);
1360 		}
1361 	}
1362 
1363 #if 0
1364 	/* this test is probably too expensive to have on in the
1365 	   normal build, but it is useful for debugging */
1366 	if (talloc_is_parent(new_ctx, ptr)) {
1367 		talloc_log("WARNING: stealing into talloc child at %s\n", location);
1368 	}
1369 #endif
1370 
1371 	return _talloc_steal_internal(new_ctx, ptr);
1372 }
1373 
1374 /*
1375    this is like a talloc_steal(), but you must supply the old
1376    parent. This resolves the ambiguity in a talloc_steal() which is
1377    called on a context that has more than one parent (via references)
1378 
1379    The old parent can be either a reference or a parent
1380 */
1381 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1382 {
1383 	struct talloc_chunk *tc;
1384 	struct talloc_reference_handle *h;
1385 
1386 	if (unlikely(ptr == NULL)) {
1387 		return NULL;
1388 	}
1389 
1390 	if (old_parent == talloc_parent(ptr)) {
1391 		return _talloc_steal_internal(new_parent, ptr);
1392 	}
1393 
1394 	tc = talloc_chunk_from_ptr(ptr);
1395 	for (h=tc->refs;h;h=h->next) {
1396 		if (talloc_parent(h) == old_parent) {
1397 			if (_talloc_steal_internal(new_parent, h) != h) {
1398 				return NULL;
1399 			}
1400 			return discard_const_p(void, ptr);
1401 		}
1402 	}
1403 
1404 	/* it wasn't a parent */
1405 	return NULL;
1406 }
1407 
1408 /*
1409   remove a secondary reference to a pointer. This undoes what
1410   talloc_reference() has done. The context and pointer arguments
1411   must match those given to a talloc_reference()
1412 */
1413 static inline int talloc_unreference(const void *context, const void *ptr)
1414 {
1415 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1416 	struct talloc_reference_handle *h;
1417 
1418 	if (unlikely(context == NULL)) {
1419 		context = null_context;
1420 	}
1421 
1422 	for (h=tc->refs;h;h=h->next) {
1423 		struct talloc_chunk *p = talloc_parent_chunk(h);
1424 		if (p == NULL) {
1425 			if (context == NULL) break;
1426 		} else if (TC_PTR_FROM_CHUNK(p) == context) {
1427 			break;
1428 		}
1429 	}
1430 	if (h == NULL) {
1431 		return -1;
1432 	}
1433 
1434 	return _talloc_free_internal(h, __location__);
1435 }
1436 
1437 /*
1438   remove a specific parent context from a pointer. This is a more
1439   controlled variant of talloc_free()
1440 */
1441 
1442 /* coverity[ -tainted_data_sink : arg-1 ] */
1443 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1444 {
1445 	struct talloc_chunk *tc_p, *new_p, *tc_c;
1446 	void *new_parent;
1447 
1448 	if (ptr == NULL) {
1449 		return -1;
1450 	}
1451 
1452 	if (context == NULL) {
1453 		context = null_context;
1454 	}
1455 
1456 	if (talloc_unreference(context, ptr) == 0) {
1457 		return 0;
1458 	}
1459 
1460 	if (context != NULL) {
1461 		tc_c = talloc_chunk_from_ptr(context);
1462 	} else {
1463 		tc_c = NULL;
1464 	}
1465 	if (tc_c != talloc_parent_chunk(ptr)) {
1466 		return -1;
1467 	}
1468 
1469 	tc_p = talloc_chunk_from_ptr(ptr);
1470 
1471 	if (tc_p->refs == NULL) {
1472 		return _talloc_free_internal(ptr, __location__);
1473 	}
1474 
1475 	new_p = talloc_parent_chunk(tc_p->refs);
1476 	if (new_p) {
1477 		new_parent = TC_PTR_FROM_CHUNK(new_p);
1478 	} else {
1479 		new_parent = NULL;
1480 	}
1481 
1482 	if (talloc_unreference(new_parent, ptr) != 0) {
1483 		return -1;
1484 	}
1485 
1486 	_talloc_steal_internal(new_parent, ptr);
1487 
1488 	return 0;
1489 }
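/*
 * Sketch of how this differs from plain talloc_free() (parent_a, parent_b
 * and p are hypothetical): talloc_unlink() severs one specific parent link
 * and only frees once no owner remains.
 *
 *     char *p = talloc_strdup(parent_a, "x");
 *     talloc_reference(parent_b, p);
 *     talloc_unlink(parent_a, p);   // p lives on, now owned via parent_b
 *     talloc_unlink(parent_b, p);   // last link removed: p is freed
 */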
1490 
1491 /*
1492   add a name to an existing pointer - va_list version
1493 */
1494 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1495 				const char *fmt,
1496 				va_list ap) PRINTF_ATTRIBUTE(2,0);
1497 
1498 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1499 				const char *fmt,
1500 				va_list ap)
1501 {
1502 	struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1503 							fmt,
1504 							ap);
1505 	if (likely(name_tc)) {
1506 		tc->name = TC_PTR_FROM_CHUNK(name_tc);
1507 		_tc_set_name_const(name_tc, ".name");
1508 	} else {
1509 		tc->name = NULL;
1510 	}
1511 	return tc->name;
1512 }
1513 
1514 /*
1515   add a name to an existing pointer
1516 */
1517 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1518 {
1519 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1520 	const char *name;
1521 	va_list ap;
1522 	va_start(ap, fmt);
1523 	name = tc_set_name_v(tc, fmt, ap);
1524 	va_end(ap);
1525 	return name;
1526 }
1527 
1528 
1529 /*
1530   create a named talloc pointer. Any talloc pointer can be named, and
1531   talloc_named() operates just like talloc() except that it allows you
1532   to name the pointer.
1533 */
1534 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1535 {
1536 	va_list ap;
1537 	void *ptr;
1538 	const char *name;
1539 	struct talloc_chunk *tc;
1540 
1541 	ptr = __talloc(context, size, &tc);
1542 	if (unlikely(ptr == NULL)) return NULL;
1543 
1544 	va_start(ap, fmt);
1545 	name = tc_set_name_v(tc, fmt, ap);
1546 	va_end(ap);
1547 
1548 	if (unlikely(name == NULL)) {
1549 		_talloc_free_internal(ptr, __location__);
1550 		return NULL;
1551 	}
1552 
1553 	return ptr;
1554 }
1555 
1556 /*
1557   return the name of a talloc ptr, or "UNNAMED"
1558 */
1559 static inline const char *__talloc_get_name(const void *ptr)
1560 {
1561 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1562 	if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1563 		return ".reference";
1564 	}
1565 	if (likely(tc->name)) {
1566 		return tc->name;
1567 	}
1568 	return "UNNAMED";
1569 }
1570 
1571 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1572 {
1573 	return __talloc_get_name(ptr);
1574 }
1575 
1576 /*
1577   check if a pointer has the given name. If it does, return the pointer,
1578   otherwise return NULL
1579 */
1580 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1581 {
1582 	const char *pname;
1583 	if (unlikely(ptr == NULL)) return NULL;
1584 	pname = __talloc_get_name(ptr);
1585 	if (likely(pname == name || strcmp(pname, name) == 0)) {
1586 		return discard_const_p(void, ptr);
1587 	}
1588 	return NULL;
1589 }
1590 
1591 static void talloc_abort_type_mismatch(const char *location,
1592 					const char *name,
1593 					const char *expected)
1594 {
1595 	const char *reason;
1596 
1597 	reason = talloc_asprintf(NULL,
1598 				 "%s: Type mismatch: name[%s] expected[%s]",
1599 				 location,
1600 				 name?name:"NULL",
1601 				 expected);
1602 	if (!reason) {
1603 		reason = "Type mismatch";
1604 	}
1605 
1606 	talloc_abort(reason);
1607 }
1608 
1609 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1610 {
1611 	const char *pname;
1612 
1613 	if (unlikely(ptr == NULL)) {
1614 		talloc_abort_type_mismatch(location, NULL, name);
1615 		return NULL;
1616 	}
1617 
1618 	pname = __talloc_get_name(ptr);
1619 	if (likely(pname == name || strcmp(pname, name) == 0)) {
1620 		return discard_const_p(void, ptr);
1621 	}
1622 
1623 	talloc_abort_type_mismatch(location, pname, name);
1624 	return NULL;
1625 }
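/*
 * Hedged example of the type-safety net this provides, via the
 * talloc_get_type_abort() macro from talloc.h (struct request is
 * hypothetical; private_data stands for an opaque callback argument):
 *
 *     struct request *req = talloc_get_type_abort(private_data, struct request);
 *
 * If private_data was not created as a "struct request", the name comparison
 * fails and talloc_abort_type_mismatch() terminates the process instead of
 * letting the caller continue with a mis-cast pointer.
 */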
1626 
1627 /*
1628   this is for compatibility with older versions of talloc
1629 */
1630 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1631 {
1632 	va_list ap;
1633 	void *ptr;
1634 	const char *name;
1635 	struct talloc_chunk *tc;
1636 
1637 	ptr = __talloc(NULL, 0, &tc);
1638 	if (unlikely(ptr == NULL)) return NULL;
1639 
1640 	va_start(ap, fmt);
1641 	name = tc_set_name_v(tc, fmt, ap);
1642 	va_end(ap);
1643 
1644 	if (unlikely(name == NULL)) {
1645 		_talloc_free_internal(ptr, __location__);
1646 		return NULL;
1647 	}
1648 
1649 	return ptr;
1650 }
1651 
1652 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1653 						  void *ptr,
1654 						  const char *location)
1655 {
1656 	while (tc->child) {
1657 		/* we need to work out who will own an abandoned child
1658 		   if it cannot be freed. In priority order, the first
1659 		   choice is owner of any remaining reference to this
1660 		   pointer, the second choice is our parent, and the
1661 		   final choice is the null context. */
1662 		void *child = TC_PTR_FROM_CHUNK(tc->child);
1663 		const void *new_parent = null_context;
1664 		if (unlikely(tc->child->refs)) {
1665 			struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1666 			if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1667 		}
1668 		if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1669 			if (talloc_parent_chunk(child) != tc) {
1670 				/*
1671 				 * Destructor already reparented this child.
1672 				 * No further reparenting needed.
1673 				 */
1674 				continue;
1675 			}
1676 			if (new_parent == null_context) {
1677 				struct talloc_chunk *p = talloc_parent_chunk(ptr);
1678 				if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1679 			}
1680 			_talloc_steal_internal(new_parent, child);
1681 		}
1682 	}
1683 }
1684 
1685 /*
1686   this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1687   should probably not be used in new code. It's in here to keep the talloc
1688   code consistent across Samba 3 and 4.
1689 */
1690 _PUBLIC_ void talloc_free_children(void *ptr)
1691 {
1692 	struct talloc_chunk *tc_name = NULL;
1693 	struct talloc_chunk *tc;
1694 
1695 	if (unlikely(ptr == NULL)) {
1696 		return;
1697 	}
1698 
1699 	tc = talloc_chunk_from_ptr(ptr);
1700 
1701 	/* we do not want to free the context name if it is a child .. */
1702 	if (likely(tc->child)) {
1703 		for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1704 			if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1705 		}
1706 		if (tc_name) {
1707 			_TLIST_REMOVE(tc->child, tc_name);
1708 			if (tc->child) {
1709 				tc->child->parent = tc;
1710 			}
1711 		}
1712 	}
1713 
1714 	_tc_free_children_internal(tc, ptr, __location__);
1715 
1716 	/* .. so we put it back after all other children have been freed */
1717 	if (tc_name) {
1718 		if (tc->child) {
1719 			tc->child->parent = NULL;
1720 		}
1721 		tc_name->parent = tc;
1722 		_TLIST_ADD(tc->child, tc_name);
1723 	}
1724 }
1725 
1726 /*
1727    Allocate a bit of memory as a child of an existing pointer
1728 */
1729 _PUBLIC_ void *_talloc(const void *context, size_t size)
1730 {
1731 	struct talloc_chunk *tc;
1732 	return __talloc(context, size, &tc);
1733 }
1734 
1735 /*
1736   externally callable talloc_set_name_const()
1737 */
1738 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1739 {
1740 	_tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1741 }
1742 
1743 /*
1744   create a named talloc pointer. Any talloc pointer can be named, and
1745   talloc_named() operates just like talloc() except that it allows you
1746   to name the pointer.
1747 */
1748 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1749 {
1750 	return _talloc_named_const(context, size, name);
1751 }
1752 
1753 /*
1754    free a talloc pointer. This also frees all child pointers of this
1755    pointer recursively
1756 
1757    return 0 if the memory is actually freed, otherwise -1. The memory
1758    will not be freed if the ref_count is > 1 or the destructor (if
1759    any) returns non-zero
1760 */
1761 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1762 {
1763 	struct talloc_chunk *tc;
1764 
1765 	if (unlikely(ptr == NULL)) {
1766 		return -1;
1767 	}
1768 
1769 	tc = talloc_chunk_from_ptr(ptr);
1770 
1771 	if (unlikely(tc->refs != NULL)) {
1772 		struct talloc_reference_handle *h;
1773 
1774 		if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1775 			/* in this case we do know which parent should
1776 			   get this pointer, as there is really only
1777 			   one parent */
1778 			return talloc_unlink(null_context, ptr);
1779 		}
1780 
1781 		talloc_log("ERROR: talloc_free with references at %s\n",
1782 			   location);
1783 
1784 		for (h=tc->refs; h; h=h->next) {
1785 			talloc_log("\treference at %s\n",
1786 				   h->location);
1787 		}
1788 		return -1;
1789 	}
1790 
1791 	return _talloc_free_internal(ptr, location);
1792 }
1793 
1794 
1795 
1796 /*
1797   A talloc version of realloc. The context argument is only used if
1798   ptr is NULL
1799 */
1800 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1801 {
1802 	struct talloc_chunk *tc;
1803 	void *new_ptr;
1804 	bool malloced = false;
1805 	struct talloc_pool_hdr *pool_hdr = NULL;
1806 	size_t old_size = 0;
1807 	size_t new_size = 0;
1808 
1809 	/* size zero is equivalent to free() */
1810 	if (unlikely(size == 0)) {
1811 		talloc_unlink(context, ptr);
1812 		return NULL;
1813 	}
1814 
1815 	if (unlikely(size >= MAX_TALLOC_SIZE)) {
1816 		return NULL;
1817 	}
1818 
1819 	/* realloc(NULL) is equivalent to malloc() */
1820 	if (ptr == NULL) {
1821 		return _talloc_named_const(context, size, name);
1822 	}
1823 
1824 	tc = talloc_chunk_from_ptr(ptr);
1825 
1826 	/* don't allow realloc on referenced pointers */
1827 	if (unlikely(tc->refs)) {
1828 		return NULL;
1829 	}
1830 
1831 	/* don't let anybody try to realloc a talloc_pool */
1832 	if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1833 		return NULL;
1834 	}
1835 
1836 	if (tc->limit && (size > tc->size)) {
1837 		if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1838 			errno = ENOMEM;
1839 			return NULL;
1840 		}
1841 	}
1842 
1843 	/* handle realloc inside a talloc_pool */
1844 	if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1845 		pool_hdr = tc->pool;
1846 	}
1847 
1848 #if (ALWAYS_REALLOC == 0)
1849 	/* don't shrink if we have less than 1k to gain */
1850 	if (size < tc->size && tc->limit == NULL) {
1851 		if (pool_hdr) {
1852 			void *next_tc = tc_next_chunk(tc);
1853 			TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1854 			tc->size = size;
1855 			if (next_tc == pool_hdr->end) {
1856 				/* note: tc->size has changed, so this works */
1857 				pool_hdr->end = tc_next_chunk(tc);
1858 			}
1859 			return ptr;
1860 		} else if ((tc->size - size) < 1024) {
1861 			/*
1862 			 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1863 			 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1864 			 * after each realloc call, which slows down
1865 			 * testing a lot :-(.
1866 			 *
1867 			 * That is why we only mark memory as undefined here.
1868 			 */
1869 			TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1870 
1871 			/* do not shrink if we have less than 1k to gain */
1872 			tc->size = size;
1873 			return ptr;
1874 		}
1875 	} else if (tc->size == size) {
1876 		/*
1877 		 * do not change the pointer if it is exactly
1878 		 * the same size.
1879 		 */
1880 		return ptr;
1881 	}
1882 #endif
1883 
1884 	/*
1885 	 * by resetting magic we catch users of the old memory
1886 	 *
1887 	 * We mark this memory as free, and also over-stamp the talloc
1888 	 * magic with the old-style magic.
1889 	 *
1890 	 * Why?  This tries to avoid a memory read use-after-free from
1891 	 * disclosing our talloc magic, which would then allow an
1892 	 * attacker to prepare a valid header and so run a destructor.
1893 	 *
1894 	 * What else?  We have to re-stamp back a valid normal magic
1895 	 * on this memory once realloc() is done, as it will have done
1896 	 * a memcpy() into the new valid memory.  We can't do this in
1897 	 * reverse as that would be a real use-after-free.
1898 	 */
1899 	_talloc_chunk_set_free(tc, NULL);
1900 
1901 #if ALWAYS_REALLOC
1902 	if (pool_hdr) {
1903 		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1904 		pool_hdr->object_count--;
1905 
1906 		if (new_ptr == NULL) {
1907 			new_ptr = malloc(TC_HDR_SIZE+size);
1908 			malloced = true;
1909 			new_size = size;
1910 		}
1911 
1912 		if (new_ptr) {
1913 			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1914 			TC_INVALIDATE_FULL_CHUNK(tc);
1915 		}
1916 	} else {
1917 		/* We're doing malloc then free here, so record the difference. */
1918 		old_size = tc->size;
1919 		new_size = size;
1920 		new_ptr = malloc(size + TC_HDR_SIZE);
1921 		if (new_ptr) {
1922 			memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
1923 			free(tc);
1924 		}
1925 	}
1926 #else
1927 	if (pool_hdr) {
1928 		struct talloc_chunk *pool_tc;
1929 		void *next_tc = tc_next_chunk(tc);
1930 		size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1931 		size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1932 		size_t space_needed;
1933 		size_t space_left;
1934 		unsigned int chunk_count = pool_hdr->object_count;
1935 
1936 		pool_tc = talloc_chunk_from_pool(pool_hdr);
1937 		if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1938 			chunk_count -= 1;
1939 		}
1940 
1941 		if (chunk_count == 1) {
1942 			/*
1943 			 * optimize for the case where 'tc' is the only
1944 			 * chunk in the pool.
1945 			 */
1946 			char *start = tc_pool_first_chunk(pool_hdr);
1947 			space_needed = new_chunk_size;
1948 			space_left = (char *)tc_pool_end(pool_hdr) - start;
1949 
1950 			if (space_left >= space_needed) {
1951 				size_t old_used = TC_HDR_SIZE + tc->size;
1952 				size_t new_used = TC_HDR_SIZE + size;
1953 				new_ptr = start;
1954 
1955 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1956 				{
1957 					/*
1958 					 * The area from
1959 					 * start -> tc may have
1960 					 * been freed and thus been marked as
1961 					 * VALGRIND_MEM_NOACCESS. Set it to
1962 					 * VALGRIND_MEM_UNDEFINED so we can
1963 					 * copy into it without valgrind errors.
1964 					 * We can't just mark
1965 					 * new_ptr -> new_ptr + old_used
1966 					 * as this may overlap on top of tc,
1967 					 * (which is why we use memmove, not
1968 					 * memcpy below) hence the MIN.
1969 					 */
1970 					size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1971 					VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1972 				}
1973 #endif
1974 
1975 				memmove(new_ptr, tc, old_used);
1976 
1977 				tc = (struct talloc_chunk *)new_ptr;
1978 				TC_UNDEFINE_GROW_CHUNK(tc, size);
1979 
1980 				/*
1981 				 * first we do not align the pool pointer
1982 				 * because we want to invalidate the padding
1983 				 * too.
1984 				 */
1985 				pool_hdr->end = new_used + (char *)new_ptr;
1986 				tc_invalidate_pool(pool_hdr);
1987 
1988 				/* now the aligned pointer */
1989 				pool_hdr->end = new_chunk_size + (char *)new_ptr;
1990 				goto got_new_ptr;
1991 			}
1992 
1993 			next_tc = NULL;
1994 		}
1995 
1996 		if (new_chunk_size == old_chunk_size) {
1997 			TC_UNDEFINE_GROW_CHUNK(tc, size);
1998 			_talloc_chunk_set_not_free(tc);
1999 			tc->size = size;
2000 			return ptr;
2001 		}
2002 
2003 		if (next_tc == pool_hdr->end) {
2004 			/*
2005 			 * optimize for the case where 'tc' is the last
2006 			 * chunk in the pool.
2007 			 */
2008 			space_needed = new_chunk_size - old_chunk_size;
2009 			space_left = tc_pool_space_left(pool_hdr);
2010 
2011 			if (space_left >= space_needed) {
2012 				TC_UNDEFINE_GROW_CHUNK(tc, size);
2013 				_talloc_chunk_set_not_free(tc);
2014 				tc->size = size;
2015 				pool_hdr->end = tc_next_chunk(tc);
2016 				return ptr;
2017 			}
2018 		}
2019 
2020 		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
2021 
2022 		if (new_ptr == NULL) {
2023 			new_ptr = malloc(TC_HDR_SIZE+size);
2024 			malloced = true;
2025 			new_size = size;
2026 		}
2027 
2028 		if (new_ptr) {
2029 			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2030 
2031 			_tc_free_poolmem(tc, __location__ "_talloc_realloc");
2032 		}
2033 	}
2034 	else {
2035 		/* We're doing realloc here, so record the difference. */
2036 		old_size = tc->size;
2037 		new_size = size;
2038 		new_ptr = realloc(tc, size + TC_HDR_SIZE);
2039 	}
2040 got_new_ptr:
2041 #endif
2042 	if (unlikely(!new_ptr)) {
2043 		/*
2044 		 * Ok, this is a strange spot.  We have to put back
2045 		 * the old talloc_magic and any flags, except the
2046 		 * TALLOC_FLAG_FREE as this was not freed by the
2047 		 * realloc() call after all
2048 		 */
2049 		_talloc_chunk_set_not_free(tc);
2050 		return NULL;
2051 	}
2052 
2053 	/*
2054 	 * tc is now the new value from realloc(), the old memory we
2055 	 * can't access any more and was preemptively marked as
2056 	 * TALLOC_FLAG_FREE before the call.  Now we mark it as not
2057 	 * free again
2058 	 */
2059 	tc = (struct talloc_chunk *)new_ptr;
2060 	_talloc_chunk_set_not_free(tc);
2061 	if (malloced) {
2062 		tc->flags &= ~TALLOC_FLAG_POOLMEM;
2063 	}
2064 	if (tc->parent) {
2065 		tc->parent->child = tc;
2066 	}
2067 	if (tc->child) {
2068 		tc->child->parent = tc;
2069 	}
2070 
2071 	if (tc->prev) {
2072 		tc->prev->next = tc;
2073 	}
2074 	if (tc->next) {
2075 		tc->next->prev = tc;
2076 	}
2077 
2078 	if (new_size > old_size) {
2079 		talloc_memlimit_grow(tc->limit, new_size - old_size);
2080 	} else if (new_size < old_size) {
2081 		talloc_memlimit_shrink(tc->limit, old_size - new_size);
2082 	}
2083 
2084 	tc->size = size;
2085 	_tc_set_name_const(tc, name);
2086 
2087 	return TC_PTR_FROM_CHUNK(tc);
2088 }
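
/*
  Illustrative sketch (not part of the library): typical use of the
  talloc_realloc() macro, which ends up in _talloc_realloc().  Note
  that the context argument only matters when the pointer being grown
  is NULL.  Function and parameter names here are hypothetical.

    #include <talloc.h>

    int *grow_ints(TALLOC_CTX *ctx, int *arr, unsigned new_count)
    {
        // passing arr == NULL here behaves like talloc_array(ctx, int, new_count)
        return talloc_realloc(ctx, arr, int, new_count);
    }
*/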
2089 
2090 /*
2091   a wrapper around talloc_steal() for situations where you are moving a pointer
2092   between two structures, and want the old pointer to be set to NULL
2093 */
2094 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2095 {
2096 	const void **pptr = discard_const_p(const void *,_pptr);
2097 	void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2098 	(*pptr) = NULL;
2099 	return ret;
2100 }
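
/*
  Illustrative sketch (not part of the library): talloc_move() steals
  the pointer onto a new parent and NULLs the old location, which is
  handy when handing ownership from one structure to another.  The
  struct and function names are hypothetical; both structures are
  assumed to be talloc-allocated.

    #include <talloc.h>

    struct request  { char *payload; };
    struct response { char *payload; };

    void hand_over(struct response *dst, struct request *src)
    {
        // dst->payload now owns the memory; src->payload becomes NULL
        dst->payload = talloc_move(dst, &src->payload);
    }
*/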
2101 
2102 enum talloc_mem_count_type {
2103 	TOTAL_MEM_SIZE,
2104 	TOTAL_MEM_BLOCKS,
2105 	TOTAL_MEM_LIMIT,
2106 };
2107 
2108 static inline size_t _talloc_total_mem_internal(const void *ptr,
2109 					 enum talloc_mem_count_type type,
2110 					 struct talloc_memlimit *old_limit,
2111 					 struct talloc_memlimit *new_limit)
2112 {
2113 	size_t total = 0;
2114 	struct talloc_chunk *c, *tc;
2115 
2116 	if (ptr == NULL) {
2117 		ptr = null_context;
2118 	}
2119 	if (ptr == NULL) {
2120 		return 0;
2121 	}
2122 
2123 	tc = talloc_chunk_from_ptr(ptr);
2124 
2125 	if (old_limit || new_limit) {
2126 		if (tc->limit && tc->limit->upper == old_limit) {
2127 			tc->limit->upper = new_limit;
2128 		}
2129 	}
2130 
2131 	/* optimize in the memlimits case */
2132 	if (type == TOTAL_MEM_LIMIT &&
2133 	    tc->limit != NULL &&
2134 	    tc->limit != old_limit &&
2135 	    tc->limit->parent == tc) {
2136 		return tc->limit->cur_size;
2137 	}
2138 
2139 	if (tc->flags & TALLOC_FLAG_LOOP) {
2140 		return 0;
2141 	}
2142 
2143 	tc->flags |= TALLOC_FLAG_LOOP;
2144 
2145 	if (old_limit || new_limit) {
2146 		if (old_limit == tc->limit) {
2147 			tc->limit = new_limit;
2148 		}
2149 	}
2150 
2151 	switch (type) {
2152 	case TOTAL_MEM_SIZE:
2153 		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2154 			total = tc->size;
2155 		}
2156 		break;
2157 	case TOTAL_MEM_BLOCKS:
2158 		total++;
2159 		break;
2160 	case TOTAL_MEM_LIMIT:
2161 		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2162 			/*
2163 			 * Don't count memory allocated from a pool
2164 			 * when calculating limits. Only count the
2165 			 * pool itself.
2166 			 */
2167 			if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2168 				if (tc->flags & TALLOC_FLAG_POOL) {
2169 					/*
2170 					 * If this is a pool, the allocated
2171 					 * size is in the pool header, and
2172 					 * remember to add in the prefix
2173 					 * length.
2174 					 */
2175 					struct talloc_pool_hdr *pool_hdr
2176 							= talloc_pool_from_chunk(tc);
2177 					total = pool_hdr->poolsize +
2178 							TC_HDR_SIZE +
2179 							TP_HDR_SIZE;
2180 				} else {
2181 					total = tc->size + TC_HDR_SIZE;
2182 				}
2183 			}
2184 		}
2185 		break;
2186 	}
2187 	for (c = tc->child; c; c = c->next) {
2188 		total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2189 						    old_limit, new_limit);
2190 	}
2191 
2192 	tc->flags &= ~TALLOC_FLAG_LOOP;
2193 
2194 	return total;
2195 }
2196 
2197 /*
2198   return the total size of a talloc pool (subtree)
2199 */
2200 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2201 {
2202 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2203 }
2204 
2205 /*
2206   return the total number of blocks in a talloc pool (subtree)
2207 */
2208 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2209 {
2210 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2211 }
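
/*
  Illustrative sketch (not part of the library): counting the memory
  held below a context, e.g. for crude per-connection accounting.  The
  function name is hypothetical.

    #include <stdio.h>
    #include <talloc.h>

    void print_usage(TALLOC_CTX *conn_ctx)
    {
        printf("context uses %zu bytes in %zu blocks\n",
               talloc_total_size(conn_ctx),
               talloc_total_blocks(conn_ctx));
    }
*/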
2212 
2213 /*
2214   return the number of external references to a pointer
2215 */
2216 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2217 {
2218 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2219 	struct talloc_reference_handle *h;
2220 	size_t ret = 0;
2221 
2222 	for (h=tc->refs;h;h=h->next) {
2223 		ret++;
2224 	}
2225 	return ret;
2226 }
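
/*
  Illustrative sketch (not part of the library): an extra reference
  created with talloc_reference() shows up in this count and has to be
  dropped with talloc_unlink() before a plain talloc_free() succeeds.
  Variable names are hypothetical.

    #include <talloc.h>

    void reference_example(void)
    {
        TALLOC_CTX *owner = talloc_new(NULL);
        TALLOC_CTX *other = talloc_new(NULL);
        char *shared = talloc_strdup(owner, "shared");

        talloc_reference(other, shared);
        // talloc_reference_count(shared) == 1 here
        talloc_unlink(other, shared);   // drop the extra reference
        talloc_free(owner);             // frees shared as well
        talloc_free(other);
    }
*/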
2227 
2228 /*
2229   report on memory usage by all children of a pointer, giving a full tree view
2230 */
2231 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2232 			    void (*callback)(const void *ptr,
2233 			  		     int depth, int max_depth,
2234 					     int is_ref,
2235 					     void *private_data),
2236 			    void *private_data)
2237 {
2238 	struct talloc_chunk *c, *tc;
2239 
2240 	if (ptr == NULL) {
2241 		ptr = null_context;
2242 	}
2243 	if (ptr == NULL) return;
2244 
2245 	tc = talloc_chunk_from_ptr(ptr);
2246 
2247 	if (tc->flags & TALLOC_FLAG_LOOP) {
2248 		return;
2249 	}
2250 
2251 	callback(ptr, depth, max_depth, 0, private_data);
2252 
2253 	if (max_depth >= 0 && depth >= max_depth) {
2254 		return;
2255 	}
2256 
2257 	tc->flags |= TALLOC_FLAG_LOOP;
2258 	for (c=tc->child;c;c=c->next) {
2259 		if (c->name == TALLOC_MAGIC_REFERENCE) {
2260 			struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2261 			callback(h->ptr, depth + 1, max_depth, 1, private_data);
2262 		} else {
2263 			talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2264 		}
2265 	}
2266 	tc->flags &= ~TALLOC_FLAG_LOOP;
2267 }
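
/*
  Illustrative sketch (not part of the library): a minimal custom
  callback for talloc_report_depth_cb(), printing one indented line per
  chunk.  The callback name is hypothetical; the signature matches the
  prototype above.

    #include <stdio.h>
    #include <talloc.h>

    static void my_report_cb(const void *ptr, int depth, int max_depth,
                             int is_ref, void *private_data)
    {
        FILE *f = (FILE *)private_data;
        (void)max_depth;
        fprintf(f, "%*s%s%s (%zu bytes)\n", depth * 2, "",
                is_ref ? "ref -> " : "",
                talloc_get_name(ptr), talloc_get_size(ptr));
    }

    // usage: talloc_report_depth_cb(ctx, 0, -1, my_report_cb, stderr);
*/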
2268 
2269 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2270 {
2271 	const char *name = __talloc_get_name(ptr);
2272 	struct talloc_chunk *tc;
2273 	FILE *f = (FILE *)_f;
2274 
2275 	if (is_ref) {
2276 		fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2277 		return;
2278 	}
2279 
2280 	tc = talloc_chunk_from_ptr(ptr);
2281 	if (tc->limit && tc->limit->parent == tc) {
2282 		fprintf(f, "%*s%-30s is a memlimit context"
2283 			" (max_size = %lu bytes, cur_size = %lu bytes)\n",
2284 			depth*4, "",
2285 			name,
2286 			(unsigned long)tc->limit->max_size,
2287 			(unsigned long)tc->limit->cur_size);
2288 	}
2289 
2290 	if (depth == 0) {
2291 		fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2292 			(max_depth < 0 ? "full " :""), name,
2293 			(unsigned long)talloc_total_size(ptr),
2294 			(unsigned long)talloc_total_blocks(ptr));
2295 		return;
2296 	}
2297 
2298 	fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2299 		depth*4, "",
2300 		name,
2301 		(unsigned long)talloc_total_size(ptr),
2302 		(unsigned long)talloc_total_blocks(ptr),
2303 		(int)talloc_reference_count(ptr), ptr);
2304 
2305 #if 0
2306 	fprintf(f, "content: ");
2307 	if (talloc_total_size(ptr)) {
2308 		int tot = talloc_total_size(ptr);
2309 		int i;
2310 
2311 		for (i = 0; i < tot; i++) {
2312 			if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2313 				fprintf(f, "%c", ((char *)ptr)[i]);
2314 			} else {
2315 				fprintf(f, "~%02x", ((char *)ptr)[i]);
2316 			}
2317 		}
2318 	}
2319 	fprintf(f, "\n");
2320 #endif
2321 }
2322 
2323 /*
2324   report on memory usage by all children of a pointer, giving a full tree view
2325 */
2326 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2327 {
2328 	if (f) {
2329 		talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2330 		fflush(f);
2331 	}
2332 }
2333 
2334 /*
2335   report on memory usage by all children of a pointer, giving a full tree view
2336 */
2337 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2338 {
2339 	talloc_report_depth_file(ptr, 0, -1, f);
2340 }
2341 
2342 /*
2343   report on memory usage by all children of a pointer
2344 */
2345 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2346 {
2347 	talloc_report_depth_file(ptr, 0, 1, f);
2348 }
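
/*
  Illustrative sketch (not part of the library): with NULL tracking
  enabled, a full report of everything still allocated can be dumped to
  stderr, which is the usual way to hunt leaks interactively.  The
  function name is hypothetical.

    #include <stdio.h>
    #include <talloc.h>

    void dump_all_allocations(void)
    {
        talloc_enable_null_tracking();
        // ... allocations happen here ...
        talloc_report_full(NULL, stderr);   // whole tree under the NULL context
    }
*/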
2349 
2350 /*
2351   enable tracking of the NULL context
2352 */
2353 _PUBLIC_ void talloc_enable_null_tracking(void)
2354 {
2355 	if (null_context == NULL) {
2356 		null_context = _talloc_named_const(NULL, 0, "null_context");
2357 		if (autofree_context != NULL) {
2358 			talloc_reparent(NULL, null_context, autofree_context);
2359 		}
2360 	}
2361 }
2362 
2363 /*
2364   enable tracking of the NULL context, without moving the autofree
2365   context into the NULL context. This is needed for the talloc testsuite
2366 */
2367 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2368 {
2369 	if (null_context == NULL) {
2370 		null_context = _talloc_named_const(NULL, 0, "null_context");
2371 	}
2372 }
2373 
2374 /*
2375   disable tracking of the NULL context
2376 */
2377 _PUBLIC_ void talloc_disable_null_tracking(void)
2378 {
2379 	if (null_context != NULL) {
2380 		/* we have to move any children onto the real NULL
2381 		   context */
2382 		struct talloc_chunk *tc, *tc2;
2383 		tc = talloc_chunk_from_ptr(null_context);
2384 		for (tc2 = tc->child; tc2; tc2=tc2->next) {
2385 			if (tc2->parent == tc) tc2->parent = NULL;
2386 			if (tc2->prev == tc) tc2->prev = NULL;
2387 		}
2388 		for (tc2 = tc->next; tc2; tc2=tc2->next) {
2389 			if (tc2->parent == tc) tc2->parent = NULL;
2390 			if (tc2->prev == tc) tc2->prev = NULL;
2391 		}
2392 		tc->child = NULL;
2393 		tc->next = NULL;
2394 	}
2395 	talloc_free(null_context);
2396 	null_context = NULL;
2397 }
2398 
2399 /*
2400   enable leak reporting on exit
2401 */
2402 _PUBLIC_ void talloc_enable_leak_report(void)
2403 {
2404 	talloc_enable_null_tracking();
2405 	talloc_report_null = true;
2406 	talloc_setup_atexit();
2407 }
2408 
2409 /*
2410   enable full leak reporting on exit
2411 */
2412 _PUBLIC_ void talloc_enable_leak_report_full(void)
2413 {
2414 	talloc_enable_null_tracking();
2415 	talloc_report_null_full = true;
2416 	talloc_setup_atexit();
2417 }
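
/*
  Illustrative sketch (not part of the library): leak reporting is
  typically enabled once, right at the start of main(), before any
  talloc allocation is made.

    #include <talloc.h>

    int main(void)
    {
        talloc_enable_leak_report_full();
        // ... program runs; anything still allocated is reported at exit ...
        return 0;
    }
*/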
2418 
2419 /*
2420    talloc and zero memory.
2421 */
2422 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2423 {
2424 	void *p = _talloc_named_const(ctx, size, name);
2425 
2426 	if (p) {
2427 		memset(p, '\0', size);
2428 	}
2429 
2430 	return p;
2431 }
2432 
2433 /*
2434   memdup with a talloc.
2435 */
2436 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2437 {
2438 	void *newp = NULL;
2439 
2440 	if (likely(size > 0) && unlikely(p == NULL)) {
2441 		return NULL;
2442 	}
2443 
2444 	newp = _talloc_named_const(t, size, name);
2445 	if (likely(newp != NULL) && likely(size > 0)) {
2446 		memcpy(newp, p, size);
2447 	}
2448 
2449 	return newp;
2450 }
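
/*
  Illustrative sketch (not part of the library): talloc_zero() returns
  zero-filled memory and talloc_memdup() copies an existing buffer onto
  a talloc context.  "struct blob" and the function name are
  hypothetical.

    #include <stddef.h>
    #include <stdint.h>
    #include <talloc.h>

    struct blob { uint8_t *data; size_t length; };

    struct blob *copy_blob(TALLOC_CTX *ctx, const uint8_t *src, size_t len)
    {
        struct blob *b = talloc_zero(ctx, struct blob);
        if (b == NULL) {
            return NULL;
        }
        b->data = talloc_memdup(b, src, len);   // child of b, freed with it
        b->length = (b->data != NULL) ? len : 0;
        return b;
    }
*/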
2451 
2452 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2453 {
2454 	char *ret;
2455 	struct talloc_chunk *tc;
2456 
2457 	ret = (char *)__talloc(t, len + 1, &tc);
2458 	if (unlikely(!ret)) return NULL;
2459 
2460 	memcpy(ret, p, len);
2461 	ret[len] = 0;
2462 
2463 	_tc_set_name_const(tc, ret);
2464 	return ret;
2465 }
2466 
2467 /*
2468   strdup with a talloc
2469 */
2470 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2471 {
2472 	if (unlikely(!p)) return NULL;
2473 	return __talloc_strlendup(t, p, strlen(p));
2474 }
2475 
2476 /*
2477   strndup with a talloc
2478 */
2479 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2480 {
2481 	if (unlikely(!p)) return NULL;
2482 	return __talloc_strlendup(t, p, strnlen(p, n));
2483 }
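
/*
  Illustrative sketch (not part of the library): talloc_strdup() and
  talloc_strndup() attach the copy to the given context and name the
  chunk after its own contents, which keeps talloc reports readable.
  The function name is hypothetical.

    #include <talloc.h>

    void strdup_example(TALLOC_CTX *ctx)
    {
        char *a = talloc_strdup(ctx, "hello world");
        char *b = talloc_strndup(ctx, "hello world", 5);  // "hello"
        (void)a;
        (void)b;    // both are released when ctx is freed
    }
*/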
2484 
2485 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2486 					      const char *a, size_t alen)
2487 {
2488 	char *ret;
2489 
2490 	ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2491 	if (unlikely(!ret)) return NULL;
2492 
2493 	/* append the string and the trailing \0 */
2494 	memcpy(&ret[slen], a, alen);
2495 	ret[slen+alen] = 0;
2496 
2497 	_tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2498 	return ret;
2499 }
2500 
2501 /*
2502  * Appends to the end of the string.
2503  */
2504 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2505 {
2506 	if (unlikely(!s)) {
2507 		return talloc_strdup(NULL, a);
2508 	}
2509 
2510 	if (unlikely(!a)) {
2511 		return s;
2512 	}
2513 
2514 	return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2515 }
2516 
2517 /*
2518  * Appends to the end of the talloc'ed buffer,
2519  * not to the end of the string.
2520  */
2521 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2522 {
2523 	size_t slen;
2524 
2525 	if (unlikely(!s)) {
2526 		return talloc_strdup(NULL, a);
2527 	}
2528 
2529 	if (unlikely(!a)) {
2530 		return s;
2531 	}
2532 
2533 	slen = talloc_get_size(s);
2534 	if (likely(slen > 0)) {
2535 		slen--;
2536 	}
2537 
2538 	return __talloc_strlendup_append(s, slen, a, strlen(a));
2539 }
2540 
2541 /*
2542  * Appends to the end of the string.
2543  */
2544 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2545 {
2546 	if (unlikely(!s)) {
2547 		return talloc_strndup(NULL, a, n);
2548 	}
2549 
2550 	if (unlikely(!a)) {
2551 		return s;
2552 	}
2553 
2554 	return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2555 }
2556 
2557 /*
2558  * Appends to the end of the talloc'ed buffer,
2559  * not to the end of the string.
2560  */
2561 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2562 {
2563 	size_t slen;
2564 
2565 	if (unlikely(!s)) {
2566 		return talloc_strndup(NULL, a, n);
2567 	}
2568 
2569 	if (unlikely(!a)) {
2570 		return s;
2571 	}
2572 
2573 	slen = talloc_get_size(s);
2574 	if (likely(slen > 0)) {
2575 		slen--;
2576 	}
2577 
2578 	return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2579 }
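
/*
  Illustrative sketch (not part of the library): the _append variants
  extend at the terminating NUL of the string, while the _append_buffer
  variants extend at the end of the talloc'ed buffer, which matters
  once the buffer is larger than the string it holds.  The function
  name is hypothetical and error handling is omitted.

    #include <talloc.h>

    char *build_label(TALLOC_CTX *ctx)
    {
        char *s = talloc_strdup(ctx, "host");
        s = talloc_strdup_append(s, ":");           // "host:"
        s = talloc_strdup_append_buffer(s, "445");  // appended at the buffer end
        return s;   // "host:445" here, since the buffer holds exactly one string
    }
*/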
2580 
2581 #ifndef HAVE_VA_COPY
2582 #ifdef HAVE___VA_COPY
2583 #define va_copy(dest, src) __va_copy(dest, src)
2584 #else
2585 #define va_copy(dest, src) (dest) = (src)
2586 #endif
2587 #endif
2588 
2589 static struct talloc_chunk *_vasprintf_tc(const void *t,
2590 					  const char *fmt,
2591 					  va_list ap) PRINTF_ATTRIBUTE(2,0);
2592 
2593 static struct talloc_chunk *_vasprintf_tc(const void *t,
2594 					  const char *fmt,
2595 					  va_list ap)
2596 {
2597 	int vlen;
2598 	size_t len;
2599 	char *ret;
2600 	va_list ap2;
2601 	struct talloc_chunk *tc;
2602 	char buf[1024];
2603 
2604 	/* this call looks strange, but it makes it work on older Solaris boxes */
2605 	va_copy(ap2, ap);
2606 	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2607 	va_end(ap2);
2608 	if (unlikely(vlen < 0)) {
2609 		return NULL;
2610 	}
2611 	len = vlen;
2612 	if (unlikely(len + 1 < len)) {
2613 		return NULL;
2614 	}
2615 
2616 	ret = (char *)__talloc(t, len+1, &tc);
2617 	if (unlikely(!ret)) return NULL;
2618 
2619 	if (len < sizeof(buf)) {
2620 		memcpy(ret, buf, len+1);
2621 	} else {
2622 		va_copy(ap2, ap);
2623 		vsnprintf(ret, len+1, fmt, ap2);
2624 		va_end(ap2);
2625 	}
2626 
2627 	_tc_set_name_const(tc, ret);
2628 	return tc;
2629 }
2630 
2631 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2632 {
2633 	struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2634 	if (tc == NULL) {
2635 		return NULL;
2636 	}
2637 	return TC_PTR_FROM_CHUNK(tc);
2638 }
2639 
2640 
2641 /*
2642   Perform string formatting, and return a pointer to newly allocated
2643   memory holding the result, inside a memory pool.
2644  */
2645 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2646 {
2647 	va_list ap;
2648 	char *ret;
2649 
2650 	va_start(ap, fmt);
2651 	ret = talloc_vasprintf(t, fmt, ap);
2652 	va_end(ap);
2653 	return ret;
2654 }
2655 
2656 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2657 						 const char *fmt, va_list ap)
2658 						 PRINTF_ATTRIBUTE(3,0);
2659 
2660 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2661 						 const char *fmt, va_list ap)
2662 {
2663 	ssize_t alen;
2664 	va_list ap2;
2665 	char c;
2666 
2667 	va_copy(ap2, ap);
2668 	alen = vsnprintf(&c, 1, fmt, ap2);
2669 	va_end(ap2);
2670 
2671 	if (alen <= 0) {
2672 		/* Either the vsnprintf failed or the format resulted in
2673 		 * no characters being formatted. In the former case, we
2674 		 * ought to return NULL, in the latter we ought to return
2675 		 * the original string. Most current callers of this
2676 		 * function expect it to never return NULL.
2677 		 */
2678 		return s;
2679 	}
2680 
2681 	s = talloc_realloc(NULL, s, char, slen + alen + 1);
2682 	if (!s) return NULL;
2683 
2684 	va_copy(ap2, ap);
2685 	vsnprintf(s + slen, alen + 1, fmt, ap2);
2686 	va_end(ap2);
2687 
2688 	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
2689 	return s;
2690 }
2691 
2692 /**
2693  * Realloc @p s to append the formatted result of @p fmt and @p ap,
2694  * and return @p s, which may have moved.  Good for gradually
2695  * accumulating output into a string buffer. Appends to the end
2696  * of the string.
2697  **/
2698 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2699 {
2700 	if (unlikely(!s)) {
2701 		return talloc_vasprintf(NULL, fmt, ap);
2702 	}
2703 
2704 	return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2705 }
2706 
2707 /**
2708  * Realloc @p s to append the formatted result of @p fmt and @p ap,
2709  * and return @p s, which may have moved. Always appends to the
2710  * end of the talloc'ed buffer, not to the end of the string.
2711  **/
2712 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2713 {
2714 	size_t slen;
2715 
2716 	if (unlikely(!s)) {
2717 		return talloc_vasprintf(NULL, fmt, ap);
2718 	}
2719 
2720 	slen = talloc_get_size(s);
2721 	if (likely(slen > 0)) {
2722 		slen--;
2723 	}
2724 
2725 	return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2726 }
2727 
2728 /*
2729   Realloc @p s to append the formatted result of @p fmt and return @p
2730   s, which may have moved.  Good for gradually accumulating output
2731   into a string buffer.
2732  */
2733 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2734 {
2735 	va_list ap;
2736 
2737 	va_start(ap, fmt);
2738 	s = talloc_vasprintf_append(s, fmt, ap);
2739 	va_end(ap);
2740 	return s;
2741 }
2742 
2743 /*
2744   Realloc @p s to append the formatted result of @p fmt and return @p
2745   s, which may have moved.  Good for gradually accumulating output
2746   into a buffer.
2747  */
2748 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2749 {
2750 	va_list ap;
2751 
2752 	va_start(ap, fmt);
2753 	s = talloc_vasprintf_append_buffer(s, fmt, ap);
2754 	va_end(ap);
2755 	return s;
2756 }
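
/*
  Illustrative sketch (not part of the library): building up a string
  with talloc_asprintf() and talloc_asprintf_append().  Each call may
  move the buffer, so always assign the result back.  The function and
  parameter names are hypothetical.

    #include <talloc.h>

    char *describe_user(TALLOC_CTX *ctx, const char *name, unsigned id)
    {
        char *s = talloc_asprintf(ctx, "user %s", name);
        if (s == NULL) {
            return NULL;
        }
        s = talloc_asprintf_append(s, " (id %u)", id);
        return s;   // e.g. "user root (id 0)"
    }
*/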
2757 
2758 /*
2759   alloc an array, checking for integer overflow in the array size
2760 */
2761 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2762 {
2763 	if (count >= MAX_TALLOC_SIZE/el_size) {
2764 		return NULL;
2765 	}
2766 	return _talloc_named_const(ctx, el_size * count, name);
2767 }
2768 
2769 /*
2770   alloc a zeroed array, checking for integer overflow in the array size
2771 */
2772 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2773 {
2774 	if (count >= MAX_TALLOC_SIZE/el_size) {
2775 		return NULL;
2776 	}
2777 	return _talloc_zero(ctx, el_size * count, name);
2778 }
2779 
2780 /*
2781   realloc an array, checking for integer overflow in the array size
2782 */
2783 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2784 {
2785 	if (count >= MAX_TALLOC_SIZE/el_size) {
2786 		return NULL;
2787 	}
2788 	return _talloc_realloc(ctx, ptr, el_size * count, name);
2789 }
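
/*
  Illustrative sketch (not part of the library): the array macros do
  overflow-checked element-count allocation, so a huge count fails
  cleanly with NULL instead of wrapping the size computation.  The
  function name is hypothetical.

    #include <stdint.h>
    #include <talloc.h>

    uint32_t *make_table(TALLOC_CTX *ctx, unsigned count)
    {
        // returns NULL on allocation failure or if count * sizeof(uint32_t)
        // would exceed the internal size check
        return talloc_zero_array(ctx, uint32_t, count);
    }
*/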
2790 
2791 /*
2792   a function version of talloc_realloc(), so it can be passed as a function pointer
2793   to libraries that want a realloc function (a realloc function encapsulates
2794   all the basic capabilities of an allocation library, which is why this is useful)
2795 */
2796 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2797 {
2798 	return _talloc_realloc(context, ptr, size, NULL);
2799 }
2800 
2801 
2802 static int talloc_autofree_destructor(void *ptr)
2803 {
2804 	autofree_context = NULL;
2805 	return 0;
2806 }
2807 
2808 /*
2809   return a context which will be auto-freed on exit
2810   this is useful for reducing the noise in leak reports
2811 */
2812 _PUBLIC_ void *talloc_autofree_context(void)
2813 {
2814 	if (autofree_context == NULL) {
2815 		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2816 		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2817 		talloc_setup_atexit();
2818 	}
2819 	return autofree_context;
2820 }
2821 
2822 _PUBLIC_ size_t talloc_get_size(const void *context)
2823 {
2824 	struct talloc_chunk *tc;
2825 
2826 	if (context == NULL) {
2827 		return 0;
2828 	}
2829 
2830 	tc = talloc_chunk_from_ptr(context);
2831 
2832 	return tc->size;
2833 }
2834 
2835 /*
2836   find a parent of this context that has the given name, if any
2837 */
2838 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2839 {
2840 	struct talloc_chunk *tc;
2841 
2842 	if (context == NULL) {
2843 		return NULL;
2844 	}
2845 
2846 	tc = talloc_chunk_from_ptr(context);
2847 	while (tc) {
2848 		if (tc->name && strcmp(tc->name, name) == 0) {
2849 			return TC_PTR_FROM_CHUNK(tc);
2850 		}
2851 		while (tc && tc->prev) tc = tc->prev;
2852 		if (tc) {
2853 			tc = tc->parent;
2854 		}
2855 	}
2856 	return NULL;
2857 }
2858 
2859 /*
2860   show the parentage of a context
2861 */
2862 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2863 {
2864 	struct talloc_chunk *tc;
2865 
2866 	if (context == NULL) {
2867 		fprintf(file, "talloc no parents for NULL\n");
2868 		return;
2869 	}
2870 
2871 	tc = talloc_chunk_from_ptr(context);
2872 	fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2873 	while (tc) {
2874 		fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2875 		while (tc && tc->prev) tc = tc->prev;
2876 		if (tc) {
2877 			tc = tc->parent;
2878 		}
2879 	}
2880 	fflush(file);
2881 }
2882 
2883 /*
2884   return 1 if ptr is a parent of context
2885 */
2886 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2887 {
2888 	struct talloc_chunk *tc;
2889 
2890 	if (context == NULL) {
2891 		return 0;
2892 	}
2893 
2894 	tc = talloc_chunk_from_ptr(context);
2895 	while (tc) {
2896 		if (depth <= 0) {
2897 			return 0;
2898 		}
2899 		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2900 		while (tc && tc->prev) tc = tc->prev;
2901 		if (tc) {
2902 			tc = tc->parent;
2903 			depth--;
2904 		}
2905 	}
2906 	return 0;
2907 }
2908 
2909 /*
2910   return 1 if ptr is a parent of context
2911 */
2912 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2913 {
2914 	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2915 }
2916 
2917 /*
2918   return the total size of memory used by this context and all children
2919 */
2920 static inline size_t _talloc_total_limit_size(const void *ptr,
2921 					struct talloc_memlimit *old_limit,
2922 					struct talloc_memlimit *new_limit)
2923 {
2924 	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2925 					  old_limit, new_limit);
2926 }
2927 
2928 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2929 {
2930 	struct talloc_memlimit *l;
2931 
2932 	for (l = limit; l != NULL; l = l->upper) {
2933 		if (l->max_size != 0 &&
2934 		    ((l->max_size <= l->cur_size) ||
2935 		     (l->max_size - l->cur_size < size))) {
2936 			return false;
2937 		}
2938 	}
2939 
2940 	return true;
2941 }
2942 
2943 /*
2944   Update memory limits when freeing a talloc_chunk.
2945 */
2946 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2947 {
2948 	size_t limit_shrink_size;
2949 
2950 	if (!tc->limit) {
2951 		return;
2952 	}
2953 
2954 	/*
2955 	 * Pool entries don't count. Only the pools
2956 	 * themselves are counted as part of the memory
2957 	 * limits. Note that this also takes care of
2958 	 * nested pools which have both flags
2959 	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2960 	 */
2961 	if (tc->flags & TALLOC_FLAG_POOLMEM) {
2962 		return;
2963 	}
2964 
2965 	/*
2966 	 * If we are part of a memory limited context hierarchy
2967 	 * we need to subtract the memory used from the counters
2968 	 */
2969 
2970 	limit_shrink_size = tc->size+TC_HDR_SIZE;
2971 
2972 	/*
2973 	 * If we're deallocating a pool, take into
2974 	 * account the prefix size added for the pool.
2975 	 */
2976 
2977 	if (tc->flags & TALLOC_FLAG_POOL) {
2978 		limit_shrink_size += TP_HDR_SIZE;
2979 	}
2980 
2981 	talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2982 
2983 	if (tc->limit->parent == tc) {
2984 		free(tc->limit);
2985 	}
2986 
2987 	tc->limit = NULL;
2988 }
2989 
2990 /*
2991   Increase memory limit accounting after a malloc/realloc.
2992 */
2993 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2994 				size_t size)
2995 {
2996 	struct talloc_memlimit *l;
2997 
2998 	for (l = limit; l != NULL; l = l->upper) {
2999 		size_t new_cur_size = l->cur_size + size;
3000 		if (new_cur_size < l->cur_size) {
3001 			talloc_abort("logic error in talloc_memlimit_grow\n");
3002 			return;
3003 		}
3004 		l->cur_size = new_cur_size;
3005 	}
3006 }
3007 
3008 /*
3009   Decrease memory limit accounting after a free/realloc.
3010 */
3011 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
3012 				size_t size)
3013 {
3014 	struct talloc_memlimit *l;
3015 
3016 	for (l = limit; l != NULL; l = l->upper) {
3017 		if (l->cur_size < size) {
3018 			talloc_abort("logic error in talloc_memlimit_shrink\n");
3019 			return;
3020 		}
3021 		l->cur_size = l->cur_size - size;
3022 	}
3023 }
3024 
3025 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3026 {
3027 	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3028 	struct talloc_memlimit *orig_limit;
3029 	struct talloc_memlimit *limit = NULL;
3030 
3031 	if (tc->limit && tc->limit->parent == tc) {
3032 		tc->limit->max_size = max_size;
3033 		return 0;
3034 	}
3035 	orig_limit = tc->limit;
3036 
3037 	limit = malloc(sizeof(struct talloc_memlimit));
3038 	if (limit == NULL) {
3039 		return 1;
3040 	}
3041 	limit->parent = tc;
3042 	limit->max_size = max_size;
3043 	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3044 
3045 	if (orig_limit) {
3046 		limit->upper = orig_limit;
3047 	} else {
3048 		limit->upper = NULL;
3049 	}
3050 
3051 	return 0;
3052 }
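
/*
  Illustrative sketch (not part of the library): putting a hierarchy
  under a memory limit.  Allocations below the limited context fail
  with NULL once the limit would be exceeded.  The function name and
  the 1 MiB figure are arbitrary.

    #include <talloc.h>

    void *limited_alloc(void)
    {
        TALLOC_CTX *ctx = talloc_new(NULL);

        if (ctx == NULL || talloc_set_memlimit(ctx, 1024 * 1024) != 0) {
            talloc_free(ctx);
            return NULL;
        }
        // fails and returns NULL if it would push ctx past 1 MiB
        return talloc_size(ctx, 512 * 1024);
    }
*/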
3053