/*	$OpenBSD: atexit.c,v 1.29 2022/12/27 17:10:06 jmc Exp $ */
/*
 * Copyright (c) 2002 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>
#include <tib.h>
#include <unistd.h>

#include "atexit.h"
#include "atfork.h"
#include "thread_private.h"

struct atexit *__atexit;
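/* set whenever a handler is registered; tells __cxa_finalize() to restart its scan */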
static int restartloop;

/* define and initialize the atfork handler list */
struct atfork_listhead _atfork_list = TAILQ_HEAD_INITIALIZER(_atfork_list);


/*
 * Function pointers are stored in a linked list of pages. The list
 * is initially empty, and pages are allocated on demand. The first
 * function pointer in the first allocated page (the last one in
 * the linked list) is reserved for the cleanup function.
 *
 * Outside the following functions, all pages are mprotect()'ed
 * to prevent unintentional/malicious corruption.
 */
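
/*
 * A rough sketch of the structures this file relies on (the authoritative
 * definitions live in "atexit.h"; the layout below is only illustrative):
 *
 *	struct atexit_fn {
 *		void	(*fn_ptr)(void *);	handler, NULL once called
 *		void	*fn_arg;		argument passed to the handler
 *		void	*fn_dso;		owning shared object, or NULL
 *	};
 *
 *	struct atexit {
 *		struct atexit	*next;		next (older) page, or NULL
 *		int		 ind;		next free slot in fns[]
 *		int		 max;		number of slots in this page
 *		struct atexit_fn fns[1];	handlers stored in this page
 *	};
 */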

/*
 * Register a function to be called at exit or when a shared object
 * with the given dso handle is unloaded dynamically.  Also used as
 * the backend for atexit().  For more info on this API, see:
 *
 *	http://www.codesourcery.com/cxx-abi/abi.html#dso-dtor
 */
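/*
 * Illustrative use (a sketch of what compiler-generated code does under
 * the Itanium C++ ABI; the names below are assumptions, not part of this
 * file): after constructing a static object, the compiler emits roughly
 *
 *	__cxa_atexit(dtor, &object, &__dso_handle);
 *
 * so the destructor runs at exit() or when the owning DSO is dlclose()d.
 */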
int
__cxa_atexit(void (*func)(void *), void *arg, void *dso)
{
	struct atexit *p;
	struct atexit_fn *fnp;
	int pgsize = getpagesize();
	int ret = -1;

	if (pgsize < sizeof(*p))
		return (-1);
	_ATEXIT_LOCK();
	p = __atexit;
	if (p != NULL) {
		if (p->ind + 1 >= p->max)
			p = NULL;
		else if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;
	}
	if (p == NULL) {
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		if (__atexit == NULL) {
			memset(&p->fns[0], 0, sizeof(p->fns[0]));
			p->ind = 1;
		} else
			p->ind = 0;
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = __atexit;
		__atexit = p;
	}
	fnp = &p->fns[p->ind++];
	fnp->fn_ptr = func;
	fnp->fn_arg = arg;
	fnp->fn_dso = dso;
	if (mprotect(p, pgsize, PROT_READ))
		goto unlock;
	restartloop = 1;
	ret = 0;
unlock:
	_ATEXIT_UNLOCK();
	return (ret);
}
DEF_STRONG(__cxa_atexit);

/*
 * Copy of atexit() used by libc and anything statically linked into the
 * executable.  This passes NULL for the dso, so the callbacks are only
 * invoked by exit() and not dlclose().
 */
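/*
 * Note (the other side of this is not shown in this file): code that wants
 * its handlers run at dlclose() time must instead go through __cxa_atexit()
 * with the owning object's __dso_handle, as in the ABI sketch above.
 */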
int
atexit(void (*fn)(void))
{
	return (__cxa_atexit((void (*)(void *))fn, NULL, NULL));
}
DEF_STRONG(atexit);

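/*
 * Run the per-thread exit handlers queued on the current thread's TIB,
 * walking from the head of the list and freeing each entry after its
 * function has been called.  The list is presumably populated by
 * __cxa_thread_atexit() for thread_local destructors; that code lives
 * elsewhere in libc and is an assumption here.
 */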
void
_thread_finalize(void)
{
	struct tib *tib = TIB_GET();

	while (tib->tib_atexit) {
		struct thread_atexit_fn *fnp = tib->tib_atexit;
		tib->tib_atexit = fnp->next;
		fnp->func(fnp->arg);
		free(fnp);
	}
}

/*
 * Call all handlers registered with __cxa_atexit() for the shared
 * object owning 'dso'.
 * Note: if 'dso' is NULL, then all remaining handlers are called.
 */
void
__cxa_finalize(void *dso)
{
	struct atexit *p, *q;
	struct atexit_fn fn;
	int n, pgsize = getpagesize();
	static int call_depth;

	if (dso == NULL)
		_thread_finalize();

	_ATEXIT_LOCK();
	call_depth++;

restart:
	restartloop = 0;
	for (p = __atexit; p != NULL; p = p->next) {
		for (n = p->ind; --n >= 0;) {
			if (p->fns[n].fn_ptr == NULL)
				continue;	/* already called */
			if (dso != NULL && dso != p->fns[n].fn_dso)
				continue;	/* wrong DSO */

			/*
			 * Mark handler as having been already called to avoid
			 * dupes and loops, then call the appropriate function.
			 */
			fn = p->fns[n];
			if (mprotect(p, pgsize, PROT_READ | PROT_WRITE) == 0) {
				p->fns[n].fn_ptr = NULL;
				mprotect(p, pgsize, PROT_READ);
			}
			_ATEXIT_UNLOCK();
			(*fn.fn_ptr)(fn.fn_arg);
			_ATEXIT_LOCK();
			if (restartloop)
				goto restart;
		}
	}

	call_depth--;

	/*
	 * If called via exit(), unmap the pages since we have now run
	 * all the handlers.  We defer this until call_depth == 0 so that
	 * we don't unmap things prematurely if called recursively (e.g.
	 * when a handler itself triggers another round of finalization).
	 */
	if (dso == NULL && call_depth == 0) {
		for (p = __atexit; p != NULL; ) {
			q = p;
			p = p->next;
			munmap(q, pgsize);
		}
		__atexit = NULL;
	}
	_ATEXIT_UNLOCK();

	/*
	 * If unloading a DSO, unregister any atfork handlers registered
	 * by it.  Skip the locking if the list is currently empty.
	 */
	if (dso != NULL && TAILQ_FIRST(&_atfork_list) != NULL) {
		struct atfork_fn *af, *afnext;

		_ATFORK_LOCK();
		TAILQ_FOREACH_SAFE(af, &_atfork_list, fn_next, afnext)
			if (af->fn_dso == dso) {
				TAILQ_REMOVE(&_atfork_list, af, fn_next);
				free(af);
			}
		_ATFORK_UNLOCK();

	}
}
DEF_STRONG(__cxa_finalize);
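
/*
 * Caller overview (a sketch; neither caller lives in this file): exit()
 * is expected to invoke __cxa_finalize(NULL) so that every remaining
 * handler runs and the pages are unmapped, while the dynamic linker's
 * dlclose() is expected to invoke __cxa_finalize() with the unloaded
 * object's dso handle so that only that object's handlers run.
 */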

/*
 * Register the cleanup function
 */
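/*
 * The cleanup function occupies the reserved slot fns[0] in the first
 * page ever allocated (the last one on the list); registering again
 * simply overwrites that slot.  In practice this is how stdio arranges
 * to flush its streams at exit (an assumption; the caller is not part
 * of this file).
 */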
void
__atexit_register_cleanup(void (*func)(void))
{
	struct atexit *p;
	int pgsize = getpagesize();

	if (pgsize < sizeof(*p))
		return;
	_ATEXIT_LOCK();
	p = __atexit;
	while (p != NULL && p->next != NULL)
		p = p->next;
	if (p == NULL) {
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		p->ind = 1;
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = NULL;
		__atexit = p;
	} else {
		if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;
	}
	p->fns[0].fn_ptr = (void (*)(void *))func;
	p->fns[0].fn_arg = NULL;
	p->fns[0].fn_dso = NULL;
	mprotect(p, pgsize, PROT_READ);
	restartloop = 1;
unlock:
	_ATEXIT_UNLOCK();
}
257