1 /*
2  * Copyright (c) 2013, NVIDIA CORPORATION.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and/or associated documentation files (the
6  * "Materials"), to deal in the Materials without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sublicense, and/or sell copies of the Materials, and to
9  * permit persons to whom the Materials are furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * unaltered in all copies or substantial portions of the Materials.
14  * Any additions, deletions, or changes to the original source files
15  * must be clearly indicated in accompanying documentation.
16  *
17  * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
21  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23  * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
24  */
25 
26 #include <string.h>
27 #include <pthread.h>
28 #include <dlfcn.h>
29 
30 #include "trace.h"
31 #include "glvnd_list.h"
32 #include "GLdispatch.h"
33 #include "GLdispatchPrivate.h"
34 #include "stub.h"
35 #include "glvnd_pthread.h"
36 #include "app_error_check.h"
37 
/*
 * Global current dispatch table list. We need this to fix up all current
 * dispatch tables whenever GetProcAddress() is called on a new function.
 * Accesses to this need to be protected by the dispatch lock.
 */
static struct glvnd_list currentDispatchList;

/*
 * Number of clients using GLdispatch. Incremented by __glDispatchInit and
 * decremented by __glDispatchFini; teardown happens when it reaches zero.
 * Protected by the dispatch lock.
 */
static int clientRefcount;

/*
 * The number of current contexts that GLdispatch is aware of. Incremented in
 * __glDispatchMakeCurrent and decremented in LoseCurrentInternal. Protected
 * by the dispatch lock.
 */
static int numCurrentContexts;
54 
/**
 * Private data for each API state. One of these is allocated per thread in
 * __glDispatchMakeCurrent and freed in LoseCurrentInternal.
 */
typedef struct __GLdispatchThreadStatePrivateRec {
    /// A pointer back to the API state that owns this private block.
    __GLdispatchThreadState *threadState;

    /// ID of the current vendor for this state.
    int vendorID;

    /// The current (high-level) __GLdispatch table.
    __GLdispatchTable *dispatch;
} __GLdispatchThreadStatePrivate;
68 
/*
 * List of valid extension procs which have been assigned prototypes. At make
 * current time, if the new context's generation is out-of-date, we iterate
 * through this list and fix up the new context with entrypoints with a greater
 * generation number. Accesses to this need to be protected by the dispatch
 * lock.
 */
static struct glvnd_list extProcList;

/*
 * Dispatch stub list for entrypoint rewriting, along with the counter used
 * to hand out a unique ID per registered stub set. Protected by the dispatch
 * lock.
 */
static struct glvnd_list dispatchStubList;
static int nextDispatchStubID = 1;   // next ID handed out by RegisterStubCallbacks
static int localDispatchStubId = -1; // ID of libGLdispatch's own static stubs
84 
/*
 * Track the latest generation of the dispatch stub list so that vendor
 * libraries can determine when their copies of the stub offsets need to
 * be updated. Bumped whenever the stub list changes.
 *
 * Note: wrapping is theoretically an issue here, but encountering this
 * situation would require loading and unloading an API library that registers
 * its entrypoints with GLdispatch 2^63-1 times, so it is unlikely to be an
 * issue in practice.
 */
static GLint64 dispatchStubListGeneration;

/*
 * Used when generating new vendor IDs for GLdispatch clients.  Valid vendor
 * IDs must be non-zero. Protected by the dispatch lock.
 */
static int firstUnusedVendorID = 1;
102 
/**
 * The key used to store the __GLdispatchThreadState for the current thread.
 * Created in __glDispatchInit with ThreadDestroyed as the destructor.
 */
static glvnd_key_t threadContextKey;

// Forward declarations for helpers defined later in this file.
static void SetCurrentThreadState(__GLdispatchThreadState *threadState);
static void ThreadDestroyed(void *data);
static int RegisterStubCallbacks(const __GLdispatchStubPatchCallbacks *callbacks);
111 
112 
113 /*
114  * The vendor ID of the current "owner" of the entrypoint code.  0 if
115  * we are using the default libglvnd stubs.
116  */
117 static int stubOwnerVendorID;
118 
119 /*
120  * The current set of patch callbacks being used, or NULL if using the
121  * default libglvnd entrypoints.
122  */
123 static const __GLdispatchPatchCallbacks *stubCurrentPatchCb;
124 
125 static glvnd_thread_t firstThreadId = GLVND_THREAD_NULL_INIT;
126 static int isMultiThreaded = 0;
127 
/*
 * The dispatch lock. This should be taken around any code that manipulates the
 * above global variables or makes calls to _glapi_get_proc_offset() or
 * _glapi_get_proc_address().
 */
struct {
    glvnd_mutex_t lock;
    int isLocked; // nonzero while the lock is held; asserted by CheckDispatchLocked()
} dispatchLock = { GLVND_MUTEX_INITIALIZER, 0 };
137 
// Acquires the global dispatch lock. The isLocked flag is set only after the
// mutex is taken, so CheckDispatchLocked() can only observe it while the lock
// is actually held.
static inline void LockDispatch(void)
{
    __glvndPthreadFuncs.mutex_lock(&dispatchLock.lock);
    dispatchLock.isLocked = 1;
}
143 
// Releases the global dispatch lock. The isLocked flag is cleared before the
// mutex is dropped, mirroring the ordering in LockDispatch().
static inline void UnlockDispatch(void)
{
    dispatchLock.isLocked = 0;
    __glvndPthreadFuncs.mutex_unlock(&dispatchLock.lock);
}

// Asserts (in debug builds) that the caller holds the dispatch lock.
#define CheckDispatchLocked() assert(dispatchLock.isLocked)
151 
// Reports the GLdispatch ABI version that this library was built against.
int __glDispatchGetABIVersion(void)
{
    return GLDISPATCH_ABI_VERSION;
}
156 
/*
 * Library load-time initializer. Depending on the build configuration it is
 * either an ELF constructor or the legacy _init entry point.
 */
#if defined(USE_ATTRIBUTE_CONSTRUCTOR)
void __attribute__ ((constructor)) __glDispatchOnLoadInit(void)
#else
void _init(void)
#endif
{
    // Here, we only initialize the pthreads imports. Everything else we'll
    // deal with in __glDispatchInit.
    glvndSetupPthreads();
    glvndAppErrorCheckInit();
}
168 
__glDispatchInit(void)169 void __glDispatchInit(void)
170 {
171     LockDispatch();
172 
173     if (clientRefcount == 0) {
174         // Initialize the GLAPI layer.
175         _glapi_init();
176         __glvndPthreadFuncs.key_create(&threadContextKey, ThreadDestroyed);
177 
178         glvnd_list_init(&extProcList);
179         glvnd_list_init(&currentDispatchList);
180         glvnd_list_init(&dispatchStubList);
181 
182         // Register GLdispatch's static entrypoints for rewriting
183         localDispatchStubId = RegisterStubCallbacks(stub_get_patch_callbacks());
184     }
185 
186     clientRefcount++;
187     UnlockDispatch();
188 }
189 
__glDispatchNewVendorID(void)190 int __glDispatchNewVendorID(void)
191 {
192     int vendorID;
193 
194     LockDispatch();
195     vendorID = firstUnusedVendorID++;
196     UnlockDispatch();
197 
198     return vendorID;
199 }
200 
// Placeholder entrypoint installed in dispatch tables for functions the
// vendor does not provide, so calling through the table never jumps to NULL.
static void noop_func(void)
{
    // nop
}
205 
/*
 * Records that one more thread is using this dispatch table as its current
 * table. The table joins currentDispatchList when its count goes 0 -> 1.
 * The caller must hold the dispatch lock.
 */
static void DispatchCurrentRef(__GLdispatchTable *table)
{
    CheckDispatchLocked();
    if (++table->currentThreads == 1) {
        glvnd_list_add(&table->entry, &currentDispatchList);
    }
}
214 
/*
 * Drops one thread's reference on a current dispatch table, removing it from
 * currentDispatchList when the last reference goes away. The caller must
 * hold the dispatch lock.
 */
static void DispatchCurrentUnref(__GLdispatchTable *table)
{
    CheckDispatchLocked();
    if (--table->currentThreads == 0) {
        glvnd_list_del(&table->entry);
    }
    assert(table->currentThreads >= 0);
}
224 
225 /*
226  * Fix up a dispatch table. Calls to this function must be protected by the
227  * dispatch lock.
228  */
FixupDispatchTable(__GLdispatchTable * dispatch)229 static GLboolean FixupDispatchTable(__GLdispatchTable *dispatch)
230 {
231     DBG_PRINTF(20, "dispatch=%p\n", dispatch);
232     CheckDispatchLocked();
233 
234     void **tbl;
235     int count = _glapi_get_stub_count();
236     int i;
237 
238     if (dispatch->table == NULL) {
239         dispatch->table = (struct _glapi_table *)
240             calloc(1, _glapi_get_dispatch_table_size() * sizeof(void *));
241         if (dispatch->table == NULL) {
242             return GL_FALSE;
243         }
244     }
245 
246     tbl = (void **)dispatch->table;
247     for (i=dispatch->stubsPopulated; i<count; i++) {
248         const char *name = _glapi_get_proc_name(i);
249         void *procAddr;
250 
251         assert(name != NULL);
252 
253         procAddr = (void*)(*dispatch->getProcAddress)(
254             name, dispatch->getProcAddressParam);
255         tbl[i] = procAddr ? procAddr : (void *)noop_func;
256     }
257     dispatch->stubsPopulated = count;
258 
259     return GL_TRUE;
260 }
261 
/**
 * Returns the dispatch-stub address for the named function, generating a new
 * stub if one doesn't exist yet. If a new stub was generated, every currently
 * bound dispatch table is updated to cover it.
 *
 * \param procName The name of the function to look up.
 * \return The entrypoint stub address, or NULL if one couldn't be created.
 */
PUBLIC __GLdispatchProc __glDispatchGetProcAddress(const char *procName)
{
    int prevCount;
    _glapi_proc addr;

    /*
     * We need to lock the dispatch before calling into glapi in order to
     * prevent races when retrieving the entrypoint stub.
     */
    LockDispatch();
    prevCount = _glapi_get_stub_count();
    addr = _glapi_get_proc_address(procName);
    // A changed stub count means a brand-new stub was generated for this
    // lookup, so the current tables are missing an entry for it.
    if (addr != NULL && prevCount != _glapi_get_stub_count()) {
        __GLdispatchTable *curDispatch;

        /*
         * Fixup any current dispatch tables to contain the right pointer
         * to this proc.
         */
        glvnd_list_for_each_entry(curDispatch, &currentDispatchList, entry) {
            // Sanity check: Every current dispatch table must have already
            // been allocated. That's important because it means
            // FixupDispatchTable can't fail.
            assert(curDispatch->table != NULL);
            FixupDispatchTable(curDispatch);
        }
    }
    UnlockDispatch();

    return addr;
}
293 
__glDispatchCreateTable(__GLgetProcAddressCallback getProcAddress,void * param)294 PUBLIC __GLdispatchTable *__glDispatchCreateTable(
295         __GLgetProcAddressCallback getProcAddress, void *param)
296 {
297     __GLdispatchTable *dispatch = calloc(1, sizeof(__GLdispatchTable));
298     if (dispatch == NULL) {
299         return NULL;
300     }
301 
302     dispatch->getProcAddress = getProcAddress;
303     dispatch->getProcAddressParam = param;
304 
305     return dispatch;
306 }
307 
/**
 * Frees a dispatch table created by __glDispatchCreateTable, including its
 * lazily-allocated low-level table.
 */
PUBLIC void __glDispatchDestroyTable(__GLdispatchTable *dispatch)
{
    /*
     * XXX: Technically, dispatch->currentThreads should be 0 if we're calling
     * into this function, but buggy apps may unload libGLX without losing
     * current, in which case this won't be true when the dispatch table
     * is destroyed.
     */
    LockDispatch();
    free(dispatch->table);
    free(dispatch);
    UnlockDispatch();
}
321 
CurrentEntrypointsSafeToUse(int vendorID)322 static int CurrentEntrypointsSafeToUse(int vendorID)
323 {
324     CheckDispatchLocked();
325     return !stubOwnerVendorID || (vendorID == stubOwnerVendorID);
326 }
327 
PatchingIsDisabledByEnvVar(void)328 static inline int PatchingIsDisabledByEnvVar(void)
329 {
330     static GLboolean inited = GL_FALSE;
331     static GLboolean disallowPatch = GL_FALSE;
332 
333     CheckDispatchLocked();
334 
335     if (!inited) {
336         char *disallowPatchStr = getenv("__GLVND_DISALLOW_PATCHING");
337         if (disallowPatchStr) {
338             disallowPatch = atoi(disallowPatchStr);
339         } else if (glvndAppErrorCheckGetEnabled()) {
340             // Entrypoint rewriting means skipping the dispatch table in
341             // libGLdispatch, which would disable checking for calling OpenGL
342             // functions without a context.
343             disallowPatch = GL_TRUE;
344         }
345         inited = GL_TRUE;
346     }
347 
348     return disallowPatch;
349 }
350 
ContextIsCurrentInAnyOtherThread(void)351 static inline int ContextIsCurrentInAnyOtherThread(void)
352 {
353     int thisThreadsContext = !!__glDispatchGetCurrentThreadState();
354     int otherContexts;
355 
356     CheckDispatchLocked();
357 
358     otherContexts = (numCurrentContexts - thisThreadsContext);
359     assert(otherContexts >= 0);
360 
361     return !!otherContexts;
362 }
363 
PatchingIsSafe(void)364 static int PatchingIsSafe(void)
365 {
366     CheckDispatchLocked();
367 
368     /*
369      * Can only patch entrypoints on supported TLS access models
370      */
371     if (glvnd_list_is_empty(&dispatchStubList)) {
372         return 0;
373     }
374 
375     if (PatchingIsDisabledByEnvVar()) {
376         return 0;
377     }
378 
379     if (ContextIsCurrentInAnyOtherThread()) {
380         return 0;
381     }
382 
383     return 1;
384 }
385 
/**
 * One registered set of entrypoint stubs, kept on dispatchStubList.
 */
typedef struct __GLdispatchStubCallbackRec {
    /// The patching callbacks supplied by the stub set's owner.
    __GLdispatchStubPatchCallbacks callbacks;
    /// Unique ID assigned at registration time.
    int id;
    /// GL_TRUE while these stubs are patched by a vendor library.
    GLboolean isPatched;

    /// Node in dispatchStubList.
    struct glvnd_list entry;
} __GLdispatchStubCallback;
393 
394 /**
395  * Does the same thing as __glDispatchRegisterStubCallbacks, but requires the
396  * caller to already be holding the dispatch lock.
397  *
398  * This is used in __glDispatchInit to register the libGLdispatch's own stub
399  * functions.
400  */
RegisterStubCallbacks(const __GLdispatchStubPatchCallbacks * callbacks)401 int RegisterStubCallbacks(const __GLdispatchStubPatchCallbacks *callbacks)
402 {
403     if (callbacks == NULL) {
404         return -1;
405     }
406 
407     __GLdispatchStubCallback *stub = malloc(sizeof(*stub));
408     if (stub == NULL) {
409         return -1;
410     }
411 
412     memcpy(&stub->callbacks, callbacks, sizeof(__GLdispatchStubPatchCallbacks));
413     stub->isPatched = GL_FALSE;
414 
415     stub->id = nextDispatchStubID++;
416     glvnd_list_add(&stub->entry, &dispatchStubList);
417     dispatchStubListGeneration++;
418 
419     return stub->id;
420 }
421 
/**
 * Registers a set of entrypoint stubs for rewriting. This is the public,
 * self-locking wrapper around RegisterStubCallbacks.
 *
 * \return A unique stub ID, or -1 on failure.
 */
int __glDispatchRegisterStubCallbacks(const __GLdispatchStubPatchCallbacks *callbacks)
{
    int stubId;

    LockDispatch();
    stubId = RegisterStubCallbacks(callbacks);
    UnlockDispatch();

    return stubId;
}
430 
__glDispatchUnregisterStubCallbacks(int stubId)431 void __glDispatchUnregisterStubCallbacks(int stubId)
432 {
433     __GLdispatchStubCallback *curStub, *tmpStub;
434     if (stubId < 0) {
435         return;
436     }
437 
438     LockDispatch();
439 
440     glvnd_list_for_each_entry_safe(curStub, tmpStub, &dispatchStubList, entry) {
441         if (curStub->id == stubId) {
442             glvnd_list_del(&curStub->entry);
443             free(curStub);
444             break;
445         }
446     }
447 
448     dispatchStubListGeneration++;
449     UnlockDispatch();
450 }
451 
UnregisterAllStubCallbacks(void)452 void UnregisterAllStubCallbacks(void)
453 {
454     __GLdispatchStubCallback *curStub, *tmpStub;
455     CheckDispatchLocked();
456 
457     glvnd_list_for_each_entry_safe(curStub, tmpStub, &dispatchStubList, entry) {
458         glvnd_list_del(&curStub->entry);
459         free(curStub);
460     }
461 
462     dispatchStubListGeneration++;
463 }
464 
465 
/*
 * Attempt to patch entrypoints with the given patch function and vendor ID.
 * If the function pointers are NULL, then this attempts to restore the default
 * libglvnd entrypoints.
 *
 * The caller must hold the dispatch lock.
 *
 * \param patchCb  The vendor's patch callbacks, or NULL to restore the
 *                 default stubs.
 * \param vendorID ID of the vendor that will own the patched entrypoints.
 * \param force    If GL_TRUE, skip the PatchingIsSafe() check and the old
 *                 owner's releasePatch callback (used on library unload).
 *
 * Returns 1 on success, 0 on failure.
 */
static int PatchEntrypoints(
   const __GLdispatchPatchCallbacks *patchCb,
   int vendorID,
   GLboolean force
)
{
    __GLdispatchStubCallback *stub;
    CheckDispatchLocked();

    if (!force && !PatchingIsSafe()) {
        return 0;
    }

    if (patchCb == stubCurrentPatchCb) {
        // Entrypoints already using the requested patch; no need to do anything
        return 1;
    }

    if (stubCurrentPatchCb) {
        // Notify the previous vendor that it no longer owns these
        // entrypoints. If this is being called from a library unload,
        // though, then skip the callback, because the vendor may have
        // already been unloaded.
        if (stubCurrentPatchCb->releasePatch != NULL && !force) {
            stubCurrentPatchCb->releasePatch();
        }

        // Restore the stubs to the default implementation.
        glvnd_list_for_each_entry(stub, &dispatchStubList, entry) {
            if (stub->isPatched) {
                stub->callbacks.restoreFuncs();
                stub->isPatched = GL_FALSE;
            }
        }

        // At this point we're back on the default stubs with no owner.
        stubCurrentPatchCb = NULL;
        stubOwnerVendorID = 0;
    }

    if (patchCb) {
        GLboolean anySuccess = GL_FALSE;

        // Try to patch each registered stub set. A set is only patched if
        // the vendor supports its type/size and the start/initiate/finish
        // sequence completes; otherwise the patch attempt is aborted.
        glvnd_list_for_each_entry(stub, &dispatchStubList, entry) {
            if (patchCb->isPatchSupported(stub->callbacks.getStubType(),
                        stub->callbacks.getStubSize()))
            {
                if (stub->callbacks.startPatch()) {
                    if (patchCb->initiatePatch(stub->callbacks.getStubType(),
                                stub->callbacks.getStubSize(),
                                stub->callbacks.getPatchOffset)) {
                        stub->callbacks.finishPatch();
                        stub->isPatched = GL_TRUE;
                        anySuccess = GL_TRUE;
                    } else {
                        stub->callbacks.abortPatch();
                        stub->isPatched = GL_FALSE;
                    }
                }
            } else if (stub->isPatched) {
                // The vendor library can't patch these stubs, but they were
                // patched before. Restore them now.
                stub->callbacks.restoreFuncs();
                stub->isPatched = GL_FALSE;
            }
        }

        // The vendor becomes the owner only if it patched at least one
        // stub set.
        if (anySuccess) {
            stubCurrentPatchCb = patchCb;
            stubOwnerVendorID = vendorID;
        } else {
            stubCurrentPatchCb = NULL;
            stubOwnerVendorID = 0;
        }
    }

    return 1;
}
550 
/**
 * Makes a context (thread state + dispatch table) current on the calling
 * thread. Optionally patches the entrypoints for the given vendor.
 *
 * The thread must not already have a current API state.
 *
 * \param threadState The API state to bind to this thread.
 * \param dispatch The dispatch table to make current.
 * \param vendorID The ID of the vendor that owns this context.
 * \param patchCb Optional entrypoint patch callbacks, or NULL.
 * \return GL_TRUE on success, GL_FALSE on allocation failure, if a state is
 * already current, or if the patched entrypoints belong to another vendor.
 */
PUBLIC GLboolean __glDispatchMakeCurrent(__GLdispatchThreadState *threadState,
                                         __GLdispatchTable *dispatch,
                                         int vendorID,
                                         const __GLdispatchPatchCallbacks *patchCb)
{
    __GLdispatchThreadStatePrivate *priv;

    if (__glDispatchGetCurrentThreadState() != NULL) {
        assert(!"__glDispatchMakeCurrent called with a current API state\n");
        return GL_FALSE;
    }

    // Allocate before taking the lock; freed on every failure path below.
    priv = (__GLdispatchThreadStatePrivate *) malloc(sizeof(__GLdispatchThreadStatePrivate));
    if (priv == NULL) {
        return GL_FALSE;
    }

    // We need to fix up the dispatch table if it hasn't been
    // initialized, or there are new dynamic entries which were
    // added since the last time make current was called.
    LockDispatch();

    // Patch if necessary
    PatchEntrypoints(patchCb, vendorID, GL_FALSE);

    // If the current entrypoints are unsafe to use with this vendor, bail out.
    if (!CurrentEntrypointsSafeToUse(vendorID)) {
        UnlockDispatch();
        free(priv);
        return GL_FALSE;
    }

    if (!FixupDispatchTable(dispatch)) {
        UnlockDispatch();
        free(priv);
        return GL_FALSE;
    }

    // Success is now guaranteed; account for the new current table/context.
    DispatchCurrentRef(dispatch);
    numCurrentContexts++;

    UnlockDispatch();

    /*
     * Update the API state with the new values.
     */
    priv->dispatch = dispatch;
    priv->vendorID = vendorID;
    priv->threadState = threadState;
    threadState->priv = priv;

    /*
     * Set the current state in TLS.
     */
    SetCurrentThreadState(threadState);
    _glapi_set_current(dispatch->table);

    return GL_TRUE;
}
610 
/*
 * Common implementation for losing the current context, used both by
 * __glDispatchLoseCurrent and by the TLS destructor ThreadDestroyed.
 *
 * \param curThreadState The state being released; may be NULL.
 * \param threadDestroyed GL_TRUE when called from the thread's TLS
 * destructor, in which case the TLS slots must not be touched again.
 */
static void LoseCurrentInternal(__GLdispatchThreadState *curThreadState,
        GLboolean threadDestroyed)
{
    LockDispatch();
    // Note that we don't try to restore the default stubs here. Chances are,
    // the next MakeCurrent will be from the same vendor, and if we leave them
    // patched, then we won't have to go through the overhead of patching them
    // again.

    if (curThreadState) {
        numCurrentContexts--;
        if (curThreadState->priv != NULL) {
            if (curThreadState->priv->dispatch != NULL) {
                DispatchCurrentUnref(curThreadState->priv->dispatch);
            }

            free(curThreadState->priv);
            curThreadState->priv = NULL;
        }
    }
    UnlockDispatch();

    // Skip the TLS cleanup when the thread is being destroyed: its TLS is
    // already being torn down by pthreads.
    if (!threadDestroyed) {
        SetCurrentThreadState(NULL);
        _glapi_set_current(NULL);
    }
}
638 
__glDispatchLoseCurrent(void)639 PUBLIC void __glDispatchLoseCurrent(void)
640 {
641     __GLdispatchThreadState *curThreadState = __glDispatchGetCurrentThreadState();
642     if (curThreadState == NULL) {
643         return;
644     }
645     LoseCurrentInternal(curThreadState, GL_FALSE);
646 }
647 
__glDispatchForceUnpatch(int vendorID)648 PUBLIC GLboolean __glDispatchForceUnpatch(int vendorID)
649 {
650     GLboolean ret = GL_FALSE;
651 
652     LockDispatch();
653     if (stubCurrentPatchCb != NULL && stubOwnerVendorID == vendorID) {
654         /*
655          * The vendor library with the patch callbacks is about to be unloaded,
656          * so we need to unpatch the entrypoints even if there's a current
657          * context on another thread.
658          *
659          * If a buggy application is trying to call an OpenGL function on
660          * another thread, then we're going to run into problems, but in that
661          * case, it's just as likely that the other thread would be somewhere
662          * in the vendor library itself.
663          */
664         PatchEntrypoints(NULL, 0, GL_TRUE);
665         ret = GL_TRUE;
666     }
667     UnlockDispatch();
668 
669     return ret;
670 }
671 
// Returns the calling thread's current API state from TLS, or NULL if the
// thread has nothing current.
__GLdispatchThreadState *__glDispatchGetCurrentThreadState(void)
{
    return (__GLdispatchThreadState *) __glvndPthreadFuncs.getspecific(threadContextKey);
}
676 
// Stores the given API state (or NULL) in the calling thread's TLS slot.
void SetCurrentThreadState(__GLdispatchThreadState *threadState)
{
    __glvndPthreadFuncs.setspecific(threadContextKey, threadState);
}
681 
/*
 * Handles resetting GLdispatch state after a fork. The child inherits the
 * parent's globals but only one thread, so all "current" bookkeeping from
 * other threads must be discarded.
 */
void __glDispatchReset(void)
{
    __GLdispatchTable *cur, *tmp;

    /* Reset the dispatch lock, which may have been held at fork time */
    __glvndPthreadFuncs.mutex_init(&dispatchLock.lock, NULL);
    dispatchLock.isLocked = 0;

    LockDispatch();
    /*
     * Clear out the current dispatch list.
     */

    glvnd_list_for_each_entry_safe(cur, tmp, &currentDispatchList, entry) {
        cur->currentThreads = 0;
        glvnd_list_del(&cur->entry);
    }
    UnlockDispatch();

    /* Clear GLAPI TLS entries. */
    SetCurrentThreadState(NULL);
    _glapi_set_current(NULL);
}
708 
709 /*
710  * Handles cleanup on library unload.
711  */
__glDispatchFini(void)712 void __glDispatchFini(void)
713 {
714     LockDispatch();
715 
716     if (clientRefcount <= 0) {
717         assert(clientRefcount > 0);
718         UnlockDispatch();
719         return;
720     }
721 
722     clientRefcount--;
723 
724     if (clientRefcount == 0) {
725         /* This frees the dispatchStubList */
726         UnregisterAllStubCallbacks();
727 
728         __glvndPthreadFuncs.key_delete(threadContextKey);
729 
730         // Clean up GLAPI thread state
731         _glapi_destroy();
732     }
733 
734     UnlockDispatch();
735 }
736 
/**
 * Called on entry points to detect when the process becomes multi-threaded
 * and to make sure the calling thread has at least a no-op dispatch table.
 * Switches glapi into multithreaded mode the first time a second distinct
 * thread is seen.
 */
void __glDispatchCheckMultithreaded(void)
{
    if (!__glvndPthreadFuncs.is_singlethreaded)
    {
        // Check to see if the current thread has a dispatch table assigned to
        // it, and if it doesn't, then plug in the no-op table.
        // This is a partial workaround to broken applications that try to call
        // OpenGL functions without a current context, without adding any
        // additional overhead to the dispatch stubs themselves. As long as the
        // thread calls at least one GLX function first, any OpenGL calls will
        // go to the no-op stubs instead of crashing.
        if (_glapi_get_current() == NULL) {
            // Calling _glapi_set_current(NULL) will plug in the no-op table.
            _glapi_set_current(NULL);
        }

        LockDispatch();
        if (!isMultiThreaded) {
            glvnd_thread_t tid = __glvndPthreadFuncs.self();
            // Remember the first thread we see; a different thread later
            // means the app has gone multi-threaded.
            if (__glvndPthreadFuncs.equal(firstThreadId, GLVND_THREAD_NULL)) {
                firstThreadId = tid;
            } else if (!__glvndPthreadFuncs.equal(firstThreadId, tid)) {
                isMultiThreaded = 1;
                _glapi_set_multithread();
            }
        }

        // Give the current patch owner (if any) a chance to set up
        // per-thread state.
        if (stubCurrentPatchCb != NULL && stubCurrentPatchCb->threadAttach != NULL) {
            stubCurrentPatchCb->threadAttach();
        }
        UnlockDispatch();
    }
}
770 
ThreadDestroyed(void * data)771 void ThreadDestroyed(void *data)
772 {
773     if (data != NULL) {
774         __GLdispatchThreadState *threadState = (__GLdispatchThreadState *) data;
775         LoseCurrentInternal(threadState, GL_TRUE);
776 
777         if (threadState->threadDestroyedCallback != NULL) {
778             threadState->threadDestroyedCallback(threadState);
779         }
780     }
781 }
782 
783