1 /* epoch.h - epoch based memory reclamation */
2 /* $OpenLDAP$ */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
4  *
5  * Copyright 2018-2021 The OpenLDAP Foundation.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted only as authorized by the OpenLDAP
10  * Public License.
11  *
12  * A copy of this license is available in the file LICENSE in the
13  * top-level directory of the distribution or, alternatively, at
14  * <http://www.OpenLDAP.org/license.html>.
15  */
16 
17 #ifndef __LLOAD_EPOCH_H
18 #define __LLOAD_EPOCH_H
19 
20 /** @file epoch.h
21  *
22  * Implementation of epoch based memory reclamation, in principle
23  * similar to the algorithm presented in
24  * https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
25  */
26 
27 typedef uintptr_t epoch_t;
28 
29 /** @brief A callback function used to free object and associated data */
30 typedef void (dispose_cb)( void *object );
31 
32 /** @brief Initiate global state */
33 void epoch_init( void );
34 
35 /** @brief Finalise global state and free any objects still pending */
36 void epoch_shutdown( void );
37 
38 /** @brief Register thread as active
39  *
40  * In order to safely access managed objects, a thread should call
41  * this function or make sure no other thread is running (e.g. config
42  * pause, late shutdown). After calling this, it is guaranteed that no
43  * reachable objects will be freed before all threads have called
44  * `epoch_leave( current_epoch + 1 )` so it is essential that there
45  * is an upper limit to the amount of time between #epoch_join and
46  * corresponding #epoch_leave or the number of unfreed objects might
47  * grow without bounds.
48  *
49  * To simplify locking, memory is only freed when the current epoch
50  * is advanced rather than on leaving it.
51  *
52  * Can be safely called multiple times by the same thread as long as
53  * a matching #epoch_leave() call is made eventually.
54  *
55  * @return The observed epoch, to be passed to #epoch_leave()
56  */
57 epoch_t epoch_join( void );
58 
59 /** @brief Register thread as inactive
60  *
 * A thread should call this after it has finished the work
 * performed since the matching call to #epoch_join(). It is not safe
63  * to keep a local reference to managed objects after this call
64  * unless other precautions have been made to prevent it being
65  * released.
66  *
67  * @param[in] epoch Epoch identifier returned by a previous call to
68  * #epoch_join().
69  */
70 void epoch_leave( epoch_t epoch );
71 
72 /** @brief Return an unreachable object to be freed
73  *
74  * The object should already be unreachable at the point of call and
75  * cb will be invoked when no other thread that could have seen it
76  * is active any more. This happens when we have advanced by two
77  * epochs.
78  *
79  * @param[in] ptr Object to be released/freed
80  * @param[in] cb Callback to invoke when safe to do so
81  */
82 void epoch_append( void *ptr, dispose_cb *cb );
83 
84 /**
85  * \defgroup Reference counting helpers
86  */
87 /**@{*/
88 
89 /** @brief Acquire a reference if possible
90  *
 * Atomically, check that the reference count is non-zero and
 * increment it if so. Returns the old reference count.
93  *
94  * @param[in] refp Pointer to a reference counter
95  * @return 0 if reference was already zero, non-zero if reference
96  * count was successfully incremented
97  */
98 int acquire_ref( uintptr_t *refp );
99 
100 /** @brief Check reference count and try to decrement
101  *
 * Atomically, decrement the reference count if non-zero and register
 * the object for disposal if it drops to zero. Returns the previous
 * reference count.
104  *
105  * @param[in] refp Pointer to a reference counter
106  * @param[in] object The managed object
107  * @param[in] cb Callback to invoke when safe to do so
108  * @return 0 if reference was already zero, non-zero if reference
109  * count was non-zero at the time of call
110  */
111 int try_release_ref( uintptr_t *refp, void *object, dispose_cb *cb );
112 
/** @brief Read reference count
 *
 * Performs an atomic load of the object's reference counter using
 * acquire memory ordering. A non-zero result only means the object
 * was alive at the instant of the load; the count may change again
 * immediately afterwards unless the caller holds its own reference
 * or is otherwise protected (e.g. inside an epoch).
 *
 * @param[in] object Pointer to the managed object
 * @param[in] ref_field Member where reference count is stored in
 * the object
 * @return Current value of reference counter
 */
#define IS_ALIVE( object, ref_field ) \
    __atomic_load_n( &(object)->ref_field, __ATOMIC_ACQUIRE )
122 
/** @brief Release reference
 *
 * A cheaper alternative to #try_release_ref(), safe only when we know
 * the reference count was already non-zero (asserted in debug builds).
 * When the count drops to zero, the object is handed to
 * #epoch_append() to be disposed of with `cb` once it is safe.
 *
 * The `object` argument is evaluated exactly once, so expressions
 * with side effects are safe to pass. `__typeof__` is a GNU
 * extension, but this header already requires the GNU `__atomic`
 * builtins.
 *
 * @param[in] object The managed object
 * @param[in] ref_field Member where reference count is stored in
 * the object
 * @param[in] cb Callback to invoke when safe to do so
 */
#define RELEASE_REF( object, ref_field, cb ) \
    do { \
        /* Evaluate the macro argument exactly once */ \
        __typeof__( object ) _rr_obj = (object); \
        assert( IS_ALIVE( _rr_obj, ref_field ) ); \
        if ( !__atomic_sub_fetch( \
                     &_rr_obj->ref_field, 1, __ATOMIC_ACQ_REL ) ) { \
            epoch_append( _rr_obj, (dispose_cb *)(cb) ); \
        } \
    } while (0)
141 
142 /**@}*/
143 
144 #endif /* __LLOAD_EPOCH_H */
145