1 /*
2  * object_pool.c :  generic pool of reference-counted objects
3  *
4  * ====================================================================
5  *    Licensed to the Apache Software Foundation (ASF) under one
6  *    or more contributor license agreements.  See the NOTICE file
7  *    distributed with this work for additional information
8  *    regarding copyright ownership.  The ASF licenses this file
9  *    to you under the Apache License, Version 2.0 (the
10  *    "License"); you may not use this file except in compliance
11  *    with the License.  You may obtain a copy of the License at
12  *
13  *      http://www.apache.org/licenses/LICENSE-2.0
14  *
15  *    Unless required by applicable law or agreed to in writing,
16  *    software distributed under the License is distributed on an
17  *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
18  *    KIND, either express or implied.  See the License for the
19  *    specific language governing permissions and limitations
20  *    under the License.
21  * ====================================================================
22  */
23 
24 
25 
26 
27 #include <assert.h>
28 
29 #include "svn_error.h"
30 #include "svn_hash.h"
31 #include "svn_pools.h"
32 
33 #include "private/svn_atomic.h"
34 #include "private/svn_object_pool.h"
35 #include "private/svn_subr_private.h"
36 #include "private/svn_dep_compat.h"
37 
38 
39 
/* A reference counting wrapper around the user-provided object.
 *
 * Instances are handed out by the pool's lookup / insert functions and
 * automatically released through a pool cleanup (see object_ref_cleanup).
 */
typedef struct object_ref_t
{
  /* reference to the parent container */
  svn_object_pool__t *object_pool;

  /* identifies the bucket in OBJECT_POOL->OBJECTS in which this entry
   * belongs.  Owns its own copy of the key data (see insert()). */
  svn_membuf_t key;

  /* User provided object. Usually a wrapper. */
  void *object;

  /* private pool. This instance and its other members got allocated in it.
   * Will be destroyed when this instance is cleaned up. */
  apr_pool_t *pool;

  /* Number of references to this data struct.
   * 0 means the entry is unused and eligible for eviction. */
  volatile svn_atomic_t ref_count;
} object_ref_t;
61 
62 
/* Core data structure.  All access to it must be serialized using MUTEX.
 */
struct svn_object_pool__t
{
  /* serialization object for all non-atomic data in this struct */
  svn_mutex__t *mutex;

  /* object_ref_t.KEY -> object_ref_t* mapping.
   *
   * In shared object mode, there is at most one such entry per key and it
   * may or may not be in use.  In exclusive mode, only unused references
   * will be put here and they form chains if there are multiple unused
   * instances for the key. */
  apr_hash_t *objects;

  /* same as objects->count but allows for non-sync'ed access */
  volatile svn_atomic_t object_count;

  /* Number of entries in OBJECTS with a reference count 0.
     Due to races, this may be *temporarily* off by one or more.
     Hence we must not strictly depend on it.
     (Used as a heuristic trigger for remove_unused_objects.) */
  volatile svn_atomic_t unused_count;

  /* the root pool owning this structure */
  apr_pool_t *pool;
};
89 
90 
91 /* Pool cleanup function for the whole object pool.
92  */
93 static apr_status_t
object_pool_cleanup(void * baton)94 object_pool_cleanup(void *baton)
95 {
96   svn_object_pool__t *object_pool = baton;
97 
98   /* all entries must have been released up by now */
99   SVN_ERR_ASSERT_NO_RETURN(   object_pool->object_count
100                            == object_pool->unused_count);
101 
102   return APR_SUCCESS;
103 }
104 
105 /* Remove entries from OBJECTS in OBJECT_POOL that have a ref-count of 0.
106  *
107  * Requires external serialization on OBJECT_POOL.
108  */
109 static void
remove_unused_objects(svn_object_pool__t * object_pool)110 remove_unused_objects(svn_object_pool__t *object_pool)
111 {
112   apr_pool_t *subpool = svn_pool_create(object_pool->pool);
113 
114   /* process all hash buckets */
115   apr_hash_index_t *hi;
116   for (hi = apr_hash_first(subpool, object_pool->objects);
117        hi != NULL;
118        hi = apr_hash_next(hi))
119     {
120       object_ref_t *object_ref = apr_hash_this_val(hi);
121 
122       /* note that we won't hand out new references while access
123          to the hash is serialized */
124       if (svn_atomic_read(&object_ref->ref_count) == 0)
125         {
126           apr_hash_set(object_pool->objects, object_ref->key.data,
127                        object_ref->key.size, NULL);
128           svn_atomic_dec(&object_pool->object_count);
129           svn_atomic_dec(&object_pool->unused_count);
130 
131           svn_pool_destroy(object_ref->pool);
132         }
133     }
134 
135   svn_pool_destroy(subpool);
136 }
137 
138 /* Cleanup function called when an object_ref_t gets released.
139  */
140 static apr_status_t
object_ref_cleanup(void * baton)141 object_ref_cleanup(void *baton)
142 {
143   object_ref_t *object = baton;
144   svn_object_pool__t *object_pool = object->object_pool;
145 
146   /* If we released the last reference to object, there is one more
147      unused entry.
148 
149      Note that unused_count does not need to be always exact but only
150      needs to become exact *eventually* (we use it to check whether we
151      should remove unused objects every now and then).  I.e. it must
152      never drift off / get stuck but always reflect the true value once
153      all threads left the racy sections.
154    */
155   if (svn_atomic_dec(&object->ref_count) == 0)
156     svn_atomic_inc(&object_pool->unused_count);
157 
158   return APR_SUCCESS;
159 }
160 
/* Handle reference counting for the OBJECT_REF that the caller is about
 * to return.  The reference will be released when POOL gets cleaned up.
 *
 * Requires external serialization on OBJECT_REF->OBJECT_POOL.
 */
static void
add_object_ref(object_ref_t *object_ref,
              apr_pool_t *pool)
{
  /* Update ref counter.
     Note that this is racy with object_ref_cleanup; see comment there. */
  if (svn_atomic_inc(&object_ref->ref_count) == 0)
    svn_atomic_dec(&object_ref->object_pool->unused_count);

  /* Make sure the reference gets released automatically.
     Since POOL might be a parent pool of OBJECT_REF->OBJECT_POOL, do
     the reference counting update before destroying any of the pool
     hierarchy - hence a *pre*-cleanup registration. */
  apr_pool_pre_cleanup_register(pool, object_ref, object_ref_cleanup);
}
181 
182 /* Actual implementation of svn_object_pool__lookup.
183  *
184  * Requires external serialization on OBJECT_POOL.
185  */
186 static svn_error_t *
lookup(void ** object,svn_object_pool__t * object_pool,svn_membuf_t * key,apr_pool_t * result_pool)187 lookup(void **object,
188        svn_object_pool__t *object_pool,
189        svn_membuf_t *key,
190        apr_pool_t *result_pool)
191 {
192   object_ref_t *object_ref
193     = apr_hash_get(object_pool->objects, key->data, key->size);
194 
195   if (object_ref)
196     {
197       *object = object_ref->object;
198       add_object_ref(object_ref, result_pool);
199     }
200   else
201     {
202       *object = NULL;
203     }
204 
205   return SVN_NO_ERROR;
206 }
207 
208 /* Actual implementation of svn_object_pool__insert.
209  *
210  * Requires external serialization on OBJECT_POOL.
211  */
212 static svn_error_t *
insert(void ** object,svn_object_pool__t * object_pool,const svn_membuf_t * key,void * item,apr_pool_t * item_pool,apr_pool_t * result_pool)213 insert(void **object,
214        svn_object_pool__t *object_pool,
215        const svn_membuf_t *key,
216        void *item,
217        apr_pool_t *item_pool,
218        apr_pool_t *result_pool)
219 {
220   object_ref_t *object_ref
221     = apr_hash_get(object_pool->objects, key->data, key->size);
222   if (object_ref)
223     {
224       /* Destroy the new one and return a reference to the existing one
225        * because the existing one may already have references on it.
226        */
227       svn_pool_destroy(item_pool);
228     }
229   else
230     {
231       /* add new index entry */
232       object_ref = apr_pcalloc(item_pool, sizeof(*object_ref));
233       object_ref->object_pool = object_pool;
234       object_ref->object = item;
235       object_ref->pool = item_pool;
236 
237       svn_membuf__create(&object_ref->key, key->size, item_pool);
238       object_ref->key.size = key->size;
239       memcpy(object_ref->key.data, key->data, key->size);
240 
241       apr_hash_set(object_pool->objects, object_ref->key.data,
242                    object_ref->key.size, object_ref);
243       svn_atomic_inc(&object_pool->object_count);
244 
245       /* the new entry is *not* in use yet.
246        * add_object_ref will update counters again.
247        */
248       svn_atomic_inc(&object_ref->object_pool->unused_count);
249     }
250 
251   /* return a reference to the object we just added */
252   *object = object_ref->object;
253   add_object_ref(object_ref, result_pool);
254 
255   /* limit memory usage */
256   if (svn_atomic_read(&object_pool->unused_count) * 2
257       > apr_hash_count(object_pool->objects) + 2)
258     remove_unused_objects(object_pool);
259 
260   return SVN_NO_ERROR;
261 }
262 
263 
264 /* API implementation */
265 
svn_error_t *
svn_object_pool__create(svn_object_pool__t **object_pool,
                        svn_boolean_t thread_safe,
                        apr_pool_t *pool)
{
  svn_object_pool__t *result;

  /* Allocate the container and (if THREAD_SAFE) its mutex in POOL;
   * the pool also owns all future item sub-pools. */
  result = apr_pcalloc(pool, sizeof(*result));
  SVN_ERR(svn_mutex__init(&result->mutex, thread_safe, pool));

  result->pool = pool;
  result->objects = svn_hash__make(result->pool);

  /* Make sure we clean up nicely: when POOL goes away, verify that all
   * handed-out references have been released (see object_pool_cleanup).
   */
  apr_pool_cleanup_register(pool, result, object_pool_cleanup,
                            apr_pool_cleanup_null);

  *object_pool = result;
  return SVN_NO_ERROR;
}
295 
296 apr_pool_t *
svn_object_pool__new_item_pool(svn_object_pool__t * object_pool)297 svn_object_pool__new_item_pool(svn_object_pool__t *object_pool)
298 {
299   return svn_pool_create(object_pool->pool);
300 }
301 
svn_error_t *
svn_object_pool__lookup(void **object,
                        svn_object_pool__t *object_pool,
                        svn_membuf_t *key,
                        apr_pool_t *result_pool)
{
  /* Default to "not found"; lookup() overwrites this on a hit. */
  *object = NULL;

  /* All hash access must be serialized on the pool's mutex. */
  SVN_MUTEX__WITH_LOCK(object_pool->mutex,
                       lookup(object, object_pool, key, result_pool));
  return SVN_NO_ERROR;
}
313 
svn_error_t *
svn_object_pool__insert(void **object,
                        svn_object_pool__t *object_pool,
                        const svn_membuf_t *key,
                        void *item,
                        apr_pool_t *item_pool,
                        apr_pool_t *result_pool)
{
  /* Default result; insert() always overwrites this on success. */
  *object = NULL;

  /* All hash access must be serialized on the pool's mutex. */
  SVN_MUTEX__WITH_LOCK(object_pool->mutex,
                       insert(object, object_pool, key, item,
                              item_pool, result_pool));
  return SVN_NO_ERROR;
}
328