1 #include "mupdf/fitz.h"
2 
3 #include <assert.h>
4 #include <limits.h>
5 #include <stdio.h>
6 #include <string.h>
7 
/* A single store entry: a key/value pair plus the linkage that keeps it
 * in the LRU list and (optionally) in the hash table. */
typedef struct fz_item
{
	void *key;		/* Key; kept/dropped via type->keep_key/drop_key */
	fz_storable *val;	/* Value; the store holds one reference to it */
	size_t size;		/* Size accounted against store->size */
	struct fz_item *next;	/* Next in LRU list; next == item marks "in hash, not yet in list" */
	struct fz_item *prev;	/* Previous in LRU list */
	fz_store *store;	/* Owning store */
	const fz_store_type *type;	/* Type callbacks (hash/cmp/keep/drop/format/reap) */
} fz_item;
18 
/* Every entry in fz_store is protected by the alloc lock */
struct fz_store
{
	int refs;

	/* Every item in the store is kept in a doubly linked list, ordered
	 * by usage (so LRU entries are at the end). */
	fz_item *head;
	fz_item *tail;

	/* We have a hash table that allows to quickly find a subset of the
	 * entries (those whose keys are indirect objects). */
	fz_hash_table *hash;

	/* We keep track of the size of the store, and keep it below max. */
	size_t max;
	size_t size;

	/* While defer_reap_count > 0 reaping is postponed; needs_reaping
	 * records that a reap pass is owed when the count returns to 0. */
	int defer_reap_count;
	int needs_reaping;
	/* Non-zero while scavenge() is running, to block reentrant
	 * scavenging from the drop callbacks. */
	int scavenging;
};
41 
42 void
fz_new_store_context(fz_context * ctx,size_t max)43 fz_new_store_context(fz_context *ctx, size_t max)
44 {
45 	fz_store *store;
46 	store = fz_malloc_struct(ctx, fz_store);
47 	fz_try(ctx)
48 	{
49 		store->hash = fz_new_hash_table(ctx, 4096, sizeof(fz_store_hash), FZ_LOCK_ALLOC, NULL);
50 	}
51 	fz_catch(ctx)
52 	{
53 		fz_free(ctx, store);
54 		fz_rethrow(ctx);
55 	}
56 	store->refs = 1;
57 	store->head = NULL;
58 	store->tail = NULL;
59 	store->size = 0;
60 	store->max = max;
61 	store->defer_reap_count = 0;
62 	store->needs_reaping = 0;
63 	ctx->store = store;
64 }
65 
66 void *
fz_keep_storable(fz_context * ctx,const fz_storable * sc)67 fz_keep_storable(fz_context *ctx, const fz_storable *sc)
68 {
69 	/* Explicitly drop const to allow us to use const
70 	 * sanely throughout the code. */
71 	fz_storable *s = (fz_storable *)sc;
72 
73 	return fz_keep_imp(ctx, s, &s->refs);
74 }
75 
/* Take a new (non-key) reference to a key storable; this is exactly a
 * plain storable keep - the key refcount is untouched. */
void *fz_keep_key_storable(fz_context *ctx, const fz_key_storable *sc)
{
	const fz_storable *storable = &sc->storable;

	return fz_keep_storable(ctx, storable);
}
80 
/*
	Remove every store entry whose type reports (via needs_reap) that
	its key is no longer wanted.

	Entered with FZ_LOCK_ALLOC held.
	Drops FZ_LOCK_ALLOC.
*/
static void
do_reap(fz_context *ctx)
{
	fz_store *store = ctx->store;
	fz_item *item, *prev, *remove;

	/* No store: nothing to do, but we must still honour the
	 * "drops the lock" contract. */
	if (store == NULL)
	{
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		return;
	}

	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);

	ctx->store->needs_reaping = 0;

	FZ_LOG_DUMP_STORE(ctx, "Before reaping store:\n");

	/* Reap the items: unlink each condemned item from the LRU list and
	 * the hash table while the lock is held; defer the actual drop
	 * callbacks until after the lock is released. */
	remove = NULL;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;

		/* Items whose type has no needs_reap callback, or whose
		 * callback answers "no", survive. */
		if (item->type->needs_reap == NULL || item->type->needs_reap(ctx, item->key) == 0)
			continue;

		/* We have to drop it */
		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Store whether to drop this value or not in 'prev' */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;

		/* Store it in our removal chain - just singly linked */
		item->next = remove;
		remove = item;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* Now drop the remove chain */
	for (item = remove; item != NULL; item = remove)
	{
		remove = item->next;

		/* Drop a reference to the value (freeing if required);
		 * item->prev was repurposed above as the "needs drop" flag. */
		if (item->prev)
			item->val->drop(ctx, item->val);

		/* Always drops the key and drop the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
	FZ_LOG_DUMP_STORE(ctx, "After reaping store:\n");
}
160 
/* Drop a (non-key) reference to a key storable. If this leaves only
 * store key references behind, entries keyed on this object can never
 * be found again, so a reap is run (or flagged, if reaping is being
 * deferred). */
void fz_drop_key_storable(fz_context *ctx, const fz_key_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_key_storable *s = (fz_key_storable *)sc;
	int drop;
	int unlock = 1;	/* do_reap releases FZ_LOCK_ALLOC itself; track that */

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	assert(s->storable.refs != 0);
	if (s->storable.refs > 0)
	{
		(void)Memento_dropRef(s);
		drop = --s->storable.refs == 0;
		/* refs == store_key_refs means every remaining reference is a
		 * store key reference: nothing outside the store can reach
		 * this object any more, so reap entries keyed on it.
		 * NOTE(review): ctx->store is dereferenced without a NULL
		 * check here, unlike fz_store_item/fz_find_item - confirm a
		 * store always exists while key storables are live. */
		if (!drop && s->storable.refs == s->store_key_refs)
		{
			if (ctx->store->defer_reap_count > 0)
			{
				ctx->store->needs_reaping = 1;
			}
			else
			{
				do_reap(ctx);
				unlock = 0;
			}
		}
	}
	else
		drop = 0;	/* refs <= 0: statically allocated, never dropped */
	if (unlock)
		fz_unlock(ctx, FZ_LOCK_ALLOC);
	/*
		If we are dropping the last reference to an object, then
		it cannot possibly be in the store (as the store always
		keeps a ref to everything in it, and doesn't drop via
		this method. So we can simply drop the storable object
		itself without any operations on the fz_store.
	 */
	if (drop)
		s->storable.drop(ctx, &s->storable);
}
205 
/* Take a "key" reference to a key storable: bumps both the ordinary
 * refcount and the key refcount, so the store can tell how many of the
 * remaining references are its own key references. */
void *fz_keep_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
{
	/* Cast away const; see fz_keep_storable. */
	fz_key_storable *ks = (fz_key_storable *)sc;

	if (ks == NULL)
		return NULL;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (ks->storable.refs > 0)
	{
		(void)Memento_takeRef(ks);
		ks->storable.refs++;
		ks->store_key_refs++;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return ks;
}
225 
/* Drop a "key" reference to a key storable: undoes
 * fz_keep_key_storable_key, decrementing both refcounts. */
void fz_drop_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
{
	/* Cast away const; see fz_keep_storable. */
	fz_key_storable *ks = (fz_key_storable *)sc;
	int last;

	if (ks == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	/* Key references must be a subset of the normal references. */
	assert(ks->store_key_refs > 0 && ks->storable.refs >= ks->store_key_refs);
	(void)Memento_dropRef(ks);
	last = (--ks->storable.refs == 0);
	ks->store_key_refs--;
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/*
		If that was the very last reference, the object cannot be in
		the store (the store always holds its own reference, and never
		drops via this method), so it is safe to drop the storable
		itself without touching the fz_store.
	 */
	if (last)
		ks->storable.drop(ctx, &ks->storable);
}
252 
/* Remove one item from the store: unaccount its size, unlink it from
 * the LRU list and hash table, then drop its value, key and the item
 * itself. Called with FZ_LOCK_ALLOC held; the lock is dropped while
 * the drop callbacks run and retaken before returning, so callers must
 * not assume the store is unchanged across a call. */
static void
evict(fz_context *ctx, fz_item *item)
{
	fz_store *store = ctx->store;
	int drop;

	store->size -= item->size;
	/* Unlink from the linked list */
	if (item->next)
		item->next->prev = item->prev;
	else
		store->tail = item->prev;
	if (item->prev)
		item->prev->next = item->next;
	else
		store->head = item->next;

	/* Drop a reference to the value (freeing if required) */
	if (item->val->refs > 0)
		(void)Memento_dropRef(item->val);
	drop = (item->val->refs > 0 && --item->val->refs == 0);

	/* Remove from the hash table */
	if (item->type->make_hash_key)
	{
		fz_store_hash hash = { NULL };
		hash.drop = item->val->drop;
		if (item->type->make_hash_key(ctx, &hash, item->key))
			fz_hash_remove(ctx, store->hash, &hash);
	}
	/* The item is now unreachable by anyone else, so the actual drops
	 * can safely run unlocked. */
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	if (drop)
		item->val->drop(ctx, item->val);

	/* Always drops the key and drop the item */
	item->type->drop_key(ctx, item->key);
	fz_free(ctx, item);
	fz_lock(ctx, FZ_LOCK_ALLOC);
}
292 
/* Try to free at least 'tofree' bytes of store space by evicting items
 * that only the store references (val->refs == 1). Called with
 * FZ_LOCK_ALLOC held; the lock is dropped and retaken while the
 * evicted items are destroyed. Returns the number of bytes freed, or 0
 * (evicting nothing) if it is impossible to free enough. */
static size_t
ensure_space(fz_context *ctx, size_t tofree)
{
	fz_item *item, *prev;
	size_t count;
	fz_store *store = ctx->store;
	fz_item *to_be_freed = NULL;

	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);

	/* First check that we *can* free tofree; if not, we'd rather not
	 * cache this. */
	count = 0;
	for (item = store->tail; item; item = item->prev)
	{
		/* refs == 1 means the store holds the only reference, so
		 * this item is evictable. */
		if (item->val->refs == 1)
		{
			count += item->size;
			if (count >= tofree)
				break;
		}
	}

	/* If we ran out of items to search, then we can never free enough */
	if (item == NULL)
	{
		return 0;
	}

	/* Now move all the items to be freed onto 'to_be_freed'. This is
	 * done entirely under the lock, so no one else can find them once
	 * they are unlinked. */
	count = 0;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->val->refs != 1)
			continue;

		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Link into to_be_freed */
		item->next = to_be_freed;
		to_be_freed = item;

		count += item->size;
		if (count >= tofree)
			break;
	}

	/* Now we can safely drop the lock and free our pending items. These
	 * have all been removed from both the store list, and the hash table,
	 * so they can't be 'found' by anyone else in the meantime. */

	while (to_be_freed)
	{
		fz_item *item = to_be_freed;
		int drop;

		to_be_freed = to_be_freed->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		drop = (item->val->refs > 0 && --item->val->refs == 0);

		fz_unlock(ctx, FZ_LOCK_ALLOC);
		if (drop)
			item->val->drop(ctx, item->val);

		/* Always drops the key and drop the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
		fz_lock(ctx, FZ_LOCK_ALLOC);
	}

	return count;
}
388 
389 static void
touch(fz_store * store,fz_item * item)390 touch(fz_store *store, fz_item *item)
391 {
392 	if (item->next != item)
393 	{
394 		/* Already in the list - unlink it */
395 		if (item->next)
396 			item->next->prev = item->prev;
397 		else
398 			store->tail = item->prev;
399 		if (item->prev)
400 			item->prev->next = item->next;
401 		else
402 			store->head = item->next;
403 	}
404 	/* Now relink it at the start of the LRU chain */
405 	item->next = store->head;
406 	if (item->next)
407 		item->next->prev = item;
408 	else
409 		store->tail = item;
410 	store->head = item;
411 	item->prev = NULL;
412 }
413 
/*
	Add an item to the store.

	Returns NULL in the normal case (including when the item simply
	could not be cached - failure here is swallowed, the caller just
	gets no caching). If an equivalent item was already present, a new
	reference to the existing value is returned instead, and the caller
	should use that in place of val.
*/
void *
fz_store_item(fz_context *ctx, void *key, void *val_, size_t itemsize, const fz_store_type *type)
{
	fz_item *item = NULL;
	size_t size;
	fz_storable *val = (fz_storable *)val_;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	/* If we fail for any reason, we swallow the exception and continue.
	 * All that the above program will see is that we failed to store
	 * the item. */

	item = Memento_label(fz_malloc_no_throw(ctx, sizeof (fz_item)), "fz_item");
	if (!item)
		return NULL;
	memset(item, 0, sizeof (fz_item));

	/* Only types that can produce a hash key go into the hash table. */
	if (type->make_hash_key)
	{
		hash.drop = val->drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	type->keep_key(ctx, key);
	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Fill out the item. To start with, we always set item->next == item
	 * and item->prev == item. This is so that we can spot items that have
	 * been put into the hash table without having made it into the linked
	 * list yet. */
	item->key = key;
	item->val = val;
	item->size = itemsize;
	item->next = item;
	item->prev = item;
	item->type = type;

	/* If we can index it fast, put it into the hash table. This serves
	 * to check whether we have one there already. */
	if (use_hash)
	{
		fz_item *existing = NULL;

		fz_try(ctx)
		{
			/* May drop and retake the lock */
			existing = fz_hash_insert(ctx, store->hash, &hash, item);
		}
		fz_catch(ctx)
		{
			/* Any error here means that item never made it into the
			 * hash - so no one else can have a reference. */
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return NULL;
		}
		if (existing)
		{
			/* There was one there already! Take a new reference
			 * to the existing one, and drop our current one. */
			fz_warn(ctx, "found duplicate %s in the store", type->name);
			touch(store, existing);
			if (existing->val->refs > 0)
			{
				(void)Memento_takeRef(existing->val);
				existing->val->refs++;
			}
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return existing->val;
		}
	}

	/* Now bump the ref */
	if (val->refs > 0)
	{
		(void)Memento_takeRef(val);
		val->refs++;
	}

	/* If we haven't got an infinite store, check for space within it */
	if (store->max != FZ_STORE_UNLIMITED)
	{
		/* FIXME: Overflow? */
		size = store->size + itemsize;
		if (size > store->max)
		{
			FZ_LOG_STORE(ctx, "Store size exceeded: item=%zu, size=%zu, max=%zu\n",
				itemsize, store->size, store->max);
			while (size > store->max)
			{
				size_t saved;

				/* First, do any outstanding reaping, even if defer_reap_count > 0 */
				if (store->needs_reaping)
				{
					do_reap(ctx); /* Drops alloc lock */
					fz_lock(ctx, FZ_LOCK_ALLOC);
				}
				/* Reaping (and ensure_space below) may have changed
				 * store->size while the lock was dropped; recompute. */
				size = store->size + itemsize;
				if (size <= store->max)
					break;

				/* ensure_space may drop, then retake the lock */
				saved = ensure_space(ctx, size - store->max);
				size -= saved;
				if (saved == 0)
				{
					/* Failed to free any space. */
					/* We used to 'unstore' it here, but that's wrong.
					 * If we've already spent the memory to malloc it
					 * then not putting it in the store just means that
					 * a resource used multiple times will just be malloced
					 * again. Better to put it in the store, have the
					 * store account for it, and for it to potentially be reused.
					 * When the caller drops the reference to it, it can then
					 * be dropped from the store on the next attempt to store
					 * anything else. */
					break;
				}
			}
			FZ_LOG_DUMP_STORE(ctx, "After eviction:\n");
		}
	}
	store->size += itemsize;

	/* Regardless of whether it's indexed, it goes into the linked list */
	touch(store, item);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
553 
/*
	Look up an item in the store, keyed on (drop, key, type). On a hit,
	the item is promoted to most-recently-used and a new reference to
	its value is returned; otherwise returns NULL (also when there is
	no store or no key).
*/
void *
fz_find_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
{
	fz_item *item;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	if (!key)
		return NULL;

	if (type->make_hash_key)
	{
		hash.drop = drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (use_hash)
	{
		/* We can find objects keyed on indirected objects quickly */
		item = fz_hash_find(ctx, store->hash, &hash);
	}
	else
	{
		/* Others we have to hunt for slowly */
		for (item = store->head; item; item = item->next)
		{
			/* Match on both the drop function (a proxy for the value's
			 * type) and the key comparison. */
			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
				break;
		}
	}
	if (item)
	{
		/* LRU the block. This also serves to ensure that any item
		 * picked up from the hash before it has made it into the
		 * linked list does not get whipped out again due to the
		 * store being full. */
		touch(store, item);
		/* And bump the refcount before returning */
		if (item->val->refs > 0)
		{
			(void)Memento_takeRef(item->val);
			item->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		return (void *)item->val;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
609 
610 void
fz_remove_item(fz_context * ctx,fz_store_drop_fn * drop,void * key,const fz_store_type * type)611 fz_remove_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
612 {
613 	fz_item *item;
614 	fz_store *store = ctx->store;
615 	int dodrop;
616 	fz_store_hash hash = { NULL };
617 	int use_hash = 0;
618 
619 	if (type->make_hash_key)
620 	{
621 		hash.drop = drop;
622 		use_hash = type->make_hash_key(ctx, &hash, key);
623 	}
624 
625 	fz_lock(ctx, FZ_LOCK_ALLOC);
626 	if (use_hash)
627 	{
628 		/* We can find objects keyed on indirect objects quickly */
629 		item = fz_hash_find(ctx, store->hash, &hash);
630 		if (item)
631 			fz_hash_remove(ctx, store->hash, &hash);
632 	}
633 	else
634 	{
635 		/* Others we have to hunt for slowly */
636 		for (item = store->head; item; item = item->next)
637 			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
638 				break;
639 	}
640 	if (item)
641 	{
642 		/* Momentarily things can be in the hash table without being
643 		 * in the list. Don't attempt to unlink these. We indicate
644 		 * such items by setting item->next == item. */
645 		if (item->next != item)
646 		{
647 			if (item->next)
648 				item->next->prev = item->prev;
649 			else
650 				store->tail = item->prev;
651 			if (item->prev)
652 				item->prev->next = item->next;
653 			else
654 				store->head = item->next;
655 		}
656 		if (item->val->refs > 0)
657 			(void)Memento_dropRef(item->val);
658 		dodrop = (item->val->refs > 0 && --item->val->refs == 0);
659 		fz_unlock(ctx, FZ_LOCK_ALLOC);
660 		if (dodrop)
661 			item->val->drop(ctx, item->val);
662 		type->drop_key(ctx, item->key);
663 		fz_free(ctx, item);
664 	}
665 	else
666 		fz_unlock(ctx, FZ_LOCK_ALLOC);
667 }
668 
669 void
fz_empty_store(fz_context * ctx)670 fz_empty_store(fz_context *ctx)
671 {
672 	fz_store *store = ctx->store;
673 
674 	if (store == NULL)
675 		return;
676 
677 	fz_lock(ctx, FZ_LOCK_ALLOC);
678 	/* Run through all the items in the store */
679 	while (store->head)
680 		evict(ctx, store->head); /* Drops then retakes lock */
681 	fz_unlock(ctx, FZ_LOCK_ALLOC);
682 }
683 
684 fz_store *
fz_keep_store_context(fz_context * ctx)685 fz_keep_store_context(fz_context *ctx)
686 {
687 	if (ctx == NULL || ctx->store == NULL)
688 		return NULL;
689 	return fz_keep_imp(ctx, ctx->store, &ctx->store->refs);
690 }
691 
692 void
fz_drop_store_context(fz_context * ctx)693 fz_drop_store_context(fz_context *ctx)
694 {
695 	if (!ctx)
696 		return;
697 	if (fz_drop_imp(ctx, ctx->store, &ctx->store->refs))
698 	{
699 		fz_empty_store(ctx);
700 		fz_drop_hash_table(ctx, ctx->store->hash);
701 		fz_free(ctx, ctx->store);
702 		ctx->store = NULL;
703 	}
704 }
705 
706 static void
fz_debug_store_item(fz_context * ctx,void * state,void * key_,int keylen,void * item_)707 fz_debug_store_item(fz_context *ctx, void *state, void *key_, int keylen, void *item_)
708 {
709 	unsigned char *key = key_;
710 	fz_item *item = item_;
711 	int i;
712 	char buf[256];
713 	fz_output *out = (fz_output *)state;
714 	fz_unlock(ctx, FZ_LOCK_ALLOC);
715 	item->type->format_key(ctx, buf, sizeof buf, item->key);
716 	fz_lock(ctx, FZ_LOCK_ALLOC);
717 	fz_write_printf(ctx, out, "STORE\thash[");
718 	for (i=0; i < keylen; ++i)
719 		fz_write_printf(ctx, out,"%02x", key[i]);
720 	fz_write_printf(ctx, out, "][refs=%d][size=%d] key=%s val=%p\n", item->val->refs, (int)item->size, buf, (void *)item->val);
721 }
722 
/* Dump the store's LRU list and hash table contents to 'out'. Called
 * with FZ_LOCK_ALLOC held. The lock is dropped around each format_key
 * call; the *next* item's value has its refcount bumped first so it
 * cannot be evicted while the lock is released, keeping the iteration
 * safe. */
static void
fz_debug_store_locked(fz_context *ctx, fz_output *out)
{
	fz_item *item, *next;
	char buf[256];
	fz_store *store = ctx->store;
	size_t list_total = 0;	/* independently summed sizes, reported against store->size */

	fz_write_printf(ctx, out, "STORE\t-- resource store contents --\n");

	for (item = store->head; item; item = next)
	{
		next = item->next;
		if (next)
		{
			/* Pin 'next' across the unlocked region below. */
			(void)Memento_takeRef(next->val);
			next->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		item->type->format_key(ctx, buf, sizeof buf, item->key);
		fz_lock(ctx, FZ_LOCK_ALLOC);
		fz_write_printf(ctx, out, "STORE\tstore[*][refs=%d][size=%d] key=%s val=%p\n",
				item->val->refs, (int)item->size, buf, (void *)item->val);
		list_total += item->size;
		if (next)
		{
			(void)Memento_dropRef(next->val);
			next->val->refs--;
		}
	}

	fz_write_printf(ctx, out, "STORE\t-- resource store hash contents --\n");
	fz_hash_for_each(ctx, store->hash, out, fz_debug_store_item);
	fz_write_printf(ctx, out, "STORE\t-- end --\n");

	fz_write_printf(ctx, out, "STORE\tmax=%zu, size=%zu, actual size=%zu\n", store->max, store->size, list_total);
}
760 
/* Public store dump: takes FZ_LOCK_ALLOC around the locked worker. */
void
fz_debug_store(fz_context *ctx, fz_output *out)
{
	fz_lock(ctx, FZ_LOCK_ALLOC);
	fz_debug_store_locked(ctx, out);
	fz_unlock(ctx, FZ_LOCK_ALLOC);
}
768 
769 /*
770 	Consider if we have blocks of the following sizes in the store, from oldest
771 	to newest:
772 
773 	A 32
774 	B 64
775 	C 128
776 	D 256
777 
778 	Further suppose we need to free 97 bytes. Naively freeing blocks until we have
779 	freed enough would mean we'd free A, B and C, when we could have freed just C.
780 
781 	We are forced into an n^2 algorithm by the need to drop the lock as part of the
782 	eviction, so we might as well embrace it and go for a solution that properly
783 	drops just C.
784 
785 	The algorithm used is to scan the list of blocks from oldest to newest, counting
786 	how many bytes we can free in the blocks we pass. We stop this scan when we have
787 	found enough blocks. We then free the largest block. This releases the lock
788 	momentarily, which means we have to start the scan process all over again, so
789 	we repeat. This guarantees we only evict a minimum of blocks, but does mean we
790 	scan more blocks than we'd ideally like.
791  */
/* Evict blocks until at least 'tofree' bytes have been freed, choosing
 * the largest evictable block each pass (see the comment above for the
 * rationale). Called with FZ_LOCK_ALLOC held; evict() drops and
 * retakes it. store->scavenging blocks reentry, since the drop
 * callbacks can come back into the allocator. Returns non-zero if any
 * blocks were evicted. */
static int
scavenge(fz_context *ctx, size_t tofree)
{
	fz_store *store = ctx->store;
	size_t freed = 0;
	fz_item *item;

	if (store->scavenging)
		return 0;

	store->scavenging = 1;

	do
	{
		/* Count through a suffix of objects in the store until
		 * we find enough to give us what we need to evict. */
		size_t suffix_size = 0;
		fz_item *largest = NULL;

		for (item = store->tail; item; item = item->prev)
		{
			/* refs == 1: only the store holds a reference. */
			if (item->val->refs == 1)
			{
				/* This one is evictable */
				suffix_size += item->size;
				if (largest == NULL || item->size > largest->size)
					largest = item;
				if (suffix_size >= tofree - freed)
					break;
			}
		}

		/* If there are no evictable blocks, we can't find anything to free. */
		if (largest == NULL)
			break;

		/* Free largest. */
		if (freed == 0) {
			FZ_LOG_DUMP_STORE(ctx, "Before scavenge:\n");
		}
		freed += largest->size;
		evict(ctx, largest); /* Drops then retakes lock */
	}
	while (freed < tofree);

	if (freed != 0) {
		FZ_LOG_DUMP_STORE(ctx, "After scavenge:\n");
	}
	store->scavenging = 0;
	/* Success is managing to evict any blocks */
	return freed != 0;
}
844 
/* Drop a reference to a storable object, scavenging the store down to
 * size if this might have left the store holding the only reference,
 * and destroying the object when the last reference goes. */
void
fz_drop_storable(fz_context *ctx, const fz_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_storable *s = (fz_storable *)sc;
	int num;

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	/* Drop the ref, and leave num as being the number of
	 * refs left (-1 meaning, "statically allocated"). */
	if (s->refs > 0)
	{
		(void)Memento_dropIntRef(s);
		num = --s->refs;
	}
	else
		num = -1;

	/* If we have just 1 ref left, it's possible that
	 * this ref is held by the store. If the store is
	 * oversized, we ought to throw any such references
	 * away to try to bring the store down to a "legal"
	 * size. Run a scavenge to check for this case. */
	/* NOTE(review): ctx->store is dereferenced here without a NULL
	 * check, unlike fz_store_item/fz_find_item - confirm storables are
	 * never dropped after the store context has been destroyed. */
	if (ctx->store->max != FZ_STORE_UNLIMITED)
		if (num == 1 && ctx->store->size > ctx->store->max)
			scavenge(ctx, ctx->store->size - ctx->store->max);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* If we have no references to an object left, then
	 * it cannot possibly be in the store (as the store always
	 * keeps a ref to everything in it, and doesn't drop via
	 * this method). So we can simply drop the storable object
	 * itself without any operations on the fz_store.
	 */
	if (num == 0)
		s->drop(ctx, s);
}
886 
/* External entry point for scavenging: the internal fz_store_scavenge
 * expects FZ_LOCK_ALLOC to be held, so take it around the call. */
int fz_store_scavenge_external(fz_context *ctx, size_t size, int *phase)
{
	int result;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	result = fz_store_scavenge(ctx, size, phase);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return result;
}
897 
/*
	Attempt to make 'size' bytes of headroom by evicting items from the
	store. '*phase' is progress state owned by the caller: each phase
	tightens the store's target ceiling a step further (phase 16 and
	beyond target an empty store), and *phase is incremented per pass.

	Called with FZ_LOCK_ALLOC held (fz_store_scavenge_external wraps
	this with the lock for outside callers).

	Returns non-zero if any blocks were evicted, 0 otherwise.
*/
int fz_store_scavenge(fz_context *ctx, size_t size, int *phase)
{
	fz_store *store;
	size_t max;

	store = ctx->store;
	if (store == NULL)
		return 0;

#ifdef DEBUG_SCAVENGING
	fz_write_printf(ctx, fz_stdout(ctx), "Scavenging: store=%zu size=%zu phase=%d\n", store->size, size, *phase);
	fz_debug_store_locked(ctx, fz_stdout(ctx));
	Memento_stats();
#endif
	do
	{
		size_t tofree;

		/* Calculate 'max' as the maximum size of the store for this phase */
		if (*phase >= 16)
			max = 0;
		else if (store->max != FZ_STORE_UNLIMITED)
			max = store->max / 16 * (16 - *phase);
		else
			max = store->size / (16 - *phase) * (15 - *phase);
		(*phase)++;

		/* Slightly baroque calculations to avoid overflow. We need to
		 * bring (store->size + size) down to at most 'max': if that
		 * sum overflows, ask for as much as can be expressed; if it
		 * exceeds max, free the difference; otherwise we are already
		 * within this phase's budget, so move on to the next (tighter)
		 * phase. (Previously the last two branches were swapped, which
		 * skipped scavenging when over budget and, when under budget,
		 * computed a wrapped size_t and scavenged almost everything.) */
		if (size > SIZE_MAX - store->size)
			tofree = SIZE_MAX - max;
		else if (size + store->size > max)
			tofree = size + store->size - max;
		else
			continue;

		if (scavenge(ctx, tofree))
		{
#ifdef DEBUG_SCAVENGING
			fz_write_printf(ctx, fz_stdout(ctx), "scavenged: store=%zu\n", store->size);
			fz_debug_store(ctx, fz_stdout(ctx));
			Memento_stats();
#endif
			return 1;
		}
	}
	while (max > 0);

#ifdef DEBUG_SCAVENGING
	fz_write_printf(ctx, fz_stdout(ctx), "scavenging failed\n");
	fz_debug_store(ctx, fz_stdout(ctx));
	Memento_listBlocks();
#endif
	return 0;
}
952 
953 int
fz_shrink_store(fz_context * ctx,unsigned int percent)954 fz_shrink_store(fz_context *ctx, unsigned int percent)
955 {
956 	int success;
957 	fz_store *store;
958 	size_t new_size;
959 
960 	if (percent >= 100)
961 		return 1;
962 
963 	store = ctx->store;
964 	if (store == NULL)
965 		return 0;
966 
967 #ifdef DEBUG_SCAVENGING
968 	fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store: %zu\n", store->size/(1024*1024));
969 #endif
970 	fz_lock(ctx, FZ_LOCK_ALLOC);
971 
972 	new_size = (size_t)(((uint64_t)store->size * percent) / 100);
973 	if (store->size > new_size)
974 		scavenge(ctx, store->size - new_size);
975 
976 	success = (store->size <= new_size) ? 1 : 0;
977 	fz_unlock(ctx, FZ_LOCK_ALLOC);
978 #ifdef DEBUG_SCAVENGING
979 	fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store after: %zu\n", store->size/(1024*1024));
980 #endif
981 
982 	return success;
983 }
984 
/* Remove (and drop) every item of the given type for which fn(arg, key)
 * returns non-zero. Items are unlinked under the lock, then destroyed
 * after it is released - the same deferred-drop pattern as do_reap. */
void fz_filter_store(fz_context *ctx, fz_store_filter_fn *fn, void *arg, const fz_store_type *type)
{
	fz_store *store;
	fz_item *item, *prev, *remove;

	store = ctx->store;
	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Filter the items */
	remove = NULL;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->type != type)
			continue;

		/* fn returning 0 means "keep this item". */
		if (fn(ctx, arg, item->key) == 0)
			continue;

		/* We have to drop it */
		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Store whether to drop this value or not in 'prev' */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;

		/* Store it in our removal chain - just singly linked */
		item->next = remove;
		remove = item;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* Now drop the remove chain */
	for (item = remove; item != NULL; item = remove)
	{
		remove = item->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->prev) /* See above for our abuse of prev here */
			item->val->drop(ctx, item->val);

		/* Always drops the key and drop the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
}
1054 
/* Begin deferring reap passes: while the defer count is non-zero,
 * reaping is postponed and merely flagged via needs_reaping. */
void fz_defer_reap_start(fz_context *ctx)
{
	fz_store *store = ctx->store;

	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	store->defer_reap_count++;
	fz_unlock(ctx, FZ_LOCK_ALLOC);
}
1064 
/* End a deferred-reap section; if this was the outermost one and a
 * reap was requested in the meantime, run it now. */
void fz_defer_reap_end(fz_context *ctx)
{
	fz_store *store = ctx->store;
	int reap_now;

	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	store->defer_reap_count--;
	reap_now = (store->defer_reap_count == 0 && store->needs_reaping);
	if (reap_now)
		do_reap(ctx); /* do_reap drops FZ_LOCK_ALLOC for us */
	else
		fz_unlock(ctx, FZ_LOCK_ALLOC);
}
1080 
1081 #ifdef ENABLE_STORE_LOGGING
1082 
/* Write a printf-style header line followed by a full store dump to
 * the "STORE" module log. */
void fz_log_dump_store(fz_context *ctx, const char *fmt, ...)
{
	va_list args;
	fz_output *out = fz_new_log_for_module(ctx, "STORE");

	va_start(args, fmt);
	fz_write_vprintf(ctx, out, fmt, args);
	va_end(args);

	fz_debug_store(ctx, out);
	fz_write_printf(ctx, out, "STORE\tEND\n");
	fz_close_output(ctx, out);
	fz_drop_output(ctx, out);
}
1096 
1097 #endif
1098