xref: /freebsd/contrib/jemalloc/src/rtree.c (revision 325151a3)
#define	JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"

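/*
 * Return the smaller of two heights; used below to clamp the
 * rtree_start_level() lookup table entries to the deepest level, height - 1.
 */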
static unsigned
hmin(unsigned ha, unsigned hb)
{

	return (ha < hb ? ha : hb);
}

/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
bool
rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc)
{
	unsigned bits_in_leaf, height, i;

	assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

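	/*
	 * Split the key bits across levels: the leaf level resolves
	 * bits_in_leaf bits (the remainder of bits modulo
	 * RTREE_BITS_PER_LEVEL, or a full RTREE_BITS_PER_LEVEL if it divides
	 * evenly), and every other level resolves RTREE_BITS_PER_LEVEL bits.
	 * For example, were RTREE_BITS_PER_LEVEL 8 and bits 20, bits_in_leaf
	 * would be 4 and height 3, since (3-1)*8 + 4 == 20.
	 */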
	bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
	    : (bits % RTREE_BITS_PER_LEVEL);
	if (bits > bits_in_leaf) {
		height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
		if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
			height++;
	} else
		height = 1;
	assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);

	rtree->alloc = alloc;
	rtree->dalloc = dalloc;
	rtree->height = height;

	/* Root level. */
	rtree->levels[0].subtree = NULL;
	rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
	    bits_in_leaf;
	rtree->levels[0].cumbits = rtree->levels[0].bits;
	/* Interior levels. */
	for (i = 1; i < height-1; i++) {
		rtree->levels[i].subtree = NULL;
		rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
		rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
		    RTREE_BITS_PER_LEVEL;
	}
	/* Leaf level. */
	if (height > 1) {
		rtree->levels[height-1].subtree = NULL;
		rtree->levels[height-1].bits = bits_in_leaf;
		rtree->levels[height-1].cumbits = bits;
	}

	/* Compute lookup table to be used by rtree_start_level(). */
	for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
		rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
		    1);
	}

	return (false);
}

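/*
 * Recursively free the subtree rooted at node.  Interior nodes (every level
 * but the last) hold child pointers, which are torn down before the node
 * itself is released through the user-supplied dalloc callback.
 */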
static void
rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{

	if (level + 1 < rtree->height) {
		size_t nchildren, i;

		nchildren = ZU(1) << rtree->levels[level].bits;
		for (i = 0; i < nchildren; i++) {
			rtree_node_elm_t *child = node[i].child;
			if (child != NULL)
				rtree_delete_subtree(rtree, child, level + 1);
		}
	}
	rtree->dalloc(node);
}

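/*
 * Tear down the whole tree.  Each level can own its own lazily created
 * subtree root (see the start_level table above), so every non-NULL per-level
 * subtree is released.
 */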
void
rtree_delete(rtree_t *rtree)
{
	unsigned i;

	for (i = 0; i < rtree->height; i++) {
		rtree_node_elm_t *subtree = rtree->levels[i].subtree;
		if (subtree != NULL)
			rtree_delete_subtree(rtree, subtree, i);
	}
}

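/*
 * Lazily create the node pointed to by *elmp.  The thread whose
 * compare-and-swap replaces NULL with the RTREE_NODE_INITIALIZING sentinel
 * allocates the node and publishes it; a thread that loses the race (the CAS
 * reports failure by returning true) spin-waits until the winner's write
 * becomes visible.
 */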
static rtree_node_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
	rtree_node_elm_t *node;

	if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
		/*
		 * Another thread is already in the process of initializing.
		 * Spin-wait until initialization is complete.
		 */
		do {
			CPU_SPINWAIT;
			node = atomic_read_p((void **)elmp);
		} while (node == RTREE_NODE_INITIALIZING);
	} else {
		node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
		if (node == NULL)
			return (NULL);
		atomic_write_p((void **)elmp, node);
	}

	return (node);
}

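/*
 * Slow path for reading a per-level subtree root: called when
 * rtree->levels[level].subtree has not been initialized yet.
 */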
rtree_node_elm_t *
rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{

	return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}

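/*
 * Slow path for reading an interior node's child pointer: called when
 * elm->child has not been initialized yet.
 */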
rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{

	return (rtree_node_init(rtree, level, &elm->child));
}
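
/*
 * Example usage, as a minimal sketch: the node_alloc()/node_dalloc()
 * callbacks below are hypothetical; judging from rtree_node_init() above,
 * the allocation callback receives the number of child slots to allocate.
 *
 *	static rtree_node_elm_t *
 *	node_alloc(size_t nelms)
 *	{
 *		return (calloc(nelms, sizeof(rtree_node_elm_t)));
 *	}
 *
 *	static void
 *	node_dalloc(rtree_node_elm_t *node)
 *	{
 *		free(node);
 *	}
 *
 *	rtree_t rtree;
 *	if (rtree_new(&rtree, 32, node_alloc, node_dalloc))
 *		abort();
 *	(rtree_set()/rtree_get() lookups go here)
 *	rtree_delete(&rtree);
 */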