/* Copyright © 2012 Brandon L Black <blblack@gmail.com>
 *
 * This file is part of gdnsd.
 *
 * gdnsd is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * gdnsd is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with gdnsd. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>
#include "ltarena.h"

#include <gdnsd/alloc.h>
#include <gdnsd/compiler.h>
#include <gdnsd/dname.h>

#include <inttypes.h>
#include <string.h>
#include <stdlib.h>

// ltarena: used for dname/label strings only, pooled to
// reduce the per-alloc overhead of malloc aligning and
// tracking every single one needlessly.
// Each pool is normally POOL_SIZE, non-growing to preserve
// *some* amount of locality-of-reference to the related
// objects referencing the strings.
// We initially reserve room in the ltarena object to track
// 4 pools, which expands by doubling to support far more
// pools than needed by even the largest zones in existence.
#define POOL_SIZE 512U // *must* be >= (256 + (RED_SIZE*2)) && a multiple of 4
#define INIT_POOLS_ALLOC 4U // *must* be 2^n && > 0

// Normally, our pools are initialized to all-zeros for us
// by xcalloc(), and no red zones are employed. In debug
// builds, we initialize a whole pool to 0xDEADBEEF,
// define a redzone of 4 bytes before and after
// each block, and then zero out the valid allocated area
// of each block as it's handed out.
#ifndef NDEBUG
#  define RED_SIZE 4
#else
#  define RED_SIZE 0
#endif
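// Worked example of the sizing constraint above: in a debug build
// (RED_SIZE == 4), a maximal 256-byte dname takes 256 + 4 + 4 = 264
// bytes from a pool, which is why POOL_SIZE must be >= 256 + RED_SIZE*2.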

// INIT_DNHASH_MASK + 1 is the initial number of slots
// in the dnhash table, which grows by doubling every
// time the count of stored unique dnames reaches half
// the slot count.
#define INIT_DNHASH_MASK 31U // *must* be 2^n-1 && > 0

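// dnhash_t is a small open-addressing hash set of dname pointers keyed
// by dname_hash(). Collisions are resolved by probing forward with an
// increasing step (+1, +2, +3, ...), and dnhash_grow() doubles the
// table before it becomes half full, keeping probe chains short. It is
// used only by lta_dnamedup() below, to de-duplicate stored dnames.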
typedef struct {
    unsigned count;
    unsigned mask;
    const uint8_t** table;
} dnhash_t;

F_MALLOC
static dnhash_t* dnhash_new(void) {
    dmn_assert(INIT_DNHASH_MASK);
    dmn_assert(!((INIT_DNHASH_MASK + 1U) & INIT_DNHASH_MASK)); // 2^n-1

    dnhash_t* rv = xmalloc(sizeof(dnhash_t));
    rv->count = 0;
    rv->mask = INIT_DNHASH_MASK;
    rv->table = xcalloc(INIT_DNHASH_MASK + 1U, sizeof(uint8_t*));
    return rv;
}

F_NONNULL
static void dnhash_destroy(dnhash_t* dnhash) {
    dmn_assert(dnhash->table);
    dmn_assert(dnhash->mask);
    free(dnhash->table);
    free(dnhash);
}

// grow a dnhash_t's hashtable size by doubling
F_NONNULL
static void dnhash_grow(dnhash_t* dnhash) {
    dmn_assert(dnhash->count);
    // assert that dnhash->mask is still 2^n-1 and >0
    dmn_assert(dnhash->mask);
    dmn_assert(!((dnhash->mask + 1U) & dnhash->mask));

    const uint8_t** old_table = dnhash->table;
    const unsigned old_mask = dnhash->mask;
    const unsigned new_mask = (old_mask << 1U) | 1U;
    const uint8_t** new_table = xcalloc(new_mask + 1U, sizeof(uint8_t*));
    for(unsigned i = 0; i <= old_mask; i++) {
        const uint8_t* item = old_table[i];
        if(item) {
            // re-insert into the larger table, probing forward with an
            // increasing step until a free slot is found
            unsigned jmpby = 1U;
            unsigned new_slot = dname_hash(item) & new_mask;
            while(new_table[new_slot]) {
                new_slot += jmpby++;
                new_slot &= new_mask;
            }
            new_table[new_slot] = item;
        }
    }

    free(dnhash->table);
    dnhash->table = new_table;
    dnhash->mask = new_mask;
}

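// The arena itself: 'pools' is a growable array of fixed-size pools,
// 'pool' indexes the pool currently being filled, 'poffs' is the
// offset of the next free byte within that pool, 'palloc' is the
// allocated length of the 'pools' array, and 'dnhash' is the dname
// de-duplication table (destroyed by lta_close()).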
struct _ltarena {
    uint8_t** pools;
    unsigned pool;
    unsigned poffs;
    unsigned palloc;
    dnhash_t* dnhash;
};

static void* make_pool(void) {
    dmn_assert(!(POOL_SIZE & 3U)); // multiple of four

    void* p;
    if(RED_SIZE) {
        // malloc + fill in deadbeef if using redzones
        p = xmalloc(POOL_SIZE);
        uint32_t* p32 = p;
        unsigned idx = POOL_SIZE >> 2U;
        while(idx--)
            p32[idx] = 0xDEADBEEF;
    }
    else {
        // get mem from calloc
        p = xcalloc(1, POOL_SIZE);
    }

    // let valgrind know what's going on, if running
    // and we're a debug build
    NOWARN_VALGRIND_MAKE_MEM_NOACCESS(p, POOL_SIZE);
    NOWARN_VALGRIND_CREATE_MEMPOOL(p, RED_SIZE, 1U);

    return p;
}

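// A sketch of the intended lifecycle, based only on the API below (the
// dname/label arguments are placeholders, not real call sites):
//
//   ltarena_t* lta = lta_new();
//   const uint8_t* d = lta_dnamedup(lta, some_dname); // de-duplicated copy
//   uint8_t* l = lta_labeldup(lta, some_label);       // plain pooled copy
//   lta_close(lta);    // no further allocations; frees the dedup hash
//   ...                // stored strings remain valid here
//   lta_destroy(lta);  // frees all pools and every stored string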
ltarena_t* lta_new(void) {
    ltarena_t* rv = xcalloc(1, sizeof(ltarena_t));
    rv->palloc = INIT_POOLS_ALLOC;
    rv->pools = xmalloc(INIT_POOLS_ALLOC * sizeof(uint8_t*));
    rv->pools[0] = make_pool();
    rv->dnhash = dnhash_new();
    return rv;
}

// lta_close(): freezes the arena. The dname de-duplication hash is
// destroyed (so no further allocations are legal), and the pools
// array is trimmed to its final size.
void lta_close(ltarena_t* lta) {
    if(lta->dnhash) {
        dnhash_destroy(lta->dnhash);
        lta->dnhash = NULL;
        lta->pools = xrealloc(lta->pools, (lta->pool + 1) * sizeof(uint8_t*));
    }
}

void lta_destroy(ltarena_t* lta) {
    lta_close(lta);
    unsigned whichp = lta->pool + 1U;
    while(whichp--) {
        NOWARN_VALGRIND_DESTROY_MEMPOOL(lta->pools[whichp]);
        free(lta->pools[whichp]);
    }
    free(lta->pools);
    free(lta);
}

F_MALLOC F_NONNULL
static uint8_t* lta_malloc(ltarena_t* lta, const unsigned size) {
    dmn_assert(size);
    dmn_assert(lta->dnhash); // not closed

    // Currently, all allocations obey this assertion.
    // Only labels + dnames are stored here, which max out at 256
    dmn_assert(size <= 256);

    // the requested size + redzones on either end, giving the total
    // this allocation will steal from the pool
    const unsigned size_plus_red = size + RED_SIZE + RED_SIZE;

    // this could be a compile-time check, just stuffing here instead for now
    dmn_assert(POOL_SIZE >= (256 + RED_SIZE + RED_SIZE));

    // This logically follows from the above asserts, but JIC
    dmn_assert(size_plus_red <= POOL_SIZE);

    // handle pool switch if we're out of room
    // + take care to extend the pools array if necc.
    if(unlikely((lta->poffs + size_plus_red > POOL_SIZE))) {
        if(unlikely(++lta->pool == lta->palloc)) {
            lta->palloc <<= 1U;
            lta->pools = xrealloc(lta->pools, lta->palloc * sizeof(uint8_t*));
        }
        lta->pools[lta->pool] = make_pool();
        lta->poffs = 0;
    }

    // assign the space and move our poffs pointer past it; allocations are
    // packed back-to-back as [redzone][payload][redzone] within each pool
    uint8_t* rval = &lta->pools[lta->pool][lta->poffs + RED_SIZE];
    lta->poffs += size_plus_red;

    // mark the allocation for valgrind and zero it if doing redzone stuff
    NOWARN_VALGRIND_MEMPOOL_ALLOC(lta->pools[lta->pool], rval, size);
    if(RED_SIZE)
        memset(rval, 0, size);

    return rval;
}

uint8_t* lta_labeldup(ltarena_t* lta, const uint8_t* label) {
    const unsigned sz = *label + 1U;
    uint8_t* rv = lta_malloc(lta, sz);
    memcpy(rv, label, sz);
    return rv;
}

// This mixes internal access to dnhash_t as well, so it's not
// properly just a method of ltarena_t in that sense.
const uint8_t* lta_dnamedup(ltarena_t* lta, const uint8_t* dname) {
    dnhash_t* dnhash = lta->dnhash;
    dmn_assert(dnhash); // not closed

    const unsigned hmask = dnhash->mask;
    const uint8_t** table = dnhash->table;
    uint32_t jmpby = 1U;
    uint32_t slotnum = dname_hash(dname) & hmask;
    while(table[slotnum]) {
        // an existing copy was found: return it rather than storing a duplicate
        if(!gdnsd_dname_cmp(dname, table[slotnum]))
            return table[slotnum];
        slotnum += jmpby++;
        slotnum &= hmask;
    }

    // not found: copy the dname into the arena and hash the new copy
    const unsigned dnlen = *dname + 1U;
    uint8_t* retval = lta_malloc(lta, dnlen);
    table[slotnum] = retval;
    memcpy(retval, dname, dnlen);

    if(++dnhash->count > (dnhash->mask >> 1U))
        dnhash_grow(dnhash);

    return retval;
}