1 /*
2 * Copyright (c) 2014 DeNA Co., Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 * IN THE SOFTWARE.
21 */
22 #include <assert.h>
23 #include <errno.h>
24 #include <fcntl.h>
25 #include <stddef.h>
26 #include <stdio.h>
27 #include <stdint.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <stdarg.h>
31 #include <sys/mman.h>
32 #include <unistd.h>
33 #include "h2o/memory.h"
34 #include "h2o/file.h"
35
36 #if defined(__linux__)
37 #if defined(__ANDROID__) && (__ANDROID_API__ < 21)
38 #define USE_POSIX_FALLOCATE 0
39 #else
40 #define USE_POSIX_FALLOCATE 1
41 #endif
42 #elif __FreeBSD__ >= 9
43 #define USE_POSIX_FALLOCATE 1
44 #elif __NetBSD__ >= 7
45 #define USE_POSIX_FALLOCATE 1
46 #else
47 #define USE_POSIX_FALLOCATE 0
48 #endif
49
50 #if defined(__clang__)
51 #if __has_feature(address_sanitizer)
52 #define ASAN_IN_USE 1
53 #endif
54 #elif __SANITIZE_ADDRESS__ /* gcc */
55 #define ASAN_IN_USE 1
56 #else
57 #define ASAN_IN_USE 0
58 #endif
59
/* freelist node overlaid on the head of a chunk returned to a recycle pool */
struct st_h2o_mem_recycle_chunk_t {
    struct st_h2o_mem_recycle_chunk_t *next;
};
63
/* 4096-byte chunk used by memory pools; the `next` link is overlaid on the first bytes of the chunk */
union un_h2o_mem_pool_chunk_t {
    union un_h2o_mem_pool_chunk_t *next;
    char bytes[4096];
};
68
/* header of a directly-malloc'ed (large) pool allocation; the payload starts at `bytes` */
struct st_h2o_mem_pool_direct_t {
    struct st_h2o_mem_pool_direct_t *next;
    size_t _dummy; /* align to 2*sizeof(void*) */
    char bytes[1]; /* variable-length payload (pre-C99 flexible-array idiom; actual size set at allocation) */
};
74
/* node linking a refcounted shared entry to a pool, so that the pool releases its reference when cleared */
struct st_h2o_mem_pool_shared_ref_t {
    struct st_h2o_mem_pool_shared_ref_t *next;
    struct st_h2o_mem_pool_shared_entry_t *entry;
};
79
/* volatile function pointer to memset so the call cannot be optimized away; used to zero out sensitive data */
void *(*volatile h2o_mem__set_secure)(void *, int, size_t) = memset;

/* per-thread recycle pool for the 4096-byte memory-pool chunks; retains at most 16 chunks */
__thread h2o_mem_recycle_t h2o_mem_pool_allocator = {16};
/* process-wide count of mmap/temporary-file failures (incremented atomically in h2o_buffer_try_reserve) */
size_t h2o_mmap_errors = 0;
84
/**
 * Formats a fatal error message, emits it to the error stream prefixed with
 * the source location, and aborts the process. Never returns.
 */
void h2o__fatal(const char *file, int line, const char *msg, ...)
{
    char formatted[1024];
    va_list ap;

    va_start(ap, msg);
    vsnprintf(formatted, sizeof(formatted), msg, ap);
    va_end(ap);

    h2o_error_printf("fatal:%s:%d:%s\n", file, line, formatted);
    abort();
}
98
/**
 * Returns a chunk of at least `sz` bytes, reusing one from the recycle pool
 * when available, otherwise falling back to h2o_mem_alloc (which aborts on
 * OOM). Callers are expected to always request the same size for a given
 * allocator, so any pooled chunk is large enough.
 */
void *h2o_mem_alloc_recycle(h2o_mem_recycle_t *allocator, size_t sz)
{
    if (allocator->cnt == 0)
        return h2o_mem_alloc(sz);

    /* pop the first chunk off the freelist and hand it out */
    struct st_h2o_mem_recycle_chunk_t *reused = allocator->_link;
    assert(reused != NULL);
    allocator->_link = reused->next;
    --allocator->cnt;
    return reused;
}
111
/**
 * Returns chunk `p` to the recycle pool, or frees it outright when the pool
 * is full. Under AddressSanitizer recycling is disabled entirely so that
 * use-after-free bugs remain detectable.
 */
void h2o_mem_free_recycle(h2o_mem_recycle_t *allocator, void *p)
{
#if !ASAN_IN_USE
    if (allocator->cnt < allocator->max) {
        /* push onto the freelist, overlaying the link on the chunk itself */
        struct st_h2o_mem_recycle_chunk_t *reclaimed = p;
        reclaimed->next = allocator->_link;
        allocator->_link = reclaimed;
        ++allocator->cnt;
        return;
    }
#endif

    free(p);
}
127
/**
 * Releases chunks cached in the recycle pool. When `full` is non-zero the
 * pool is emptied; otherwise exactly one chunk is freed (gradual shrinking).
 * Does nothing when the pool is already empty.
 */
void h2o_mem_clear_recycle(h2o_mem_recycle_t *allocator, int full)
{
    if (allocator->cnt == 0)
        return;

    do {
        struct st_h2o_mem_recycle_chunk_t *doomed = allocator->_link;
        allocator->_link = doomed->next;
        free(doomed);
        --allocator->cnt;
    } while (full && allocator->cnt != 0);

    /* the count and the freelist must run out at the same time */
    assert((allocator->cnt != 0) == (allocator->_link != NULL));
}
142
/**
 * Initializes a memory pool to the empty state. Setting `chunk_offset` to the
 * chunk size forces allocation of a fresh chunk on the first small request.
 */
void h2o_mem_init_pool(h2o_mem_pool_t *pool)
{
    pool->chunk_offset = sizeof(pool->chunks->bytes); /* sizeof does not dereference the NULL pointer */
    pool->chunks = NULL;
    pool->directs = NULL;
    pool->shared_refs = NULL;
}
150
/**
 * Releases everything owned by the pool and resets it to the empty state:
 * drops the pool's references to shared entries, frees large direct
 * allocations, and returns the small chunks to the per-thread recycle pool.
 */
void h2o_mem_clear_pool(h2o_mem_pool_t *pool)
{
    /* drop the references held on refcounted (shared) entries */
    for (struct st_h2o_mem_pool_shared_ref_t *ref = pool->shared_refs; ref != NULL; ref = ref->next)
        h2o_mem_release_shared(ref->entry->bytes);
    pool->shared_refs = NULL;

    /* free the directly-malloc'ed (large) allocations */
    struct st_h2o_mem_pool_direct_t *direct = pool->directs;
    while (direct != NULL) {
        struct st_h2o_mem_pool_direct_t *doomed = direct;
        direct = direct->next;
        free(doomed);
    }
    pool->directs = NULL;

    /* hand the small chunks back to the recycle pool */
    while (pool->chunks != NULL) {
        union un_h2o_mem_pool_chunk_t *remaining = pool->chunks->next;
        h2o_mem_free_recycle(&h2o_mem_pool_allocator, pool->chunks);
        pool->chunks = remaining;
    }
    pool->chunk_offset = sizeof(pool->chunks->bytes);
}
178
/**
 * Allocates `sz` bytes from `pool`, aligned to `alignment` (must be a power of
 * two). Requests of roughly a quarter-chunk or more are malloc'ed individually
 * onto the `directs` list; smaller requests are carved out of the current
 * 4096-byte chunk, starting a new chunk when the remainder does not fit.
 */
void *h2o_mem__do_alloc_pool_aligned(h2o_mem_pool_t *pool, size_t alignment, size_t sz)
{
#define ALIGN_TO(x, a) (((x) + (a)-1) & ~((a)-1))
    void *ret;

    if (sz >= (sizeof(pool->chunks->bytes) - sizeof(pool->chunks->next)) / 4) {
        /* allocate large requests directly */
        struct st_h2o_mem_pool_direct_t *newp = h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_direct_t, bytes) + sz);
        newp->next = pool->directs;
        pool->directs = newp;
        return newp->bytes;
    }

    /* return a valid pointer even for 0 sized allocs */
    if (H2O_UNLIKELY(sz == 0))
        sz = 1;

    pool->chunk_offset = ALIGN_TO(pool->chunk_offset, alignment);
    if (sizeof(pool->chunks->bytes) - pool->chunk_offset < sz) {
        /* allocate new chunk (also taken on first use, as init sets chunk_offset to the full chunk size) */
        union un_h2o_mem_pool_chunk_t *newp = h2o_mem_alloc_recycle(&h2o_mem_pool_allocator, sizeof(*newp));
        newp->next = pool->chunks;
        pool->chunks = newp;
        /* skip past the `next` link overlaid on the head of the chunk */
        pool->chunk_offset = ALIGN_TO(sizeof(newp->next), alignment);
    }

    ret = pool->chunks->bytes + pool->chunk_offset;
    pool->chunk_offset += sz;
    return ret;
#undef ALIGN_TO
}
210
/* Prepends a reference node (allocated from the pool itself) tying `entry` to `pool`. */
static void link_shared(h2o_mem_pool_t *pool, struct st_h2o_mem_pool_shared_entry_t *entry)
{
    struct st_h2o_mem_pool_shared_ref_t *newref = h2o_mem_alloc_pool(pool, *newref, 1);
    newref->next = pool->shared_refs;
    newref->entry = entry;
    pool->shared_refs = newref;
}
218
/**
 * Allocates a refcounted chunk of `sz` bytes, returning a pointer to its
 * payload. `dispose` (may be NULL) is invoked when the refcount drops to
 * zero. When `pool` is non-NULL the initial reference is owned by the pool
 * and released when the pool is cleared.
 */
void *h2o_mem_alloc_shared(h2o_mem_pool_t *pool, size_t sz, void (*dispose)(void *))
{
    struct st_h2o_mem_pool_shared_entry_t *entry =
        h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_shared_entry_t, bytes) + sz);

    entry->dispose = dispose;
    entry->refcnt = 1; /* the reference being returned (or handed to the pool) */
    if (pool != NULL)
        link_shared(pool, entry);
    return entry->bytes;
}
228
/**
 * Takes an additional reference on the shared chunk whose payload is `p`
 * (which must have been returned by h2o_mem_alloc_shared) and links it to
 * `pool`, so the reference is released when the pool is cleared.
 */
void h2o_mem_link_shared(h2o_mem_pool_t *pool, void *p)
{
    h2o_mem_addref_shared(p);
    link_shared(pool, H2O_STRUCT_FROM_MEMBER(struct st_h2o_mem_pool_shared_entry_t, bytes, p));
}
234
topagesize(size_t capacity)235 static size_t topagesize(size_t capacity)
236 {
237 size_t pagesize = getpagesize();
238 return (offsetof(h2o_buffer_t, _buf) + capacity + pagesize - 1) / pagesize * pagesize;
239 }
240
241 /**
242 * size of the smallest bin is 4096 bytes (1<<12)
243 */
244 #define H2O_BUFFER_MIN_ALLOC_POWER 12
245
/**
 * Retains recycle bins for `h2o_buffer_t`. Thread-local, so no locking is needed.
 */
static __thread struct {
    /**
     * Holds recycle bins for `h2o_buffer_t`. Bin for capacity 2^x is located at x - H2O_BUFFER_MIN_ALLOC_POWER.
     */
    h2o_mem_recycle_t *bins;
    /**
     * Bins for capacities no greater than this value exist.
     */
    size_t largest_power;
    /**
     * Bin containing chunks of sizeof(h2o_buffer_t). This is used by empty buffers to retain the previous capacity.
     */
    h2o_mem_recycle_t zero_sized;
} buffer_recycle_bins = {NULL, H2O_BUFFER_MIN_ALLOC_POWER - 1, {100}};
263
buffer_size_to_power(size_t sz)264 static unsigned buffer_size_to_power(size_t sz)
265 {
266 assert(sz != 0);
267
268 unsigned power = sizeof(unsigned long long) * 8 - __builtin_clzll(sz) - 1;
269 if (power < H2O_BUFFER_MIN_ALLOC_POWER) {
270 power = H2O_BUFFER_MIN_ALLOC_POWER;
271 } else if (sz != (1 << power)) {
272 ++power;
273 }
274 return power;
275 }
276
/**
 * Releases buffers cached in the per-thread recycle bins. When `full` is
 * non-zero, empties every bin and frees the bin array itself; otherwise frees
 * at most one cached buffer per bin (gradual shrinking).
 */
void h2o_buffer_clear_recycle(int full)
{
    for (unsigned i = H2O_BUFFER_MIN_ALLOC_POWER; i <= buffer_recycle_bins.largest_power; ++i)
        h2o_mem_clear_recycle(&buffer_recycle_bins.bins[i - H2O_BUFFER_MIN_ALLOC_POWER], full);

    if (full) {
        free(buffer_recycle_bins.bins);
        buffer_recycle_bins.bins = NULL;
        /* matches the static initializer: "no bins exist" */
        buffer_recycle_bins.largest_power = H2O_BUFFER_MIN_ALLOC_POWER - 1;
    }

    h2o_mem_clear_recycle(&buffer_recycle_bins.zero_sized, full);
}
290
/**
 * Returns the recycle bin for buffers of capacity 2^power, extending the bin
 * array on demand. When `only_if_exists` is non-zero, returns NULL instead of
 * growing the array.
 */
static h2o_mem_recycle_t *buffer_get_recycle(unsigned power, int only_if_exists)
{
    if (power > buffer_recycle_bins.largest_power) {
        if (only_if_exists)
            return NULL;
        /* h2o_mem_realloc aborts on failure, so the result can be used unchecked */
        buffer_recycle_bins.bins =
            h2o_mem_realloc(buffer_recycle_bins.bins, sizeof(*buffer_recycle_bins.bins) * (power - H2O_BUFFER_MIN_ALLOC_POWER + 1));
        /* initialize each newly-added bin */
        do {
            ++buffer_recycle_bins.largest_power;
            buffer_recycle_bins.bins[buffer_recycle_bins.largest_power - H2O_BUFFER_MIN_ALLOC_POWER] = (h2o_mem_recycle_t){16};
        } while (buffer_recycle_bins.largest_power < power);
    }

    return &buffer_recycle_bins.bins[power - H2O_BUFFER_MIN_ALLOC_POWER];
}
306
/* Populates every field of a freshly-allocated buffer header. */
static void buffer_init(h2o_buffer_t *buf, size_t size, char *bytes, size_t capacity, h2o_buffer_prototype_t *prototype, int fd)
{
    buf->_prototype = prototype;
    buf->_fd = fd;
    buf->capacity = capacity;
    buf->bytes = bytes;
    buf->size = size;
}
315
/**
 * Releases the storage of a buffer. mmap-backed buffers are unmapped and
 * their temporary file closed; malloc-backed buffers are returned to the
 * recycle bin matching their power-of-two allocation size.
 */
void h2o_buffer__do_free(h2o_buffer_t *buffer)
{
    /* the prototype's initial buffer must never be freed through this path */
    assert(buffer->_prototype != NULL);

    if (buffer->_fd != -1) {
        close(buffer->_fd);
        munmap((void *)buffer, topagesize(buffer->capacity));
    } else {
        h2o_mem_recycle_t *allocator;
        if (buffer->bytes == NULL) {
            /* zero-sized placeholder that only records the previous capacity */
            allocator = &buffer_recycle_bins.zero_sized;
        } else {
            unsigned power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + buffer->capacity);
            /* allocations are always exact powers of two, so the reverse mapping must be exact */
            assert(((size_t)1 << power) == offsetof(h2o_buffer_t, _buf) + buffer->capacity);
            allocator = buffer_get_recycle(power, 0);
            assert(allocator != NULL);
        }
        h2o_mem_free_recycle(allocator, buffer);
    }
}
336
h2o_buffer_reserve(h2o_buffer_t ** _inbuf,size_t min_guarantee)337 h2o_iovec_t h2o_buffer_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee)
338 {
339 h2o_iovec_t reserved = h2o_buffer_try_reserve(_inbuf, min_guarantee);
340 if (reserved.base == NULL) {
341 h2o_fatal("failed to reserve buffer; capacity: %zu, min_guarantee: %zu", (*_inbuf)->capacity, min_guarantee);
342 }
343 return reserved;
344 }
345
/**
 * Allocates an empty buffer with capacity of at least `min_capacity` (clamped
 * up to the prototype's initial capacity). When `desired_capacity` is larger,
 * tries to satisfy it opportunistically from an existing recycle bin before
 * falling back to the smaller size.
 */
static h2o_buffer_t *buffer_allocate(h2o_buffer_prototype_t *prototype, size_t min_capacity, size_t desired_capacity)
{
    h2o_buffer_t *newp;
    unsigned alloc_power;

    /* normalize */
    if (min_capacity < prototype->_initial_buf.capacity)
        min_capacity = prototype->_initial_buf.capacity;

    /* try to allocate at first using `desired_capacity`, otherwise bail out to AllocNormal */
    if (desired_capacity <= min_capacity)
        goto AllocNormal;
    alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + desired_capacity);
    /* only use the larger size when a cached chunk of that size already exists */
    h2o_mem_recycle_t *allocator = buffer_get_recycle(alloc_power, 1);
    if (allocator == NULL || allocator->cnt == 0)
        goto AllocNormal;
    newp = h2o_mem_alloc_recycle(allocator, (size_t)1 << alloc_power);
    goto AllocDone;

AllocNormal:
    /* allocate using `min_capacity` */
    alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + min_capacity);
    newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0), (size_t)1 << alloc_power);

AllocDone:
    /* capacity is what remains of the power-of-two allocation after the header */
    buffer_init(newp, 0, newp->_buf, ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf), prototype, -1);
    return newp;
}
374
/**
 * Ensures that at least `min_guarantee` bytes can be appended to the buffer
 * and returns the writable region (base/len). On failure returns {NULL, 0}
 * and increments `h2o_mmap_errors`. Growth strategy: first-time use allocates
 * real storage; otherwise the buffer is compacted in place or doubled, and
 * once the prototype's mmap threshold is crossed the contents are moved to an
 * mmap'ed temporary file.
 */
h2o_iovec_t h2o_buffer_try_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee)
{
    h2o_buffer_t *inbuf = *_inbuf;
    h2o_iovec_t ret;

    if (inbuf->bytes == NULL) {
        /* `inbuf` is either the prototype's initial buffer (no _prototype back
         * pointer) or a zero-sized placeholder carrying the previous capacity */
        h2o_buffer_prototype_t *prototype;
        size_t desired_capacity;
        if (inbuf->_prototype == NULL) {
            prototype = H2O_STRUCT_FROM_MEMBER(h2o_buffer_prototype_t, _initial_buf, inbuf);
            desired_capacity = 0;
        } else {
            prototype = inbuf->_prototype;
            desired_capacity = inbuf->capacity;
            h2o_mem_free_recycle(&buffer_recycle_bins.zero_sized, inbuf);
        }
        inbuf = buffer_allocate(prototype, min_guarantee, desired_capacity);
        *_inbuf = inbuf;
    } else {
        if (min_guarantee <= inbuf->capacity - inbuf->size - (inbuf->bytes - inbuf->_buf)) {
            /* ok; enough space remains after the current contents */
        } else if ((inbuf->size + min_guarantee) * 2 <= inbuf->capacity) {
            /* the capacity should be less than or equal to 2 times of: size + guarantee;
             * compact by moving the contents back to the start of the storage */
            memmove(inbuf->_buf, inbuf->bytes, inbuf->size);
            inbuf->bytes = inbuf->_buf;
        } else {
            /* double the capacity until the guarantee fits */
            size_t new_capacity = inbuf->capacity;
            do {
                new_capacity *= 2;
            } while (new_capacity - inbuf->size < min_guarantee);
            if (inbuf->_prototype->mmap_settings != NULL && inbuf->_prototype->mmap_settings->threshold <= new_capacity) {
                /* mmap-backed path: grow (or create) the temporary file and remap */
                size_t new_allocsize = topagesize(new_capacity);
                int fd;
                h2o_buffer_t *newp;
                if (inbuf->_fd == -1) {
                    if ((fd = h2o_file_mktemp(inbuf->_prototype->mmap_settings->fn_template)) == -1) {
                        h2o_perror("failed to create temporary file");
                        goto MapError;
                    }
                } else {
                    fd = inbuf->_fd;
                }
                int fallocate_ret;
#if USE_POSIX_FALLOCATE
                fallocate_ret = posix_fallocate(fd, 0, new_allocsize);
                if (fallocate_ret != 0) {
                    /* posix_fallocate returns the error code instead of setting errno */
                    errno = fallocate_ret;
                }
#else
                fallocate_ret = ftruncate(fd, new_allocsize);
#endif
                if (fallocate_ret != 0) {
                    h2o_perror("failed to resize temporary file");
                    goto MapError;
                }
                if ((newp = (void *)mmap(NULL, new_allocsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)) == MAP_FAILED) {
                    h2o_perror("mmap failed");
                    goto MapError;
                }
                if (inbuf->_fd == -1) {
                    /* copy data (moving from malloc to mmap) */
                    buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, fd);
                    memcpy(newp->_buf, inbuf->bytes, inbuf->size);
                    h2o_buffer__do_free(inbuf);
                    *_inbuf = inbuf = newp;
                } else {
                    /* already mmap-backed: the new mapping shows the same file
                     * contents, so just munmap the old view and fix up pointers */
                    size_t offset = inbuf->bytes - inbuf->_buf;
                    munmap((void *)inbuf, topagesize(inbuf->capacity));
                    *_inbuf = inbuf = newp;
                    inbuf->capacity = new_capacity;
                    inbuf->bytes = newp->_buf + offset;
                }
            } else {
                /* malloc-backed path: round up to a power-of-two allocation and copy */
                unsigned alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + new_capacity);
                new_capacity = ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf);
                h2o_buffer_t *newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0), (size_t)1 << alloc_power);
                buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, -1);
                memcpy(newp->_buf, inbuf->bytes, inbuf->size);
                h2o_buffer__do_free(inbuf);
                *_inbuf = inbuf = newp;
            }
        }
    }

    ret.base = inbuf->bytes + inbuf->size;
    ret.len = inbuf->_buf + inbuf->capacity - ret.base;

    return ret;

MapError:
    __sync_add_and_fetch(&h2o_mmap_errors, 1);
    ret.base = NULL;
    ret.len = 0;
    return ret;
}
471
/**
 * Discards the first `delta` bytes of the buffer. Consuming everything
 * releases the storage (without recording the capacity); a partial consume
 * merely advances the read pointer.
 */
void h2o_buffer_consume(h2o_buffer_t **inbuf, size_t delta)
{
    if (delta == 0)
        return;

    if ((*inbuf)->size == delta) {
        h2o_buffer_consume_all(inbuf, 0);
        return;
    }

    assert((*inbuf)->bytes != NULL);
    (*inbuf)->bytes += delta;
    (*inbuf)->size -= delta;
}
484
/**
 * Empties the buffer. When `record_capacity` is non-zero, the buffer is
 * replaced by a zero-sized placeholder (bytes == NULL) that remembers the
 * current capacity so the next reserve restores a same-sized buffer;
 * otherwise the buffer reverts to the prototype's immutable initial buffer.
 */
void h2o_buffer_consume_all(h2o_buffer_t **inbuf, int record_capacity)
{
    if ((*inbuf)->size != 0) {
        if (record_capacity) {
            h2o_buffer_t *newp = h2o_mem_alloc_recycle(&buffer_recycle_bins.zero_sized, sizeof(*newp));
            buffer_init(newp, 0, NULL, (*inbuf)->capacity, (*inbuf)->_prototype, -1);
            h2o_buffer__do_free(*inbuf);
            *inbuf = newp;
        } else {
            h2o_buffer_t *prototype_buf = &(*inbuf)->_prototype->_initial_buf;
            h2o_buffer__do_free(*inbuf);
            *inbuf = prototype_buf;
        }
    }
}
500
h2o_buffer__dispose_linked(void * p)501 void h2o_buffer__dispose_linked(void *p)
502 {
503 h2o_buffer_t **buf = p;
504 h2o_buffer_dispose(buf);
505 }
506
/**
 * Grows the vector's storage to hold at least `new_capacity` elements,
 * doubling geometrically from a minimum of 4. Pool-backed vectors copy into a
 * fresh pool region (pool memory is never freed individually); heap-backed
 * vectors use realloc.
 */
void h2o_vector__expand(h2o_mem_pool_t *pool, h2o_vector_t *vector, size_t alignment, size_t element_size, size_t new_capacity)
{
    assert(vector->capacity < new_capacity);

    size_t grown = vector->capacity != 0 ? vector->capacity : 4;
    while (grown < new_capacity)
        grown *= 2;
    vector->capacity = grown;

    void *expanded;
    if (pool == NULL) {
        expanded = h2o_mem_realloc(vector->entries, element_size * grown);
    } else {
        expanded = h2o_mem_alloc_pool_aligned(pool, alignment, element_size * grown);
        h2o_memcpy(expanded, vector->entries, element_size * vector->size);
    }
    vector->entries = expanded;
}
523
/**
 * Exchanges `len` bytes between the two (non-overlapping) regions, working in
 * bounded chunks so the scratch buffer stays small and on the stack.
 */
void h2o_mem_swap(void *_x, void *_y, size_t len)
{
    char tmp[256];
    char *a = _x, *b = _y;

    while (len != 0) {
        size_t n = len < sizeof(tmp) ? len : sizeof(tmp);
        memcpy(tmp, a, n);
        memcpy(a, b, n);
        memcpy(b, tmp, n);
        a += n;
        b += n;
        len -= n;
    }
}
539
/**
 * Writes a classic hex dump of `buf` to `fp`: per row, an 8-digit hex offset,
 * 16 byte values (blank-padded past the end of the buffer), and the printable
 * ASCII rendering of those bytes.
 */
void h2o_dump_memory(FILE *fp, const char *buf, size_t len)
{
    for (size_t row = 0; row < len; row += 16) {
        /* offset column */
        fprintf(fp, "%08zx", row);
        /* hex column: 16 slots, each 3 characters wide */
        for (size_t col = 0; col != 16; ++col) {
            if (row + col < len)
                fprintf(fp, " %02x", (int)(unsigned char)buf[row + col]);
            else
                fprintf(fp, "   ");
        }
        /* ASCII column: printable characters as-is, everything else as '.' */
        fprintf(fp, " ");
        for (size_t col = 0; col != 16 && row + col < len; ++col) {
            int ch = buf[row + col];
            fputc(' ' <= ch && ch < 0x7f ? ch : '.', fp);
        }
        fprintf(fp, "\n");
    }
}
560
/**
 * Appends `element` to a heap-allocated, NULL-terminated pointer array,
 * reallocating the array to make room (h2o_mem_realloc aborts on OOM).
 */
void h2o_append_to_null_terminated_list(void ***list, void *element)
{
    /* count the existing elements */
    size_t num = 0;
    while ((*list)[num] != NULL)
        ++num;

    /* grow by one slot plus the terminating NULL, then append */
    *list = h2o_mem_realloc(*list, (num + 2) * sizeof(void *));
    (*list)[num] = element;
    (*list)[num + 1] = NULL;
}
571
/**
 * Thread-safe strerror wrapper. Always returns a NUL-terminated message; the
 * result may point into `buf` or (on GNU systems) to an immutable static
 * string, so callers must not rely on `buf` being filled.
 */
char *h2o_strerror_r(int err, char *buf, size_t len)
{
#ifndef _GNU_SOURCE
    /* XSI-compliant strerror_r fills `buf` and returns an int status, which is ignored here */
    strerror_r(err, buf, len);
    return buf;
#else
    /**
     * The GNU-specific strerror_r() returns a pointer to a string containing the error message.
     * This may be either a pointer to a string that the function stores in buf,
     * or a pointer to some (immutable) static string (in which case buf is unused)
     */
    return strerror_r(err, buf, len);
#endif
}
586
/**
 * Like perror(3), but writes to the h2o error stream and uses the
 * thread-safe strerror wrapper.
 */
void h2o_perror(const char *msg)
{
    char errbuf[128];

    h2o_error_printf("%s: %s\n", msg, h2o_strerror_r(errno, errbuf, sizeof(errbuf)));
}
593