1 /* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
2
3 #include "lib.h"
4 #include "array.h"
5 #include "str-sanitize.h"
6 #include "mmap-util.h"
7 #include "mail-index-private.h"
8 #include "mail-index-modseq.h"
9
/* Initialize (or reset) the memory pool and the arrays that hold this
   map's extension registrations. initial_count is a hint for how many
   extensions are expected to be registered. */
void mail_index_map_init_extbufs(struct mail_index_map *map,
				 unsigned int initial_count)
{
/* rough per-extension memory cost estimates used to size the pool */
#define EXTENSION_NAME_APPROX_LEN 20
#define EXT_GLOBAL_ALLOC_SIZE \
	((sizeof(map->extensions) + sizeof(buffer_t)) * 2)
#define EXT_PER_ALLOC_SIZE \
	(EXTENSION_NAME_APPROX_LEN + \
	 sizeof(struct mail_index_ext) + sizeof(uint32_t))
	size_t size;

	if (map->extension_pool == NULL) {
		/* first use: size the pool for initial_count extensions */
		size = EXT_GLOBAL_ALLOC_SIZE +
			initial_count * EXT_PER_ALLOC_SIZE;
		map->extension_pool =
			pool_alloconly_create(MEMPOOL_GROWING"map extensions",
					      nearest_power(size));
	} else {
		p_clear(map->extension_pool);

		/* try to use the existing pool's size for initial_count so
		   we don't grow it needlessly */
		size = p_get_max_easy_alloc_size(map->extension_pool);
		if (size > EXT_GLOBAL_ALLOC_SIZE + EXT_PER_ALLOC_SIZE) {
			initial_count = (size - EXT_GLOBAL_ALLOC_SIZE) /
				EXT_PER_ALLOC_SIZE;
		}
	}

	p_array_init(&map->extensions, map->extension_pool, initial_count);
	p_array_init(&map->ext_id_map, map->extension_pool, initial_count);
}
42
mail_index_map_lookup_ext(struct mail_index_map * map,const char * name,uint32_t * idx_r)43 bool mail_index_map_lookup_ext(struct mail_index_map *map, const char *name,
44 uint32_t *idx_r)
45 {
46 const struct mail_index_ext *ext;
47
48 if (!array_is_created(&map->extensions))
49 return FALSE;
50
51 array_foreach(&map->extensions, ext) {
52 if (strcmp(ext->name, name) == 0) {
53 *idx_r = array_foreach_idx(&map->extensions, ext);
54 return TRUE;
55 }
56 }
57 return FALSE;
58 }
59
mail_index_map_ext_hdr_offset(unsigned int name_len)60 unsigned int mail_index_map_ext_hdr_offset(unsigned int name_len)
61 {
62 size_t size = sizeof(struct mail_index_ext_header) + name_len;
63 return MAIL_INDEX_HEADER_SIZE_ALIGN(size);
64 }
65
/* Register a new extension in the map and return its index within
   map->extensions. The extension must not already exist in the map.
   ext_offset is the offset of the extension's header inside the index
   header, or (uint32_t)-1 if the extension isn't stored in the file. */
uint32_t
mail_index_map_register_ext(struct mail_index_map *map,
			    const char *name, uint32_t ext_offset,
			    const struct mail_index_ext_header *ext_hdr)
{
	struct mail_index_ext *ext;
	uint32_t idx, ext_map_idx, empty_idx = (uint32_t)-1;

	if (!array_is_created(&map->extensions)) {
		mail_index_map_init_extbufs(map, 5);
		idx = 0;
	} else {
		idx = array_count(&map->extensions);
	}
	/* caller is expected to have checked for duplicates already */
	i_assert(!mail_index_map_lookup_ext(map, name, &ext_map_idx));

	ext = array_append_space(&map->extensions);
	ext->name = p_strdup(map->extension_pool, name);
	ext->ext_offset = ext_offset;
	/* header data follows the fixed struct + name + padding, unless
	   the extension isn't in the file at all */
	ext->hdr_offset = ext_offset == (uint32_t)-1 ? (uint32_t)-1 :
		ext_offset + mail_index_map_ext_hdr_offset(strlen(name));
	ext->hdr_size = ext_hdr->hdr_size;
	ext->record_offset = ext_hdr->record_offset;
	ext->record_size = ext_hdr->record_size;
	ext->record_align = ext_hdr->record_align;
	ext->reset_id = ext_hdr->reset_id;

	/* also register the extension index-wide to get a stable
	   index-level extension id */
	ext->index_idx = mail_index_ext_register(map->index, name,
						 ext_hdr->hdr_size,
						 ext_hdr->record_size,
						 ext_hdr->record_align);

	/* Update index ext_id -> map ext_id mapping. Fill non-used
	   ext_ids with (uint32_t)-1 */
	while (array_count(&map->ext_id_map) < ext->index_idx)
		array_push_back(&map->ext_id_map, &empty_idx);
	array_idx_set(&map->ext_id_map, ext->index_idx, &idx);
	return idx;
}
105
/* Parse the next extension starting at *offset_p in the index header.
   On success returns 0, advances *offset_p past the extension and sets
   *ext_hdr_r and *name_r. Returns -1 when there are no more extensions
   or the data would run past header_size. *name_r is set to "" when the
   name contains non-ASCII/non-printable characters (caller treats that
   as a broken extension). */
int mail_index_map_ext_get_next(struct mail_index_map *map,
				unsigned int *offset_p,
				const struct mail_index_ext_header **ext_hdr_r,
				const char **name_r)
{
	const struct mail_index_ext_header *ext_hdr;
	unsigned int offset, name_offset;

	offset = *offset_p;
	*name_r = "";

	/* Extension header contains:
	   - struct mail_index_ext_header
	   - name (not 0-terminated)
	   - 64bit alignment padding
	   - extension header contents
	   - 64bit alignment padding
	*/
	name_offset = offset + sizeof(*ext_hdr);
	ext_hdr = MAIL_INDEX_MAP_HDR_OFFSET(map, offset);
	/* bounds-check the fixed struct before dereferencing ext_hdr */
	if (offset + sizeof(*ext_hdr) >= map->hdr.header_size)
		return -1;

	/* skip over the fixed struct + name + alignment padding */
	offset += mail_index_map_ext_hdr_offset(ext_hdr->name_size);
	if (offset > map->hdr.header_size)
		return -1;

	*name_r = t_strndup(MAIL_INDEX_MAP_HDR_OFFSET(map, name_offset),
			    ext_hdr->name_size);
	if (strcmp(*name_r, str_sanitize(*name_r, SIZE_MAX)) != 0) {
		/* we allow only plain ASCII names, so this extension
		   is most likely broken */
		*name_r = "";
	}

	/* finally make sure that the hdr_size is small enough.
	   do this last so that we could return a usable name. */
	offset += MAIL_INDEX_HEADER_SIZE_ALIGN(ext_hdr->hdr_size);
	if (offset > map->hdr.header_size)
		return -1;

	*offset_p = offset;
	*ext_hdr_r = ext_hdr;
	return 0;
}
151
/* Validate the record-field part of an extension header against the
   index header's record size. Returns 0 if valid, or -1 with *error_r
   set to a description of the first failed check. */
static int
mail_index_map_ext_hdr_check_record(const struct mail_index_header *hdr,
				    const struct mail_index_ext_header *ext_hdr,
				    const char **error_r)
{
	if (ext_hdr->record_align == 0) {
		*error_r = "Record field alignment is zero";
		return -1;
	}

	/* until we get 128 bit CPUs having a larger alignment is pointless */
	if (ext_hdr->record_align > sizeof(uint64_t)) {
		*error_r = "Record alignment is too large";
		return -1;
	}
	/* a large record size is most likely a bug somewhere. the maximum
	   record size is limited to 64k anyway, so try to fail earlier. */
	if (ext_hdr->record_size >= 32768) {
		*error_r = "Record size is too large";
		return -1;
	}

	if (ext_hdr->record_offset == 0) {
		/* if we get here from extension introduction, record_offset=0
		   and hdr->record_size hasn't been updated yet */
		return 0;
	}

	/* the extension's record field must fit inside the record */
	if (ext_hdr->record_offset + ext_hdr->record_size > hdr->record_size) {
		*error_r = t_strdup_printf("Record field points "
					   "outside record size (%u+%u > %u)",
					   ext_hdr->record_offset,
					   ext_hdr->record_size,
					   hdr->record_size);
		return -1;
	}

	/* both the field offset and the total record size must honor the
	   extension's requested alignment */
	if ((ext_hdr->record_offset % ext_hdr->record_align) != 0) {
		*error_r = t_strdup_printf("Record field alignment %u "
					   "not used", ext_hdr->record_align);
		return -1;
	}
	if ((hdr->record_size % ext_hdr->record_align) != 0) {
		*error_r = t_strdup_printf("Record size not aligned by %u "
					   "as required by extension",
					   ext_hdr->record_align);
		return -1;
	}
	return 0;
}
202
/* Sanity-check an extension header (name and sizes). Returns 0 if it
   looks valid, or -1 with *error_r set. */
int mail_index_map_ext_hdr_check(const struct mail_index_header *hdr,
				 const struct mail_index_ext_header *ext_hdr,
				 const char *name, const char **error_r)
{
	/* an extension must carry record data, header data, or both */
	if (ext_hdr->record_size == 0 && ext_hdr->hdr_size == 0) {
		*error_r = "Invalid field values";
		return -1;
	}
	/* mail_index_map_ext_get_next() clears the name when it contains
	   invalid characters */
	if (*name == '\0') {
		*error_r = "Broken name";
		return -1;
	}

	if (ext_hdr->record_size != 0) {
		if (mail_index_map_ext_hdr_check_record(hdr, ext_hdr,
							error_r) < 0)
			return -1;
	}
	if (ext_hdr->hdr_size > MAIL_INDEX_EXT_HEADER_MAX_SIZE) {
		*error_r = t_strdup_printf("Headersize too large (%u)",
					   ext_hdr->hdr_size);
		return -1;
	}
	return 0;
}
228
/* Fill in a fresh in-memory index header with default values. */
static void mail_index_header_init(struct mail_index *index,
				   struct mail_index_header *hdr)
{
	/* the base header must be 64bit aligned so the extension headers
	   that follow it stay aligned */
	i_assert((sizeof(*hdr) % sizeof(uint64_t)) == 0);

	i_zero(hdr);

	hdr->major_version = MAIL_INDEX_MAJOR_VERSION;
	hdr->minor_version = MAIL_INDEX_MINOR_VERSION;
	hdr->base_header_size = sizeof(*hdr);
	hdr->header_size = sizeof(*hdr);
	hdr->record_size = sizeof(struct mail_index_record);

#ifndef WORDS_BIGENDIAN
	/* record the writer's byte order so readers can detect a mismatch */
	hdr->compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
#endif

	hdr->indexid = index->indexid;
	/* log sequences and UIDs are 1-based */
	hdr->log_file_seq = 1;
	hdr->next_uid = 1;
	hdr->first_recent_uid = 1;
}
251
/* Allocate a new, empty in-memory index map for the index. */
struct mail_index_map *mail_index_map_alloc(struct mail_index *index)
{
	struct mail_index_map tmp_map;

	i_zero(&tmp_map);
	mail_index_header_init(index, &tmp_map.hdr);
	/* temporary data-stack buffer only; mail_index_map_clone() builds
	   a permanently allocated copy of the header */
	tmp_map.hdr_copy_buf = t_buffer_create(sizeof(tmp_map.hdr));
	buffer_append(tmp_map.hdr_copy_buf, &tmp_map.hdr, sizeof(tmp_map.hdr));
	tmp_map.index = index;

	/* a bit kludgy way to do this, but it initializes everything
	   nicely and correctly */
	return mail_index_map_clone(&tmp_map);
}
266
mail_index_record_map_free(struct mail_index_map * map,struct mail_index_record_map * rec_map)267 static void mail_index_record_map_free(struct mail_index_map *map,
268 struct mail_index_record_map *rec_map)
269 {
270 if (rec_map->buffer != NULL) {
271 i_assert(rec_map->mmap_base == NULL);
272 buffer_free(&rec_map->buffer);
273 } else if (rec_map->mmap_base != NULL) {
274 i_assert(rec_map->buffer == NULL);
275 if (munmap(rec_map->mmap_base, rec_map->mmap_size) < 0)
276 mail_index_set_syscall_error(map->index, "munmap()");
277 rec_map->mmap_base = NULL;
278 }
279 array_free(&rec_map->maps);
280 if (rec_map->modseq != NULL)
281 mail_index_map_modseq_free(&rec_map->modseq);
282 i_free(rec_map);
283 }
284
mail_index_record_map_unlink(struct mail_index_map * map)285 static void mail_index_record_map_unlink(struct mail_index_map *map)
286 {
287 struct mail_index_map *const *maps;
288 unsigned int idx = UINT_MAX;
289
290 array_foreach(&map->rec_map->maps, maps) {
291 if (*maps == map) {
292 idx = array_foreach_idx(&map->rec_map->maps, maps);
293 break;
294 }
295 }
296 i_assert(idx != UINT_MAX);
297
298 array_delete(&map->rec_map->maps, idx, 1);
299 if (array_count(&map->rec_map->maps) == 0) {
300 mail_index_record_map_free(map, map->rec_map);
301 map->rec_map = NULL;
302 }
303 }
304
/* Drop one reference to the map and set *_map to NULL. The map and its
   resources are freed when the last reference is dropped. */
void mail_index_unmap(struct mail_index_map **_map)
{
	struct mail_index_map *map = *_map;

	*_map = NULL;
	if (--map->refcount > 0)
		return;

	i_assert(map->refcount == 0);
	/* detach from the (possibly shared) record map; it is freed only
	   when its last user is unlinked */
	mail_index_record_map_unlink(map);

	pool_unref(&map->extension_pool);
	if (array_is_created(&map->keyword_idx_map))
		array_free(&map->keyword_idx_map);
	buffer_free(&map->hdr_copy_buf);
	i_free(map);
}
322
mail_index_map_copy_records(struct mail_index_record_map * dest,const struct mail_index_record_map * src,unsigned int record_size)323 static void mail_index_map_copy_records(struct mail_index_record_map *dest,
324 const struct mail_index_record_map *src,
325 unsigned int record_size)
326 {
327 size_t size;
328
329 size = src->records_count * record_size;
330 /* +1% so we have a bit of space to grow. useful for huge mailboxes. */
331 dest->buffer = buffer_create_dynamic(default_pool,
332 size + I_MAX(size/100, 1024));
333 buffer_append(dest->buffer, src->records, size);
334
335 dest->records = buffer_get_modifiable_data(dest->buffer, NULL);
336 dest->records_count = src->records_count;
337 }
338
/* Copy the index header (base part + extension headers) from src into
   dest and rebuild dest->hdr_copy_buf. src == dest is allowed: then
   the function only ensures hdr_copy_buf exists (it returns early if
   the buffer is already allocated). */
static void mail_index_map_copy_header(struct mail_index_map *dest,
				       const struct mail_index_map *src)
{
	/* use src->hdr copy directly, because if we got here
	   from syncing it has the latest changes. */
	if (src != dest)
		dest->hdr = src->hdr;
	if (dest->hdr_copy_buf != NULL) {
		if (src == dest)
			return;

		buffer_set_used_size(dest->hdr_copy_buf, 0);
	} else {
		dest->hdr_copy_buf =
			buffer_create_dynamic(default_pool,
					      dest->hdr.header_size);
	}
	/* base header first; it may be smaller than sizeof(dest->hdr) if
	   the file was written by an older index version */
	buffer_append(dest->hdr_copy_buf, &dest->hdr,
		      I_MIN(sizeof(dest->hdr), src->hdr.base_header_size));
	if (src != dest) {
		/* then the extension headers that follow the base header */
		buffer_write(dest->hdr_copy_buf, src->hdr.base_header_size,
			MAIL_INDEX_MAP_HDR_OFFSET(src, src->hdr.base_header_size),
			src->hdr.header_size - src->hdr.base_header_size);
	}
	i_assert(dest->hdr_copy_buf->used == dest->hdr.header_size);
}
365
366 static struct mail_index_record_map *
mail_index_record_map_alloc(struct mail_index_map * map)367 mail_index_record_map_alloc(struct mail_index_map *map)
368 {
369 struct mail_index_record_map *rec_map;
370
371 rec_map = i_new(struct mail_index_record_map, 1);
372 i_array_init(&rec_map->maps, 4);
373 array_push_back(&rec_map->maps, &map);
374 return rec_map;
375 }
376
/* Create an in-memory clone of the map with refcount 1. The record map
   is shared with the original (reference-counted), while the extension
   registrations and keyword map are deep-copied. */
struct mail_index_map *mail_index_map_clone(const struct mail_index_map *map)
{
	struct mail_index_map *mem_map;
	struct mail_index_ext *ext;
	unsigned int count;

	mem_map = i_new(struct mail_index_map, 1);
	mem_map->index = map->index;
	mem_map->refcount = 1;
	if (map->rec_map == NULL) {
		/* cloning a freshly initialized map (mail_index_map_alloc());
		   start with an empty in-memory record buffer */
		mem_map->rec_map = mail_index_record_map_alloc(mem_map);
		mem_map->rec_map->buffer =
			buffer_create_dynamic(default_pool, 1024);
	} else {
		/* share the record map, registering ourself as a user */
		mem_map->rec_map = map->rec_map;
		array_push_back(&mem_map->rec_map->maps, &mem_map);
	}

	mail_index_map_copy_header(mem_map, map);

	/* copy extensions */
	if (array_is_created(&map->ext_id_map)) {
		count = array_count(&map->ext_id_map);
		mail_index_map_init_extbufs(mem_map, count + 2);

		array_append_array(&mem_map->extensions, &map->extensions);
		array_append_array(&mem_map->ext_id_map, &map->ext_id_map);

		/* fix the name pointers to use our own pool */
		array_foreach_modifiable(&mem_map->extensions, ext) {
			i_assert(ext->record_offset + ext->record_size <=
				 mem_map->hdr.record_size);
			ext->name = p_strdup(mem_map->extension_pool,
					     ext->name);
		}
	}

	/* copy keyword map */
	if (array_is_created(&map->keyword_idx_map)) {
		i_array_init(&mem_map->keyword_idx_map,
			     array_count(&map->keyword_idx_map) + 4);
		array_append_array(&mem_map->keyword_idx_map,
				   &map->keyword_idx_map);
	}

	return mem_map;
}
424
mail_index_record_map_move_to_private(struct mail_index_map * map)425 void mail_index_record_map_move_to_private(struct mail_index_map *map)
426 {
427 struct mail_index_record_map *new_map;
428 const struct mail_index_record *rec;
429
430 if (array_count(&map->rec_map->maps) > 1) {
431 /* Multiple references to the rec_map. Create a clone of the
432 rec_map, which is in memory. */
433 new_map = mail_index_record_map_alloc(map);
434 mail_index_map_copy_records(new_map, map->rec_map,
435 map->hdr.record_size);
436 mail_index_record_map_unlink(map);
437 map->rec_map = new_map;
438 if (map->rec_map->modseq != NULL)
439 new_map->modseq = mail_index_map_modseq_clone(map->rec_map->modseq);
440 } else {
441 new_map = map->rec_map;
442 }
443
444 if (new_map->records_count != map->hdr.messages_count) {
445 /* The rec_map has more messages than what map contains.
446 These messages aren't necessary (and may confuse the caller),
447 so truncate them away. */
448 i_assert(new_map->records_count > map->hdr.messages_count);
449 new_map->records_count = map->hdr.messages_count;
450 if (new_map->records_count == 0)
451 new_map->last_appended_uid = 0;
452 else {
453 rec = MAIL_INDEX_REC_AT_SEQ(map, new_map->records_count);
454 new_map->last_appended_uid = rec->uid;
455 }
456 buffer_set_used_size(new_map->buffer, new_map->records_count *
457 map->hdr.record_size);
458 }
459 }
460
/* Make sure the map's records are stored in memory rather than in an
   mmap()ed region. */
void mail_index_map_move_to_memory(struct mail_index_map *map)
{
	struct mail_index_record_map *new_map;

	if (map->rec_map->mmap_base == NULL) {
		/* rec_map is already in memory */
		return;
	}

	/* Move the rec_map contents to memory. If this is the only map that
	   refers to the rec_map, it can be directly replaced and the old
	   content munmap()ed. Otherwise, create a new rec_map for this map. */
	if (array_count(&map->rec_map->maps) == 1)
		new_map = map->rec_map;
	else {
		new_map = mail_index_record_map_alloc(map);
		/* carry the modseq tracking over to the new rec_map */
		new_map->modseq = map->rec_map->modseq == NULL ? NULL :
			mail_index_map_modseq_clone(map->rec_map->modseq);
	}

	mail_index_map_copy_records(new_map, map->rec_map,
				    map->hdr.record_size);
	/* src==dest: only ensures hdr_copy_buf is allocated */
	mail_index_map_copy_header(map, map);

	if (new_map != map->rec_map) {
		/* detach from the old shared rec_map, use the new one */
		mail_index_record_map_unlink(map);
		map->rec_map = new_map;
	} else {
		/* replaced in place: release the old mmap()ed area */
		if (munmap(new_map->mmap_base, new_map->mmap_size) < 0)
			mail_index_set_syscall_error(map->index, "munmap()");
		new_map->mmap_base = NULL;
	}
}
494
mail_index_map_get_ext_idx(struct mail_index_map * map,uint32_t ext_id,uint32_t * idx_r)495 bool mail_index_map_get_ext_idx(struct mail_index_map *map,
496 uint32_t ext_id, uint32_t *idx_r)
497 {
498 const uint32_t *id;
499
500 if (!array_is_created(&map->ext_id_map) ||
501 ext_id >= array_count(&map->ext_id_map))
502 return FALSE;
503
504 id = array_idx(&map->ext_id_map, ext_id);
505 *idx_r = *id;
506 return *idx_r != (uint32_t)-1;
507 }
508
/* Binary search for uid in the map's records, starting from 0-based
   index left_idx. Returns the 1-based sequence of the matching record.
   When uid isn't found: with nearest_side > 0 returns the sequence of
   the next larger uid (0 if uid is past the last message); otherwise
   returns the sequence of the next smaller uid (0 if uid is before the
   first message). */
static uint32_t mail_index_bsearch_uid(struct mail_index_map *map,
				       uint32_t uid, uint32_t left_idx,
				       int nearest_side)
{
	const struct mail_index_record *rec_base, *rec;
	uint32_t idx, right_idx, record_size;

	i_assert(map->hdr.messages_count <= map->rec_map->records_count);

	rec_base = map->rec_map->records;
	record_size = map->hdr.record_size;

	idx = left_idx;
	/* UIDs grow at least as fast as sequences, so the record with
	   this uid can't be at a 0-based index >= uid */
	right_idx = I_MIN(map->hdr.messages_count, uid);

	i_assert(right_idx < INT_MAX);
	while (left_idx < right_idx) {
		idx = (left_idx + right_idx) / 2;

		rec = CONST_PTR_OFFSET(rec_base, idx * record_size);
		if (rec->uid < uid)
			left_idx = idx+1;
		else if (rec->uid > uid)
			right_idx = idx;
		else
			break;
	}
	i_assert(idx < map->hdr.messages_count);

	rec = CONST_PTR_OFFSET(rec_base, idx * record_size);
	if (rec->uid != uid) {
		if (nearest_side > 0) {
			/* we want uid or larger */
			return rec->uid > uid ? idx+1 :
				(idx == map->hdr.messages_count-1 ? 0 : idx+2);
		} else {
			/* we want uid or smaller */
			return rec->uid < uid ? idx + 1 : idx;
		}
	}

	/* exact match: convert 0-based index to 1-based sequence */
	return idx+1;
}
552
/* Convert the UID range [first_uid..last_uid] into a sequence range.
   If no messages fall within the range, both *first_seq_r and
   *last_seq_r are set to 0. */
void mail_index_map_lookup_seq_range(struct mail_index_map *map,
				     uint32_t first_uid, uint32_t last_uid,
				     uint32_t *first_seq_r,
				     uint32_t *last_seq_r)
{
	i_assert(first_uid > 0);
	i_assert(first_uid <= last_uid);

	if (map->hdr.messages_count == 0) {
		*first_seq_r = *last_seq_r = 0;
		return;
	}

	/* find the first message with uid >= first_uid */
	*first_seq_r = mail_index_bsearch_uid(map, first_uid, 0, 1);
	if (*first_seq_r == 0 ||
	    MAIL_INDEX_REC_AT_SEQ(map, *first_seq_r)->uid > last_uid) {
		/* no messages fall within the requested range */
		*first_seq_r = *last_seq_r = 0;
		return;
	}

	if (last_uid >= map->hdr.next_uid-1) {
		/* we want the last message */
		last_uid = map->hdr.next_uid-1;
		if (first_uid > last_uid) {
			*first_seq_r = *last_seq_r = 0;
			return;
		}

		*last_seq_r = map->hdr.messages_count;
		return;
	}

	if (first_uid == last_uid)
		*last_seq_r = *first_seq_r;
	else {
		/* optimization - binary lookup only from right side: */
		*last_seq_r = mail_index_bsearch_uid(map, last_uid,
						     *first_seq_r - 1, -1);
	}
	i_assert(*last_seq_r >= *first_seq_r);
}
594