/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */

#include "lib.h"
#include "ioloop.h"
#include "buffer.h"
#include "hash.h"
#include "file-cache.h"
#include "read-full.h"
#include "write-full.h"
#include "mmap-util.h"
#include "mail-cache-private.h"

#include <stddef.h>

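/* A field is "newly wanted" if it has been registered and used in memory,
   but has no slot in the cache file yet (its file mapping is still
   (uint32_t)-1). Such fields are appended the next time the field header
   is written. */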
#define CACHE_FIELD_IS_NEWLY_WANTED(cache, field_idx) \
	((cache)->field_file_map[field_idx] == (uint32_t)-1 && \
	 (cache)->fields[field_idx].used)

static bool field_has_fixed_size(enum mail_cache_field_type type)
{
	switch (type) {
	case MAIL_CACHE_FIELD_FIXED_SIZE:
	case MAIL_CACHE_FIELD_BITMASK:
		return TRUE;
	case MAIL_CACHE_FIELD_VARIABLE_SIZE:
	case MAIL_CACHE_FIELD_STRING:
	case MAIL_CACHE_FIELD_HEADER:
		return FALSE;

	case MAIL_CACHE_FIELD_COUNT:
		break;
	}

	i_unreached();
	return FALSE;
}

static bool field_decision_is_valid(enum mail_cache_decision_type type)
{
	switch (type & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) {
	case MAIL_CACHE_DECISION_NO:
	case MAIL_CACHE_DECISION_TEMP:
	case MAIL_CACHE_DECISION_YES:
		return TRUE;
	default:
		return FALSE;
	}
}

static int field_type_verify(struct mail_cache *cache, unsigned int idx,
			     enum mail_cache_field_type type, unsigned int size)
{
	const struct mail_cache_field *field = &cache->fields[idx].field;

	if (field->type != type) {
		mail_cache_set_corrupted(cache,
			"registered field %s type changed", field->name);
		return -1;
	}
	if (field->field_size != size && field_has_fixed_size(type)) {
		mail_cache_set_corrupted(cache,
			"registered field %s size changed", field->name);
		return -1;
	}
	return 0;
}

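/* Merge a re-registered field into its existing registration. A FORCED
   decision always replaces the old one; otherwise the decision can only
   be upgraded (NO -> TEMP -> YES). Any change made after the initial
   registration is marked dirty so it gets written back to the file's
   field header. */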
static void
mail_cache_field_update(struct mail_cache *cache,
			const struct mail_cache_field *newfield)
{
	struct mail_cache_field_private *orig;
	bool initial_registering;

	i_assert(newfield->type < MAIL_CACHE_FIELD_COUNT);

	/* are we still doing the initial cache field registering for
	   internal fields and for mail_*cache_fields settings? */
	initial_registering = cache->file_fields_count == 0;

	orig = &cache->fields[newfield->idx];
	if ((newfield->decision & MAIL_CACHE_DECISION_FORCED) != 0 ||
	    ((orig->field.decision & MAIL_CACHE_DECISION_FORCED) == 0 &&
	     newfield->decision > orig->field.decision)) {
		orig->field.decision = newfield->decision;
		if (!initial_registering)
			orig->decision_dirty = TRUE;
	}
	if (orig->field.last_used < newfield->last_used) {
		orig->field.last_used = newfield->last_used;
		if (!initial_registering)
			orig->decision_dirty = TRUE;
	}
	if (orig->decision_dirty)
		cache->field_header_write_pending = TRUE;

	(void)field_type_verify(cache, newfield->idx,
				newfield->type, newfield->field_size);
}

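/* Register fields into the in-memory field list, assigning each an idx.
   A field already registered under the same name keeps its index and only
   gets its registration merged (see mail_cache_field_update()).

   A minimal usage sketch - the field name here is illustrative, not
   something this file defines:

	struct mail_cache_field field = {
		.name = "hdr.x-example",
		.type = MAIL_CACHE_FIELD_HEADER,
		.decision = MAIL_CACHE_DECISION_TEMP,
	};
	mail_cache_register_fields(cache, &field, 1);
	// field.idx is now valid for cache lookups and additions
*/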
void mail_cache_register_fields(struct mail_cache *cache,
				struct mail_cache_field *fields,
				unsigned int fields_count)
{
	char *name;
	void *value;
	unsigned int new_idx;
	unsigned int i, j, registered_count;

	new_idx = cache->fields_count;
	for (i = 0; i < fields_count; i++) {
		if (hash_table_lookup_full(cache->field_name_hash,
					   fields[i].name, &name, &value)) {
			fields[i].idx = POINTER_CAST_TO(value, unsigned int);
			mail_cache_field_update(cache, &fields[i]);
			continue;
		}

		/* check if the same header is being registered in the
		   same field array */
		for (j = 0; j < i; j++) {
			if (strcasecmp(fields[i].name, fields[j].name) == 0) {
				fields[i].idx = fields[j].idx;
				break;
			}
		}

		if (j == i)
			fields[i].idx = new_idx++;
	}

	if (new_idx == cache->fields_count)
		return;

	/* @UNSAFE */
	cache->fields = i_realloc_type(cache->fields,
				       struct mail_cache_field_private,
				       cache->fields_count, new_idx);
	cache->field_file_map =
		i_realloc_type(cache->field_file_map, uint32_t,
			       cache->fields_count, new_idx);

	registered_count = cache->fields_count;
	for (i = 0; i < fields_count; i++) {
		unsigned int idx = fields[i].idx;

		if (idx < registered_count)
			continue;

		/* new index - save it */
		name = p_strdup(cache->field_pool, fields[i].name);
		cache->fields[idx].field = fields[i];
		cache->fields[idx].field.name = name;
		cache->fields[idx].field.last_used = fields[i].last_used;
		cache->field_file_map[idx] = (uint32_t)-1;

		if (!field_has_fixed_size(cache->fields[idx].field.type))
			cache->fields[idx].field.field_size = UINT_MAX;

		hash_table_insert(cache->field_name_hash, name,
				  POINTER_CAST(idx));
		registered_count++;
	}
	i_assert(registered_count == new_idx);
	cache->fields_count = new_idx;
}

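/* Return the in-memory index of the named field, or UINT_MAX if no such
   field has been registered. */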
unsigned int
mail_cache_register_lookup(struct mail_cache *cache, const char *name)
{
	char *key;
	void *value;

	if (hash_table_lookup_full(cache->field_name_hash, name, &key, &value))
		return POINTER_CAST_TO(value, unsigned int);
	else
		return UINT_MAX;
}

const struct mail_cache_field *
mail_cache_register_get_field(struct mail_cache *cache, unsigned int field_idx)
{
	i_assert(field_idx < cache->fields_count);

	return &cache->fields[field_idx].field;
}

struct mail_cache_field *
mail_cache_register_get_list(struct mail_cache *cache, pool_t pool,
			     unsigned int *count_r)
{
	struct mail_cache_field *list;
	unsigned int i;

	if (!cache->opened)
		(void)mail_cache_open_and_verify(cache);

	list = cache->fields_count == 0 ? NULL :
		p_new(pool, struct mail_cache_field, cache->fields_count);
	for (i = 0; i < cache->fields_count; i++) {
		list[i] = cache->fields[i].field;
		list[i].name = p_strdup(pool, list[i].name);
	}

	*count_r = cache->fields_count;
	return list;
}

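/* Find the offset of the latest field header by following the next_offset
   chain starting from the main cache header. Returns 0 and sets *offset_r
   on success (0 if the cache is unusable), or -1 on corruption or I/O
   error. If field_hdr_r is non-NULL, the header is also mapped and
   returned. */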
static int
mail_cache_header_fields_get_offset(struct mail_cache *cache,
				    uint32_t *offset_r,
				    const struct mail_cache_header_fields **field_hdr_r)
{
	const struct mail_cache_header_fields *field_hdr;
	struct mail_cache_header_fields tmp_field_hdr;
	const void *data;
	uint32_t offset = 0, next_offset, field_hdr_size;
	unsigned int next_count = 0;
	int ret;

	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		*offset_r = 0;
		if (field_hdr_r != NULL)
			*field_hdr_r = NULL;
		return 0;
	}

	/* find the latest header */
	offset = 0;
	next_offset = cache->last_field_header_offset != 0 ?
		cache->last_field_header_offset :
		mail_index_offset_to_uint32(cache->hdr->field_header_offset);
	while (next_offset != 0) {
		if (next_offset == offset) {
			mail_cache_set_corrupted(cache,
				"next_offset in field header loops");
			return -1;
		}
		/* In Dovecot v2.2+ we don't try to use any holes, so
		   next_offset must always be larger than the current offset.
		   This also makes it easier to guarantee there aren't any
		   loops (which we don't bother doing for old files) */
		if (next_offset < offset && cache->hdr->minor_version != 0) {
			mail_cache_set_corrupted(cache,
				"next_offset in field header decreases");
			return -1;
		}
		offset = next_offset;

		if (cache->mmap_base != NULL || cache->map_with_read) {
			ret = mail_cache_map(cache, offset, sizeof(*field_hdr),
					     &data);
			if (ret <= 0) {
				if (ret < 0)
					return -1;
				mail_cache_set_corrupted(cache,
					"header field next_offset points outside file");
				return -1;
			}
			field_hdr = data;
		} else {
			/* if we need to follow multiple offsets to get to
			   the last one, it's faster to just pread() the file
			   instead of going through cache */
			ret = pread_full(cache->fd, &tmp_field_hdr,
					 sizeof(tmp_field_hdr), offset);
			if (ret < 0) {
				mail_cache_set_syscall_error(cache, "pread()");
				return -1;
			}
			if (ret == 0) {
				mail_cache_set_corrupted(cache,
					"header field next_offset points outside file");
				return -1;
			}
			field_hdr = &tmp_field_hdr;
		}

		next_offset =
			mail_index_offset_to_uint32(field_hdr->next_offset);
		next_count++;
	}

	if (offset == 0) {
		mail_cache_set_corrupted(cache, "missing header fields");
		return -1;
	}
	cache->last_field_header_offset = offset;

	if (next_count > cache->index->optimization_set.cache.purge_header_continue_count) {
		mail_cache_purge_later(cache, t_strdup_printf(
			"Too many continued headers (%u)", next_count));
	}

	if (field_hdr_r != NULL) {
		/* detect corrupted size later */
		field_hdr_size = I_MAX(field_hdr->size, sizeof(*field_hdr));
		if (cache->file_cache != NULL) {
			/* invalidate the cache fields area to make sure we
			   get the latest cache decisions/last_used fields */
			file_cache_invalidate(cache->file_cache, offset,
					      field_hdr_size);
		}
		if (cache->read_buf != NULL)
			buffer_set_used_size(cache->read_buf, 0);
		ret = mail_cache_map(cache, offset, field_hdr_size, &data);
		if (ret < 0)
			return -1;
		if (ret == 0) {
			mail_cache_set_corrupted(cache,
				"header field size outside file");
			return -1;
		}
		*field_hdr_r = data;
	}
	*offset_r = offset;
	return 0;
}

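/* Read the latest field header from the cache file and merge it into the
   in-memory field registrations. On disk, the fixed-size
   struct mail_cache_header_fields is followed by parallel arrays indexed
   by the file's field number, with offsets computed by the
   MAIL_CACHE_FIELD_LAST_USED/SIZE/TYPE/DECISION/NAMES() macros:

     uint32_t last_used[fields_count];
     uint32_t size[fields_count];
     uint8_t  type[fields_count];
     uint8_t  decision[fields_count];
     char     names[];  (NUL-terminated, one name per field)
*/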
int mail_cache_header_fields_read(struct mail_cache *cache)
{
	const struct mail_cache_header_fields *field_hdr;
	struct mail_cache_field field;
	const uint32_t *last_used, *sizes;
	const uint8_t *types, *decisions;
	const char *p, *names, *end;
	char *orig_key;
	void *orig_value;
	unsigned int fidx, new_fields_count;
	struct mail_cache_purge_drop_ctx drop_ctx;
	uint32_t offset, i;

	if (mail_cache_header_fields_get_offset(cache, &offset, &field_hdr) < 0)
		return -1;

	if (offset == 0) {
		/* no fields - the file is empty */
		return 0;
	}

	/* check the fixed size of the header. name[] has to be checked
	   separately */
	if (field_hdr->fields_count > INT_MAX / MAIL_CACHE_FIELD_NAMES(1) ||
	    field_hdr->size < MAIL_CACHE_FIELD_NAMES(field_hdr->fields_count)) {
		mail_cache_set_corrupted(cache, "invalid field header size");
		return -1;
	}

	new_fields_count = field_hdr->fields_count;
	if (new_fields_count != 0) {
		cache->file_field_map =
			i_realloc_type(cache->file_field_map, unsigned int,
				       cache->file_fields_count, new_fields_count);
	} else {
		i_free_and_null(cache->file_field_map);
	}
	cache->file_fields_count = new_fields_count;

	last_used = CONST_PTR_OFFSET(field_hdr, MAIL_CACHE_FIELD_LAST_USED());
	sizes = CONST_PTR_OFFSET(field_hdr,
		MAIL_CACHE_FIELD_SIZE(field_hdr->fields_count));
	types = CONST_PTR_OFFSET(field_hdr,
		MAIL_CACHE_FIELD_TYPE(field_hdr->fields_count));
	decisions = CONST_PTR_OFFSET(field_hdr,
		MAIL_CACHE_FIELD_DECISION(field_hdr->fields_count));
	names = CONST_PTR_OFFSET(field_hdr,
		MAIL_CACHE_FIELD_NAMES(field_hdr->fields_count));
	end = CONST_PTR_OFFSET(field_hdr, field_hdr->size);
	i_assert(names <= end);

	/* clear the old mapping */
	for (i = 0; i < cache->fields_count; i++)
		cache->field_file_map[i] = (uint32_t)-1;

	mail_cache_purge_drop_init(cache, &cache->index->map->hdr, &drop_ctx);
	i_zero(&field);
	for (i = 0; i < field_hdr->fields_count; i++) {
		for (p = names; p != end && *p != '\0'; p++) ;
		if (p == end || *names == '\0') {
			mail_cache_set_corrupted(cache,
				"field header names corrupted");
			return -1;
		}

		/* types[i] == MAIL_CACHE_FIELD_COUNT is invalid as well -
		   letting it through would trip i_unreached() later when
		   the field gets registered. */
		if (types[i] >= MAIL_CACHE_FIELD_COUNT) {
			mail_cache_set_corrupted(cache, "field type corrupted");
			return -1;
		}
		if (!field_decision_is_valid(decisions[i])) {
			mail_cache_set_corrupted(cache,
				"field decision type corrupted");
			return -1;
		}

		/* ignore any forced-flags in the file */
		enum mail_cache_decision_type file_dec =
			decisions[i] & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED);

		if (hash_table_lookup_full(cache->field_name_hash, names,
					   &orig_key, &orig_value)) {
			/* already exists, see if decision can be updated */
			fidx = POINTER_CAST_TO(orig_value, unsigned int);
			enum mail_cache_decision_type cur_dec =
				cache->fields[fidx].field.decision;
			if ((cur_dec & MAIL_CACHE_DECISION_FORCED) != 0) {
				/* Forced decision. If the decision has
				   changed, update the fields in the file. */
				if ((cur_dec & ENUM_NEGATE(MAIL_CACHE_DECISION_FORCED)) != file_dec)
					cache->field_header_write_pending = TRUE;
			} else if (cache->fields[fidx].decision_dirty) {
				/* Decisions have recently been updated
				   internally. Don't change them. */
			} else {
				/* Use the decision from the cache file. */
				cache->fields[fidx].field.decision = file_dec;
			}
			if (field_type_verify(cache, fidx,
					      types[i], sizes[i]) < 0)
				return -1;
		} else {
			/* field is currently unknown, so just use whatever
			   exists in the file. */
			field.name = names;
			field.type = types[i];
			field.field_size = sizes[i];
			field.decision = file_dec;
			mail_cache_register_fields(cache, &field, 1);
			fidx = field.idx;
		}
		if (cache->field_file_map[fidx] != (uint32_t)-1) {
			mail_cache_set_corrupted(cache,
				"Duplicated field in header: %s", names);
			return -1;
		}
		cache->fields[fidx].used = TRUE;

		cache->field_file_map[fidx] = i;
		cache->file_field_map[i] = fidx;

		/* Update last_used if it's newer than ours. Note that the
		   last_used may have been overwritten while we were reading
		   this cache header. In theory this can mean that the
		   last_used field is only half-updated and contains garbage.
		   This practically won't matter, since the worst that can
		   happen is that we trigger a purge earlier than necessary.
		   The purging re-reads the last_used while cache is locked and
		   correctly figures out whether to drop the field. */
		if ((time_t)last_used[i] > cache->fields[fidx].field.last_used)
			cache->fields[fidx].field.last_used = last_used[i];

		switch (mail_cache_purge_drop_test(&drop_ctx, fidx)) {
		case MAIL_CACHE_PURGE_DROP_DECISION_NONE:
			break;
		case MAIL_CACHE_PURGE_DROP_DECISION_DROP:
			mail_cache_purge_later(cache, t_strdup_printf(
				"Drop old field %s (last_used=%"PRIdTIME_T")",
				cache->fields[fidx].field.name,
				cache->fields[fidx].field.last_used));
			break;
		case MAIL_CACHE_PURGE_DROP_DECISION_TO_TEMP:
			/* This cache decision change can cause the field to be
			   dropped for old mails, so do it via purging. */
			mail_cache_purge_later(cache, t_strdup_printf(
				"Change cache decision to temp for old field %s "
				"(last_used=%"PRIdTIME_T")",
				cache->fields[fidx].field.name,
				cache->fields[fidx].field.last_used));
			break;
		}

		names = p + 1;
	}
	return 0;
}

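/* Append one member of each field's registration to dest, in cache file
   field order, optionally followed by the newly wanted fields. The
   callers pass offsetof(struct mail_cache_field, ...), which works here
   because the public struct is the first member of
   struct mail_cache_field_private. */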
static void copy_to_buf(struct mail_cache *cache, buffer_t *dest, bool add_new,
			size_t offset, size_t size)
{
	const void *data;
	unsigned int i, field;

	/* copy the existing fields */
	for (i = 0; i < cache->file_fields_count; i++) {
		field = cache->file_field_map[i];
		data = CONST_PTR_OFFSET(&cache->fields[field], offset);
		buffer_append(dest, data, size);
	}
	if (!add_new)
		return;

	/* copy newly wanted fields */
	for (i = 0; i < cache->fields_count; i++) {
		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
			data = CONST_PTR_OFFSET(&cache->fields[i], offset);
			buffer_append(dest, data, size);
		}
	}
}

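/* Like copy_to_buf(), but for the enum members (type/decision): read an
   int-sized value and truncate it to the single byte stored on disk. */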
static void copy_to_buf_byte(struct mail_cache *cache, buffer_t *dest,
			     bool add_new, size_t offset)
{
	const int *data;
	unsigned int i, field;
	uint8_t byte;

	/* copy the existing fields */
	for (i = 0; i < cache->file_fields_count; i++) {
		field = cache->file_field_map[i];
		data = CONST_PTR_OFFSET(&cache->fields[field], offset);
		byte = (uint8_t)*data;
		buffer_append(dest, &byte, 1);
	}
	if (!add_new)
		return;

	/* copy newly wanted fields */
	for (i = 0; i < cache->fields_count; i++) {
		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
			data = CONST_PTR_OFFSET(&cache->fields[i], offset);
			byte = (uint8_t)*data;
			buffer_append(dest, &byte, 1);
		}
	}
}

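/* Rewrite the last_used and decision arrays of the existing field header
   in place. This never adds new fields; it only refreshes the values of
   fields already in the file. The cache must be locked. */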
static int mail_cache_header_fields_update_locked(struct mail_cache *cache)
{
	buffer_t *buffer;
	uint32_t i, offset, dec_offset;
	int ret = 0;

	if (mail_cache_header_fields_read(cache) < 0 ||
	    mail_cache_header_fields_get_offset(cache, &offset, NULL) < 0)
		return -1;

	buffer = t_buffer_create(256);

	copy_to_buf(cache, buffer, FALSE,
		    offsetof(struct mail_cache_field, last_used),
		    sizeof(uint32_t));
	ret = mail_cache_write(cache, buffer->data, buffer->used,
			       offset + MAIL_CACHE_FIELD_LAST_USED());
	if (ret == 0) {
		buffer_set_used_size(buffer, 0);
		copy_to_buf_byte(cache, buffer, FALSE,
				 offsetof(struct mail_cache_field, decision));

		dec_offset = offset +
			MAIL_CACHE_FIELD_DECISION(cache->file_fields_count);
		ret = mail_cache_write(cache, buffer->data, buffer->used,
				       dec_offset);
		if (ret == 0) {
			/* The decisions were serialized in file field order
			   above, so clear the dirty flags through the same
			   file_field_map indirection. */
			for (i = 0; i < cache->file_fields_count; i++) {
				unsigned int idx = cache->file_field_map[i];

				cache->fields[idx].decision_dirty = FALSE;
			}
		}
	}

	if (ret == 0)
		cache->field_header_write_pending = FALSE;
	return ret;
}

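/* Flush pending decision/last_used updates to the cache file, taking the
   cache lock first unless the caller already holds it. */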
int mail_cache_header_fields_update(struct mail_cache *cache)
{
	int ret;

	if (cache->locked) {
		T_BEGIN {
			ret = mail_cache_header_fields_update_locked(cache);
		} T_END;
		return ret;
	}

	if (mail_cache_lock(cache) <= 0)
		return -1;

	T_BEGIN {
		ret = mail_cache_header_fields_update_locked(cache);
	} T_END;
	i_assert(!cache->hdr_modified);
	mail_cache_unlock(cache);
	return ret;
}

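/* Serialize all field registrations into dest using the on-disk field
   header layout: existing fields first, keeping their file order, then
   the newly wanted fields. The result is zero-padded to 32-bit
   alignment. */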
void mail_cache_header_fields_get(struct mail_cache *cache, buffer_t *dest)
{
	struct mail_cache_header_fields hdr;
	unsigned int field;
	const char *name;
	uint32_t i;

	i_zero(&hdr);
	hdr.fields_count = cache->file_fields_count;
	for (i = 0; i < cache->fields_count; i++) {
		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i))
			hdr.fields_count++;
	}
	buffer_append(dest, &hdr, sizeof(hdr));

	/* we have to keep the field order for the existing fields. */
	copy_to_buf(cache, dest, TRUE,
		    offsetof(struct mail_cache_field, last_used),
		    sizeof(uint32_t));
	copy_to_buf(cache, dest, TRUE,
		    offsetof(struct mail_cache_field, field_size),
		    sizeof(uint32_t));
	copy_to_buf_byte(cache, dest, TRUE,
			 offsetof(struct mail_cache_field, type));
	copy_to_buf_byte(cache, dest, TRUE,
			 offsetof(struct mail_cache_field, decision));

	i_assert(dest->used == sizeof(hdr) +
		 (sizeof(uint32_t)*2 + 2) * hdr.fields_count);

	/* add existing fields' names */
	for (i = 0; i < cache->file_fields_count; i++) {
		field = cache->file_field_map[i];
		name = cache->fields[field].field.name;
		buffer_append(dest, name, strlen(name)+1);
	}
	/* add newly wanted fields' names */
	for (i = 0; i < cache->fields_count; i++) {
		if (CACHE_FIELD_IS_NEWLY_WANTED(cache, i)) {
			name = cache->fields[i].field.name;
			buffer_append(dest, name, strlen(name)+1);
		}
	}

	hdr.size = dest->used;
	buffer_write(dest, 0, &hdr, sizeof(hdr));

	if ((hdr.size & 3) != 0)
		buffer_append_zero(dest, 4 - (hdr.size & 3));
}

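/* Return the file offset where the offset of the next field header must
   be written: the main header's field_header_offset if no field header
   exists yet, otherwise the latest field header's next_offset. */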
int mail_cache_header_fields_get_next_offset(struct mail_cache *cache,
					     uint32_t *offset_r)
{
	if (mail_cache_header_fields_get_offset(cache, offset_r, NULL) < 0)
		return -1;

	if (*offset_r == 0) {
		*offset_r = offsetof(struct mail_cache_header,
				     field_header_offset);
	} else {
		*offset_r += offsetof(struct mail_cache_header_fields,
				      next_offset);
	}
	return 0;
}