/* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */

#include "lib.h"
#include "array.h"
#include "buffer.h"
#include "str.h"
#include "mail-cache-private.h"


#define CACHE_PREFETCH IO_BLOCK_SIZE

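/* Create a debug event for a cache lookup of the given seq. The event is
   named "mail_cache_lookup_finished" and carries the seq and UID of the
   mail, with a "UID <n>: " log prefix appended. */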
static struct event *mail_cache_lookup_event(struct mail_cache_view *view,
					     uint32_t seq)
{
	struct event *e = event_create(view->cache->event);
	uint32_t uid;
	mail_index_lookup_uid(view->view, seq, &uid);
	event_set_name(e, "mail_cache_lookup_finished");
	event_add_int(e, "seq", seq);
	event_add_int(e, "uid", uid);
	event_set_append_log_prefix(e, t_strdup_printf("UID %u: ", uid));
	return e;
}

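/* Map the cache record at the given file offset and validate it. Returns 0
   and sets *rec_r on success, or -1 if the offset is misaligned, the record
   points outside the file, or the record size is too small. In the failure
   cases the cache file is marked corrupted. */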
int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
			  const struct mail_cache_record **rec_r)
{
	const struct mail_cache_record *rec;
	const void *data;
	int ret;

	i_assert(offset != 0);

	if (offset % sizeof(uint32_t) != 0) {
		/* records are always 32-bit aligned */
		mail_cache_set_corrupted(cache, "invalid record offset");
		return -1;
	}

	/* we don't know yet how large the record is, so just guess */
	if (mail_cache_map(cache, offset, sizeof(*rec) + CACHE_PREFETCH,
			   &data) < 0)
		return -1;

	if (offset + sizeof(*rec) > cache->mmap_length) {
		mail_cache_set_corrupted(cache, "record points outside file");
		return -1;
	}
	rec = data;

	if (rec->size < sizeof(*rec)) {
		mail_cache_set_corrupted(cache, "invalid record size");
		return -1;
	}
	if (rec->size > CACHE_PREFETCH) {
		/* larger than we guessed. map the rest of the record. */
		if ((ret = mail_cache_map(cache, offset, rec->size, &data)) < 0)
			return -1;
		if (ret == 0) {
			mail_cache_set_corrupted(cache, "record points outside file");
			return -1;
		}
		rec = data;
	}

	*rec_r = rec;
	return 0;
}

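/* Return the cache file offset stored in the index's cache extension for
   this seq, or 0 if the mail has no cached data. On a nonzero return,
   *reset_id_r is set to the reset_id of the index map the offset came from,
   identifying the cache file generation the offset refers to. */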
uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
				      uint32_t seq, uint32_t *reset_id_r)
{
	struct mail_cache *cache = mail_index_view_get_index(view)->cache;
	struct mail_index_map *map;
	const void *data;
	uint32_t offset;

	mail_index_lookup_ext_full(view, seq, cache->ext_id, &map, &data, NULL);
	if (data == NULL) {
		/* no cache offsets */
		return 0;
	}
	offset = *((const uint32_t *)data);
	if (offset == 0)
		return 0;

	if (!mail_index_ext_get_reset_id(view, map, cache->ext_id, reset_id_r))
		i_unreached();
	return offset;
}

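/* Look up the newest cache file offset for the mail, making sure it refers
   to the currently open cache file. If the stored reset_id doesn't match the
   cache header's file_seq, sync and look it up again. Returns 1 if *offset_r
   was set, 0 if the mail has no usable cached data, -1 on error. */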
static int
mail_cache_lookup_offset(struct mail_cache *cache, struct mail_index_view *view,
			 uint32_t seq, uint32_t *offset_r)
{
	uint32_t offset, reset_id, reset_id2;
	int ret;

	offset = mail_cache_lookup_cur_offset(view, seq, &reset_id);
	if (offset == 0)
		return 0;

	while (cache->hdr->file_seq != reset_id) {
		/* reset_id doesn't match - sync the index/cache */
		if ((ret = mail_cache_sync_reset_id(cache)) <= 0)
			return ret;

		/* lookup again after syncing */
		offset = mail_cache_lookup_cur_offset(view, seq, &reset_id2);
		if (offset == 0)
			return 0;
		if (cache->hdr->file_seq == reset_id2)
			break; /* match - all good */
		if (reset_id == reset_id2) {
			/* reset_id didn't change after sync. This means it's
			   pointing to an old already deleted cache file. */
			return 0;
		}
		/* reset_id changed - try again */
		reset_id = reset_id2;
	}

	*offset_r = offset;
	return 1;
}

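/* Loop detection for following prev_offset chains. Returns TRUE if the sum
   of record sizes seen so far exceeds the span of file offsets accessed,
   which can only happen if the record chain is circular. */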
bool mail_cache_track_loops(struct mail_cache_loop_track *loop_track,
			    uoff_t offset, uoff_t size)
{
	i_assert(offset != 0);
	i_assert(size != 0);

	/* looping happens only in rare error conditions, so it's enough if we
	   just catch it eventually. we do this by checking if we've seen
	   more record data than possible in the accessed file area. */
	if (loop_track->size_sum == 0) {
		/* first call */
		loop_track->min_offset = offset;
		loop_track->max_offset = offset + size;
	} else {
		if (loop_track->min_offset > offset)
			loop_track->min_offset = offset;
		if (loop_track->max_offset < offset + size)
			loop_track->max_offset = offset + size;
	}

	loop_track->size_sum += size;
	return loop_track->size_sum >
		(loop_track->max_offset - loop_track->min_offset);
}

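/* Initialize an iterator over all cached fields of the given seq. The first
   record's offset is looked up here; records still in an uncommitted
   transaction and records newly appended to the cache file are checked
   later by mail_cache_lookup_iter_next_record(). */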
void mail_cache_lookup_iter_init(struct mail_cache_view *view, uint32_t seq,
				 struct mail_cache_lookup_iterate_ctx *ctx_r)
{
	struct mail_cache_lookup_iterate_ctx *ctx = ctx_r;
	int ret;

	if (!view->cache->opened)
		(void)mail_cache_open_and_verify(view->cache);

	i_zero(ctx);
	ctx->view = view;
	ctx->seq = seq;

	if (!MAIL_CACHE_IS_UNUSABLE(view->cache)) {
		/* look up the first offset */
		ret = mail_cache_lookup_offset(view->cache, view->view, seq,
					       &ctx->offset);
		if (ret <= 0) {
			ctx->stop = TRUE;
			ctx->failed = ret < 0;
		}
	}
	ctx->remap_counter = view->cache->remap_counter;

	i_zero(&view->loop_track);
}

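/* Check whether the view's transaction has an in-memory cache record for
   this seq. If so, start iterating it and return TRUE. */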
static bool
mail_cache_lookup_iter_transaction(struct mail_cache_lookup_iterate_ctx *ctx)
{
	ctx->rec = mail_cache_transaction_lookup_rec(ctx->view->transaction,
						     ctx->seq,
						     &ctx->trans_next_idx);
	if (ctx->rec == NULL)
		return FALSE;

	ctx->inmemory_field_idx = TRUE;
	ctx->remap_counter = ctx->view->cache->remap_counter;
	ctx->pos = sizeof(*ctx->rec);
	ctx->rec_size = ctx->rec->size;
	return TRUE;
}

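/* Advance the iterator to the next cache record for this mail. Records are
   chained via prev_offset, newest first. When the chain ends, data still in
   the uncommitted transaction is checked first, then data already written to
   the cache file for newly appended mails. Returns 1 if a record was found,
   0 at the end of the data, -1 on error. */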
static int
mail_cache_lookup_iter_next_record(struct mail_cache_lookup_iterate_ctx *ctx)
{
	struct mail_cache_view *view = ctx->view;

	if (ctx->failed)
		return -1;

	if (ctx->rec != NULL)
		ctx->offset = ctx->rec->prev_offset;
	if (ctx->offset == 0) {
		/* end of this record list. check newly appended data. */
		if (view->trans_seq1 > ctx->seq ||
		    view->trans_seq2 < ctx->seq)
			return 0;
		/* check data still in memory. this works for recent mails
		   even with INDEX=MEMORY */
		if (!ctx->memory_appends_checked) {
			if (mail_cache_lookup_iter_transaction(ctx))
				return 1;
			ctx->memory_appends_checked = TRUE;
		}
		if (MAIL_CACHE_IS_UNUSABLE(view->cache) || ctx->stop)
			return 0;

		/* check data already written to cache file */
		if (ctx->disk_appends_checked ||
		    mail_cache_lookup_offset(view->cache, view->trans_view,
					     ctx->seq, &ctx->offset) <= 0)
			return 0;

		ctx->disk_appends_checked = TRUE;
		ctx->remap_counter = view->cache->remap_counter;
		i_zero(&view->loop_track);
	}

	if (ctx->stop)
		return 0;

	/* look up the next record */
	if (mail_cache_get_record(view->cache, ctx->offset, &ctx->rec) < 0)
		return -1;
	if (mail_cache_track_loops(&view->loop_track, ctx->offset,
				   ctx->rec->size)) {
		mail_cache_set_corrupted(view->cache,
					 "record list is circular");
		return -1;
	}
	ctx->inmemory_field_idx = FALSE;
	ctx->remap_counter = view->cache->remap_counter;

	ctx->pos = sizeof(*ctx->rec);
	ctx->rec_size = ctx->rec->size;
	return 1;
}

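/* Translate the file field number at the current record position into the
   process-wide field index. For in-memory (transaction) records the number
   already is the field index. If the file field number isn't known yet, the
   fields header is re-read from the cache file (skipped while the cache is
   locked, e.g. during purging). Re-reading may remap the file and invalidate
   the current record pointer, so the record is fetched again afterwards. */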
static int
mail_cache_lookup_rec_get_field(struct mail_cache_lookup_iterate_ctx *ctx,
				unsigned int *field_idx_r)
{
	struct mail_cache *cache = ctx->view->cache;
	uint32_t file_field;

	file_field = *((const uint32_t *)CONST_PTR_OFFSET(ctx->rec, ctx->pos));
	if (ctx->inmemory_field_idx) {
		*field_idx_r = file_field;
		return 0;
	}

	if (file_field >= cache->file_fields_count) {
		/* new field, have to re-read fields header to figure
		   out its size. don't do this if we're purging. */
		if (!cache->locked) {
			if (mail_cache_header_fields_read(cache) < 0)
				return -1;
		}
		if (file_field >= cache->file_fields_count) {
			mail_cache_set_corrupted(cache,
				"field index too large (%u >= %u)",
				file_field, cache->file_fields_count);
			return -1;
		}

		/* field reading might have re-mmaped the file and
		   caused rec pointer to break. need to get it again. */
		if (mail_cache_get_record(cache, ctx->offset, &ctx->rec) < 0)
			return -1;
		ctx->remap_counter = cache->remap_counter;
	}

	*field_idx_r = cache->file_field_map[file_field];
	return 0;
}

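/* Return the next cached field for the iterated mail in *field_r. Returns 1
   if a field was returned, 0 when there are no more fields, -1 if the cache
   is corrupted. field_r->data points directly into the mapped cache file (or
   the in-memory record), so it presumably stays valid only as long as the
   file isn't remapped, which the remap_counter assertion below enforces. */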
int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx,
				struct mail_cache_iterate_field *field_r)
{
	struct mail_cache *cache = ctx->view->cache;
	unsigned int field_idx;
	unsigned int data_size;
	int ret;

	i_assert(ctx->remap_counter == cache->remap_counter);

	if (ctx->pos + sizeof(uint32_t) > ctx->rec_size) {
		if (ctx->pos != ctx->rec_size) {
			mail_cache_set_corrupted(cache,
				"record has invalid size");
			return -1;
		}

		if ((ret = mail_cache_lookup_iter_next_record(ctx)) <= 0)
			return ret;
	}

	/* return the next field */
	if (mail_cache_lookup_rec_get_field(ctx, &field_idx) < 0)
		return -1;
	ctx->pos += sizeof(uint32_t);

	data_size = cache->fields[field_idx].field.field_size;
	if (data_size == UINT_MAX &&
	    ctx->pos + sizeof(uint32_t) <= ctx->rec->size) {
		/* variable size field. get its size from the file. */
		data_size = *((const uint32_t *)
			      CONST_PTR_OFFSET(ctx->rec, ctx->pos));
		ctx->pos += sizeof(uint32_t);
	}

	if (ctx->rec->size - ctx->pos < data_size) {
		mail_cache_set_corrupted(cache,
			"record continues outside its allocated size");
		return -1;
	}

	field_r->field_idx = field_idx;
	field_r->data = CONST_PTR_OFFSET(ctx->rec, ctx->pos);
	field_r->size = data_size;
	field_r->offset = ctx->offset + ctx->pos;

	/* each field begins from a 32-bit aligned position within the record */
	ctx->pos += (data_size + sizeof(uint32_t)-1) & ~(sizeof(uint32_t)-1);
	return 1;
}

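/* Refresh the view's "which fields exist for this seq" cache. The buffer
   maps field_idx to a one-byte generation value; a byte equal to the current
   cached_exists_value means the field exists for cached_exists_seq. When the
   generation counter wraps to 0, the buffer is cleared and counting restarts
   from 1. */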
static int mail_cache_seq(struct mail_cache_view *view, uint32_t seq)
{
	struct mail_cache_lookup_iterate_ctx iter;
	struct mail_cache_iterate_field field;
	int ret;

	view->cached_exists_value = (view->cached_exists_value + 1) & UINT8_MAX;
	if (view->cached_exists_value == 0) {
		/* wrapped, we'll have to clear the buffer */
		buffer_set_used_size(view->cached_exists_buf, 0);
		view->cached_exists_value++;
	}
	view->cached_exists_seq = seq;

	mail_cache_lookup_iter_init(view, seq, &iter);
	while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
		buffer_write(view->cached_exists_buf, field.field_idx,
			     &view->cached_exists_value, 1);
	}
	return ret;
}

int mail_cache_field_exists(struct mail_cache_view *view, uint32_t seq,
			    unsigned int field)
{
	const uint8_t *data;

	i_assert(seq > 0);

	/* NOTE: view might point to a non-committed transaction that has
	   fields that don't yet exist in the cache file. So don't add any
	   fast-paths checking whether the field exists in the file. */

	/* FIXME: we should discard the cache if view has been synced */
	if (view->cached_exists_seq != seq) {
		if (mail_cache_seq(view, seq) < 0)
			return -1;
	}

	data = view->cached_exists_buf->data;
	return (field < view->cached_exists_buf->used &&
		data[field] == view->cached_exists_value) ? 1 : 0;
}

bool mail_cache_field_exists_any(struct mail_cache_view *view, uint32_t seq)
{
	uint32_t reset_id;

	return mail_cache_lookup_cur_offset(view->view, seq, &reset_id) != 0;
}

enum mail_cache_decision_type
mail_cache_field_get_decision(struct mail_cache *cache, unsigned int field_idx)
{
	i_assert(field_idx < cache->fields_count);

	return cache->fields[field_idx].field.decision;
}

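/* Look up a bitmask field: all cached instances of the field are OR'ed
   together into dest_buf, which is first zero-filled to field_size bytes.
   Returns 1 if at least one instance was found, 0 if none, -1 on error. */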
static int
mail_cache_lookup_bitmask(struct mail_cache_lookup_iterate_ctx *iter,
			  unsigned int field_idx, unsigned int field_size,
			  buffer_t *dest_buf)
{
	struct mail_cache_iterate_field field;
	const unsigned char *src;
	unsigned char *dest;
	unsigned int i;
	bool found = FALSE;
	int ret;

	/* make sure all bits are cleared first */
	buffer_write_zero(dest_buf, 0, field_size);

	while ((ret = mail_cache_lookup_iter_next(iter, &field)) > 0) {
		if (field.field_idx != field_idx)
			continue;

		/* merge all bits */
		src = field.data;
		dest = buffer_get_space_unsafe(dest_buf, 0, field.size);
		for (i = 0; i < field.size; i++)
			dest[i] |= src[i];
		found = TRUE;
	}
	return ret < 0 ? -1 : (found ? 1 : 0);
}

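/* Look up a single field for the given seq and append its data to dest_buf.
   Returns 1 if the field was found, 0 if not, -1 on error. Bitmask fields
   are merged from all records; for other field types the first (newest)
   instance is returned. A debug event is emitted for the lookup. */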
int mail_cache_lookup_field(struct mail_cache_view *view, buffer_t *dest_buf,
			    uint32_t seq, unsigned int field_idx)
{
	const struct mail_cache_field *field_def;
	struct mail_cache_lookup_iterate_ctx iter;
	struct mail_cache_iterate_field field;
	int ret;
	struct event *lookup_event;

	ret = mail_cache_field_exists(view, seq, field_idx);
	mail_cache_decision_state_update(view, seq, field_idx);
	if (ret <= 0)
		return ret;

	lookup_event = mail_cache_lookup_event(view, seq);

	/* the field should exist */
	mail_cache_lookup_iter_init(view, seq, &iter);
	field_def = &view->cache->fields[field_idx].field;
	event_add_str(lookup_event, "field", field_def->name);
	if (field_def->type == MAIL_CACHE_FIELD_BITMASK) {
		ret = mail_cache_lookup_bitmask(&iter, field_idx,
						field_def->field_size,
						dest_buf);
	} else {
		/* return the first one that's found. if there are multiple
		   they're all identical. */
		while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
			if (field.field_idx == field_idx) {
				buffer_append(dest_buf, field.data, field.size);
				break;
			}
		}
	}
	e_debug(lookup_event, "Looked up field %s from mail cache", field_def->name);
	event_unref(&lookup_event);
	return ret;
}

struct header_lookup_data {
	uint32_t data_size;
	const unsigned char *data;
};

struct header_lookup_line {
	uint32_t line_num;
	struct header_lookup_data *data;
};

struct header_lookup_context {
	struct mail_cache_view *view;
	pool_t pool;
	ARRAY(struct header_lookup_line) lines;
};

enum {
	HDR_FIELD_STATE_DONTWANT = 0,
	HDR_FIELD_STATE_WANT,
	HDR_FIELD_STATE_SEEN
};

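/* Save one cached header field's lines into the lookup context. The cached
   data layout is { line_nums[], 0, "header text" }: a zero-terminated list
   of 32-bit line numbers followed by the concatenated header lines. The
   header text is copied into ctx->pool and one header_lookup_line entry is
   added per line number, all sharing the same header_lookup_data. */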
static void header_lines_save(struct header_lookup_context *ctx,
			      const struct mail_cache_iterate_field *field)
{
	const uint32_t *lines = field->data;
	uint32_t data_size = field->size;
	struct header_lookup_line hdr_line;
	struct header_lookup_data *hdr_data;
	void *data_dup;
	unsigned int i, lines_count, pos;

	/* data = { line_nums[], 0, "headers" } */
	for (i = 0; data_size >= sizeof(uint32_t); i++) {
		data_size -= sizeof(uint32_t);
		if (lines[i] == 0)
			break;
	}
	lines_count = i;
	pos = (lines_count+1) * sizeof(uint32_t);

	hdr_data = p_new(ctx->pool, struct header_lookup_data, 1);
	hdr_data->data_size = data_size;
	if (data_size > 0) {
		hdr_data->data = data_dup =
			p_malloc(ctx->pool, data_size);
		memcpy(data_dup, CONST_PTR_OFFSET(field->data, pos), data_size);
	}

	for (i = 0; i < lines_count; i++) {
		hdr_line.line_num = lines[i];
		hdr_line.data = hdr_data;
		array_push_back(&ctx->lines, &hdr_line);
	}
}

static int header_lookup_line_cmp(const struct header_lookup_line *l1,
				  const struct header_lookup_line *l2)
{
	return (int)l1->line_num - (int)l2->line_num;
}

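/* Look up all the wanted cached header fields for seq and write the headers
   to dest in their original message order. Returns 1 if all fields were
   found, 0 if any of them was missing, -1 on error. Header data that must
   stay allocated is stored in *pool_r, which the caller must unref. */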
static int
mail_cache_lookup_headers_real(struct mail_cache_view *view, string_t *dest,
			       uint32_t seq, const unsigned int field_idxs[],
			       unsigned int fields_count, pool_t *pool_r)
{
	struct mail_cache_lookup_iterate_ctx iter;
	struct mail_cache_iterate_field field;
	struct header_lookup_context ctx;
	struct header_lookup_line *lines;
	const unsigned char *p, *start, *end;
	uint8_t *field_state;
	unsigned int i, count, max_field = 0;
	size_t hdr_size;
	uint8_t want = HDR_FIELD_STATE_WANT;
	buffer_t *buf;
	int ret;
	struct event *lookup_event;

	*pool_r = NULL;

	if (fields_count == 0)
		return 1;

	/* update the decision state regardless of whether the fields
	   actually exist or not. */
	for (i = 0; i < fields_count; i++)
		mail_cache_decision_state_update(view, seq, field_idxs[i]);

	/* mark all the fields we want to find. */
	buf = t_buffer_create(32);
	for (i = 0; i < fields_count; i++) {
		if (field_idxs[i] > max_field)
			max_field = field_idxs[i];

		buffer_write(buf, field_idxs[i], &want, 1);
	}
	field_state = buffer_get_modifiable_data(buf, NULL);

	/* lookup the fields */
	i_zero(&ctx);
	ctx.view = view;
	ctx.pool = *pool_r = pool_alloconly_create(MEMPOOL_GROWING"mail cache headers", 1024);
	t_array_init(&ctx.lines, 32);

	lookup_event = mail_cache_lookup_event(view, seq);
	mail_cache_lookup_iter_init(view, seq, &iter);
	while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
		if (field.field_idx > max_field ||
		    field_state[field.field_idx] != HDR_FIELD_STATE_WANT) {
			/* a) don't want it, b) duplicate */
		} else {
			field_state[field.field_idx] = HDR_FIELD_STATE_SEEN;
			header_lines_save(&ctx, &field);
		}
		const char *field_name = view->cache->fields[field.field_idx].field.name;
		e_debug(event_create_passthrough(lookup_event)->
			add_str("field", field_name)->event(),
			"Looked up field %s from mail cache", field_name);
	}
	event_unref(&lookup_event);
	if (ret < 0)
		return -1;

	/* check that all fields were found */
	for (i = 0; i <= max_field; i++) {
		if (field_state[i] == HDR_FIELD_STATE_WANT)
			return 0;
	}

	/* we need to return headers in the order they existed originally.
	   we can do this by sorting the saved lines by their line numbers. */
	array_sort(&ctx.lines, header_lookup_line_cmp);
	lines = array_get_modifiable(&ctx.lines, &count);

	/* then start filling dest buffer from the headers */
	for (i = 0; i < count; i++) {
		start = lines[i].data->data;
		end = start + lines[i].data->data_size;

		/* find the end of the (multiline) header */
		for (p = start; p != end; p++) {
			if (*p == '\n' &&
			    (p+1 == end || (p[1] != ' ' && p[1] != '\t'))) {
				p++;
				break;
			}
		}
		hdr_size = (size_t)(p - start);
		buffer_append(dest, start, hdr_size);

		/* if there are more lines for this header field, they continue
		   after this one in the same data block, so skip past this
		   line. */
		lines[i].data->data += hdr_size;
		lines[i].data->data_size -= hdr_size;
	}
	return 1;
}

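/* Public wrapper for mail_cache_lookup_headers_real(). The lookup uses
   data-stack allocations, so it is run inside a temporary data-stack frame,
   except when dest itself lives on the data stack - presumably because
   growing dest inside the inner frame could then be freed at T_END. */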
int mail_cache_lookup_headers(struct mail_cache_view *view, string_t *dest,
			      uint32_t seq, const unsigned int field_idxs[],
			      unsigned int fields_count)
{
	pool_t pool = NULL;
	int ret;

	if (buffer_get_pool(dest)->datastack_pool)
		ret = mail_cache_lookup_headers_real(view, dest, seq,
						     field_idxs, fields_count,
						     &pool);
	else T_BEGIN {
		ret = mail_cache_lookup_headers_real(view, dest, seq,
						     field_idxs, fields_count,
						     &pool);
	} T_END;
	pool_unref(&pool);
	return ret;
}

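/* Find the highest seq below below_seq that has any cached data, for use in
   "why isn't this mail cached" diagnostics. The result is memoized in
   view->reason_cache so repeated calls don't rescan the same range; the memo
   is reused only while the view's log head position stays the same. */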
static uint32_t
mail_cache_get_highest_seq_with_cache(struct mail_cache_view *view,
				      uint32_t below_seq, uint32_t *reset_id_r)
{
	struct mail_cache_missing_reason_cache *rc = &view->reason_cache;
	uint32_t seq = below_seq-1, highest_checked_seq = 0;

	/* find the newest mail that has anything in cache */
	if (rc->log_file_head_offset == view->view->log_file_head_offset &&
	    rc->log_file_head_seq == view->view->log_file_head_seq) {
		/* reason_cache matches the current view - we can use it */
		highest_checked_seq = rc->highest_checked_seq;
	} else {
		rc->log_file_head_offset = view->view->log_file_head_offset;
		rc->log_file_head_seq = view->view->log_file_head_seq;
	}
	rc->highest_checked_seq = below_seq;

	/* first check anything not already in reason_cache */
	for (; seq > highest_checked_seq; seq--) {
		if (mail_cache_lookup_cur_offset(view->view, seq, reset_id_r) != 0) {
			rc->highest_seq_with_cache = seq;
			rc->reset_id = *reset_id_r;
			return seq;
		}
	}
	if (seq == 0)
		return 0;
	/* then return the result from cache */
	*reset_id_r = rc->reset_id;
	return rc->highest_seq_with_cache;
}

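/* Return a human-readable reason why the given mail has no data in the
   cache, e.g. for debug logging: the mail was expunged, the cache file is
   unusable, the reset_ids don't match, or the mail simply was never
   cached. */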
const char *
mail_cache_get_missing_reason(struct mail_cache_view *view, uint32_t seq)
{
	uint32_t offset, reset_id;

	if (mail_index_is_expunged(view->view, seq))
		return "Mail is already expunged";

	if (MAIL_CACHE_IS_UNUSABLE(view->cache))
		return "Cache file is unusable";

	offset = mail_cache_lookup_cur_offset(view->view, seq, &reset_id);
	if (offset != 0) {
		if (view->cache->hdr->file_seq != reset_id) {
			return t_strdup_printf(
				"Index reset_id=%u doesn't match cache reset_id=%u",
				reset_id, view->cache->hdr->file_seq);
		}
		return t_strdup_printf(
			"Mail has other cached fields, reset_id=%u", reset_id);
	}
	seq = mail_cache_get_highest_seq_with_cache(view, seq, &reset_id);
	if (seq == 0) {
		return t_strdup_printf("Cache file is empty, reset_id=%u",
				       view->cache->hdr->file_seq);
	}

	uint32_t uid;
	mail_index_lookup_uid(view->view, seq, &uid);

	if (view->cache->hdr->file_seq != reset_id) {
		return t_strdup_printf(
			"Mail not cached, highest cached seq=%u uid=%u: "
			"Index reset_id=%u doesn't match cache reset_id=%u",
			seq, uid, reset_id, view->cache->hdr->file_seq);
	}
	return t_strdup_printf(
		"Mail not cached, highest cached seq=%u uid=%u: reset_id=%u",
		seq, uid, reset_id);
}