/* Copyright (c) 2004-2018 Dovecot authors, see the included COPYING file */

#include "lib.h"
#include "ioloop.h"
#include "array.h"
#include "mmap-util.h"
#include "mail-index-modseq.h"
#include "mail-index-view-private.h"
#include "mail-index-sync-private.h"
#include "mail-transaction-log.h"
#include "mail-transaction-log-private.h"

/* If we have fewer than this many bytes to sync from the log file, don't
   bother reading the main index */
#define MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE 2048

static void
mail_index_sync_update_log_offset(struct mail_index_sync_map_ctx *ctx,
				  struct mail_index_map *map, bool eol)
{
	uint32_t prev_seq;
	uoff_t prev_offset;

	mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
					       &prev_seq, &prev_offset);
	if (prev_seq == 0) {
		/* handling lost changes in view syncing */
		return;
	}

	if (!eol) {
		if (prev_offset == ctx->ext_intro_end_offset &&
		    prev_seq == ctx->ext_intro_seq) {
			/* previous transaction was an extension
			   introduction; we probably came here from
			   mail_index_sync_ext_reset(). if any more views
			   want to continue syncing, they need the intro,
			   so back up a bit more.

			   don't do this when the extension intro is the
			   last transaction in the log (eol=TRUE), so we
			   don't keep trying to sync it over and over
			   again. */
			prev_offset = ctx->ext_intro_offset;
		}
		map->hdr.log_file_seq = prev_seq;
	} else {
		i_assert(ctx->view->index->log->head->hdr.file_seq == prev_seq);
		if (map->hdr.log_file_seq != prev_seq) {
			map->hdr.log_file_seq = prev_seq;
			map->hdr.log_file_tail_offset = 0;
		}
	}
	map->hdr.log_file_head_offset = prev_offset;
}

static void mail_index_sync_replace_map(struct mail_index_sync_map_ctx *ctx,
					struct mail_index_map *map)
{
	struct mail_index_view *view = ctx->view;

	i_assert(view->map != map);

	mail_index_sync_update_log_offset(ctx, view->map, FALSE);
	mail_index_unmap(&view->map);
	view->map = map;

	if (ctx->type != MAIL_INDEX_SYNC_HANDLER_VIEW)
		view->index->map = map;

	mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
}

static struct mail_index_map *
mail_index_sync_move_to_private_memory(struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_map *map = ctx->view->map;

	if (map->refcount > 1) {
		/* Multiple views point to this map. Make a copy of the map
		   (but not rec_map). */
		map = mail_index_map_clone(map);
		mail_index_sync_replace_map(ctx, map);
		i_assert(ctx->view->map == map);
	}

	if (!MAIL_INDEX_MAP_IS_IN_MEMORY(ctx->view->map)) {
		/* map points to mmap()ed area, copy it into memory. */
		mail_index_map_move_to_memory(ctx->view->map);
		mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
	}
	return map;
}

struct mail_index_map *
mail_index_sync_get_atomic_map(struct mail_index_sync_map_ctx *ctx)
{
	/* First make sure we have a private map with rec_map pointing to
	   memory. */
	(void)mail_index_sync_move_to_private_memory(ctx);
	/* Next make sure the rec_map is also private to us. */
	mail_index_record_map_move_to_private(ctx->view->map);
	mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
	return ctx->view->map;
}

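/* Adjust the seen/deleted message counters in *hdr when one message's flags
   change from old_flags to new_flags. A sketch of the bookkeeping: marking
   the last unseen message as seen makes seen_messages_count equal to
   messages_count, so first_unseen_uid_lowwater can be raised all the way to
   next_uid; clearing the deleted flag on the last deleted message similarly
   raises first_deleted_uid_lowwater. Returns -1 with *error_r set if a
   counter would become inconsistent. */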
static int
mail_index_header_update_counts(struct mail_index_header *hdr,
				uint8_t old_flags, uint8_t new_flags,
				const char **error_r)
{
	if (((old_flags ^ new_flags) & MAIL_SEEN) != 0) {
		/* different seen-flag */
		if ((old_flags & MAIL_SEEN) != 0) {
			if (hdr->seen_messages_count == 0) {
				*error_r = "Seen counter wrong";
				return -1;
			}
			hdr->seen_messages_count--;
		} else {
			if (hdr->seen_messages_count >= hdr->messages_count) {
				*error_r = "Seen counter wrong";
				return -1;
			}

			if (++hdr->seen_messages_count == hdr->messages_count)
				hdr->first_unseen_uid_lowwater = hdr->next_uid;
		}
	}

	if (((old_flags ^ new_flags) & MAIL_DELETED) != 0) {
		/* different deleted-flag */
		if ((old_flags & MAIL_DELETED) == 0) {
			hdr->deleted_messages_count++;
			if (hdr->deleted_messages_count > hdr->messages_count) {
				*error_r = "Deleted counter wrong";
				return -1;
			}
		} else {
			if (hdr->deleted_messages_count == 0 ||
			    hdr->deleted_messages_count > hdr->messages_count) {
				*error_r = "Deleted counter wrong";
				return -1;
			}

			if (--hdr->deleted_messages_count == 0)
				hdr->first_deleted_uid_lowwater = hdr->next_uid;
		}
	}
	return 0;
}

static void
mail_index_sync_header_update_counts_all(struct mail_index_sync_map_ctx *ctx,
					 uint32_t uid,
					 uint8_t old_flags, uint8_t new_flags)
{
	struct mail_index_map *const *maps;
	const char *error;
	unsigned int i, count;

	maps = array_get(&ctx->view->map->rec_map->maps, &count);
	for (i = 0; i < count; i++) {
		if (uid >= maps[i]->hdr.next_uid)
			continue;

		if (mail_index_header_update_counts(&maps[i]->hdr,
						    old_flags, new_flags,
						    &error) < 0)
			mail_index_sync_set_corrupted(ctx, "%s", error);
	}
}

static void
mail_index_sync_header_update_counts(struct mail_index_sync_map_ctx *ctx,
				     uint32_t uid, uint8_t old_flags,
				     uint8_t new_flags)
{
	const char *error;

	if (uid >= ctx->view->map->hdr.next_uid) {
		mail_index_sync_set_corrupted(ctx, "uid %u >= next_uid %u",
					      uid, ctx->view->map->hdr.next_uid);
	} else {
		if (mail_index_header_update_counts(&ctx->view->map->hdr,
						    old_flags, new_flags,
						    &error) < 0)
			mail_index_sync_set_corrupted(ctx, "%s", error);
	}
}

static void
mail_index_header_update_lowwaters(struct mail_index_sync_map_ctx *ctx,
				   uint32_t uid, enum mail_flags flags)
{
	struct mail_index_map *const *maps;
	unsigned int i, count;

	maps = array_get(&ctx->view->map->rec_map->maps, &count);
	for (i = 0; i < count; i++) {
		if ((flags & MAIL_SEEN) == 0 &&
		    uid < maps[i]->hdr.first_unseen_uid_lowwater)
			maps[i]->hdr.first_unseen_uid_lowwater = uid;
		if ((flags & MAIL_DELETED) != 0 &&
		    uid < maps[i]->hdr.first_deleted_uid_lowwater)
			maps[i]->hdr.first_deleted_uid_lowwater = uid;
	}
}

static void
sync_expunge_call_handlers(struct mail_index_sync_map_ctx *ctx,
			   uint32_t seq1, uint32_t seq2)
{
	const struct mail_index_expunge_handler *eh;
	struct mail_index_record *rec;
	uint32_t seq;

	array_foreach(&ctx->expunge_handlers, eh) {
		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_REC_AT_SEQ(ctx->view->map, seq);
			eh->handler(ctx, PTR_OFFSET(rec, eh->record_offset),
				    eh->sync_context);
		}
	}
}

static bool
sync_expunge_handlers_init(struct mail_index_sync_map_ctx *ctx)
{
	/* call expunge handlers only when syncing index file */
	if (ctx->type != MAIL_INDEX_SYNC_HANDLER_FILE)
		return FALSE;

	if (!ctx->expunge_handlers_set)
		mail_index_sync_init_expunge_handlers(ctx);

	if (!array_is_created(&ctx->expunge_handlers))
		return FALSE;
	return TRUE;
}

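/* Expunge the given sequence ranges by compacting the record array in
   place. A worked example (not from the original sources): with 8 records
   and expunged ranges 2..3 and 7..7, records 4..6 are memmove()d down to
   start at sequence 2 and record 8 is moved down after them, leaving the
   old records 1,4,5,6,8 as the new sequences 1..5. */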
static void
sync_expunge_range(struct mail_index_sync_map_ctx *ctx,
		   const ARRAY_TYPE(seq_range) *seqs)
{
	struct mail_index_map *map;
	const struct seq_range *range;
	unsigned int i, count;
	uint32_t dest_seq1, prev_seq2, orig_rec_count;

	range = array_get(seqs, &count);
	if (count == 0)
		return;

	/* Get a private in-memory rec_map, which we can modify. */
	map = mail_index_sync_get_atomic_map(ctx);

	/* call the expunge handlers first */
	if (sync_expunge_handlers_init(ctx)) {
		for (i = 0; i < count; i++) {
			sync_expunge_call_handlers(ctx,
				range[i].seq1, range[i].seq2);
		}
	}

	prev_seq2 = 0;
	dest_seq1 = 1;
	orig_rec_count = map->rec_map->records_count;
	for (i = 0; i < count; i++) {
		uint32_t seq1 = range[i].seq1;
		uint32_t seq2 = range[i].seq2;
		struct mail_index_record *rec;
		uint32_t seq_count, seq;

		i_assert(seq1 > prev_seq2);

		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
			mail_index_sync_header_update_counts(ctx, rec->uid, rec->flags, 0);
		}

		if (prev_seq2+1 <= seq1-1) {
			/* @UNSAFE: move (prev_seq2+1) .. (seq1-1) to its
			   final location in the map if necessary */
			uint32_t move_count = (seq1-1) - (prev_seq2+1) + 1;
			if (prev_seq2+1 != dest_seq1)
				memmove(MAIL_INDEX_REC_AT_SEQ(map, dest_seq1),
					MAIL_INDEX_REC_AT_SEQ(map, prev_seq2+1),
					move_count * map->hdr.record_size);
			dest_seq1 += move_count;
		}
		seq_count = seq2 - seq1 + 1;
		map->rec_map->records_count -= seq_count;
		map->hdr.messages_count -= seq_count;
		mail_index_modseq_expunge(ctx->modseq_ctx, seq1, seq2);
		prev_seq2 = seq2;
	}
	/* move the remaining records after the last expunged range */
	if (orig_rec_count > prev_seq2) {
		uint32_t final_move_count = orig_rec_count - prev_seq2;
		memmove(MAIL_INDEX_REC_AT_SEQ(map, dest_seq1),
			MAIL_INDEX_REC_AT_SEQ(map, prev_seq2+1),
			final_move_count * map->hdr.record_size);
	}
}

static void *sync_append_record(struct mail_index_map *map)
{
	size_t append_pos;
	void *ret;

	append_pos = map->rec_map->records_count * map->hdr.record_size;
	ret = buffer_get_space_unsafe(map->rec_map->buffer, append_pos,
				      map->hdr.record_size);
	map->rec_map->records =
		buffer_get_modifiable_data(map->rec_map->buffer, NULL);
	return ret;
}

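/* Return TRUE if the change at the current log position was written by the
   transaction whose commit result we're tracking. For example, if the
   committed transaction ended at log offset 1100 with commit_size 100, any
   record whose position falls within [1000, 1100) belongs to it. */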
static bool sync_update_ignored_change(struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_transaction_commit_result *result =
		ctx->view->index->sync_commit_result;
	uint32_t prev_log_seq;
	uoff_t prev_log_offset, trans_start_offset, trans_end_offset;

	if (result == NULL)
		return FALSE;

	/* we'll return TRUE if this modseq change was written within the
	   transaction that was just committed */
	mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
					       &prev_log_seq, &prev_log_offset);
	if (prev_log_seq != result->log_file_seq)
		return FALSE;

	trans_end_offset = result->log_file_offset;
	trans_start_offset = trans_end_offset - result->commit_size;
	if (prev_log_offset < trans_start_offset ||
	    prev_log_offset >= trans_end_offset)
		return FALSE;

	return TRUE;
}

static int
sync_modseq_update(struct mail_index_sync_map_ctx *ctx,
		   const struct mail_transaction_modseq_update *u,
		   unsigned int size)
{
	struct mail_index_view *view = ctx->view;
	const struct mail_transaction_modseq_update *end;
	uint32_t seq;
	uint64_t min_modseq;
	int ret;

	end = CONST_PTR_OFFSET(u, size);
	for (; u < end; u++) {
		if (u->uid == 0)
			seq = 0;
		else if (!mail_index_lookup_seq(view, u->uid, &seq))
			continue;

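		/* the 64-bit modseq is stored as two 32-bit halves in the
		   log record; recombine them */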
		min_modseq = ((uint64_t)u->modseq_high32 << 32) |
			u->modseq_low32;

		ret = seq == 0 ? 1 :
			mail_index_modseq_set(view, seq, min_modseq);
		if (ret < 0) {
			mail_index_sync_set_corrupted(ctx,
				"modseqs updated before they were enabled");
			return -1;
		}
		if (ret == 0 && sync_update_ignored_change(ctx))
			view->index->sync_commit_result->ignored_modseq_changes++;
	}
	return 1;
}

static int sync_append(const struct mail_index_record *rec,
		       struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_view *view = ctx->view;
	struct mail_index_map *map = view->map;
	const struct mail_index_record *old_rec;
	enum mail_flags new_flags;
	void *dest;

	if (rec->uid < map->hdr.next_uid) {
		mail_index_sync_set_corrupted(ctx,
			"Append with UID %u, but next_uid = %u",
			rec->uid, map->hdr.next_uid);
		return -1;
	}

	/* We'll need to append a new record. If map currently points to
	   mmap()ed index, it first needs to be moved to memory since we can't
	   write past the mmap()ed memory area. */
	map = mail_index_sync_move_to_private_memory(ctx);

	if (rec->uid <= map->rec_map->last_appended_uid) {
		i_assert(map->hdr.messages_count < map->rec_map->records_count);
		/* the flags may have changed since the record was added to
		   the map. use the already-updated flags so the flag
		   counters won't get broken. */
		old_rec = MAIL_INDEX_MAP_IDX(map, map->hdr.messages_count);
		i_assert(old_rec->uid == rec->uid);
		new_flags = old_rec->flags;
	} else {
		/* don't rely on buffer->used being at the correct position;
		   at least expunges can move it */
		dest = sync_append_record(map);
		memcpy(dest, rec, sizeof(*rec));
		memset(PTR_OFFSET(dest, sizeof(*rec)), 0,
		       map->hdr.record_size - sizeof(*rec));
		map->rec_map->records_count++;
		map->rec_map->last_appended_uid = rec->uid;
		new_flags = rec->flags;

		mail_index_modseq_append(ctx->modseq_ctx,
					 map->rec_map->records_count);
	}

	map->hdr.messages_count++;
	map->hdr.next_uid = rec->uid+1;

	if ((new_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0 &&
	    (view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
		map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;

	mail_index_header_update_lowwaters(ctx, rec->uid, new_flags);
	mail_index_sync_header_update_counts(ctx, rec->uid, 0, new_flags);
	return 1;
}

static int sync_flag_update(const struct mail_transaction_flag_update *u,
			    struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_view *view = ctx->view;
	struct mail_index_record *rec;
	uint8_t flag_mask, old_flags;
	uint32_t seq, seq1, seq2;

	if (!mail_index_lookup_seq_range(view, u->uid1, u->uid2, &seq1, &seq2))
		return 1;

	if (!MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(u)) {
		mail_index_modseq_update_flags(ctx->modseq_ctx,
					       u->add_flags | u->remove_flags,
					       seq1, seq2);
	}

	if ((u->add_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0 &&
	    (view->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) == 0)
		view->map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;

	flag_mask = (unsigned char)~u->remove_flags;

	if (((u->add_flags | u->remove_flags) &
	     (MAIL_SEEN | MAIL_DELETED)) == 0) {
		/* we're not modifying any counted/lowwatered flags */
		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);
			rec->flags = (rec->flags & flag_mask) | u->add_flags;
		}
	} else {
		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_REC_AT_SEQ(view->map, seq);

			old_flags = rec->flags;
			rec->flags = (rec->flags & flag_mask) | u->add_flags;

			mail_index_header_update_lowwaters(ctx, rec->uid,
							   rec->flags);
			mail_index_sync_header_update_counts_all(ctx, rec->uid,
								 old_flags,
								 rec->flags);
		}
	}
	return 1;
}

static int sync_header_update(const struct mail_transaction_header_update *u,
			      struct mail_index_sync_map_ctx *ctx)
{
#define MAIL_INDEX_HEADER_UPDATE_FIELD_IN_RANGE(u, field) \
	((u)->offset <= offsetof(struct mail_index_header, field) && \
	 (u)->offset + (u)->size > offsetof(struct mail_index_header, field))
	struct mail_index_map *map = ctx->view->map;
	uint32_t orig_log_file_tail_offset = map->hdr.log_file_tail_offset;
	uint32_t orig_next_uid = map->hdr.next_uid;

	if (u->offset >= map->hdr.base_header_size ||
	    u->offset + u->size > map->hdr.base_header_size) {
		mail_index_sync_set_corrupted(ctx,
			"Header update outside range: %u + %u > %u",
			u->offset, u->size, map->hdr.base_header_size);
		return -1;
	}

	buffer_write(map->hdr_copy_buf, u->offset, u + 1, u->size);
	i_assert(map->hdr_copy_buf->used == map->hdr.header_size);

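	/* hdr_copy_buf holds the full (possibly extended) header. Below we
	   also mirror the updated bytes into the fixed-size map->hdr struct,
	   but only the part of the update that overlaps it; an update
	   starting past sizeof(map->hdr) touches only the copy buffer. */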
	/* @UNSAFE */
	if ((uint32_t)(u->offset + u->size) <= sizeof(map->hdr)) {
		memcpy(PTR_OFFSET(&map->hdr, u->offset),
		       u + 1, u->size);
	} else if (u->offset < sizeof(map->hdr)) {
		memcpy(PTR_OFFSET(&map->hdr, u->offset),
		       u + 1, sizeof(map->hdr) - u->offset);
	}

	if (map->hdr.next_uid < orig_next_uid) {
		/* the next_uid update tried to shrink its value. this can
		   happen in some race conditions, e.g. with dsync, so just
		   silently ignore it. */
		map->hdr.next_uid = orig_next_uid;
	}

	/* the tail offset updates are intended for internal transaction
	   log handling. we'll update the offset in the header only when
	   the sync is finished. */
	map->hdr.log_file_tail_offset = orig_log_file_tail_offset;
	return 1;
}

static int
mail_index_sync_record_real(struct mail_index_sync_map_ctx *ctx,
			    const struct mail_transaction_header *hdr,
			    const void *data)
{
	int ret = 0;

	switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
	case MAIL_TRANSACTION_APPEND: {
		const struct mail_index_record *rec, *end;

		end = CONST_PTR_OFFSET(data, hdr->size);
		for (rec = data; rec < end; rec++) {
			ret = sync_append(rec, ctx);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_EXPUNGE:
	case MAIL_TRANSACTION_EXPUNGE|MAIL_TRANSACTION_EXPUNGE_PROT: {
		const struct mail_transaction_expunge *rec = data, *end;
		ARRAY_TYPE(seq_range) seqs;
		uint32_t seq1, seq2;

		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
			/* this is simply a request for expunge */
			break;
		}
		t_array_init(&seqs, 64);
		end = CONST_PTR_OFFSET(data, hdr->size);
		for (; rec != end; rec++) {
			if (mail_index_lookup_seq_range(ctx->view,
					rec->uid1, rec->uid2, &seq1, &seq2))
				seq_range_array_add_range(&seqs, seq1, seq2);
		}
		sync_expunge_range(ctx, &seqs);
		break;
	}
	case MAIL_TRANSACTION_EXPUNGE_GUID:
	case MAIL_TRANSACTION_EXPUNGE_GUID|MAIL_TRANSACTION_EXPUNGE_PROT: {
		const struct mail_transaction_expunge_guid *rec = data, *end;
		ARRAY_TYPE(seq_range) seqs;
		uint32_t seq;

		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
			/* this is simply a request for expunge */
			break;
		}
		t_array_init(&seqs, 64);
		end = CONST_PTR_OFFSET(data, hdr->size);
		for (; rec != end; rec++) {
			i_assert(rec->uid != 0);

			if (mail_index_lookup_seq(ctx->view, rec->uid, &seq))
				seq_range_array_add(&seqs, seq);
		}

		sync_expunge_range(ctx, &seqs);
		break;
	}
	case MAIL_TRANSACTION_FLAG_UPDATE: {
		const struct mail_transaction_flag_update *rec, *end;

		end = CONST_PTR_OFFSET(data, hdr->size);
		for (rec = data; rec < end; rec++) {
			ret = sync_flag_update(rec, ctx);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_HEADER_UPDATE: {
		const struct mail_transaction_header_update *rec;
		unsigned int i;

		for (i = 0; i < hdr->size; ) {
			rec = CONST_PTR_OFFSET(data, i);
			ret = sync_header_update(rec, ctx);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->size;
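			/* log records are kept 32-bit aligned; e.g. a
			   6-byte update body plus the 4-byte record header
			   leaves i at a multiple of 4 plus 2, so i is
			   rounded up by 2 here */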
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_INTRO: {
		const struct mail_transaction_ext_intro *rec = data;
		unsigned int i;
		uint32_t prev_seq;
		uoff_t prev_offset;

		mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
						       &prev_seq, &prev_offset);
		ctx->ext_intro_seq = prev_seq;
		ctx->ext_intro_offset = prev_offset;
		ctx->ext_intro_end_offset =
			prev_offset + hdr->size + sizeof(*hdr);

		for (i = 0; i < hdr->size; ) {
			if (i + sizeof(*rec) > hdr->size) {
				/* should be just extra padding */
				break;
			}

			rec = CONST_PTR_OFFSET(data, i);
			/* name_size checked by _log_view_next() */
			i_assert(i + sizeof(*rec) + rec->name_size <= hdr->size);

			ret = mail_index_sync_ext_intro(ctx, rec);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->name_size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_RESET: {
		struct mail_transaction_ext_reset rec;

		/* old versions have only new_reset_id */
		if (hdr->size < sizeof(uint32_t)) {
			mail_index_sync_set_corrupted(ctx,
				"ext reset: invalid record size");
			ret = -1;
			break;
		}
		i_zero(&rec);
		memcpy(&rec, data, I_MIN(hdr->size, sizeof(rec)));
		ret = mail_index_sync_ext_reset(ctx, &rec);
		break;
	}
	case MAIL_TRANSACTION_EXT_HDR_UPDATE: {
		const struct mail_transaction_ext_hdr_update *rec;
		unsigned int i;

		for (i = 0; i < hdr->size; ) {
			rec = CONST_PTR_OFFSET(data, i);

			if (i + sizeof(*rec) > hdr->size ||
			    i + sizeof(*rec) + rec->size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext hdr update: invalid record size");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
							     rec->size, rec + 1);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_HDR_UPDATE32: {
		const struct mail_transaction_ext_hdr_update32 *rec;
		unsigned int i;

		for (i = 0; i < hdr->size; ) {
			rec = CONST_PTR_OFFSET(data, i);

			if (i + sizeof(*rec) > hdr->size ||
			    i + sizeof(*rec) + rec->size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext hdr update: invalid record size");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
							     rec->size, rec + 1);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_REC_UPDATE: {
		const struct mail_transaction_ext_rec_update *rec;
		unsigned int i, record_size;

		if (ctx->cur_ext_map_idx == (uint32_t)-1) {
			mail_index_sync_set_corrupted(ctx,
				"Extension record updated "
				"without intro prefix");
			ret = -1;
			break;
		}

		if (ctx->cur_ext_ignore) {
			ret = 1;
			break;
		}

		/* the record is padded to 32 bits in the transaction log */
		record_size = (sizeof(*rec) + ctx->cur_ext_record_size + 3) & ~3U;
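		/* e.g. with a 4-byte record prefix and
		   cur_ext_record_size == 5, record_size becomes
		   (4 + 5 + 3) & ~3 = 12 */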

		for (i = 0; i < hdr->size; i += record_size) {
			rec = CONST_PTR_OFFSET(data, i);

			if (i + record_size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext rec update: invalid record size");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_rec_update(ctx, rec);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_ATOMIC_INC: {
		const struct mail_transaction_ext_atomic_inc *rec, *end;

		if (ctx->cur_ext_map_idx == (uint32_t)-1) {
			mail_index_sync_set_corrupted(ctx,
				"Extension record updated "
				"without intro prefix");
			ret = -1;
			break;
		}

		if (ctx->cur_ext_ignore) {
			ret = 1;
			break;
		}

		end = CONST_PTR_OFFSET(data, hdr->size);
		for (rec = data; rec < end; rec++) {
			ret = mail_index_sync_ext_atomic_inc(ctx, rec);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_KEYWORD_UPDATE: {
		const struct mail_transaction_keyword_update *rec = data;

		ret = mail_index_sync_keywords(ctx, hdr, rec);
		break;
	}
	case MAIL_TRANSACTION_KEYWORD_RESET: {
		const struct mail_transaction_keyword_reset *rec = data;

		ret = mail_index_sync_keywords_reset(ctx, hdr, rec);
		break;
	}
	case MAIL_TRANSACTION_MODSEQ_UPDATE: {
		const struct mail_transaction_modseq_update *rec = data;

		ret = sync_modseq_update(ctx, rec, hdr->size);
		break;
	}
	case MAIL_TRANSACTION_INDEX_DELETED:
		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
			/* next sync finishes the deletion */
			ctx->view->index->index_delete_requested = TRUE;
		} else {
			/* transaction log reading handles this */
		}
		break;
	case MAIL_TRANSACTION_INDEX_UNDELETED:
		ctx->view->index->index_delete_requested = FALSE;
		break;
	case MAIL_TRANSACTION_BOUNDARY:
		break;
	case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
		break;
	default:
		mail_index_sync_set_corrupted(ctx,
			"Unknown transaction record type 0x%x",
			(hdr->type & MAIL_TRANSACTION_TYPE_MASK));
		ret = -1;
		break;
	}
	return ret;
}

int mail_index_sync_record(struct mail_index_sync_map_ctx *ctx,
			   const struct mail_transaction_header *hdr,
			   const void *data)
{
	int ret;

	T_BEGIN {
		ret = mail_index_sync_record_real(ctx, hdr, data);
	} T_END;
	return ret;
}

void mail_index_sync_map_init(struct mail_index_sync_map_ctx *sync_map_ctx,
			      struct mail_index_view *view,
			      enum mail_index_sync_handler_type type)
{
	i_zero(sync_map_ctx);
	sync_map_ctx->view = view;
	sync_map_ctx->cur_ext_map_idx = (uint32_t)-1;
	sync_map_ctx->type = type;
	sync_map_ctx->modseq_ctx = mail_index_modseq_sync_begin(sync_map_ctx);

	mail_index_sync_init_handlers(sync_map_ctx);
}

void mail_index_sync_map_deinit(struct mail_index_sync_map_ctx *sync_map_ctx)
{
	i_assert(sync_map_ctx->modseq_ctx == NULL);

	buffer_free(&sync_map_ctx->unknown_extensions);
	if (sync_map_ctx->expunge_handlers_used)
		mail_index_sync_deinit_expunge_handlers(sync_map_ctx);
	mail_index_sync_deinit_handlers(sync_map_ctx);
}

static void mail_index_sync_update_hdr_dirty_flag(struct mail_index_map *map)
{
	const struct mail_index_record *rec;
	uint32_t seq;

	if ((map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0 ||
	    (map->index->flags & MAIL_INDEX_OPEN_FLAG_NO_DIRTY) != 0)
		return;

	/* do we have any dirty flags left? */
	for (seq = 1; seq <= map->rec_map->records_count; seq++) {
		rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
		if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0) {
			map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
			break;
		}
	}
}

#ifdef DEBUG
void mail_index_map_check(struct mail_index_map *map)
{
	const struct mail_index_header *hdr = &map->hdr;
	unsigned int del = 0, seen = 0;
	uint32_t seq, prev_uid = 0;

	i_assert(hdr->messages_count <= map->rec_map->records_count);
	for (seq = 1; seq <= hdr->messages_count; seq++) {
		const struct mail_index_record *rec;

		rec = MAIL_INDEX_REC_AT_SEQ(map, seq);
		i_assert(rec->uid > prev_uid);
		prev_uid = rec->uid;

		if ((rec->flags & MAIL_DELETED) != 0) {
			i_assert(rec->uid >= hdr->first_deleted_uid_lowwater);
			del++;
		}
		if ((rec->flags & MAIL_SEEN) != 0)
			seen++;
		else
			i_assert(rec->uid >= hdr->first_unseen_uid_lowwater);
	}
	i_assert(del == hdr->deleted_messages_count);
	i_assert(seen == hdr->seen_messages_count);
}
#endif

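/* Decide whether syncing should reopen the index file instead of replaying
   the transaction log into the current map: roughly, reopen when more log
   bytes remain to be synced than the estimated size of the index itself,
   since re-reading the index from disk is then likely the cheaper option. */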
bool mail_index_sync_map_want_index_reopen(struct mail_index_map *map,
					   enum mail_index_sync_handler_type type)
{
	struct mail_index *index = map->index;

	if (index->log->head == NULL)
		return TRUE;

	uoff_t start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
		map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;
	/* don't check this if mmap is disabled, because reopening the
	   index causes the sync to get lost. */
	if ((index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0) {
		uoff_t log_size, index_size;

		if (index->fd == -1 &&
		    index->log->head->hdr.prev_file_seq != 0) {
			/* we don't know the index's size, so use the
			   smallest index size we're willing to read */
			index_size = MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE;
		} else {
			index_size = map->hdr.header_size +
				map->rec_map->records_count *
				map->hdr.record_size;
		}

		/* this isn't necessarily correct currently, but it should be
		   close enough */
		log_size = index->log->head->last_size;
		if (log_size > start_offset &&
		    log_size - start_offset > index_size)
			return TRUE;
	}
	return FALSE;
}

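/* Sync the map with the transaction log. In short: position the log view
   at the map's last synced seq/offset, replay each transaction record into
   the map (cloning it into private memory as needed), then stamp the new
   log offsets into the header and write it back. Returns 1 on success, 0
   with *reason_r set if the log couldn't be positioned at the map's
   seq/offset, or -1 on I/O error. */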
int mail_index_sync_map(struct mail_index_map **_map,
			enum mail_index_sync_handler_type type,
			const char **reason_r)
{
	struct mail_index_map *map = *_map;
	struct mail_index *index = map->index;
	struct mail_index_view *view;
	struct mail_index_sync_map_ctx sync_map_ctx;
	const struct mail_transaction_header *thdr;
	const void *tdata;
	uint32_t prev_seq;
	uoff_t start_offset, prev_offset;
	const char *reason, *error;
	int ret;
	bool had_dirty, reset;

	i_assert(index->log->head != NULL);
	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);

	start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
		map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;

	view = mail_index_view_open_with_map(index, map);
	ret = mail_transaction_log_view_set(view->log_view,
					    map->hdr.log_file_seq, start_offset,
					    (uint32_t)-1, UOFF_T_MAX,
					    &reset, &reason);
	if (ret <= 0) {
		mail_index_view_close(&view);
		if (ret < 0) {
			/* I/O failure */
			return -1;
		}
		/* the seq/offset is probably broken */
		*reason_r = t_strdup_printf(
			"Lost log for seq=%u offset=%"PRIuUOFF_T": %s "
			"(initial_mapped=%d)",
			map->hdr.log_file_seq, start_offset, reason,
			index->initial_mapped ? 1 : 0);
		return 0;
	}

	mail_transaction_log_get_head(index->log, &prev_seq, &prev_offset);
	if (prev_seq != map->hdr.log_file_seq ||
	    prev_offset - map->hdr.log_file_tail_offset >
			index->optimization_set.index.rewrite_min_log_bytes) {
		/* we're reading more from the log than we would have
		   preferred. remember that we probably want to rewrite the
		   index soon. */
		index->index_min_write = TRUE;
	}

	/* the view referenced the map. avoid unnecessary map cloning by
	   unreferencing the map while the view exists. */
	map->refcount--;

	had_dirty = (map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0;
	if (had_dirty)
		map->hdr.flags &= ENUM_NEGATE(MAIL_INDEX_HDR_FLAG_HAVE_DIRTY);

	mail_transaction_log_view_get_prev_pos(view->log_view,
					       &prev_seq, &prev_offset);

	mail_index_sync_map_init(&sync_map_ctx, view, type);
	if (reset) {
		/* Reset the entire index. Leave only indexid and
		   log_file_seq. */
		mail_transaction_log_view_get_prev_pos(view->log_view,
						       &prev_seq, &prev_offset);
		map = mail_index_map_alloc(index);
		if ((index->map->hdr.flags & MAIL_INDEX_HDR_FLAG_FSCKD) != 0)
			map->hdr.flags |= MAIL_INDEX_HDR_FLAG_FSCKD;
		map->hdr.log_file_seq = prev_seq;
		map->hdr.log_file_tail_offset = 0;
		mail_index_sync_replace_map(&sync_map_ctx, map);
	}
	map = NULL;

	/* FIXME: when the transaction sync lock is removed, we'll need to
	   handle the case when a transaction is committed while a mailbox
	   is being synced ([synced transactions][new transaction][ext
	   transaction]). this means int_offset contains [synced] and
	   ext_offset contains all */
	while ((ret = mail_transaction_log_view_next(view->log_view, &thdr,
						     &tdata)) > 0) {
		mail_transaction_log_view_get_prev_pos(view->log_view,
						       &prev_seq, &prev_offset);

		if (LOG_IS_BEFORE(prev_seq, prev_offset,
				  view->map->hdr.log_file_seq,
				  view->map->hdr.log_file_head_offset)) {
			/* this has been synced already. */
			i_assert(type == MAIL_INDEX_SYNC_HANDLER_FILE);
			continue;
		}

		/* we'll just skip over broken entries */
		(void)mail_index_sync_record(&sync_map_ctx, thdr, tdata);
	}
	map = view->map;

	if (had_dirty)
		mail_index_sync_update_hdr_dirty_flag(map);
	mail_index_modseq_sync_end(&sync_map_ctx.modseq_ctx);

	mail_index_sync_update_log_offset(&sync_map_ctx, view->map, TRUE);

#ifdef DEBUG
	mail_index_map_check(map);
#endif
	i_assert(map->hdr.indexid == index->indexid || map->hdr.indexid == 0);

	/* the transaction log internally tracks the current tail offset.
	   besides using header updates, it also updates the offset to skip
	   over following external transactions to avoid extra unneeded log
	   reading. */
	i_assert(map->hdr.log_file_seq == index->log->head->hdr.file_seq);
	if (map->hdr.log_file_tail_offset < index->log->head->max_tail_offset) {
		map->hdr.log_file_tail_offset =
			index->log->head->max_tail_offset;
	}

	buffer_write(map->hdr_copy_buf, 0, &map->hdr, sizeof(map->hdr));
	if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
		memcpy(map->rec_map->mmap_base, map->hdr_copy_buf->data,
		       map->hdr_copy_buf->used);
	}

	/* restore the refcount before closing the view. this is necessary
	   also if the map got cloned, because closing the view would
	   otherwise destroy it */
	map->refcount++;
	mail_index_sync_map_deinit(&sync_map_ctx);
	mail_index_view_close(&view);

	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);

	if (mail_index_map_check_header(map, &error) <= 0) {
		mail_index_set_error(index,
			"Synchronization corrupted index header %s: %s",
			index->filepath, error);
		(void)mail_index_fsck(index);
		map = index->map;
	} else if (sync_map_ctx.errors) {
		/* make sure the index looks valid now */
		(void)mail_index_fsck(index);
		map = index->map;
	}

	*_map = map;
	return ret < 0 ? -1 : 1;
}