1 /* Copyright (c) 2003-2018 Dovecot authors, see the included COPYING file */
2 
3 #include "lib.h"
4 #include "array.h"
5 #include "buffer.h"
6 #include "hash.h"
7 #include "llist.h"
8 #include "nfs-workarounds.h"
9 #include "file-cache.h"
10 #include "mmap-util.h"
11 #include "read-full.h"
12 #include "write-full.h"
13 #include "mail-cache-private.h"
14 #include "ioloop.h"
15 
16 #include <unistd.h>
17 
18 #define MAIL_CACHE_MIN_HEADER_READ_SIZE 4096
19 
/* Event category attached to every cache event so log filters can match
   on "mail-cache". */
static struct event_category event_category_mail_cache = {
	.name = "mail-cache",
};
23 
/* Report a failed syscall on the cache file. The error (with errno) is
   recorded through the index's error handling, tagged with the cache's
   file path and the failing function name. */
void mail_cache_set_syscall_error(struct mail_cache *cache,
				  const char *function)
{
	mail_index_file_set_syscall_error(cache->index, cache->filepath,
					  function);
}
30 
mail_cache_unlink(struct mail_cache * cache)31 static void mail_cache_unlink(struct mail_cache *cache)
32 {
33 	if (!cache->index->readonly && !MAIL_INDEX_IS_IN_MEMORY(cache->index))
34 		i_unlink_if_exists(cache->filepath);
35 	/* mark the cache as unusable */
36 	cache->hdr = NULL;
37 }
38 
/* Mark the entire cache file corrupted: unlink it, drop the in-memory
   header, and log the printf-style reason both as an event and as the
   index's last error (without double-logging). */
void mail_cache_set_corrupted(struct mail_cache *cache, const char *fmt, ...)
{
	va_list va;

	/* deletes the file (when possible) and sets cache->hdr = NULL */
	mail_cache_unlink(cache);

	va_start(va, fmt);
	T_BEGIN {
		const char *reason = t_strdup_vprintf(fmt, va);
		const char *errstr = t_strdup_printf(
			"Deleting corrupted cache: %s", reason);
		e_error(event_create_passthrough(cache->event)->
			set_name("mail_cache_corrupted")->
			add_str("reason", reason)->event(), "%s", errstr);
		/* remember the error string in the index without logging
		   it a second time */
		mail_index_set_error_nolog(cache->index, errstr);
	} T_END;
	va_end(va);
}
57 
/* Mark a single message's cache record corrupted: clear its cache offset
   from the index extension (so the record is no longer referenced) and log
   the reason with the message's UID. The cache file itself is untouched. */
void mail_cache_set_seq_corrupted_reason(struct mail_cache_view *cache_view,
					 uint32_t seq, const char *reason)
{
	uint32_t uid, empty = 0;
	struct mail_cache *cache = cache_view->cache;
	struct mail_index_view *view = cache_view->view;

	/* drop cache pointer */
	struct mail_index_transaction *t =
		mail_index_transaction_begin(view, MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL);
	mail_index_update_ext(t, seq, cache->ext_id, &empty, NULL);

	if (mail_index_transaction_commit(&t) < 0) {
		/* I/O error (e.g. out of disk space). Ignore this for now,
		   maybe it works again later. */
		return;
	}

	mail_index_lookup_uid(cache_view->view, seq, &uid);
	const char *errstr = t_strdup_printf(
		"Deleting corrupted cache record uid=%u: %s", uid, reason);
	e_error(event_create_passthrough(cache->event)->
		set_name("mail_cache_record_corrupted")->
		add_int("uid", uid)->
		add_str("reason", reason)->event(), "%s", errstr);
	/* account the dropped record so purging can be scheduled later */
	mail_cache_expunge_count(cache, 1);
}
85 
/* Close the cache file and release everything attached to it: the mmap,
   the file-cache fd, the read buffer contents, the file lock and the fd
   itself. Leaves the cache in the "not opened" state. */
void mail_cache_file_close(struct mail_cache *cache)
{
	/* unmap before clearing mmap_base/mmap_length below */
	if (cache->mmap_base != NULL) {
		if (munmap(cache->mmap_base, cache->mmap_length) < 0)
			mail_cache_set_syscall_error(cache, "munmap()");
	}

	if (cache->file_cache != NULL)
		file_cache_set_fd(cache->file_cache, -1);
	if (cache->read_buf != NULL)
		buffer_set_used_size(cache->read_buf, 0);

	cache->mmap_base = NULL;
	cache->hdr = NULL;
	cache->mmap_length = 0;
	cache->last_field_header_offset = 0;

	/* release the lock before closing the fd it refers to */
	file_lock_free(&cache->file_lock);
	cache->locked = FALSE;

	if (cache->fd != -1) {
		if (close(cache->fd) < 0)
			mail_cache_set_syscall_error(cache, "close()");
		cache->fd = -1;
	}
	cache->opened = FALSE;
}
113 
mail_cache_init_file_cache(struct mail_cache * cache)114 static void mail_cache_init_file_cache(struct mail_cache *cache)
115 {
116 	struct stat st;
117 
118 	if (cache->file_cache != NULL)
119 		file_cache_set_fd(cache->file_cache, cache->fd);
120 
121 	if (fstat(cache->fd, &st) == 0) {
122 		if (cache->file_cache != NULL)
123 			(void)file_cache_set_size(cache->file_cache, st.st_size);
124 	} else if (!ESTALE_FSTAT(errno)) {
125 		mail_cache_set_syscall_error(cache, "fstat()");
126 	}
127 
128 	cache->last_stat_size = st.st_size;
129 	cache->st_ino = st.st_ino;
130 	cache->st_dev = st.st_dev;
131 }
132 
/* Try to open and map the cache file.
   Returns 1 on success, 0 if there is no usable cache (in-memory index,
   missing file, or corrupted content), -1 on I/O error. */
static int mail_cache_try_open(struct mail_cache *cache)
{
	int ret;

	i_assert(!cache->opened);
	cache->opened = TRUE;

	/* in-memory indexes have no cache file */
	if (MAIL_INDEX_IS_IN_MEMORY(cache->index))
		return 0;

	i_assert(cache->fd == -1);
	cache->fd = nfs_safe_open(cache->filepath,
				  cache->index->readonly ? O_RDONLY : O_RDWR);
	if (cache->fd == -1) {
		mail_cache_file_close(cache);
		if (errno == ENOENT) {
			/* no cache file yet - nothing to purge either */
			mail_cache_purge_later_reset(cache);
			return 0;
		}

		mail_cache_set_syscall_error(cache, "open()");
		return -1;
	}

	mail_cache_init_file_cache(cache);

	/* map and verify the header; <= 0 means corrupted or I/O error */
	if ((ret = mail_cache_map_all(cache)) <= 0) {
		mail_cache_file_close(cache);
		return ret;
	}
	return 1;
}
165 
/* Check whether the cache file on disk has been replaced (e.g. by purging)
   since we opened it, so the currently opened fd is stale.
   Returns TRUE if the file should be reopened. */
static bool mail_cache_need_reopen(struct mail_cache *cache)
{
	struct stat st;

	if (MAIL_INDEX_IS_IN_MEMORY(cache->index)) {
		/* disabled */
		return FALSE;
	}

	/* never opened (or was closed) - must (re)open */
	if (cache->fd == -1)
		return TRUE;

	/* see if the file has changed */
	if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
		i_assert(!cache->locked);
		nfs_flush_file_handle_cache(cache->filepath);
	}
	if (nfs_safe_stat(cache->filepath, &st) < 0) {
		/* if cache was already marked as corrupted, don't log errors
		   about nonexistent cache file */
		if (cache->hdr != NULL || errno != ENOENT)
			mail_cache_set_syscall_error(cache, "stat()");
		return TRUE;
	}
	cache->last_stat_size = st.st_size;

	if (st.st_ino != cache->st_ino ||
	    !CMP_DEV_T(st.st_dev, cache->st_dev)) {
		/* file changed */
		return TRUE;
	}

	if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
		/* if the old file has been deleted, the new file may have
		   the same inode as the old one. we'll catch this here by
		   checking if fstat() fails with ESTALE */
		if (fstat(cache->fd, &st) < 0) {
			if (ESTALE_FSTAT(errno))
				return TRUE;
			mail_cache_set_syscall_error(cache, "fstat()");
			return FALSE;
		}
	}
	return FALSE;
}
211 
/* Close the currently opened cache file and open+verify it again.
   Returns whatever mail_cache_open_and_verify() returns. */
int mail_cache_reopen(struct mail_cache *cache)
{
	mail_cache_file_close(cache);
	return mail_cache_open_and_verify(cache);
}
217 
/* Inspect the cache header counters and schedule a purge (via
   mail_cache_purge_later()) if the file format is old, or if the
   continued/deleted record percentages exceed the configured limits and
   the file is large enough to be worth purging. */
static void mail_cache_update_need_purge(struct mail_cache *cache)
{
	const struct mail_index_cache_optimization_settings *set =
		&cache->index->optimization_set.cache;
	const struct mail_cache_header *hdr = cache->hdr;
	struct stat st;
	unsigned int msg_count;
	unsigned int records_count, cont_percentage, delete_percentage;
	const char *want_purge_reason = NULL;

	if (hdr->minor_version == 0) {
		/* purge to get ourself into the new header version */
		mail_cache_purge_later(cache, "Minor version too old");
		return;
	}

	msg_count = cache->index->map->rec_map->records_count;
	if (msg_count == 0)
		records_count = 1; /* avoid division by zero below */
	else if (hdr->record_count == 0 || hdr->record_count > msg_count*2) {
		/* probably not the real record_count, but hole offset that
		   Dovecot <=v2.1 versions used to use in this position.
		   we already checked that minor_version>0, but this could
		   happen if old Dovecot was used to access mailbox after
		   it had been updated. */
		records_count = I_MAX(msg_count, 1);
	} else {
		records_count = hdr->record_count;
	}

	cont_percentage = hdr->continued_record_count * 100 / records_count;
	if (cont_percentage >= set->purge_continued_percentage) {
		/* too many continued rows, purge */
		want_purge_reason = t_strdup_printf(
			"Too many continued records (%u/%u)",
			hdr->continued_record_count, records_count);
	}

	delete_percentage = hdr->deleted_record_count * 100 /
		(records_count + hdr->deleted_record_count);
	if (delete_percentage >= set->purge_delete_percentage) {
		/* too many deleted records, purge */
		want_purge_reason = t_strdup_printf(
			"Too many deleted records (%u/%u)",
			hdr->deleted_record_count, records_count);
	}

	if (want_purge_reason != NULL) {
		if (fstat(cache->fd, &st) < 0) {
			if (!ESTALE_FSTAT(errno))
				mail_cache_set_syscall_error(cache, "fstat()");
			return;
		}
		/* only purge files that have grown past the minimum size */
		if ((uoff_t)st.st_size >= set->purge_min_size)
			mail_cache_purge_later(cache, want_purge_reason);
	}

}
276 
/* Validate the mapped cache header (size, versions, uoff_t width, indexid,
   file_seq). On failure the cache file is deleted/marked unusable.
   Returns TRUE if the header is usable. */
static bool mail_cache_verify_header(struct mail_cache *cache,
				     const struct mail_cache_header *hdr)
{
	/* check that the header is still ok */
	if (cache->mmap_length < sizeof(struct mail_cache_header)) {
		mail_cache_set_corrupted(cache, "File too small");
		return FALSE;
	}

	if (hdr->major_version != MAIL_CACHE_MAJOR_VERSION) {
		/* version changed - upgrade silently */
		mail_cache_set_corrupted(cache, "Unsupported major version (%u)",
					 hdr->major_version);
		return FALSE;
	}
	if (hdr->compat_sizeof_uoff_t != sizeof(uoff_t)) {
		/* architecture change - handle silently(?) */
		mail_cache_set_corrupted(cache, "Unsupported uoff_t size (%u)",
					 hdr->compat_sizeof_uoff_t);
		return FALSE;
	}

	if (hdr->indexid != cache->index->indexid) {
		/* index id changed - handle silently */
		mail_cache_unlink(cache);
		return FALSE;
	}
	if (hdr->file_seq == 0) {
		mail_cache_set_corrupted(cache, "file_seq is 0");
		return FALSE;
	}
	return TRUE;
}
310 
311 static int
mail_cache_map_finish(struct mail_cache * cache,uoff_t offset,size_t size,const void * hdr_data,bool copy_hdr,bool * corrupted_r)312 mail_cache_map_finish(struct mail_cache *cache, uoff_t offset, size_t size,
313 		      const void *hdr_data, bool copy_hdr, bool *corrupted_r)
314 {
315 	const struct mail_cache_header *hdr = hdr_data;
316 
317 	*corrupted_r = FALSE;
318 
319 	if (offset == 0) {
320 		/* verify the header validity only with offset=0. this way
321 		   we won't waste time re-verifying it all the time */
322 		if (!mail_cache_verify_header(cache, hdr)) {
323 			if (!MAIL_CACHE_IS_UNUSABLE(cache) &&
324 			    cache->hdr->file_seq != 0)
325 				mail_cache_purge_later(cache, "Invalid header");
326 			*corrupted_r = TRUE;
327 			return -1;
328 		}
329 	}
330 	if (hdr_data != NULL) {
331 		if (!copy_hdr)
332 			cache->hdr = hdr;
333 		else {
334 			memcpy(&cache->hdr_ro_copy, hdr,
335 			       sizeof(cache->hdr_ro_copy));
336 			cache->hdr = &cache->hdr_ro_copy;
337 		}
338 		mail_cache_update_need_purge(cache);
339 	} else {
340 		i_assert(cache->hdr != NULL);
341 	}
342 	i_assert(cache->hdr->file_seq != 0);
343 
344 	if (offset + size > cache->mmap_length)
345 		return 0;
346 	return 1;
347 }
348 
349 static int
mail_cache_map_with_read(struct mail_cache * cache,size_t offset,size_t size,const void ** data_r,bool * corrupted_r)350 mail_cache_map_with_read(struct mail_cache *cache, size_t offset, size_t size,
351 			 const void **data_r, bool *corrupted_r)
352 {
353 	const void *hdr_data;
354 	void *data;
355 	ssize_t ret;
356 
357 	if (cache->read_buf == NULL) {
358 		cache->read_buf =
359 			buffer_create_dynamic(default_pool, size);
360 	} else if (cache->read_offset <= offset &&
361 		   cache->read_offset + cache->read_buf->used >= offset+size) {
362 		/* already mapped */
363 		*data_r = CONST_PTR_OFFSET(cache->read_buf->data,
364 					   offset - cache->read_offset);
365 		hdr_data = offset == 0 ? *data_r : NULL;
366 		return mail_cache_map_finish(cache, offset, size, hdr_data,
367 					     TRUE, corrupted_r);
368 	} else {
369 		buffer_set_used_size(cache->read_buf, 0);
370 	}
371 	if (offset == 0 && size < MAIL_CACHE_MIN_HEADER_READ_SIZE) {
372 		/* we can usually read the fields header after the cache
373 		   header. we need them both, so try to read them all with one
374 		   pread() call. */
375 		size = MAIL_CACHE_MIN_HEADER_READ_SIZE;
376 	}
377 
378 	data = buffer_append_space_unsafe(cache->read_buf, size);
379 	ret = pread(cache->fd, data, size, offset);
380 	if (ret < 0) {
381 		if (errno != ESTALE)
382 			mail_cache_set_syscall_error(cache, "read()");
383 
384 		buffer_set_used_size(cache->read_buf, 0);
385 		cache->hdr = NULL;
386 		cache->mmap_length = 0;
387 		return -1;
388 	}
389 	buffer_set_used_size(cache->read_buf, ret);
390 
391 	cache->read_offset = offset;
392 	cache->mmap_length = offset + cache->read_buf->used;
393 
394 	*data_r = data;
395 	hdr_data = offset == 0 ? *data_r : NULL;
396 	return mail_cache_map_finish(cache, offset,
397 				     cache->read_buf->used, hdr_data,
398 				     TRUE, corrupted_r);
399 }
400 
401 static int
mail_cache_map_full(struct mail_cache * cache,size_t offset,size_t size,const void ** data_r,bool * corrupted_r)402 mail_cache_map_full(struct mail_cache *cache, size_t offset, size_t size,
403 		    const void **data_r, bool *corrupted_r)
404 {
405 	struct stat st;
406 	const void *data;
407 	ssize_t ret;
408 	size_t orig_size = size;
409 
410 	*corrupted_r = FALSE;
411 
412 	if (size == 0)
413 		size = sizeof(struct mail_cache_header);
414 
415 	/* verify offset + size before trying to allocate a huge amount of
416 	   memory due to them. note that we may be prefetching more than we
417 	   actually need, so don't fail too early. */
418 	if ((size > cache->mmap_length || offset + size > cache->mmap_length) &&
419 	    (offset > 0 || size > sizeof(struct mail_cache_header))) {
420 		if (fstat(cache->fd, &st) < 0) {
421 			e_error(cache->index->event,
422 				"fstat(%s) failed: %m", cache->filepath);
423 			return -1;
424 		}
425 		cache->last_stat_size = st.st_size;
426 		if (offset >= (uoff_t)st.st_size) {
427 			*data_r = NULL;
428 			return 0;
429 		}
430 		if (size > (uoff_t)st.st_size - offset)
431 			size = st.st_size - offset;
432 	}
433 
434 	cache->remap_counter++;
435 	if (cache->map_with_read)
436 		return mail_cache_map_with_read(cache, offset, size, data_r,
437 						corrupted_r);
438 
439 	if (cache->file_cache != NULL) {
440 		ret = file_cache_read(cache->file_cache, offset, size);
441 		if (ret < 0) {
442                         /* In case of ESTALE we'll simply fail without error
443                            messages. The caller will then just have to
444                            fallback to generating the value itself.
445 
446                            We can't simply reopen the cache file, because
447                            using it requires also having updated file
448                            offsets. */
449                         if (errno != ESTALE)
450                                 mail_cache_set_syscall_error(cache, "read()");
451 			cache->hdr = NULL;
452 			return -1;
453 		}
454 
455 		data = file_cache_get_map(cache->file_cache,
456 					  &cache->mmap_length);
457 		*data_r = offset > cache->mmap_length ? NULL :
458 			CONST_PTR_OFFSET(data, offset);
459 		return mail_cache_map_finish(cache, offset, size,
460 					     offset == 0 ? data : NULL, TRUE,
461 					     corrupted_r);
462 	}
463 
464 	if (offset < cache->mmap_length &&
465 	    size <= cache->mmap_length - offset) {
466 		/* already mapped */
467 		i_assert(cache->mmap_base != NULL);
468 		*data_r = CONST_PTR_OFFSET(cache->mmap_base, offset);
469 		if (orig_size > cache->mmap_length - offset) {
470 			/* requested offset/size points outside file */
471 			return 0;
472 		}
473 		return 1;
474 	}
475 
476 	if (cache->mmap_base != NULL) {
477 		if (munmap(cache->mmap_base, cache->mmap_length) < 0)
478 			mail_cache_set_syscall_error(cache, "munmap()");
479 	} else {
480 		if (cache->fd == -1) {
481 			/* unusable, waiting for purging or
482 			   index is in memory */
483 			i_assert(cache->need_purge_file_seq != 0 ||
484 				 MAIL_INDEX_IS_IN_MEMORY(cache->index));
485 			return -1;
486 		}
487 	}
488 
489 	/* map the whole file */
490 	cache->hdr = NULL;
491 	cache->mmap_length = 0;
492 	if (cache->read_buf != NULL)
493 		buffer_set_used_size(cache->read_buf, 0);
494 
495 	cache->mmap_base = mmap_ro_file(cache->fd, &cache->mmap_length);
496 	if (cache->mmap_base == MAP_FAILED) {
497 		cache->mmap_base = NULL;
498 		if (ioloop_time != cache->last_mmap_error_time) {
499 			cache->last_mmap_error_time = ioloop_time;
500 			mail_cache_set_syscall_error(cache, t_strdup_printf(
501 				"mmap(size=%zu)", cache->mmap_length));
502 		}
503 		cache->mmap_length = 0;
504 		return -1;
505 	}
506 	*data_r = offset > cache->mmap_length ? NULL :
507 		CONST_PTR_OFFSET(cache->mmap_base, offset);
508 	return mail_cache_map_finish(cache, offset, orig_size,
509 				     cache->mmap_base, FALSE, corrupted_r);
510 }
511 
/* Map a non-header range of the cache file. Returns 1 if mapped, 0 if the
   range points past EOF, -1 on error. offset=0 is forbidden here: header
   (re-)mapping must go through mail_cache_map_all(). */
int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size,
		   const void **data_r)
{
	bool hdr_corrupted = FALSE;
	int ret;

	i_assert(offset != 0);

	ret = mail_cache_map_full(cache, offset, size, data_r, &hdr_corrupted);
	/* corruption is detected only when the header at offset 0 is mapped */
	i_assert(!hdr_corrupted);
	return ret;
}
522 
mail_cache_map_all(struct mail_cache * cache)523 int mail_cache_map_all(struct mail_cache *cache)
524 {
525 	const void *data;
526 	bool corrupted;
527 
528 	int ret = mail_cache_map_full(cache, 0, 0, &data, &corrupted);
529 	i_assert(ret != 0);
530 	if (corrupted) {
531 		i_assert(ret == -1);
532 		return 0;
533 	}
534 	return ret < 0 ? -1 : 1;
535 }
536 
/* Make sure the cache file is opened, its header verified and the field
   headers read. Returns 1 on success, 0 if the cache is unusable (missing
   or corrupted), -1 on I/O error. */
int mail_cache_open_and_verify(struct mail_cache *cache)
{
	int ret;

	if (cache->opened) {
		/* already open and usable - nothing to do */
		if (!MAIL_CACHE_IS_UNUSABLE(cache))
			return 1;
		mail_cache_file_close(cache);
	}
	if ((ret = mail_cache_try_open(cache)) < 0) {
		/* I/O error */
		mail_cache_file_close(cache);
		return -1;
	}

	if (ret > 0) {
		if (mail_cache_header_fields_read(cache) < 0) {
			/* corrupted */
			ret = 0;
		}
	}
	if (ret == 0) {
		/* cache was corrupted and should have been deleted already. */
		mail_cache_file_close(cache);
	}
	return ret;
}
564 
565 struct mail_cache *
mail_cache_open_or_create_path(struct mail_index * index,const char * path)566 mail_cache_open_or_create_path(struct mail_index *index, const char *path)
567 {
568 	struct mail_cache *cache;
569 
570 	cache = i_new(struct mail_cache, 1);
571 	cache->index = index;
572 	cache->fd = -1;
573 	cache->filepath = i_strdup(path);
574 	cache->field_pool = pool_alloconly_create("Cache fields", 2048);
575 	hash_table_create(&cache->field_name_hash, cache->field_pool, 0,
576 			  strcase_hash, strcasecmp);
577 
578 	cache->event = event_create(index->event);
579 	event_add_category(cache->event, &event_category_mail_cache);
580 
581 	cache->dotlock_settings.use_excl_lock =
582 		(index->flags & MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL) != 0;
583 	cache->dotlock_settings.nfs_flush =
584 		(index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0;
585 	cache->dotlock_settings.timeout =
586 		I_MIN(MAIL_CACHE_LOCK_TIMEOUT, index->set.max_lock_timeout_secs);
587 	cache->dotlock_settings.stale_timeout = MAIL_CACHE_LOCK_CHANGE_TIMEOUT;
588 
589 	if (!MAIL_INDEX_IS_IN_MEMORY(index) &&
590 	    (index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) != 0)
591 		cache->file_cache = file_cache_new_path(-1, cache->filepath);
592 	cache->map_with_read =
593 		(cache->index->flags & MAIL_INDEX_OPEN_FLAG_SAVEONLY) != 0;
594 
595 	cache->ext_id =
596 		mail_index_ext_register(index, "cache", 0,
597 					sizeof(uint32_t), sizeof(uint32_t));
598 	mail_index_register_expunge_handler(index, cache->ext_id,
599 					    mail_cache_expunge_handler);
600 	return cache;
601 }
602 
mail_cache_open_or_create(struct mail_index * index)603 struct mail_cache *mail_cache_open_or_create(struct mail_index *index)
604 {
605 	const char *path = t_strconcat(index->filepath,
606 				       MAIL_CACHE_FILE_SUFFIX, NULL);
607 	return mail_cache_open_or_create_path(index, path);
608 }
609 
/* Free the cache and everything it owns. All views must have been closed
   first. Sets *_cache to NULL. */
void mail_cache_free(struct mail_cache **_cache)
{
	struct mail_cache *cache = *_cache;

	*_cache = NULL;

	i_assert(cache->views == NULL);

	if (cache->file_cache != NULL)
		file_cache_free(&cache->file_cache);

	mail_index_unregister_expunge_handler(cache->index, cache->ext_id);
	/* closes fd, releases mmap and lock */
	mail_cache_file_close(cache);

	buffer_free(&cache->read_buf);
	hash_table_destroy(&cache->field_name_hash);
	pool_unref(&cache->field_pool);
	event_unref(&cache->event);
	i_free(cache->need_purge_reason);
	i_free(cache->field_file_map);
	i_free(cache->file_field_map);
	i_free(cache->fields);
	i_free(cache->filepath);
	i_free(cache);
}
635 
/* Take a write lock on the cache file, using either fcntl/flock-style
   locking or dotlocks depending on the index's lock method. After a
   previous failure the next attempt is non-blocking to avoid stalling.
   Returns 1 on success, 0 on lock timeout, -1 on error. */
static int mail_cache_lock_file(struct mail_cache *cache)
{
	unsigned int timeout_secs;
	bool nonblock = FALSE;
	int ret;

	if (cache->last_lock_failed) {
		/* previous locking failed. don't waste time waiting on it
		   again, just try once to see if it's available now. */
		nonblock = TRUE;
	}

	i_assert(cache->file_lock == NULL);
	if (cache->index->set.lock_method != FILE_LOCK_METHOD_DOTLOCK) {
		timeout_secs = I_MIN(MAIL_CACHE_LOCK_TIMEOUT,
				     cache->index->set.max_lock_timeout_secs);

		ret = mail_index_lock_fd(cache->index, cache->filepath,
					 cache->fd, F_WRLCK,
					 nonblock ? 0 : timeout_secs,
					 &cache->file_lock);
	} else {
		struct dotlock *dotlock;
		enum dotlock_create_flags flags =
			nonblock ? DOTLOCK_CREATE_FLAG_NONBLOCK : 0;

		ret = file_dotlock_create(&cache->dotlock_settings,
					  cache->filepath, flags, &dotlock);
		if (ret > 0)
			cache->file_lock = file_lock_from_dotlock(&dotlock);
		else if (ret < 0) {
			mail_cache_set_syscall_error(cache,
						     "file_dotlock_create()");
		}
	}
	cache->last_lock_failed = ret <= 0;

	/* don't bother warning if locking failed due to a timeout. since cache
	   updating isn't all that important we're using a very short timeout
	   so it can be triggered sometimes on heavy load */
	if (ret <= 0)
		return ret;

	/* make sure we see the latest file contents now that we're locked */
	mail_index_flush_read_cache(cache->index, cache->filepath, cache->fd,
				    TRUE);
	return 1;
}
683 
mail_cache_unlock_file(struct mail_cache * cache)684 static void mail_cache_unlock_file(struct mail_cache *cache)
685 {
686 	if (cache->file_lock != NULL)
687 		file_unlock(&cache->file_lock);
688 }
689 
690 static bool
mail_cache_verify_reset_id(struct mail_cache * cache,uint32_t * reset_id_r)691 mail_cache_verify_reset_id(struct mail_cache *cache, uint32_t *reset_id_r)
692 {
693 	const struct mail_index_ext *ext;
694 	struct mail_index_view *iview;
695 	uint32_t reset_id;
696 
697 	iview = mail_index_view_open(cache->index);
698 	ext = mail_index_view_get_ext(iview, cache->ext_id);
699 	reset_id = ext == NULL ? 0 : ext->reset_id;
700 	mail_index_view_close(&iview);
701 
702 	*reset_id_r = reset_id;
703 	return cache->hdr->file_seq == reset_id;
704 }
705 
706 static int
mail_cache_sync_wait_index(struct mail_cache * cache,uint32_t * reset_id_r)707 mail_cache_sync_wait_index(struct mail_cache *cache, uint32_t *reset_id_r)
708 {
709 	const char *lock_reason = "cache reset_id sync";
710 	uint32_t file_seq;
711 	uoff_t file_offset;
712 	bool cache_locked = cache->file_lock != NULL;
713 	int ret;
714 
715 	if (cache->index->log_sync_locked)
716 		return 0;
717 
718 	/* Wait for .log file lock, so we can be sure that there is no cache
719 	   purging going on. (Because it first recreates the cache file,
720 	   unlocks it and only then writes the changes to the index and
721 	   releases the .log lock.) To prevent deadlocks, cache file must be
722 	   locked after the .log, not before. */
723 	if (cache_locked)
724 		mail_cache_unlock_file(cache);
725 	if (mail_transaction_log_sync_lock(cache->index->log, lock_reason,
726 					   &file_seq, &file_offset) < 0)
727 		return -1;
728 	/* Lock the cache file as well so we'll get a guaranteed result on
729 	   whether the reset_id can be synced or if it's already desynced and
730 	   the cache just needs to be recreated. */
731 	ret = -1;
732 	while (mail_cache_lock_file(cache) > 0) {
733 		/* Locked the current fd, but it may have already been
734 		   recreated. Reopen and retry if needed. */
735 		if (!mail_cache_need_reopen(cache)) {
736 			ret = 1;
737 			break;
738 		}
739 		if ((ret = mail_cache_reopen(cache)) <= 0)
740 			break;
741 	}
742 
743 	if (ret <= 0)
744 		;
745 	else if (mail_index_refresh(cache->index) < 0)
746 		ret = -1;
747 	else
748 		ret = mail_cache_verify_reset_id(cache, reset_id_r) ? 1 : 0;
749 	mail_transaction_log_sync_unlock(cache->index->log, lock_reason);
750 	if (ret <= 0 || !cache_locked)
751 		mail_cache_unlock_file(cache);
752 	return ret;
753 }
754 
/* Make sure the cache file's file_seq matches the index's reset_id,
   refreshing the index and waiting out a concurrent purge if necessary.
   Returns 1 if in sync, 0 if the cache is unusable/corrupted, -1 on error. */
int mail_cache_sync_reset_id(struct mail_cache *cache)
{
	uint32_t reset_id;
	int ret;

	/* verify that the index reset_id matches the cache's file_seq */
	if (mail_cache_verify_reset_id(cache, &reset_id))
		return 1;

	/* Mismatch. See if we can get it synced. */
	if (cache->index->mapping) {
		/* Syncing is already locked, and we're in the middle of
		   mapping the index. The cache is unusable. */
		i_assert(cache->index->log_sync_locked);
		mail_cache_set_corrupted(cache, "reset_id mismatch during sync");
		return 0;
	}

	/* See if reset_id changes after refreshing the index. */
	if (mail_index_refresh(cache->index) < 0)
		return -1;
	if (mail_cache_verify_reset_id(cache, &reset_id))
		return 1;

	/* Use locking to wait for a potential cache purging to finish.
	   If that didn't work either, the cache is corrupted or lost. */
	ret = mail_cache_sync_wait_index(cache, &reset_id);
	if (ret == 0 && cache->fd != -1 && reset_id != 0) {
		mail_cache_set_corrupted(cache,
			"reset_id mismatch even after locking "
			"(file_seq=%u != reset_id=%u)",
			cache->hdr == NULL ? 0 : cache->hdr->file_seq,
			reset_id);
	}
	return ret;
}
791 
/* Lock the latest cache file for writing and map its header.
   Returns 1 when locked (caller must later call mail_cache_unlock() or
   mail_cache_flush_and_unlock()), 0 when there is nothing to lock
   (in-memory/readonly/missing cache), -1 on error. */
int mail_cache_lock(struct mail_cache *cache)
{
	int ret;

	i_assert(!cache->locked);
	/* the only reason why we might be in here while mapping the index is
	   if we're coming from mail_cache_expunge_count() while syncing the
	   index. */
	i_assert(!cache->index->mapping || cache->index->log_sync_locked);

	if (MAIL_INDEX_IS_IN_MEMORY(cache->index) ||
	    cache->index->readonly)
		return 0;

	/* Make sure at least some cache file is opened. Usually it's the
	   latest one, so delay until it's locked to check whether a newer
	   cache file exists. */
	if ((ret = mail_cache_open_and_verify(cache)) < 0)
		return -1;
	if (ret == 0) {
		/* Cache doesn't exist or it was just found to be corrupted and
		   was unlinked. Cache purging will create it back. */
		return 0;
	}

	/* lock-and-recheck loop: the file may be replaced by purging between
	   locking and checking, so retry until we hold the latest file */
	for (;;) {
		if (mail_cache_lock_file(cache) <= 0)
			return -1;
		if (!mail_cache_need_reopen(cache)) {
			/* locked the latest file */
			break;
		}
		if ((ret = mail_cache_reopen(cache)) <= 0) {
			i_assert(cache->file_lock == NULL);
			return ret;
		}
		i_assert(cache->file_lock == NULL);
		/* okay, so it was just purged. try again. */
	}

	if ((ret = mail_cache_sync_reset_id(cache)) <= 0) {
		mail_cache_unlock_file(cache);
		return ret;
	}
	i_assert(cache->file_lock != NULL);

	/* successfully locked - make sure our header is up to date */
	cache->locked = TRUE;
	cache->hdr_modified = FALSE;

	if (cache->file_cache != NULL) {
		/* drop any cached copy of the header so it gets re-read */
		file_cache_invalidate(cache->file_cache, 0,
				      sizeof(struct mail_cache_header));
	}
	if (cache->read_buf != NULL)
		buffer_set_used_size(cache->read_buf, 0);
	if ((ret = mail_cache_map_all(cache)) <= 0) {
		mail_cache_unlock(cache);
		return ret;
	}
	/* working copy of the header for modifications while locked */
	cache->hdr_copy = *cache->hdr;
	return 1;
}
855 
/* Write out pending header/field changes and release the cache lock.
   Returns 0 on success, -1 on error (lock is released in either case). */
int mail_cache_flush_and_unlock(struct mail_cache *cache)
{
	int ret = 0;

	i_assert(cache->locked);

	if (cache->field_header_write_pending)
                ret = mail_cache_header_fields_update(cache);

	/* Cache may become unusable during for various reasons, e.g.
	   mail_cache_map(). Also the above mail_cache_header_fields_update()
	   call can make it unusable, so check this after it. */
	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		mail_cache_unlock(cache);
		return -1;
	}

	if (cache->hdr_modified) {
		cache->hdr_modified = FALSE;
		/* write the modified header copy back at offset 0 */
		if (mail_cache_write(cache, &cache->hdr_copy,
				     sizeof(cache->hdr_copy), 0) < 0)
			ret = -1;
		cache->hdr_ro_copy = cache->hdr_copy;
		mail_cache_update_need_purge(cache);
	}

	mail_cache_unlock(cache);
	return ret;
}
885 
/* Release the cache lock taken by mail_cache_lock(), discarding any
   unwritten header changes if the cache turned out to be unusable and
   fsyncing when the index is configured with FSYNC_MODE_ALWAYS. */
void mail_cache_unlock(struct mail_cache *cache)
{
	i_assert(cache->locked);

	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* we found it to be broken during the lock. just clean up. */
		cache->hdr_modified = FALSE;
	} else if (cache->index->set.fsync_mode == FSYNC_MODE_ALWAYS) {
		if (fdatasync(cache->fd) < 0)
			mail_cache_set_syscall_error(cache, "fdatasync()");
	}

	cache->locked = FALSE;
	mail_cache_unlock_file(cache);
}
901 
/* Write data to the locked cache file at the given offset, keeping the
   file_cache layer in sync and invalidating the read buffer.
   Returns 0 on success, -1 on write error. */
int mail_cache_write(struct mail_cache *cache, const void *data, size_t size,
		     uoff_t offset)
{
	i_assert(cache->locked);

	if (pwrite_full(cache->fd, data, size, offset) < 0) {
		mail_cache_set_syscall_error(cache, "pwrite_full()");
		return -1;
	}

	/* mirror the write into the file_cache copy of the file */
	if (cache->file_cache != NULL)
		file_cache_write(cache->file_cache, data, size, offset);
	/* read buffer may now contain stale data - discard it */
	if (cache->read_buf != NULL)
		buffer_set_used_size(cache->read_buf, 0);
	return 0;
}
918 
/* Append data to the locked cache file. If *offset is 0 it's initialized
   to the current file size (EOF). Enforces the configured maximum cache
   file size; exceeding it marks the cache corrupted so it gets recreated.
   Returns 0 on success with *offset set to the write position, -1 on error. */
int mail_cache_append(struct mail_cache *cache, const void *data, size_t size,
		      uint32_t *offset)
{
	struct stat st;

	if (*offset == 0) {
		/* append at EOF - look up the current file size */
		if (fstat(cache->fd, &st) < 0) {
			if (!ESTALE_FSTAT(errno))
				mail_cache_set_syscall_error(cache, "fstat()");
			return -1;
		}
		cache->last_stat_size = st.st_size;
		if ((uoff_t)st.st_size > cache->index->optimization_set.cache.max_size) {
			mail_cache_set_corrupted(cache, "Cache file too large");
			return -1;
		}
		*offset = st.st_size;
	}
	/* overflow-safe check that offset+size stays within max_size */
	if (*offset >= cache->index->optimization_set.cache.max_size ||
	    cache->index->optimization_set.cache.max_size - *offset < size) {
		mail_cache_set_corrupted(cache, "Cache file too large");
		return -1;
	}
	if (mail_cache_write(cache, data, size, *offset) < 0)
		return -1;
	return 0;
}
946 
mail_cache_exists(struct mail_cache * cache)947 bool mail_cache_exists(struct mail_cache *cache)
948 {
949 	return !MAIL_CACHE_IS_UNUSABLE(cache);
950 }
951 
952 struct mail_cache_view *
mail_cache_view_open(struct mail_cache * cache,struct mail_index_view * iview)953 mail_cache_view_open(struct mail_cache *cache, struct mail_index_view *iview)
954 {
955 	struct mail_cache_view *view;
956 
957 	view = i_new(struct mail_cache_view, 1);
958 	view->cache = cache;
959 	view->view = iview;
960 	view->cached_exists_buf =
961 		buffer_create_dynamic(default_pool,
962 				      cache->file_fields_count + 10);
963 	DLLIST_PREPEND(&cache->views, view);
964 	return view;
965 }
966 
/* Close a cache view, flushing any pending field-header updates (unless a
   purge is in progress) and unlinking it from the cache's view list.
   Sets *_view to NULL. */
void mail_cache_view_close(struct mail_cache_view **_view)
{
	struct mail_cache_view *view = *_view;

	i_assert(view->trans_view == NULL);

	*_view = NULL;
	/* best-effort flush - failure is ignored on close */
	if (view->cache->field_header_write_pending &&
	    !view->cache->purging)
                (void)mail_cache_header_fields_update(view->cache);

	DLLIST_REMOVE(&view->cache->views, view);
	buffer_free(&view->cached_exists_buf);
	i_free(view);
}
982 
/* Enable or disable caching-decision updates for this view. */
void mail_cache_view_update_cache_decisions(struct mail_cache_view *view,
					    bool update)
{
	/* stored inverted: TRUE means decision updates are suppressed */
	view->no_decision_updates = update ? FALSE : TRUE;
}
988 
mail_cache_get_first_new_seq(struct mail_index_view * view)989 uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view)
990 {
991 	const struct mail_index_header *idx_hdr;
992 	uint32_t first_new_seq, message_count;
993 
994 	idx_hdr = mail_index_get_header(view);
995 	if (idx_hdr->day_first_uid[7] == 0)
996 		return 1;
997 
998 	if (!mail_index_lookup_seq_range(view, idx_hdr->day_first_uid[7],
999 					 (uint32_t)-1, &first_new_seq,
1000 					 &message_count)) {
1001 		/* all messages are too old */
1002 		return idx_hdr->messages_count+1;
1003 	}
1004 	return first_new_seq;
1005 }
1006