xref: /dragonfly/sys/vfs/hammer/hammer_mirror.c (revision f746689a)
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.17 2008/07/31 22:30:33 dillon Exp $
35  */
36 /*
37  * HAMMER mirroring ioctls - serialize and deserialize modifications made
38  *			     to a filesystem.
39  */
40 
41 #include "hammer.h"
42 
43 static int hammer_mirror_check(hammer_cursor_t cursor,
44 				struct hammer_ioc_mrecord_rec *mrec);
45 static int hammer_mirror_update(hammer_cursor_t cursor,
46 				struct hammer_ioc_mrecord_rec *mrec);
47 static int hammer_mirror_write(hammer_cursor_t cursor,
48 				struct hammer_ioc_mrecord_rec *mrec,
49 				char *udata);
50 static int hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
51 				struct hammer_ioc_mrecord_rec *mrec,
52 				struct hammer_ioc_mirror_rw *mirror,
53 				u_int32_t localization,
54 				char *uptr);
55 static int hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
56 				struct hammer_ioc_mrecord_rec *mrec,
57 				struct hammer_ioc_mirror_rw *mirror,
58 				u_int32_t localization);
59 static int hammer_ioc_mirror_write_skip(hammer_cursor_t cursor,
60 				struct hammer_ioc_mrecord_skip *mrec,
61 				struct hammer_ioc_mirror_rw *mirror,
62 				u_int32_t localization);
63 static int hammer_mirror_delete_to(hammer_cursor_t cursor,
64 			        struct hammer_ioc_mirror_rw *mirror);
65 static int hammer_mirror_localize_data(hammer_data_ondisk_t data,
66 				hammer_btree_leaf_elm_t leaf);
67 
68 /*
69  * All B-Tree records within the specified key range which also conform
70  * to the transaction id range are returned.  Mirroring code keeps track
71  * of the last transaction id fully scanned and can efficiently pick up
72  * where it left off if interrupted.
73  *
74  * The PFS is identified in the mirror structure.  The passed ip is just
75  * some directory in the overall HAMMER filesystem and has nothing to
76  * do with the PFS.
77  */
int
hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	union hammer_ioc_mrecord_any mrec;
	hammer_btree_leaf_elm_t elm;
	const int crc_start = HAMMER_MREC_CRCOFF;
	char *uptr;
	int error;
	int data_len;
	int bytes;
	int eatdisk;
	int mrec_flags;
	u_int32_t localization;
	u_int32_t rec_crc;

	/*
	 * The PFS id is carried in the upper 16 bits of the localization
	 * field; the low bits select the object space within the PFS.
	 */
	localization = (u_int32_t)mirror->pfs_id << 16;

	/*
	 * The caller may not pre-bake PFS bits into the key range; they
	 * are derived from pfs_id above.
	 */
	if ((mirror->key_beg.localization | mirror->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
		return(EINVAL);

	/*
	 * Start the scan at key_beg, relocalized into the target PFS.
	 */
	mirror->key_cur = mirror->key_beg;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;
	bzero(&mrec, sizeof(mrec));
	bzero(&cmirror, sizeof(cmirror));

	/*
	 * Make CRC errors non-fatal (at least on data), causing an EDOM
	 * error instead of EIO.
	 */
	trans->flags |= HAMMER_TRANSF_CRCDOM;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += localization;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag filters the search to only return elements whos create
	 * or delete TID is >= mirror_tid.  The B-Tree uses the mirror_tid
	 * field stored with internal and leaf nodes to shortcut the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = mirror->tid_beg;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Yield to more important tasks
		 *
		 * (NOTE: error is already known to be 0 here due to the
		 * loop condition; the inner test is redundant but kept.)
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * An internal node can be returned in mirror-filtered
		 * mode and indicates that the scan is returning a skip
		 * range in the cursor->cmirror structure.
		 */
		uptr = (char *)mirror->ubuf + mirror->count;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Check space.  Records are aligned when packed
			 * into the user buffer; stop when the next record
			 * would overflow mirror->size.
			 */
			mirror->key_cur = cmirror.skip_beg;
			bytes = sizeof(mrec.skip);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec.  The CRC covers everything after the
			 * rec_crc field itself (crc_start offset).
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_SKIP;
			mrec.head.rec_size = bytes;
			mrec.skip.skip_beg = cmirror.skip_beg;
			mrec.skip.skip_end = cmirror.skip_end;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 0;
			goto didwrite;
		}

		/*
		 * Leaf node.  In full-history mode we could filter out
		 * elements modified outside the user-requested TID range.
		 *
		 * However, such elements must be returned so the writer
		 * can compare them against the target to determine what
		 * needs to be deleted on the target, particular for
		 * no-history mirrors.
		 */
		KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		mirror->key_cur = elm->base;

		/*
		 * Determine if we should generate a PASS or a REC.  PASS
		 * records are records without any data payload.  Such
		 * records will be generated if the target is already expected
		 * to have the record, allowing it to delete the gaps.
		 *
		 * A PASS record is also used to perform deletions on the
		 * target.
		 *
		 * Such deletions are needed if the master or files on the
		 * master are no-history, or if the slave is so far behind
		 * the master has already been pruned.
		 */
		if (elm->base.create_tid < mirror->tid_beg ||
		    elm->base.create_tid > mirror->tid_end) {
			bytes = sizeof(mrec.rec);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec.  PASS records carry the leaf but no
			 * data payload.
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_PASS;
			mrec.head.rec_size = bytes;
			mrec.rec.leaf = *elm;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 1;
			goto didwrite;

		}

		/*
		 * The core code exports the data to userland.
		 *
		 * CRC errors on data are reported but passed through,
		 * but the data must be washed by the user program.
		 */
		mrec_flags = 0;
		data_len = (elm->data_offset) ? elm->data_len : 0;
		if (data_len) {
			error = hammer_btree_extract(&cursor,
						     HAMMER_CURSOR_GET_DATA);
			if (error) {
				if (error != EDOM)
					break;
				/* EDOM: flag the bad CRC and keep going */
				mrec_flags |= HAMMER_MRECF_CRC_ERROR |
					      HAMMER_MRECF_DATA_CRC_BAD;
			}
		}

		bytes = sizeof(mrec.rec) + data_len;
		if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) > mirror->size)
			break;

		/*
		 * Construct the record for userland and copyout.
		 *
		 * The user is asking for a snapshot, if the record was
		 * deleted beyond the user-requested ending tid, the record
		 * is not considered deleted from the point of view of
		 * userland and delete_tid is cleared.
		 */
		mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
		mrec.head.type = HAMMER_MREC_TYPE_REC | mrec_flags;
		mrec.head.rec_size = bytes;
		mrec.rec.leaf = *elm;

		if (elm->base.delete_tid > mirror->tid_end)
			mrec.rec.leaf.base.delete_tid = 0;
		/* CRC covers the header tail plus the data payload */
		rec_crc = crc32(&mrec.head.rec_size,
				sizeof(mrec.rec) - crc_start);
		if (data_len)
			rec_crc = crc32_ext(cursor.data, data_len, rec_crc);
		mrec.head.rec_crc = rec_crc;
		/* header and data are copied out separately */
		error = copyout(&mrec, uptr, sizeof(mrec.rec));
		if (data_len && error == 0) {
			error = copyout(cursor.data, uptr + sizeof(mrec.rec),
					data_len);
		}
		eatdisk = 1;

		/*
		 * eatdisk controls whether we skip the current cursor
		 * position on the next scan or not.  If doing a SKIP
		 * the cursor is already positioned properly for the next
		 * scan and eatdisk will be 0.
		 */
didwrite:
		if (error == 0) {
			mirror->count += HAMMER_HEAD_DOALIGN(bytes);
			if (eatdisk)
				cursor.flags |= HAMMER_CURSOR_ATEDISK;
			else
				cursor.flags &= ~HAMMER_CURSOR_ATEDISK;
			error = hammer_btree_iterate(&cursor);
		}
	}
	/* ENOENT terminates the scan normally: range exhausted */
	if (error == ENOENT) {
		mirror->key_cur = mirror->key_end;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		/* interrupted: report partial progress, not an error */
		mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	/* strip the PFS bits back off for the userland-visible key */
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
313 
314 /*
315  * Copy records from userland to the target mirror.
316  *
317  * The PFS is identified in the mirror structure.  The passed ip is just
318  * some directory in the overall HAMMER filesystem and has nothing to
319  * do with the PFS.  In fact, there might not even be a root directory for
320  * the PFS yet!
321  */
322 int
323 hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
324 		       struct hammer_ioc_mirror_rw *mirror)
325 {
326 	union hammer_ioc_mrecord_any mrec;
327 	struct hammer_cursor cursor;
328 	u_int32_t localization;
329 	int checkspace_count = 0;
330 	int error;
331 	int bytes;
332 	char *uptr;
333 	int seq;
334 
335 	localization = (u_int32_t)mirror->pfs_id << 16;
336 	seq = trans->hmp->flusher.act;
337 
338 	/*
339 	 * Validate the mirror structure and relocalize the tracking keys.
340 	 */
341 	if (mirror->size < 0 || mirror->size > 0x70000000)
342 		return(EINVAL);
343 	mirror->key_beg.localization &= HAMMER_LOCALIZE_MASK;
344 	mirror->key_beg.localization += localization;
345 	mirror->key_end.localization &= HAMMER_LOCALIZE_MASK;
346 	mirror->key_end.localization += localization;
347 	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
348 	mirror->key_cur.localization += localization;
349 
350 	/*
351 	 * Set up our tracking cursor for the loop.  The tracking cursor
352 	 * is used to delete records that are no longer present on the
353 	 * master.  The last handled record at key_cur must be skipped.
354 	 */
355 	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
356 
357 	cursor.key_beg = mirror->key_cur;
358 	cursor.key_end = mirror->key_end;
359 	cursor.flags |= HAMMER_CURSOR_BACKEND;
360 	error = hammer_btree_first(&cursor);
361 	if (error == 0)
362 		cursor.flags |= HAMMER_CURSOR_ATEDISK;
363 	if (error == ENOENT)
364 		error = 0;
365 
366 	/*
367 	 * Loop until our input buffer has been exhausted.
368 	 */
369 	while (error == 0 &&
370 		mirror->count + sizeof(mrec.head) <= mirror->size) {
371 
372 	        /*
373 		 * Don't blow out the buffer cache.  Leave room for frontend
374 		 * cache as well.
375 		 */
376 		while (hammer_flusher_meta_halflimit(trans->hmp) ||
377 		       hammer_flusher_undo_exhausted(trans, 2)) {
378 			hammer_unlock_cursor(&cursor);
379 			hammer_flusher_wait(trans->hmp, seq);
380 			hammer_lock_cursor(&cursor);
381 			seq = hammer_flusher_async_one(trans->hmp);
382 		}
383 
384 		/*
385 		 * If there is insufficient free space it may be due to
386 		 * reserved bigblocks, which flushing might fix.
387 		 */
388 		if (hammer_checkspace(trans->hmp, HAMMER_CHKSPC_MIRROR)) {
389 			if (++checkspace_count == 10) {
390 				error = ENOSPC;
391 				break;
392 			}
393 			hammer_unlock_cursor(&cursor);
394 			hammer_flusher_wait(trans->hmp, seq);
395 			hammer_lock_cursor(&cursor);
396 			seq = hammer_flusher_async(trans->hmp, NULL);
397 		}
398 
399 
400 		/*
401 		 * Acquire and validate header
402 		 */
403 		if ((bytes = mirror->size - mirror->count) > sizeof(mrec))
404 			bytes = sizeof(mrec);
405 		uptr = (char *)mirror->ubuf + mirror->count;
406 		error = copyin(uptr, &mrec, bytes);
407 		if (error)
408 			break;
409 		if (mrec.head.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
410 			error = EINVAL;
411 			break;
412 		}
413 		if (mrec.head.rec_size < sizeof(mrec.head) ||
414 		    mrec.head.rec_size > sizeof(mrec) + HAMMER_XBUFSIZE ||
415 		    mirror->count + mrec.head.rec_size > mirror->size) {
416 			error = EINVAL;
417 			break;
418 		}
419 
420 		switch(mrec.head.type & HAMMER_MRECF_TYPE_MASK) {
421 		case HAMMER_MREC_TYPE_SKIP:
422 			if (mrec.head.rec_size != sizeof(mrec.skip))
423 				error = EINVAL;
424 			if (error == 0)
425 				error = hammer_ioc_mirror_write_skip(&cursor, &mrec.skip, mirror, localization);
426 			break;
427 		case HAMMER_MREC_TYPE_REC:
428 			if (mrec.head.rec_size < sizeof(mrec.rec))
429 				error = EINVAL;
430 			if (error == 0)
431 				error = hammer_ioc_mirror_write_rec(&cursor, &mrec.rec, mirror, localization, uptr + sizeof(mrec.rec));
432 			break;
433 		case HAMMER_MREC_TYPE_REC_BADCRC:
434 			/*
435 			 * Records with bad data payloads are ignored XXX.
436 			 */
437 			if (mrec.head.rec_size < sizeof(mrec.rec))
438 				error = EINVAL;
439 			break;
440 		case HAMMER_MREC_TYPE_PASS:
441 			if (mrec.head.rec_size != sizeof(mrec.rec))
442 				error = EINVAL;
443 			if (error == 0)
444 				error = hammer_ioc_mirror_write_pass(&cursor, &mrec.rec, mirror, localization);
445 			break;
446 		default:
447 			error = EINVAL;
448 			break;
449 		}
450 
451 		/*
452 		 * Retry the current record on deadlock, otherwise setup
453 		 * for the next loop.
454 		 */
455 		if (error == EDEADLK) {
456 			while (error == EDEADLK) {
457 				hammer_recover_cursor(&cursor);
458 				error = hammer_cursor_upgrade(&cursor);
459 			}
460 		} else {
461 			if (error == EALREADY)
462 				error = 0;
463 			if (error == 0) {
464 				mirror->count +=
465 					HAMMER_HEAD_DOALIGN(mrec.head.rec_size);
466 			}
467 		}
468 	}
469 	hammer_done_cursor(&cursor);
470 
471 	/*
472 	 * cumulative error
473 	 */
474 	if (error) {
475 		mirror->head.flags |= HAMMER_IOC_HEAD_ERROR;
476 		mirror->head.error = error;
477 	}
478 
479 	/*
480 	 * ioctls don't update the RW data structure if an error is returned,
481 	 * always return 0.
482 	 */
483 	return(0);
484 }
485 
486 /*
487  * Handle skip records.
488  *
489  * We must iterate from the last resolved record position at mirror->key_cur
490  * to skip_beg and delete any records encountered.
491  *
492  * mirror->key_cur must be carefully set when we succeed in processing
493  * this mrec.
494  */
static int
hammer_ioc_mirror_write_skip(hammer_cursor_t cursor,
			     struct hammer_ioc_mrecord_skip *mrec,
			     struct hammer_ioc_mirror_rw *mirror,
			     u_int32_t localization)
{
	int error;

	/*
	 * Relocalize the skip range into the target PFS.
	 */
	mrec->skip_beg.localization &= HAMMER_LOCALIZE_MASK;
	mrec->skip_beg.localization += localization;
	mrec->skip_end.localization &= HAMMER_LOCALIZE_MASK;
	mrec->skip_end.localization += localization;

	/*
	 * Iterate from current position to skip_beg, deleting any records
	 * we encounter.
	 */
	cursor->key_end = mrec->skip_beg;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	error = hammer_mirror_delete_to(cursor, mirror);

	/*
	 * Now skip past the skip (which is the whole point of
	 * having a skip record).  The sender has not sent us any records
	 * for the skip area so we wouldn't know what to keep and what
	 * to delete anyway.
	 *
	 * Clear ATEDISK because skip_end is non-inclusive, so we can't
	 * count an exact match if we happened to get one.
	 */
	if (error == 0) {
		mirror->key_cur = mrec->skip_end;
		cursor->key_beg = mrec->skip_end;
		error = hammer_btree_lookup(cursor);
		cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		/* a miss just means there is no record at skip_end */
		if (error == ENOENT)
			error = 0;
	}
	return(error);
}
538 
539 /*
540  * Handle B-Tree records.
541  *
542  * We must iterate to mrec->base.key (non-inclusively), and then process
543  * the record.  We are allowed to write a new record or delete an existing
544  * record, but cannot replace an existing record.
545  *
546  * mirror->key_cur must be carefully set when we succeed in processing
547  * this mrec.
548  */
549 static int
550 hammer_ioc_mirror_write_rec(hammer_cursor_t cursor,
551 			    struct hammer_ioc_mrecord_rec *mrec,
552 			    struct hammer_ioc_mirror_rw *mirror,
553 			    u_int32_t localization,
554 			    char *uptr)
555 {
556 	hammer_transaction_t trans;
557 	u_int32_t rec_crc;
558 	int error;
559 
560 	trans = cursor->trans;
561 	rec_crc = crc32(mrec, sizeof(*mrec));
562 
563 	if (mrec->leaf.data_len < 0 ||
564 	    mrec->leaf.data_len > HAMMER_XBUFSIZE ||
565 	    mrec->leaf.data_len + sizeof(*mrec) > mrec->head.rec_size) {
566 		return(EINVAL);
567 	}
568 
569 	/*
570 	 * Re-localize for target.  relocalization of data is handled
571 	 * by hammer_mirror_write().
572 	 */
573 	mrec->leaf.base.localization &= HAMMER_LOCALIZE_MASK;
574 	mrec->leaf.base.localization += localization;
575 
576 	/*
577 	 * Delete records through until we reach (non-inclusively) the
578 	 * target record.
579 	 */
580 	cursor->key_end = mrec->leaf.base;
581 	cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
582 	cursor->flags |= HAMMER_CURSOR_BACKEND;
583 	error = hammer_mirror_delete_to(cursor, mirror);
584 
585 	/*
586 	 * Locate the record.
587 	 *
588 	 * If the record exists only the delete_tid may be updated.
589 	 *
590 	 * If the record does not exist we can create it only if the
591 	 * create_tid is not too old.  If the create_tid is too old
592 	 * it may have already been destroyed on the slave from pruning.
593 	 *
594 	 * Note that mirror operations are effectively as-of operations
595 	 * and delete_tid can be 0 for mirroring purposes even if it is
596 	 * not actually 0 at the originator.
597 	 *
598 	 * These functions can return EDEADLK
599 	 */
600 	cursor->key_beg = mrec->leaf.base;
601 	cursor->flags |= HAMMER_CURSOR_BACKEND;
602 	cursor->flags &= ~HAMMER_CURSOR_INSERT;
603 	error = hammer_btree_lookup(cursor);
604 
605 	if (error == 0 && hammer_mirror_check(cursor, mrec)) {
606 		error = hammer_mirror_update(cursor, mrec);
607 	} else if (error == ENOENT) {
608 		if (mrec->leaf.base.create_tid >= mirror->tid_beg)
609 			error = hammer_mirror_write(cursor, mrec, uptr);
610 		else
611 			error = 0;
612 	}
613 	if (error == 0 || error == EALREADY)
614 		mirror->key_cur = mrec->leaf.base;
615 	return(error);
616 }
617 
618 /*
619  * This works like write_rec but no write or update is necessary,
620  * and no data payload is included so we couldn't do a write even
621  * if we wanted to.
622  *
623  * We must still iterate for deletions, and we can validate the
624  * record header which is a good way to test for corrupted mirror
625  * targets XXX.
626  *
627  * mirror->key_cur must be carefully set when we succeed in processing
628  * this mrec.
629  */
630 static
631 int
632 hammer_ioc_mirror_write_pass(hammer_cursor_t cursor,
633 			     struct hammer_ioc_mrecord_rec *mrec,
634 			     struct hammer_ioc_mirror_rw *mirror,
635 			     u_int32_t localization)
636 {
637 	hammer_transaction_t trans;
638 	u_int32_t rec_crc;
639 	int error;
640 
641 	trans = cursor->trans;
642 	rec_crc = crc32(mrec, sizeof(*mrec));
643 
644 	/*
645 	 * Re-localize for target.  Relocalization of data is handled
646 	 * by hammer_mirror_write().
647 	 */
648 	mrec->leaf.base.localization &= HAMMER_LOCALIZE_MASK;
649 	mrec->leaf.base.localization += localization;
650 
651 	/*
652 	 * Delete records through until we reach (non-inclusively) the
653 	 * target record.
654 	 */
655 	cursor->key_end = mrec->leaf.base;
656 	cursor->flags &= ~HAMMER_CURSOR_END_INCLUSIVE;
657 	cursor->flags |= HAMMER_CURSOR_BACKEND;
658 
659 	error = hammer_mirror_delete_to(cursor, mirror);
660 
661 	/*
662 	 * Locate the record and get past it by setting ATEDISK.  Perform
663 	 * any necessary deletions.  We have no data payload and cannot
664 	 * create a new record.
665 	 */
666 	if (error == 0) {
667 		mirror->key_cur = mrec->leaf.base;
668 		cursor->key_beg = mrec->leaf.base;
669 		cursor->flags |= HAMMER_CURSOR_BACKEND;
670 		cursor->flags &= ~HAMMER_CURSOR_INSERT;
671 		error = hammer_btree_lookup(cursor);
672 		if (error == 0) {
673 			if (hammer_mirror_check(cursor, mrec))
674 				error = hammer_mirror_update(cursor, mrec);
675 			cursor->flags |= HAMMER_CURSOR_ATEDISK;
676 		} else {
677 			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
678 		}
679 		if (error == ENOENT)
680 			error = 0;
681 	}
682 	return(error);
683 }
684 
685 /*
686  * As part of the mirror write we iterate across swaths of records
687  * on the target which no longer exist on the source, and mark them
688  * deleted.
689  *
690  * The caller has indexed the cursor and set up key_end.  We iterate
691  * through to key_end.
692  *
693  * There is an edge case where the master has deleted a record whos
694  * create_tid exactly matches our end_tid.  We cannot delete this
695  * record on the slave yet because we cannot assign delete_tid == create_tid.
696  * The deletion should be picked up on the next sequence since in order
697  * to have been deleted on the master a transaction must have occured with
698  * a TID greater then the create_tid of the record.
699  */
static
int
hammer_mirror_delete_to(hammer_cursor_t cursor,
		       struct hammer_ioc_mirror_rw *mirror)
{
	hammer_btree_leaf_elm_t elm;
	int error;

	error = hammer_btree_iterate(cursor);
	while (error == 0) {
		elm = &cursor->node->ondisk->elms[cursor->index].leaf;
		/* mirror-filtered iteration must land on record elements */
		KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		/*
		 * Mark still-live records deleted as-of tid_end.  A record
		 * whose create_tid exactly equals tid_end cannot be deleted
		 * here (delete_tid may not equal create_tid); it will be
		 * picked up on the next mirroring sequence -- see the edge
		 * case described in the function header comment.
		 */
		if (elm->base.delete_tid == 0 &&
		    elm->base.create_tid != mirror->tid_end) {
			error = hammer_delete_at_cursor(cursor,
							HAMMER_DELETE_ADJUST,
							mirror->tid_end,
							time_second,
							1, NULL);
		}
		if (error == 0)
			error = hammer_btree_iterate(cursor);
	}
	/* running off the end of the keyspace is normal termination */
	if (error == ENOENT)
		error = 0;
	return(error);
}
728 
729 /*
730  * Check whether an update is needed in the case where a match already
731  * exists on the target.  The only type of update allowed in this case
732  * is an update of the delete_tid.
733  *
734  * Return non-zero if the update should proceed.
735  */
736 static
737 int
738 hammer_mirror_check(hammer_cursor_t cursor, struct hammer_ioc_mrecord_rec *mrec)
739 {
740 	hammer_btree_leaf_elm_t leaf = cursor->leaf;
741 
742 	if (leaf->base.delete_tid != mrec->leaf.base.delete_tid) {
743 		if (mrec->leaf.base.delete_tid != 0)
744 			return(1);
745 	}
746 	return(0);
747 }
748 
749 /*
750  * Update a record in-place.  Only the delete_tid can change, and
751  * only from zero to non-zero.
752  */
753 static
754 int
755 hammer_mirror_update(hammer_cursor_t cursor,
756 		     struct hammer_ioc_mrecord_rec *mrec)
757 {
758 	int error;
759 
760 	/*
761 	 * This case shouldn't occur.
762 	 */
763 	if (mrec->leaf.base.delete_tid == 0)
764 		return(0);
765 
766 	/*
767 	 * Mark the record deleted on the mirror target.
768 	 */
769 	error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
770 					mrec->leaf.base.delete_tid,
771 					mrec->leaf.delete_ts,
772 					1, NULL);
773 	cursor->flags |= HAMMER_CURSOR_ATEDISK;
774 	return(error);
775 }
776 
777 /*
778  * Write out a new record.
779  */
780 static
781 int
782 hammer_mirror_write(hammer_cursor_t cursor,
783 		    struct hammer_ioc_mrecord_rec *mrec,
784 		    char *udata)
785 {
786 	hammer_transaction_t trans;
787 	hammer_buffer_t data_buffer;
788 	hammer_off_t ndata_offset;
789 	hammer_tid_t high_tid;
790 	void *ndata;
791 	int error;
792 	int doprop;
793 
794 	trans = cursor->trans;
795 	data_buffer = NULL;
796 
797 	/*
798 	 * Get the sync lock so the whole mess is atomic
799 	 */
800 	hammer_sync_lock_sh(trans);
801 
802 	/*
803 	 * Allocate and adjust data
804 	 */
805 	if (mrec->leaf.data_len && mrec->leaf.data_offset) {
806 		ndata = hammer_alloc_data(trans, mrec->leaf.data_len,
807 					  mrec->leaf.base.rec_type,
808 					  &ndata_offset, &data_buffer, &error);
809 		if (ndata == NULL)
810 			return(error);
811 		mrec->leaf.data_offset = ndata_offset;
812 		hammer_modify_buffer(trans, data_buffer, NULL, 0);
813 		error = copyin(udata, ndata, mrec->leaf.data_len);
814 		if (error == 0) {
815 			if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
816 				kprintf("data crc mismatch on pipe\n");
817 				error = EINVAL;
818 			} else {
819 				error = hammer_mirror_localize_data(
820 							ndata, &mrec->leaf);
821 			}
822 		}
823 		hammer_modify_buffer_done(data_buffer);
824 	} else {
825 		mrec->leaf.data_offset = 0;
826 		error = 0;
827 		ndata = NULL;
828 	}
829 	if (error)
830 		goto failed;
831 
832 	/*
833 	 * Do the insertion.  This can fail with a EDEADLK or EALREADY
834 	 */
835 	cursor->flags |= HAMMER_CURSOR_INSERT;
836 	error = hammer_btree_lookup(cursor);
837 	if (error != ENOENT) {
838 		if (error == 0)
839 			error = EALREADY;
840 		goto failed;
841 	}
842 
843 	error = hammer_btree_insert(cursor, &mrec->leaf, &doprop);
844 
845 	/*
846 	 * Cursor is left on the current element, we want to skip it now.
847 	 */
848 	cursor->flags |= HAMMER_CURSOR_ATEDISK;
849 	cursor->flags &= ~HAMMER_CURSOR_INSERT;
850 
851 	/*
852 	 * Track a count of active inodes.
853 	 */
854 	if (error == 0 &&
855 	    mrec->leaf.base.rec_type == HAMMER_RECTYPE_INODE &&
856 	    mrec->leaf.base.delete_tid == 0) {
857 		hammer_modify_volume_field(trans,
858 					   trans->rootvol,
859 					   vol0_stat_inodes);
860 		++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
861 		hammer_modify_volume_done(trans->rootvol);
862 	}
863 
864 	/*
865 	 * vol0_next_tid must track the highest TID stored in the filesystem.
866 	 * We do not need to generate undo for this update.
867 	 */
868 	high_tid = mrec->leaf.base.create_tid;
869 	if (high_tid < mrec->leaf.base.delete_tid)
870 		high_tid = mrec->leaf.base.delete_tid;
871 	if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
872 		hammer_modify_volume(trans, trans->rootvol, NULL, 0);
873 		trans->rootvol->ondisk->vol0_next_tid = high_tid;
874 		hammer_modify_volume_done(trans->rootvol);
875 	}
876 
877 	if (error == 0 && doprop)
878 		hammer_btree_do_propagation(cursor, NULL, &mrec->leaf);
879 
880 failed:
881 	/*
882 	 * Cleanup
883 	 */
884 	if (error && mrec->leaf.data_offset) {
885 		hammer_blockmap_free(cursor->trans,
886 				     mrec->leaf.data_offset,
887 				     mrec->leaf.data_len);
888 	}
889 	hammer_sync_unlock(trans);
890 	if (data_buffer)
891 		hammer_rel_buffer(data_buffer, 0);
892 	return(error);
893 }
894 
895 /*
896  * Localize the data payload.  Directory entries may need their
897  * localization adjusted.
898  *
899  * PFS directory entries must be skipped entirely (return EALREADY).
900  */
901 static
902 int
903 hammer_mirror_localize_data(hammer_data_ondisk_t data,
904 			    hammer_btree_leaf_elm_t leaf)
905 {
906 	u_int32_t localization;
907 
908 	if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
909 		if (data->entry.obj_id == HAMMER_OBJID_ROOT)
910 			return(EALREADY);
911 		localization = leaf->base.localization &
912 			       HAMMER_LOCALIZE_PSEUDOFS_MASK;
913 		if (data->entry.localization != localization) {
914 			data->entry.localization = localization;
915 			hammer_crc_set_leaf(data, leaf);
916 		}
917 	}
918 	return(0);
919 }
920 
921