xref: /dragonfly/sys/vfs/hammer/hammer_undo.c (revision a4fe36f1)
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}

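/*
 * Red-black tree of cached undo ranges, keyed on the undo offset so
 * hammer_enter_undo_history() can look entries up directly by
 * hammer_off_t.
 */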
RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT(hammer_is_zone_undo(zone3_off));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	/*
	 * The zone-2 offset stored in vol0_undo_array[i] for this big-block,
	 * plus the offset of the zone-3 address within that big-block,
	 * yields the zone-2 address.
	 */
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_BIGBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512 byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient undo FIFO space!");
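	/*
	 * (The HAMMER_BUFSIZE*3 slop presumably covers the PAD records and
	 * record head/tail overhead laid down below, beyond the payload
	 * bytes themselves.)
	 */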

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO, when we hit the start of a new
		 * buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
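			/*
			 * New buffer: preformat it with DUMMY records rather
			 * than reading it in.  The XOR presumably keeps the
			 * preformatted seqnos from being contiguous with the
			 * live sequence space (see hammer_format_undo()).
			 */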
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			hdkprintf("undo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
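		/*
		 * The record CRC is built as the CRC of the head up to
		 * HAMMER_FIFO_HEAD_CRCOFF (presumably excluding the crc
		 * field itself) XORed with the CRC of everything following
		 * the head.
		 */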
		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
void
hammer_format_undo(void *base, uint32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

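	/*
	 * Carve the buffer into HAMMER_UNDO_ALIGN (512 byte) slots, each
	 * holding a self-contained DUMMY record with its own sequence
	 * number and CRC.
	 */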
	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
	}
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	uint32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume_noundo(NULL, root_volume);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;
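	/*
	 * Both the in-core and the on-disk FIFO indices now point at the
	 * start of the undo zone, so the rewrite below covers the entire
	 * area.
	 */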

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail = (void *)((char *)head + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	hmkprintf(hmp, "version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return (error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request is
 * completely covered by a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

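	/*
	 * On a cache hit refresh the node's LRU position.  If the cached
	 * range already covers the request no new undo is needed.
	 */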
	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
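	/*
	 * No cache hit.  Take a node from the fixed array until it is
	 * exhausted, then recycle the least recently used entry.
	 */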
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes __debugvar;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

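	/*
	 * first_offset trails next_offset in the common case; when the FIFO
	 * has wrapped, the used span runs from first_offset through the end
	 * of the zone and back around to next_offset.
	 */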
	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
		        (cundomap->next_offset & HAMMER_OFF_LONG_MASK);
	}
	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
	if (HAMMER_ITOB(io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}