xref: /dragonfly/sys/vfs/hammer/hammer_undo.c (revision 896f2e3a)
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * HAMMER undo - undo buffer/FIFO management.
37  */
38 
39 #include "hammer.h"
40 
41 static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);
42 
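/*
 * Red-black tree support for the cached undo ranges, keyed by offset
 * (see the UNDO HISTORY API further below).
 */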
43 RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
44              hammer_und_rb_compare, hammer_off_t, offset);
45 
46 /*
47  * Convert a zone-3 undo offset into a zone-2 buffer offset.
48  */
49 hammer_off_t
50 hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
51 {
52 	hammer_volume_t root_volume;
53 	hammer_blockmap_t undomap __debugvar;
54 	hammer_off_t result_offset;
55 	int i;
56 
57 	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
58 	root_volume = hammer_get_root_volume(hmp, errorp);
59 	if (*errorp)
60 		return(0);
61 	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
62 	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
63 	KKASSERT(zone3_off < undomap->alloc_offset);
64 
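	/*
	 * The undo zone maps linearly onto zone-2 big-blocks through
	 * vol0_undo_array[]: index by big-block within the zone, then add
	 * the byte offset within that big-block.
	 */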
65 	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
66 	result_offset = root_volume->ondisk->vol0_undo_array[i] +
67 			(zone3_off & HAMMER_BIGBLOCK_MASK64);
68 
69 	hammer_rel_volume(root_volume, 0);
70 	return(result_offset);
71 }
72 
73 /*
74  * Generate UNDO record(s) for the block of data at the specified zone1
75  * or zone2 offset.
76  *
77  * The recovery code will execute UNDOs in reverse order, allowing overlaps.
78  * All the UNDOs are executed together so if we already laid one down we
79  * do not have to lay another one down for the same range.
80  *
81  * For HAMMER version 4+ UNDO, a 512-byte boundary is enforced and a PAD
82  * will be laid down for any unused space.  UNDO FIFO media structures
83  * will implement the hdr_seq field (it used to be reserved01), and
84  * both flush and recovery mechanics will be very different.
85  *
86  * WARNING!  See also hammer_generate_redo() in hammer_redo.c
87  */
88 int
89 hammer_generate_undo(hammer_transaction_t trans,
90 		     hammer_off_t zone_off, void *base, int len)
91 {
92 	hammer_mount_t hmp;
93 	hammer_volume_t root_volume;
94 	hammer_blockmap_t undomap;
95 	hammer_buffer_t buffer = NULL;
96 	hammer_fifo_undo_t undo;
97 	hammer_fifo_tail_t tail;
98 	hammer_off_t next_offset;
99 	int error;
100 	int bytes;
101 	int n;
102 
103 	hmp = trans->hmp;
104 
105 	/*
106 	 * A SYNC record may be required before we can lay down a general
107 	 * UNDO.  This ensures that the nominal recovery span contains
108 	 * at least one SYNC record telling the recovery code how far
109 	 * out-of-span it must go to run the REDOs.
110 	 */
111 	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
112 	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
113 		hammer_generate_redo_sync(trans);
114 	}
115 
116 	/*
117 	 * Enter the offset into our undo history.  If there is an existing
118 	 * undo we do not have to generate a new one.
119 	 */
120 	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
121 		return(0);
122 
123 	root_volume = trans->rootvol;
124 	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
125 
126 	/* no undo recursion */
127 	hammer_modify_volume_noundo(NULL, root_volume);
128 	hammer_lock_ex(&hmp->undo_lock);
129 
130 	/* undo had better not roll over (loose test) */
131 	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
132 		panic("hammer: insufficient undo FIFO space!");
133 
134 	/*
135 	 * Loop until the undo for the entire range has been laid down.
136 	 */
137 	while (len) {
138 		/*
139 		 * Fetch the layout offset in the UNDO FIFO, wrapping it
140 		 * as necessary.
141 		 */
142 		if (undomap->next_offset == undomap->alloc_offset) {
143 			undomap->next_offset =
144 				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
145 		}
146 		next_offset = undomap->next_offset;
147 
148 		/*
149 		 * This is a tail-chasing FIFO; when we hit the start of a new
150 		 * buffer we don't have to read it in.
151 		 */
152 		if ((next_offset & HAMMER_BUFMASK) == 0) {
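			/*
			 * Starting a fresh buffer: preformat it rather than
			 * reading the old contents in.  The XOR with
			 * 0x40000000 keeps these preformat seqnos out of the
			 * live sequence space (see hammer_format_undo()).
			 */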
153 			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
154 			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
155 		} else {
156 			undo = hammer_bread(hmp, next_offset, &error, &buffer);
157 		}
158 		if (error)
159 			break;
160 		/* no undo recursion */
161 		hammer_modify_buffer_noundo(NULL, buffer);
162 
163 		/*
164 		 * Calculate how big a media structure fits up to the next
165 		 * alignment point and how large a data payload we can
166 		 * accommodate.
167 		 *
168 		 * If n calculates to 0 or negative there is no room for
169 		 * anything but a PAD.
170 		 */
171 		bytes = HAMMER_UNDO_ALIGN -
172 			((int)next_offset & HAMMER_UNDO_MASK);
173 		n = bytes -
174 		    (int)sizeof(struct hammer_fifo_undo) -
175 		    (int)sizeof(struct hammer_fifo_tail);
176 
177 		/*
178 		 * If available space is insufficient for any payload
179 		 * we have to lay down a PAD.
180 		 *
181 		 * The minimum PAD is 8 bytes and the head and tail will
182 		 * overlap each other in that case.  PADs do not have
183 		 * sequence numbers or CRCs.
184 		 *
185 		 * A PAD may not start on a boundary.  That is, every
186 		 * 512-byte block in the UNDO/REDO FIFO must begin with
187 		 * a record containing a sequence number.
188 		 */
189 		if (n <= 0) {
190 			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
191 			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
192 			tail = (void *)((char *)undo + bytes - sizeof(*tail));
193 			if ((void *)undo != (void *)tail) {
194 				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
195 				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
196 				tail->tail_size = bytes;
197 			}
198 			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
199 			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
200 			undo->head.hdr_size = bytes;
201 			/* NO CRC OR SEQ NO */
202 			undomap->next_offset += bytes;
203 			hammer_modify_buffer_done(buffer);
204 			hammer_stats_undo += bytes;
205 			continue;
206 		}
207 
208 		/*
209 		 * Calculate the actual payload and recalculate the size
210 		 * of the media structure as necessary.
211 		 */
212 		if (n > len) {
213 			n = len;
214 			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
215 				 ~HAMMER_HEAD_ALIGN_MASK) +
216 				(int)sizeof(struct hammer_fifo_undo) +
217 				(int)sizeof(struct hammer_fifo_tail);
218 		}
219 		if (hammer_debug_general & 0x0080) {
220 			kprintf("undo %016llx %d %d\n",
221 				(long long)next_offset, bytes, n);
222 		}
223 
224 		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
225 		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
226 		undo->head.hdr_size = bytes;
227 		undo->head.hdr_seq = hmp->undo_seqno++;
228 		undo->head.hdr_crc = 0;
229 		undo->undo_offset = zone_off;
230 		undo->undo_data_bytes = n;
231 		bcopy(base, undo + 1, n);
232 
233 		tail = (void *)((char *)undo + bytes - sizeof(*tail));
234 		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
235 		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
236 		tail->tail_size = bytes;
237 
238 		KKASSERT(bytes >= sizeof(undo->head));
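		/*
		 * hdr_crc combines a CRC over the leading portion of the
		 * head with a CRC over everything following the head
		 * (payload plus tail).
		 */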
239 		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
240 			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
241 		undomap->next_offset += bytes;
242 		hammer_stats_undo += bytes;
243 
244 		/*
245 		 * Before we finish off the buffer we have to deal with any
246 		 * junk between the end of the media structure we just laid
247 		 * down and the UNDO alignment boundary.  We do this by laying
248 		 * down a dummy PAD.  Even though we will probably overwrite
249 		 * it almost immediately we have to do this so recovery runs
250 		 * can iterate the UNDO space without having to depend on
251 		 * the indices in the volume header.
252 		 *
253 		 * This dummy PAD will be overwritten on the next undo so
254 		 * we do not adjust undomap->next_offset.
255 		 */
256 		bytes = HAMMER_UNDO_ALIGN -
257 			((int)undomap->next_offset & HAMMER_UNDO_MASK);
258 		if (bytes != HAMMER_UNDO_ALIGN) {
259 			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
260 			undo = (void *)(tail + 1);
261 			tail = (void *)((char *)undo + bytes - sizeof(*tail));
262 			if ((void *)undo != (void *)tail) {
263 				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
264 				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
265 				tail->tail_size = bytes;
266 			}
267 			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
268 			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
269 			undo->head.hdr_size = bytes;
270 			/* NO CRC OR SEQ NO */
271 		}
272 		hammer_modify_buffer_done(buffer);
273 
274 		/*
275 		 * Adjust for loop
276 		 */
277 		len -= n;
278 		base = (char *)base + n;
279 		zone_off += n;
280 	}
281 	hammer_modify_volume_done(root_volume);
282 	hammer_unlock(&hmp->undo_lock);
283 
284 	if (buffer)
285 		hammer_rel_buffer(buffer, 0);
286 	return(error);
287 }
288 
289 /*
290  * Preformat a new UNDO block.  We could read the old one in but we get
291  * better performance if we just preformat a new one.
292  *
293  * The recovery code always works forwards so the caller just makes sure the
294  * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
295  * overwritten.
296  *
297  * The preformatted UNDO headers use the smallest possible sector size
298  * (512) to ensure that any missed media writes are caught.
299  *
300  * NOTE: Also used by the REDO code.
301  */
302 void
303 hammer_format_undo(void *base, u_int32_t seqno)
304 {
305 	hammer_fifo_head_t head;
306 	hammer_fifo_tail_t tail;
307 	int i;
308 	int bytes = HAMMER_UNDO_ALIGN;
309 
310 	bzero(base, HAMMER_BUFSIZE);
311 
312 	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
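	/*
	 * Lay down one DUMMY head/tail pair per HAMMER_UNDO_ALIGN bytes so
	 * every 512-byte sector begins with a valid, sequenced header.
	 */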
313 		head = (void *)((char *)base + i);
314 		tail = (void *)((char *)head + bytes - sizeof(*tail));
315 
316 		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
317 		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
318 		head->hdr_size = bytes;
319 		head->hdr_seq = seqno++;
320 		head->hdr_crc = 0;
321 
322 		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
323 		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
324 		tail->tail_size = bytes;
325 
326 		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
327 			     crc32(head + 1, bytes - sizeof(*head));
328 	}
329 }
330 
331 /*
332  * HAMMER version 4+ conversion support.
333  *
334  * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
335  * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
336  * needed to initialize the sequence space and place headers on the
337  * new 512-byte undo boundary.
338  */
339 int
340 hammer_upgrade_undo_4(hammer_transaction_t trans)
341 {
342 	hammer_mount_t hmp;
343 	hammer_volume_t root_volume;
344 	hammer_blockmap_t undomap;
345 	hammer_buffer_t buffer = NULL;
346 	hammer_fifo_head_t head;
347 	hammer_fifo_tail_t tail;
348 	hammer_off_t next_offset;
349 	u_int32_t seqno;
350 	int error;
351 	int bytes;
352 
353 	hmp = trans->hmp;
354 
355 	root_volume = trans->rootvol;
356 
357 	/* no undo recursion */
358 	hammer_lock_ex(&hmp->undo_lock);
359 	hammer_modify_volume_noundo(NULL, root_volume);
360 
361 	/*
362 	 * Adjust the in-core undomap and the on-disk undomap.
363 	 */
364 	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
365 	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
366 	undomap->next_offset = next_offset;
367 	undomap->first_offset = next_offset;
368 
369 	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
370 	undomap->next_offset = next_offset;
371 	undomap->first_offset = next_offset;
372 
373 	/*
374 	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
375 	 * numbers are assigned.
376 	 */
377 	seqno = 0;
378 	bytes = HAMMER_UNDO_ALIGN;
379 
380 	while (next_offset != undomap->alloc_offset) {
381 		head = hammer_bnew(hmp, next_offset, &error, &buffer);
382 		if (error)
383 			break;
384 		hammer_modify_buffer_noundo(NULL, buffer);
385 		tail = (void *)((char *)head + bytes - sizeof(*tail));
386 
387 		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
388 		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
389 		head->hdr_size = bytes;
390 		head->hdr_seq = seqno;
391 		head->hdr_crc = 0;
392 
393 		tail = (void *)((char *)head + bytes - sizeof(*tail));
394 		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
395 		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
396 		tail->tail_size = bytes;
397 
398 		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
399 			     crc32(head + 1, bytes - sizeof(*head));
400 		hammer_modify_buffer_done(buffer);
401 
402 		hammer_stats_undo += bytes;
403 		next_offset += HAMMER_UNDO_ALIGN;
404 		++seqno;
405 	}
406 
407 	/*
408 	 * The sequence number will be the next sequence number to lay down.
409 	 */
410 	hmp->undo_seqno = seqno;
411 	kprintf("version upgrade seqno start %08x\n", seqno);
412 
413 	hammer_modify_volume_done(root_volume);
414 	hammer_unlock(&hmp->undo_lock);
415 
416 	if (buffer)
417 		hammer_rel_buffer(buffer, 0);
418 	return (error);
419 }
420 
421 /*
422  * UNDO HISTORY API
423  *
424  * It is not necessary to layout an undo record for the same address space
425  * multiple times.  Maintain a cache of recent undo's.
426  * multiple times.  Maintain a cache of recent undos.
427 
428 /*
429  * Enter an undo into the history.  Return EALREADY if the request completely
430  * covers a previous request.
431  */
432 int
433 hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
434 {
435 	hammer_undo_t node;
436 	hammer_undo_t onode __debugvar;
437 
438 	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
439 	if (node) {
440 		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
441 		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
442 		if (bytes <= node->bytes)
443 			return(EALREADY);
444 		node->bytes = bytes;
445 		return(0);
446 	}
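	/*
	 * No existing entry.  Allocate from the fixed array until it is
	 * exhausted, then recycle the least-recently-used entry.
	 */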
447 	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
448 		node = &hmp->undos[hmp->undo_alloc++];
449 	} else {
450 		node = TAILQ_FIRST(&hmp->undo_lru_list);
451 		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
452 		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
453 	}
454 	node->offset = offset;
455 	node->bytes = bytes;
456 	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
457 	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
458 	KKASSERT(onode == NULL);
459 	return(0);
460 }
461 
462 void
463 hammer_clear_undo_history(hammer_mount_t hmp)
464 {
465 	RB_INIT(&hmp->rb_undo_root);
466 	TAILQ_INIT(&hmp->undo_lru_list);
467 	hmp->undo_alloc = 0;
468 }
469 
470 /*
471  * Return how much of the undo FIFO has been used
472  *
473  * The calculation includes undo FIFO space still reserved from a previous
474  * flush (because it will still be run on recovery if a crash occurs and
475  * we can't overwrite it yet).
476  */
477 int64_t
478 hammer_undo_used(hammer_transaction_t trans)
479 {
480 	hammer_blockmap_t cundomap;
481 	hammer_blockmap_t dundomap;
482 	int64_t max_bytes __debugvar;
483 	int64_t bytes;
484 
485 	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
486 	dundomap = &trans->rootvol->ondisk->
487 				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
488 
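	/*
	 * dundomap->first_offset is the start of the still-reserved range
	 * recorded on-disk; cundomap->next_offset is the in-core append
	 * point.  The second case handles a FIFO that has wrapped.
	 */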
489 	if (dundomap->first_offset <= cundomap->next_offset) {
490 		bytes = cundomap->next_offset - dundomap->first_offset;
491 	} else {
492 		bytes = cundomap->alloc_offset - dundomap->first_offset +
493 		        (cundomap->next_offset & HAMMER_OFF_LONG_MASK);
494 	}
495 	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
496 	KKASSERT(bytes <= max_bytes);
497 	return(bytes);
498 }
499 
500 /*
501  * Return how much of the undo FIFO is available for new records.
502  */
503 int64_t
504 hammer_undo_space(hammer_transaction_t trans)
505 {
506 	hammer_blockmap_t rootmap;
507 	int64_t max_bytes;
508 
509 	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
510 	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
511 	return(max_bytes - hammer_undo_used(trans));
512 }
513 
514 int64_t
515 hammer_undo_max(hammer_mount_t hmp)
516 {
517 	hammer_blockmap_t rootmap;
518 	int64_t max_bytes;
519 
520 	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
521 	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
522 
523 	return(max_bytes);
524 }
525 
526 /*
527  * Returns 1 if the undo buffer should be reclaimed on release.  The
528  * only undo buffer we do NOT want to reclaim is the one at the current
529  * append offset.
530  */
531 int
532 hammer_undo_reclaim(hammer_io_t io)
533 {
534 	hammer_blockmap_t undomap;
535 	hammer_off_t next_offset;
536 
537 	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
538 	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
539 	if (((struct hammer_buffer *)io)->zoneX_offset == next_offset)
540 		return(0);
541 	return(1);
542 }
543 
544 static int
545 hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
546 {
547 	if (node1->offset < node2->offset)
548 		return(-1);
549 	if (node1->offset > node2->offset)
550 		return(1);
551 	return(0);
552 }
553 
554