xref: /dragonfly/sys/vfs/hammer/hammer_undo.c (revision e0ecab34)
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_undo.c,v 1.20 2008/07/18 00:19:53 dillon Exp $
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2);

RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

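	/*
	 * The undo zone is linear.  Index the per-big-block translation
	 * array in the root volume header with the big-block number of
	 * the zone-3 offset, then add the offset within that big-block
	 * to obtain the zone-2 buffer offset.
	 */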
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_LARGEBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

/*
 * Generate UNDO record(s) for the block of data at the specified zone-1
 * or zone-2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDOs a 512-byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * implement the hdr_seq field (it used to be reserved01), and both the
 * flush and recovery mechanics differ significantly from earlier versions.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
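/*
 * Each UNDO record laid down below consists of a hammer_fifo_undo head,
 * the copied payload, and a hammer_fifo_tail, padded out to the FIFO
 * alignment.  No record crosses a HAMMER_UNDO_ALIGN (512 byte) boundary.
 */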
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume(NULL, root_volume, NULL, 0);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		panic("hammer: insufficient undo FIFO space!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of a
		 * new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		hammer_modify_buffer(NULL, buffer, NULL, 0);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			kprintf("undo %016llx %d %d\n",
				(long long)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
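		/*
		 * Record CRC: a CRC of the leading portion of the head
		 * XORed with a CRC of everything following the head.
		 */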
		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * Preformat a new UNDO block.  We could read the old one in but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards so the caller just makes sure the
 * seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
void
hammer_format_undo(void *base, u_int32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
	}
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	u_int32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume(NULL, root_volume, NULL, 0);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer(NULL, buffer, NULL, 0);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail = (void *)((char *)head + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	kprintf("version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
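	/*
	 * No existing entry.  Allocate from the fixed undos[] array until
	 * it is exhausted, then recycle the least recently used entry.
	 */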
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}

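/*
 * Forget all cached undo history, returning the fixed undo node array
 * to an empty state.
 */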
void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
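		/*
		 * The FIFO has wrapped.  Count from first_offset to the
		 * end of the zone plus the span from the start of the
		 * zone to next_offset.
		 */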
		bytes = cundomap->alloc_offset - dundomap->first_offset +
		        (cundomap->next_offset & HAMMER_OFF_LONG_MASK);
	}
	max_bytes = cundomap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK;

	return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
	if (((struct hammer_buffer *)io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}

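/*
 * Red-black tree comparison function, keyed on the undo offset.
 */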
static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}