xref: /dragonfly/sys/vfs/hammer/hammer_undo.c (revision 65cc0652)
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER undo - undo buffer/FIFO management.
 */

#include "hammer.h"

static int
hammer_und_rb_compare(hammer_undo_t node1, hammer_undo_t node2)
{
	if (node1->offset < node2->offset)
		return(-1);
	if (node1->offset > node2->offset)
		return(1);
	return(0);
}

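/*
 * Red-black tree support.  RB_GENERATE2 also generates a direct lookup
 * variant keyed on the hammer_off_t offset field, used via RB_LOOKUP()
 * in hammer_enter_undo_history() below.
 */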
RB_GENERATE2(hammer_und_rb_tree, hammer_undo, rb_node,
             hammer_und_rb_compare, hammer_off_t, offset);

/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;

	KKASSERT(hammer_is_zone_undo(zone3_off));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(hammer_is_zone_undo(undomap->alloc_offset));
	KKASSERT(zone3_off < undomap->alloc_offset);

	result_offset = hammer_xlate_to_undo(root_volume->ondisk, zone3_off);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}

/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together, so if we have already laid one down
 * we do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO, a 512-byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
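/*
 * On-media layout of one UNDO FIFO record as laid down by the loop below
 * (the tail sits at the very end; any alignment padding falls between the
 * payload and the tail):
 *
 *	+------------------+-------------+-----------+------------------+
 *	| hammer_fifo_undo | payload (n) | align pad | hammer_fifo_tail |
 *	+------------------+-------------+-----------+------------------+
 *	|<------------------- head.hdr_size (bytes) ------------------->|
 */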
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient UNDO/REDO FIFO space for undo!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrapping it
		 * around as necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset)
			undomap->next_offset = HAMMER_ENCODE_UNDO(0);
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO, so when we hit the start of
		 * a new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
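			/*
			 * The XOR pushes the preformatted DUMMY seqnos
			 * well away from the live undo_seqno stream so
			 * they are not contiguous with real UNDOs, as
			 * hammer_format_undo() requires.
			 */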
			hammer_format_undo(hmp, undo,
					   hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

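		/*
		 * Example: with the 512-byte HAMMER_UNDO_ALIGN, a
		 * next_offset sitting 480 bytes into its alignment block
		 * leaves bytes = 32; once the fixed undo head and tail
		 * are subtracted there may be no payload room at all,
		 * which forces the PAD case below.
		 */
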
		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
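			/* loop again from the now-aligned next_offset */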
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = HAMMER_HEAD_DOALIGN(n) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			hdkprintf("undo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
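		/* payload is copied in directly after the fixed undo header */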
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		hammer_crc_set_fifo_head(hmp->version, &undo->head, bytes);
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately, we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * Preformat a new UNDO block.  We could read the old one in, but we get
 * better performance if we just pre-format a new one.
 *
 * The recovery code always works forwards, so the caller just makes sure
 * the seqno is not contiguous with prior UNDOs or ancient UNDOs now being
 * overwritten.
 *
 * The preformatted UNDO headers use the smallest possible sector size
 * (512) to ensure that any missed media writes are caught.
 *
 * NOTE: Also used by the REDO code.
 */
void
hammer_format_undo(hammer_mount_t hmp, void *base, uint32_t seqno)
{
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	int i;
	int bytes = HAMMER_UNDO_ALIGN;

	bzero(base, HAMMER_BUFSIZE);

	for (i = 0; i < HAMMER_BUFSIZE; i += bytes) {
		head = (void *)((char *)base + i);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;
		head->hdr_crc = 0;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(hmp->version, head, bytes);
	}
}

/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	uint32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume_noundo(NULL, root_volume);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.  Setting
	 * first_offset == next_offset marks the FIFO as empty, so recovery
	 * will find only the DUMMY records laid down below.
	 */
	next_offset = HAMMER_ENCODE_UNDO(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail = (void *)((char *)head + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		hammer_crc_set_fifo_head(hmp->version, head, bytes);
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	hmkprintf(hmp, "version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}

/*
 * UNDO HISTORY API
 *
 * It is not necessary to lay out an undo record for the same address space
 * multiple times.  Maintain a cache of recent undos.
 */

/*
 * Enter an undo into the history.  Return EALREADY if the request completely
 * covers a previous request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
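	/*
	 * Allocate a tracking structure from the fixed undos[] array until
	 * it is exhausted, then recycle the least recently used entry.
	 */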
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}

void
hammer_clear_undo_history(hammer_mount_t hmp)
{
	RB_INIT(&hmp->rb_undo_root);
	TAILQ_INIT(&hmp->undo_lru_list);
	hmp->undo_alloc = 0;
}

/*
 * Return how much of the undo FIFO has been used.
 *
 * The calculation includes undo FIFO space still reserved from a previous
 * flush (because it will still be run on recovery if a crash occurs and
 * we can't overwrite it yet).
 */
int64_t
hammer_undo_used(hammer_transaction_t trans)
{
	hammer_blockmap_t cundomap;
	hammer_blockmap_t dundomap;
	int64_t max_bytes __debugvar;
	int64_t bytes;

	cundomap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	dundomap = &trans->rootvol->ondisk->
				vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

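	/*
	 * The FIFO is circular: if the on-disk first_offset trails the
	 * in-core next_offset the used region is contiguous; otherwise
	 * it wraps through the end of the zone and the two pieces are
	 * summed.
	 */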
	if (dundomap->first_offset <= cundomap->next_offset) {
		bytes = cundomap->next_offset - dundomap->first_offset;
	} else {
		bytes = cundomap->alloc_offset - dundomap->first_offset +
			HAMMER_OFF_LONG_ENCODE(cundomap->next_offset);
	}
	max_bytes = HAMMER_OFF_SHORT_ENCODE(cundomap->alloc_offset);
	KKASSERT(bytes <= max_bytes);
	return(bytes);
}

/*
 * Return how much of the undo FIFO is available for new records.
 */
int64_t
hammer_undo_space(hammer_transaction_t trans)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &trans->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = HAMMER_OFF_SHORT_ENCODE(rootmap->alloc_offset);
	return(max_bytes - hammer_undo_used(trans));
}

int64_t
hammer_undo_max(hammer_mount_t hmp)
{
	hammer_blockmap_t rootmap;
	int64_t max_bytes;

	rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	max_bytes = HAMMER_OFF_SHORT_ENCODE(rootmap->alloc_offset);

	return(max_bytes);
}

/*
 * Returns 1 if the undo buffer should be reclaimed on release.  The
 * only undo buffer we do NOT want to reclaim is the one at the current
 * append offset.
 */
int
hammer_undo_reclaim(hammer_io_t io)
{
	hammer_blockmap_t undomap;
	hammer_off_t next_offset;

	undomap = &io->hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
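	/* round the current append offset down to its containing buffer */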
	next_offset = undomap->next_offset & ~HAMMER_BUFMASK64;
	if (HAMMER_ITOB(io)->zoneX_offset == next_offset)
		return(0);
	return(1);
}
543