xref: /netbsd/external/cddl/osnet/dist/uts/common/fs/zfs/zil.c (revision 93f3d2b8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
24  * Copyright (c) 2014 Integros [integros.com]
25  */
26 
27 /* Portions Copyright 2010 Robert Milkowski */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/spa.h>
31 #include <sys/dmu.h>
32 #include <sys/zap.h>
33 #include <sys/arc.h>
34 #include <sys/stat.h>
35 #include <sys/resource.h>
36 #include <sys/zil.h>
37 #include <sys/zil_impl.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/vdev_impl.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/dsl_pool.h>
42 
43 /*
44  * The zfs intent log (ZIL) saves transaction records of system calls
45  * that change the file system in memory with enough information
46  * to be able to replay them. These are stored in memory until
47  * either the DMU transaction group (txg) commits them to the stable pool
48  * and they can be discarded, or they are flushed to the stable log
49  * (also in the pool) due to a fsync, O_DSYNC or other synchronous
50  * requirement. In the event of a panic or power failure, those log
51  * records (transactions) are replayed.
52  *
53  * There is one ZIL per file system. Its on-disk (pool) format consists
54  * of 3 parts:
55  *
56  * 	- ZIL header
57  * 	- ZIL blocks
58  * 	- ZIL records
59  *
60  * A log record holds a system call transaction. Log blocks can
61  * hold many log records and the blocks are chained together.
62  * Each ZIL block contains a block pointer (blkptr_t) to the next
63  * ZIL block in the chain. The ZIL header points to the first
64  * block in the chain. Note there is not a fixed place in the pool
65  * to hold blocks. They are dynamically allocated and freed as
66  * needed from the blocks available. The ZIL structure is sketched below:
67  */
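
/*
 * Illustrative sketch (added; not part of the original source) of the
 * chain described above: the header points at the first block, and each
 * block carries the records plus a blkptr_t to its successor.
 *
 *	zil_header_t
 *	+--------+      +--------------+      +--------------+
 *	| zh_log |----->| ZIL block    |  +-->| ZIL block    |
 *	+--------+      |  records ... |  |   |  records ... |
 *	                |  next blkptr |--+   |  next blkptr |--> ...
 *	                +--------------+      +--------------+
 */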
68 
69 /*
70  * Disable intent logging replay.  This global ZIL switch affects all pools.
71  */
72 int zil_replay_disable = 0;
73 SYSCTL_DECL(_vfs_zfs);
74 SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN,
75     &zil_replay_disable, 0, "Disable intent logging replay");
76 
77 /*
78  * Tunable parameter for debugging or performance analysis.  Setting
79  * zfs_nocacheflush will cause corruption on power loss if a volatile
80  * out-of-order write cache is enabled.
81  */
82 boolean_t zfs_nocacheflush = B_FALSE;
83 SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
84     &zfs_nocacheflush, 0, "Disable cache flush");
85 boolean_t zfs_trim_enabled = B_TRUE;
86 SYSCTL_DECL(_vfs_zfs_trim);
87 SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
88     "Enable ZFS TRIM");
89 
90 /*
91  * Limit SLOG write size per commit executed with synchronous priority.
92  * Any writes above that are executed with lower (asynchronous) priority
93  * to limit potential SLOG device abuse by a single active ZIL writer.
94  */
95 uint64_t zil_slog_limit = 768 * 1024;
96 SYSCTL_QUAD(_vfs_zfs, OID_AUTO, zil_slog_limit, CTLFLAG_RWTUN,
97     &zil_slog_limit, 0, "Maximal SLOG commit size with sync priority");
98 
99 static kmem_cache_t *zil_lwb_cache;
100 
101 #define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
102     sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
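
/*
 * Descriptive note (added): LWB_EMPTY is true when the lwb holds no log
 * records yet, i.e. its remaining space (lwb_sz - lwb_nused) still equals
 * the block's full record capacity (block size less the zil_chain_t).
 * The arithmetic holds for both layouts set up in zil_alloc_lwb(): ZILOG2
 * places the zil_chain_t at the start of the buffer, while the older
 * layout reserves it at the end.
 */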
103 
104 
105 /*
106  * ziltest is by and large an ugly hack, but very useful in
107  * checking replay without tedious work.
108  * When running ziltest we want to keep all itx's and so maintain
109  * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
110  * We subtract TXG_CONCURRENT_STATES to allow for common code.
111  */
112 #define	ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
113 
114 static int
115 zil_bp_compare(const void *x1, const void *x2)
116 {
117 	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
118 	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
119 
120 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
121 		return (-1);
122 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
123 		return (1);
124 
125 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
126 		return (-1);
127 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
128 		return (1);
129 
130 	return (0);
131 }
132 
133 static void
134 zil_bp_tree_init(zilog_t *zilog)
135 {
136 	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
137 	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
138 }
139 
140 static void
141 zil_bp_tree_fini(zilog_t *zilog)
142 {
143 	avl_tree_t *t = &zilog->zl_bp_tree;
144 	zil_bp_node_t *zn;
145 	void *cookie = NULL;
146 
147 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
148 		kmem_free(zn, sizeof (zil_bp_node_t));
149 
150 	avl_destroy(t);
151 }
152 
153 int
154 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
155 {
156 	avl_tree_t *t = &zilog->zl_bp_tree;
157 	const dva_t *dva;
158 	zil_bp_node_t *zn;
159 	avl_index_t where;
160 
161 	if (BP_IS_EMBEDDED(bp))
162 		return (0);
163 
164 	dva = BP_IDENTITY(bp);
165 
166 	if (avl_find(t, dva, &where) != NULL)
167 		return (SET_ERROR(EEXIST));
168 
169 	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
170 	zn->zn_dva = *dva;
171 	avl_insert(t, zn, where);
172 
173 	return (0);
174 }
175 
176 static zil_header_t *
177 zil_header_in_syncing_context(zilog_t *zilog)
178 {
179 	return ((zil_header_t *)zilog->zl_header);
180 }
181 
182 static void
183 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
184 {
185 	zio_cksum_t *zc = &bp->blk_cksum;
186 
187 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
188 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
189 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
190 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
191 }
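
/*
 * Note (added): these checksum words double as the log chain's identity.
 * zil_lwb_write_start() stamps each next-block pointer with the current
 * block's checksum plus one in ZIL_ZC_SEQ, and zil_read_log_block()
 * recomputes that expectation to detect where a chain ends.
 */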
192 
193 /*
194  * Read a log block and make sure it's valid.
195  */
196 static int
197 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
198     char **end)
199 {
200 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
201 	arc_flags_t aflags = ARC_FLAG_WAIT;
202 	arc_buf_t *abuf = NULL;
203 	zbookmark_phys_t zb;
204 	int error;
205 
206 	if (zilog->zl_header->zh_claim_txg == 0)
207 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
208 
209 	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
210 		zio_flags |= ZIO_FLAG_SPECULATIVE;
211 
212 	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
213 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
214 
215 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
216 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
217 
218 	if (error == 0) {
219 		zio_cksum_t cksum = bp->blk_cksum;
220 
221 		/*
222 		 * Validate the checksummed log block.
223 		 *
224 		 * Sequence numbers should be... sequential.  The checksum
225 		 * verifier for the next block should be bp's checksum plus 1.
226 		 *
227 		 * Also check the log chain linkage and size used.
228 		 */
229 		cksum.zc_word[ZIL_ZC_SEQ]++;
230 
231 		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
232 			zil_chain_t *zilc = abuf->b_data;
233 			char *lr = (char *)(zilc + 1);
234 			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
235 
236 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
237 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
238 				error = SET_ERROR(ECKSUM);
239 			} else {
240 				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
241 				bcopy(lr, dst, len);
242 				*end = (char *)dst + len;
243 				*nbp = zilc->zc_next_blk;
244 			}
245 		} else {
246 			char *lr = abuf->b_data;
247 			uint64_t size = BP_GET_LSIZE(bp);
248 			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
249 
250 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
251 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
252 			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
253 				error = SET_ERROR(ECKSUM);
254 			} else {
255 				ASSERT3U(zilc->zc_nused, <=,
256 				    SPA_OLD_MAXBLOCKSIZE);
257 				bcopy(lr, dst, zilc->zc_nused);
258 				*end = (char *)dst + zilc->zc_nused;
259 				*nbp = zilc->zc_next_blk;
260 			}
261 		}
262 
263 		arc_buf_destroy(abuf, &abuf);
264 	}
265 
266 	return (error);
267 }
268 
269 /*
270  * Read a TX_WRITE log data block.
271  */
272 static int
273 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
274 {
275 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
276 	const blkptr_t *bp = &lr->lr_blkptr;
277 	arc_flags_t aflags = ARC_FLAG_WAIT;
278 	arc_buf_t *abuf = NULL;
279 	zbookmark_phys_t zb;
280 	int error;
281 
282 	if (BP_IS_HOLE(bp)) {
283 		if (wbuf != NULL)
284 			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
285 		return (0);
286 	}
287 
288 	if (zilog->zl_header->zh_claim_txg == 0)
289 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
290 
291 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
292 	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
293 
294 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
295 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
296 
297 	if (error == 0) {
298 		if (wbuf != NULL)
299 			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
300 		arc_buf_destroy(abuf, &abuf);
301 	}
302 
303 	return (error);
304 }
305 
306 /*
307  * Parse the intent log, and call parse_func for each valid record within.
308  */
309 int
310 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
311     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
312 {
313 	const zil_header_t *zh = zilog->zl_header;
314 	boolean_t claimed = !!zh->zh_claim_txg;
315 	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
316 	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
317 	uint64_t max_blk_seq = 0;
318 	uint64_t max_lr_seq = 0;
319 	uint64_t blk_count = 0;
320 	uint64_t lr_count = 0;
321 	blkptr_t blk, next_blk;
322 	char *lrbuf, *lrp;
323 	int error = 0;
324 
325 	/*
326 	 * Old logs didn't record the maximum zh_claim_lr_seq.
327 	 */
328 	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
329 		claim_lr_seq = UINT64_MAX;
330 
331 	/*
332 	 * Starting at the block pointed to by zh_log we read the log chain.
333 	 * For each block in the chain we strongly check that block to
334 	 * ensure its validity.  We stop when an invalid block is found.
335 	 * For each block pointer in the chain we call parse_blk_func().
336 	 * For each record in each valid block we call parse_lr_func().
337 	 * If the log has been claimed, stop if we encounter a sequence
338 	 * number greater than the highest claimed sequence number.
339 	 */
340 	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
341 	zil_bp_tree_init(zilog);
342 
343 	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
344 		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
345 		int reclen;
346 		char *end;
347 
348 		if (blk_seq > claim_blk_seq)
349 			break;
350 		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
351 			break;
352 		ASSERT3U(max_blk_seq, <, blk_seq);
353 		max_blk_seq = blk_seq;
354 		blk_count++;
355 
356 		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
357 			break;
358 
359 		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
360 		if (error != 0)
361 			break;
362 
363 		for (lrp = lrbuf; lrp < end; lrp += reclen) {
364 			lr_t *lr = (lr_t *)lrp;
365 			reclen = lr->lrc_reclen;
366 			ASSERT3U(reclen, >=, sizeof (lr_t));
367 			if (lr->lrc_seq > claim_lr_seq)
368 				goto done;
369 			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
370 				goto done;
371 			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
372 			max_lr_seq = lr->lrc_seq;
373 			lr_count++;
374 		}
375 	}
376 done:
377 	zilog->zl_parse_error = error;
378 	zilog->zl_parse_blk_seq = max_blk_seq;
379 	zilog->zl_parse_lr_seq = max_lr_seq;
380 	zilog->zl_parse_blk_count = blk_count;
381 	zilog->zl_parse_lr_count = lr_count;
382 
383 	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
384 	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
385 
386 	zil_bp_tree_fini(zilog);
387 	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
388 
389 	return (error);
390 }
391 
392 static int
393 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
394 {
395 	/*
396 	 * Claim log block if not already committed and not already claimed.
397 	 * If tx == NULL, just verify that the block is claimable.
398 	 */
399 	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
400 	    zil_bp_tree_add(zilog, bp) != 0)
401 		return (0);
402 
403 	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
404 	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
405 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
406 }
407 
408 static int
409 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
410 {
411 	lr_write_t *lr = (lr_write_t *)lrc;
412 	int error;
413 
414 	if (lrc->lrc_txtype != TX_WRITE)
415 		return (0);
416 
417 	/*
418 	 * If the block is not readable, don't claim it.  This can happen
419 	 * in normal operation when a log block is written to disk before
420 	 * some of the dmu_sync() blocks it points to.  In this case, the
421 	 * transaction cannot have been committed to anyone (we would have
422 	 * waited for all writes to be stable first), so it is semantically
423 	 * correct to declare this the end of the log.
424 	 */
425 	if (lr->lr_blkptr.blk_birth >= first_txg &&
426 	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
427 		return (error);
428 	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
429 }
430 
431 /* ARGSUSED */
432 static int
433 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
434 {
435 	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
436 
437 	return (0);
438 }
439 
440 static int
441 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
442 {
443 	lr_write_t *lr = (lr_write_t *)lrc;
444 	blkptr_t *bp = &lr->lr_blkptr;
445 
446 	/*
447 	 * If we previously claimed it, we need to free it.
448 	 */
449 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
450 	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
451 	    !BP_IS_HOLE(bp))
452 		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
453 
454 	return (0);
455 }
456 
457 static lwb_t *
458 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
459 {
460 	lwb_t *lwb;
461 
462 	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
463 	lwb->lwb_zilog = zilog;
464 	lwb->lwb_blk = *bp;
465 	lwb->lwb_slog = slog;
466 	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
467 	lwb->lwb_max_txg = txg;
468 	lwb->lwb_zio = NULL;
469 	lwb->lwb_tx = NULL;
470 	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
471 		lwb->lwb_nused = sizeof (zil_chain_t);
472 		lwb->lwb_sz = BP_GET_LSIZE(bp);
473 	} else {
474 		lwb->lwb_nused = 0;
475 		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
476 	}
477 
478 	mutex_enter(&zilog->zl_lock);
479 	list_insert_tail(&zilog->zl_lwb_list, lwb);
480 	mutex_exit(&zilog->zl_lock);
481 
482 	return (lwb);
483 }
484 
485 /*
486  * Called when we create in-memory log transactions so that we know
487  * to cleanup the itxs at the end of spa_sync().
488  */
489 void
490 zilog_dirty(zilog_t *zilog, uint64_t txg)
491 {
492 	dsl_pool_t *dp = zilog->zl_dmu_pool;
493 	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
494 
495 	if (ds->ds_is_snapshot)
496 		panic("dirtying snapshot!");
497 
498 	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
499 		/* up the hold count until we can be written out */
500 		dmu_buf_add_ref(ds->ds_dbuf, zilog);
501 	}
502 }
503 
504 /*
505  * Determine if the zil is dirty in the specified txg. Callers wanting to
506  * ensure that the dirty state does not change must hold the itxg_lock for
507  * the specified txg. Holding the lock will ensure that the zil cannot be
508  * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
509  * state.
510  */
511 boolean_t
512 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
513 {
514 	dsl_pool_t *dp = zilog->zl_dmu_pool;
515 
516 	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
517 		return (B_TRUE);
518 	return (B_FALSE);
519 }
520 
521 /*
522  * Determine if the zil is dirty. The zil is considered dirty if it has
523  * any pending itx records that have not been cleaned by zil_clean().
524  */
525 boolean_t
526 zilog_is_dirty(zilog_t *zilog)
527 {
528 	dsl_pool_t *dp = zilog->zl_dmu_pool;
529 
530 	for (int t = 0; t < TXG_SIZE; t++) {
531 		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
532 			return (B_TRUE);
533 	}
534 	return (B_FALSE);
535 }
536 
537 /*
538  * Create an on-disk intent log.
539  */
540 static lwb_t *
541 zil_create(zilog_t *zilog)
542 {
543 	const zil_header_t *zh = zilog->zl_header;
544 	lwb_t *lwb = NULL;
545 	uint64_t txg = 0;
546 	dmu_tx_t *tx = NULL;
547 	blkptr_t blk;
548 	int error = 0;
549 	boolean_t slog = FALSE;
550 
551 	/*
552 	 * Wait for any previous destroy to complete.
553 	 */
554 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
555 
556 	ASSERT(zh->zh_claim_txg == 0);
557 	ASSERT(zh->zh_replay_seq == 0);
558 
559 	blk = zh->zh_log;
560 
561 	/*
562 	 * Allocate an initial log block if:
563 	 *    - there isn't one already
564 	 *    - the existing block is the wrong endianness
565 	 */
566 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
567 		tx = dmu_tx_create(zilog->zl_os);
568 		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
569 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
570 		txg = dmu_tx_get_txg(tx);
571 
572 		if (!BP_IS_HOLE(&blk)) {
573 			zio_free_zil(zilog->zl_spa, txg, &blk);
574 			BP_ZERO(&blk);
575 		}
576 
577 		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
578 		    ZIL_MIN_BLKSZ, &slog);
579 
580 		if (error == 0)
581 			zil_init_log_chain(zilog, &blk);
582 	}
583 
584 	/*
585 	 * Allocate a log write buffer (lwb) for the first log block.
586 	 */
587 	if (error == 0)
588 		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
589 
590 	/*
591 	 * If we just allocated the first log block, commit our transaction
592 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
593 	 * (zh is part of the MOS, so we cannot modify it in open context.)
594 	 */
595 	if (tx != NULL) {
596 		dmu_tx_commit(tx);
597 		txg_wait_synced(zilog->zl_dmu_pool, txg);
598 	}
599 
600 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
601 
602 	return (lwb);
603 }
604 
605 /*
606  * In one tx, free all log blocks and clear the log header.
607  * If keep_first is set, then we're replaying a log with no content.
608  * We want to keep the first block, however, so that the first
609  * synchronous transaction doesn't require a txg_wait_synced()
610  * in zil_create().  We don't need to txg_wait_synced() here either
611  * when keep_first is set, because both zil_create() and zil_destroy()
612  * will wait for any in-progress destroys to complete.
613  */
614 void
615 zil_destroy(zilog_t *zilog, boolean_t keep_first)
616 {
617 	const zil_header_t *zh = zilog->zl_header;
618 	lwb_t *lwb;
619 	dmu_tx_t *tx;
620 	uint64_t txg;
621 
622 	/*
623 	 * Wait for any previous destroy to complete.
624 	 */
625 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
626 
627 	zilog->zl_old_header = *zh;		/* debugging aid */
628 
629 	if (BP_IS_HOLE(&zh->zh_log))
630 		return;
631 
632 	tx = dmu_tx_create(zilog->zl_os);
633 	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
634 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
635 	txg = dmu_tx_get_txg(tx);
636 
637 	mutex_enter(&zilog->zl_lock);
638 
639 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
640 	zilog->zl_destroy_txg = txg;
641 	zilog->zl_keep_first = keep_first;
642 
643 	if (!list_is_empty(&zilog->zl_lwb_list)) {
644 		ASSERT(zh->zh_claim_txg == 0);
645 		VERIFY(!keep_first);
646 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
647 			list_remove(&zilog->zl_lwb_list, lwb);
648 			if (lwb->lwb_buf != NULL)
649 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
650 			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
651 			kmem_cache_free(zil_lwb_cache, lwb);
652 		}
653 	} else if (!keep_first) {
654 		zil_destroy_sync(zilog, tx);
655 	}
656 	mutex_exit(&zilog->zl_lock);
657 
658 	dmu_tx_commit(tx);
659 }
660 
661 void
662 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
663 {
664 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
665 	(void) zil_parse(zilog, zil_free_log_block,
666 	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
667 }
668 
669 int
670 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
671 {
672 	dmu_tx_t *tx = txarg;
673 	uint64_t first_txg = dmu_tx_get_txg(tx);
674 	zilog_t *zilog;
675 	zil_header_t *zh;
676 	objset_t *os;
677 	int error;
678 
679 	error = dmu_objset_own_obj(dp, ds->ds_object,
680 	    DMU_OST_ANY, B_FALSE, FTAG, &os);
681 	if (error != 0) {
682 		/*
683 		 * EBUSY indicates that the objset is inconsistent, in which
684 		 * case it cannot have a ZIL.
685 		 */
686 		if (error != EBUSY) {
687 			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
688 			    (unsigned long long)ds->ds_object, error);
689 		}
690 		return (0);
691 	}
692 
693 	zilog = dmu_objset_zil(os);
694 	zh = zil_header_in_syncing_context(zilog);
695 
696 	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
697 		if (!BP_IS_HOLE(&zh->zh_log))
698 			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
699 		BP_ZERO(&zh->zh_log);
700 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
701 		dmu_objset_disown(os, FTAG);
702 		return (0);
703 	}
704 
705 	/*
706 	 * Claim all log blocks if we haven't already done so, and remember
707 	 * the highest claimed sequence number.  This ensures that if we can
708 	 * read only part of the log now (e.g. due to a missing device),
709 	 * but we can read the entire log later, we will not try to replay
710 	 * or destroy beyond the last block we successfully claimed.
711 	 */
712 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
713 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
714 		(void) zil_parse(zilog, zil_claim_log_block,
715 		    zil_claim_log_record, tx, first_txg);
716 		zh->zh_claim_txg = first_txg;
717 		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
718 		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
719 		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
720 			zh->zh_flags |= ZIL_REPLAY_NEEDED;
721 		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
722 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
723 	}
724 
725 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
726 	dmu_objset_disown(os, FTAG);
727 	return (0);
728 }
729 
730 /*
731  * Check the log by walking the log chain.
732  * Checksum errors are ok as they indicate the end of the chain.
733  * Any other error (no device or read failure) returns an error.
734  */
735 /* ARGSUSED */
736 int
737 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
738 {
739 	zilog_t *zilog;
740 	objset_t *os;
741 	blkptr_t *bp;
742 	int error;
743 
744 	ASSERT(tx == NULL);
745 
746 	error = dmu_objset_from_ds(ds, &os);
747 	if (error != 0) {
748 		cmn_err(CE_WARN, "can't open objset %llu, error %d",
749 		    (unsigned long long)ds->ds_object, error);
750 		return (0);
751 	}
752 
753 	zilog = dmu_objset_zil(os);
754 	bp = (blkptr_t *)&zilog->zl_header->zh_log;
755 
756 	/*
757 	 * Check the first block and determine if it's on a log device
758 	 * which may have been removed or faulted prior to loading this
759 	 * pool.  If so, there's no point in checking the rest of the log
760 	 * as its content should have already been synced to the pool.
761 	 */
762 	if (!BP_IS_HOLE(bp)) {
763 		vdev_t *vd;
764 		boolean_t valid = B_TRUE;
765 
766 		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
767 		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
768 		if (vd->vdev_islog && vdev_is_dead(vd))
769 			valid = vdev_log_state_valid(vd);
770 		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
771 
772 		if (!valid)
773 			return (0);
774 	}
775 
776 	/*
777 	 * Because tx == NULL, zil_claim_log_block() will not actually claim
778 	 * any blocks, but just determine whether it is possible to do so.
779 	 * In addition to checking the log chain, zil_claim_log_block()
780 	 * will invoke zio_claim() with a done func of spa_claim_notify(),
781 	 * which will update spa_max_claim_txg.  See spa_load() for details.
782 	 */
783 	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
784 	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
785 
786 	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
787 }
788 
789 static int
790 zil_vdev_compare(const void *x1, const void *x2)
791 {
792 	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
793 	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
794 
795 	if (v1 < v2)
796 		return (-1);
797 	if (v1 > v2)
798 		return (1);
799 
800 	return (0);
801 }
802 
803 void
804 zil_add_block(zilog_t *zilog, const blkptr_t *bp)
805 {
806 	avl_tree_t *t = &zilog->zl_vdev_tree;
807 	avl_index_t where;
808 	zil_vdev_node_t *zv, zvsearch;
809 	int ndvas = BP_GET_NDVAS(bp);
810 	int i;
811 
812 	if (zfs_nocacheflush)
813 		return;
814 
815 	ASSERT(zilog->zl_writer);
816 
817 	/*
818 	 * Even though we're zl_writer, we still need a lock because the
819 	 * zl_get_data() callbacks may have dmu_sync() done callbacks
820 	 * that will run concurrently.
821 	 */
822 	mutex_enter(&zilog->zl_vdev_lock);
823 	for (i = 0; i < ndvas; i++) {
824 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
825 		if (avl_find(t, &zvsearch, &where) == NULL) {
826 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
827 			zv->zv_vdev = zvsearch.zv_vdev;
828 			avl_insert(t, zv, where);
829 		}
830 	}
831 	mutex_exit(&zilog->zl_vdev_lock);
832 }
833 
834 static void
835 zil_flush_vdevs(zilog_t *zilog)
836 {
837 	spa_t *spa = zilog->zl_spa;
838 	avl_tree_t *t = &zilog->zl_vdev_tree;
839 	void *cookie = NULL;
840 	zil_vdev_node_t *zv;
841 	zio_t *zio = NULL;
842 
843 	ASSERT(zilog->zl_writer);
844 
845 	/*
846 	 * We don't need zl_vdev_lock here because we're the zl_writer,
847 	 * and all zl_get_data() callbacks are done.
848 	 */
849 	if (avl_numnodes(t) == 0)
850 		return;
851 
852 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
853 
854 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
855 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
856 		if (vd != NULL && !vd->vdev_nowritecache) {
857 			if (zio == NULL)
858 				zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
859 			zio_flush(zio, vd);
860 		}
861 		kmem_free(zv, sizeof (*zv));
862 	}
863 
864 	/*
865 	 * Wait for all the flushes to complete.  Not all devices actually
866 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
867 	 */
868 	if (zio)
869 		(void) zio_wait(zio);
870 
871 	spa_config_exit(spa, SCL_STATE, FTAG);
872 }
873 
874 /*
875  * Function called when a log block write completes
876  */
877 static void
878 zil_lwb_write_done(zio_t *zio)
879 {
880 	lwb_t *lwb = zio->io_private;
881 	zilog_t *zilog = lwb->lwb_zilog;
882 	dmu_tx_t *tx = lwb->lwb_tx;
883 
884 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
885 	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
886 	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
887 	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
888 	ASSERT(!BP_IS_GANG(zio->io_bp));
889 	ASSERT(!BP_IS_HOLE(zio->io_bp));
890 	ASSERT(BP_GET_FILL(zio->io_bp) == 0);
891 
892 	/*
893 	 * Ensure the lwb buffer pointer is cleared before releasing
894 	 * the txg. If we have had an allocation failure and
895 	 * the txg is waiting to sync then we want zil_sync()
896 	 * to remove the lwb so that it's not picked up as the next new
897 	 * one in zil_commit_writer(). zil_sync() will only remove
898 	 * the lwb if lwb_buf is null.
899 	 */
900 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
901 	mutex_enter(&zilog->zl_lock);
902 	lwb->lwb_buf = NULL;
903 	lwb->lwb_tx = NULL;
904 	mutex_exit(&zilog->zl_lock);
905 
906 	/*
907 	 * Now that we've written this log block, we have a stable pointer
908 	 * to the next block in the chain, so it's OK to let the txg in
909 	 * which we allocated the next block sync.
910 	 */
911 	dmu_tx_commit(tx);
912 }
913 
914 /*
915  * Initialize the io for a log block.
916  */
917 static void
918 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
919 {
920 	zbookmark_phys_t zb;
921 	zio_priority_t prio;
922 
923 	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
924 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
925 	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
926 
927 	if (zilog->zl_root_zio == NULL) {
928 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
929 		    ZIO_FLAG_CANFAIL);
930 	}
931 	if (lwb->lwb_zio == NULL) {
932 		if (zilog->zl_cur_used <= zil_slog_limit || !lwb->lwb_slog)
933 			prio = ZIO_PRIORITY_SYNC_WRITE;
934 		else
935 			prio = ZIO_PRIORITY_ASYNC_WRITE;
936 		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
937 		    0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
938 		    zil_lwb_write_done, lwb, prio,
939 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
940 	}
941 }
942 
943 /*
944  * Define a limited set of intent log block sizes.
945  *
946  * These must be a multiple of 4KB. Note only the amount used (again
947  * aligned to 4KB) actually gets written. However, we can't always just
948  * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
949  */
950 uint64_t zil_block_buckets[] = {
951     4096,		/* non TX_WRITE */
952     8192+4096,		/* data base */
953     32*1024 + 4096, 	/* NFS writes */
954     UINT64_MAX
955 };
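
/*
 * Worked example (added, illustrative): with zl_cur_used +
 * sizeof (zil_chain_t) == 20KB, the selection loop in
 * zil_lwb_write_start() skips the 4KB and 12KB buckets and picks
 * 32*1024 + 4096 = 36KB; a request larger than every bucket hits the
 * UINT64_MAX sentinel and falls back to SPA_OLD_MAXBLOCKSIZE.
 */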
956 
957 /*
958  * Start a log block write and advance to the next log block.
959  * Calls are serialized.
960  */
961 static lwb_t *
962 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb, boolean_t last)
963 {
964 	lwb_t *nlwb = NULL;
965 	zil_chain_t *zilc;
966 	spa_t *spa = zilog->zl_spa;
967 	blkptr_t *bp;
968 	dmu_tx_t *tx;
969 	uint64_t txg;
970 	uint64_t zil_blksz, wsz;
971 	int i, error;
972 	boolean_t slog;
973 
974 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
975 		zilc = (zil_chain_t *)lwb->lwb_buf;
976 		bp = &zilc->zc_next_blk;
977 	} else {
978 		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
979 		bp = &zilc->zc_next_blk;
980 	}
981 
982 	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
983 
984 	/*
985 	 * Allocate the next block and save its address in this block
986 	 * before writing it in order to establish the log chain.
987 	 * Note that if the allocation of nlwb synced before we wrote
988 	 * the block that points at it (lwb), we'd leak it if we crashed.
989 	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
990 	 * We dirty the dataset to ensure that zil_sync() will be called
991 	 * to clean up in the event of allocation failure or I/O failure.
992 	 */
993 	tx = dmu_tx_create(zilog->zl_os);
994 	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
995 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
996 	txg = dmu_tx_get_txg(tx);
997 
998 	lwb->lwb_tx = tx;
999 
1000 	/*
1001 	 * Log blocks are pre-allocated. Here we select the size of the next
1002 	 * block, based on size used in the last block.
1003 	 * - first find the smallest bucket that will fit the block from a
1004 	 *   limited set of block sizes. This is because it's faster to write
1005 	 *   blocks allocated from the same metaslab as they are adjacent or
1006 	 *   close.
1007 	 * - next find the maximum from the new suggested size and an array of
1008 	 *   previous sizes. This lessens a picket fence effect of wrongly
1009 	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1010 	 *   requests.
1011 	 *
1012 	 * Note we only write what is used, but we can't just allocate
1013 	 * the maximum block size because we can exhaust the available
1014 	 * pool log space.
1015 	 */
1016 	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1017 	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1018 		continue;
1019 	zil_blksz = zil_block_buckets[i];
1020 	if (zil_blksz == UINT64_MAX)
1021 		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
1022 	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1023 	for (i = 0; i < ZIL_PREV_BLKS; i++)
1024 		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1025 	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1026 
1027 	BP_ZERO(bp);
1028 	/* pass the old blkptr in order to spread log blocks across devs */
1029 	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1030 	if (error == 0) {
1031 		ASSERT3U(bp->blk_birth, ==, txg);
1032 		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1033 		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
1034 
1035 		/*
1036 		 * Allocate a new log write buffer (lwb).
1037 		 */
1038 		nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
1039 
1040 		/* Record the block for later vdev flushing */
1041 		zil_add_block(zilog, &lwb->lwb_blk);
1042 	}
1043 
1044 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1045 		/* For Slim ZIL only write what is used. */
1046 		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1047 		ASSERT3U(wsz, <=, lwb->lwb_sz);
1048 		zio_shrink(lwb->lwb_zio, wsz);
1049 
1050 	} else {
1051 		wsz = lwb->lwb_sz;
1052 	}
1053 
1054 	zilc->zc_pad = 0;
1055 	zilc->zc_nused = lwb->lwb_nused;
1056 	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1057 
1058 	/*
1059 	 * clear unused data for security
1060 	 */
1061 	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
1062 
1063 	if (last)
1064 		lwb->lwb_zio->io_pipeline &= ~ZIO_STAGE_ISSUE_ASYNC;
1065 	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */
1066 
1067 	/*
1068 	 * If there was an allocation failure then nlwb will be null which
1069 	 * forces a txg_wait_synced().
1070 	 */
1071 	return (nlwb);
1072 }
1073 
1074 static lwb_t *
1075 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1076 {
1077 	lr_t *lrcb, *lrc = &itx->itx_lr; /* common log record */
1078 	lr_write_t *lrwb, *lrw = (lr_write_t *)lrc;
1079 	char *lr_buf;
1080 	uint64_t txg = lrc->lrc_txg;
1081 	uint64_t reclen = lrc->lrc_reclen;
1082 	uint64_t dlen = 0;
1083 	uint64_t dnow, lwb_sp;
1084 
1085 	if (lwb == NULL)
1086 		return (NULL);
1087 
1088 	ASSERT(lwb->lwb_buf != NULL);
1089 
1090 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
1091 		dlen = P2ROUNDUP_TYPED(
1092 		    lrw->lr_length, sizeof (uint64_t), uint64_t);
1093 
1094 	zilog->zl_cur_used += (reclen + dlen);
1095 
1096 	zil_lwb_write_init(zilog, lwb);
1097 
1098 cont:
1099 	/*
1100 	 * If this record won't fit in the current log block, start a new one.
1101 	 * For WR_NEED_COPY, optimize the layout for a minimal number of chunks,
1102 	 * but try to keep wasted space within a reasonable range (12%).
1103 	 */
1104 	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1105 	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
1106 	    lwb_sp < ZIL_MAX_LOG_DATA / 8 && (dlen % ZIL_MAX_LOG_DATA == 0 ||
1107 	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
1108 		lwb = zil_lwb_write_start(zilog, lwb, B_FALSE);
1109 		if (lwb == NULL)
1110 			return (NULL);
1111 		zil_lwb_write_init(zilog, lwb);
1112 		ASSERT(LWB_EMPTY(lwb));
1113 		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1114 		ASSERT3U(reclen + MIN(dlen, sizeof(uint64_t)), <=, lwb_sp);
1115 	}
1116 
1117 	dnow = MIN(dlen, lwb_sp - reclen);
1118 	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1119 	bcopy(lrc, lr_buf, reclen);
1120 	lrcb = (lr_t *)lr_buf;
1121 	lrwb = (lr_write_t *)lrcb;
1122 
1123 	/*
1124 	 * If it's a write, fetch the data or get its blkptr as appropriate.
1125 	 */
1126 	if (lrc->lrc_txtype == TX_WRITE) {
1127 		if (txg > spa_freeze_txg(zilog->zl_spa))
1128 			txg_wait_synced(zilog->zl_dmu_pool, txg);
1129 		if (itx->itx_wr_state != WR_COPIED) {
1130 			char *dbuf;
1131 			int error;
1132 
1133 			if (itx->itx_wr_state == WR_NEED_COPY) {
1134 				dbuf = lr_buf + reclen;
1135 				lrcb->lrc_reclen += dnow;
1136 				if (lrwb->lr_length > dnow)
1137 					lrwb->lr_length = dnow;
1138 				lrw->lr_offset += dnow;
1139 				lrw->lr_length -= dnow;
1140 			} else {
1141 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
1142 				dbuf = NULL;
1143 			}
1144 			error = zilog->zl_get_data(
1145 			    itx->itx_private, lrwb, dbuf, lwb->lwb_zio);
1146 			if (error == EIO) {
1147 				txg_wait_synced(zilog->zl_dmu_pool, txg);
1148 				return (lwb);
1149 			}
1150 			if (error != 0) {
1151 				ASSERT(error == ENOENT || error == EEXIST ||
1152 				    error == EALREADY);
1153 				return (lwb);
1154 			}
1155 		}
1156 	}
1157 
1158 	/*
1159 	 * We're actually making an entry, so update lrc_seq to be the
1160 	 * log record sequence number.  Note that this is generally not
1161 	 * equal to the itx sequence number because not all transactions
1162 	 * are synchronous, and sometimes spa_sync() gets there first.
1163 	 */
1164 	lrcb->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1165 	lwb->lwb_nused += reclen + dnow;
1166 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1167 	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1168 	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
1169 
1170 	dlen -= dnow;
1171 	if (dlen > 0) {
1172 		zilog->zl_cur_used += reclen;
1173 		goto cont;
1174 	}
1175 
1176 	return (lwb);
1177 }
1178 
1179 itx_t *
1180 zil_itx_create(uint64_t txtype, size_t lrsize)
1181 {
1182 	itx_t *itx;
1183 
1184 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1185 
1186 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1187 	itx->itx_lr.lrc_txtype = txtype;
1188 	itx->itx_lr.lrc_reclen = lrsize;
1189 	itx->itx_lr.lrc_seq = 0;	/* defensive */
1190 	itx->itx_sync = B_TRUE;		/* default is synchronous */
1191 
1192 	return (itx);
1193 }
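
/*
 * Typical itx lifecycle (added, illustrative sketch; the TX_WRITE sizing
 * and call sequence mirror how callers such as the zfs_log code use this
 * API, and len/foid are placeholder names):
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t) + len);
 *	lr_write_t *lr = (lr_write_t *)&itx->itx_lr;
 *	// ... fill in the log record body ...
 *	zil_itx_assign(zilog, itx, tx);	// within an open dmu_tx
 *	// later, e.g. on fsync():
 *	zil_commit(zilog, foid);
 */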
1194 
1195 void
1196 zil_itx_destroy(itx_t *itx)
1197 {
1198 	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1199 }
1200 
1201 /*
1202  * Free up the sync and async itxs. The itxs_t has already been detached
1203  * so no locks are needed.
1204  */
1205 static void
1206 zil_itxg_clean(itxs_t *itxs)
1207 {
1208 	itx_t *itx;
1209 	list_t *list;
1210 	avl_tree_t *t;
1211 	void *cookie;
1212 	itx_async_node_t *ian;
1213 
1214 	list = &itxs->i_sync_list;
1215 	while ((itx = list_head(list)) != NULL) {
1216 		list_remove(list, itx);
1217 		kmem_free(itx, offsetof(itx_t, itx_lr) +
1218 		    itx->itx_lr.lrc_reclen);
1219 	}
1220 
1221 	cookie = NULL;
1222 	t = &itxs->i_async_tree;
1223 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1224 		list = &ian->ia_list;
1225 		while ((itx = list_head(list)) != NULL) {
1226 			list_remove(list, itx);
1227 			kmem_free(itx, offsetof(itx_t, itx_lr) +
1228 			    itx->itx_lr.lrc_reclen);
1229 		}
1230 		list_destroy(list);
1231 		kmem_free(ian, sizeof (itx_async_node_t));
1232 	}
1233 	avl_destroy(t);
1234 
1235 	kmem_free(itxs, sizeof (itxs_t));
1236 }
1237 
1238 static int
1239 zil_aitx_compare(const void *x1, const void *x2)
1240 {
1241 	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1242 	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1243 
1244 	if (o1 < o2)
1245 		return (-1);
1246 	if (o1 > o2)
1247 		return (1);
1248 
1249 	return (0);
1250 }
1251 
1252 /*
1253  * Remove all async itx with the given oid.
1254  */
1255 static void
1256 zil_remove_async(zilog_t *zilog, uint64_t oid)
1257 {
1258 	uint64_t otxg, txg;
1259 	itx_async_node_t *ian;
1260 	avl_tree_t *t;
1261 	avl_index_t where;
1262 	list_t clean_list;
1263 	itx_t *itx;
1264 
1265 	ASSERT(oid != 0);
1266 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1267 
1268 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1269 		otxg = ZILTEST_TXG;
1270 	else
1271 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1272 
1273 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1274 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1275 
1276 		mutex_enter(&itxg->itxg_lock);
1277 		if (itxg->itxg_txg != txg) {
1278 			mutex_exit(&itxg->itxg_lock);
1279 			continue;
1280 		}
1281 
1282 		/*
1283 		 * Locate the object node and append its list.
1284 		 */
1285 		t = &itxg->itxg_itxs->i_async_tree;
1286 		ian = avl_find(t, &oid, &where);
1287 		if (ian != NULL)
1288 			list_move_tail(&clean_list, &ian->ia_list);
1289 		mutex_exit(&itxg->itxg_lock);
1290 	}
1291 	while ((itx = list_head(&clean_list)) != NULL) {
1292 		list_remove(&clean_list, itx);
1293 		kmem_free(itx, offsetof(itx_t, itx_lr) +
1294 		    itx->itx_lr.lrc_reclen);
1295 	}
1296 	list_destroy(&clean_list);
1297 }
1298 
1299 void
1300 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1301 {
1302 	uint64_t txg;
1303 	itxg_t *itxg;
1304 	itxs_t *itxs, *clean = NULL;
1305 
1306 	/*
1307 	 * Object ids can be re-instantiated in the next txg so
1308 	 * remove any async transactions to avoid future leaks.
1309 	 * This can happen if a fsync occurs on the re-instantiated
1310 	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1311 	 * the new file data and flushes a write record for the old object.
1312 	 */
1313 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1314 		zil_remove_async(zilog, itx->itx_oid);
1315 
1316 	/*
1317 	 * Ensure the data of a renamed file is committed before the rename.
1318 	 */
1319 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1320 		zil_async_to_sync(zilog, itx->itx_oid);
1321 
1322 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1323 		txg = ZILTEST_TXG;
1324 	else
1325 		txg = dmu_tx_get_txg(tx);
1326 
1327 	itxg = &zilog->zl_itxg[txg & TXG_MASK];
1328 	mutex_enter(&itxg->itxg_lock);
1329 	itxs = itxg->itxg_itxs;
1330 	if (itxg->itxg_txg != txg) {
1331 		if (itxs != NULL) {
1332 			/*
1333 			 * The zil_clean callback hasn't got around to cleaning
1334 			 * this itxg. Save the itxs for release below.
1335 			 * This should be rare.
1336 			 */
1337 			clean = itxg->itxg_itxs;
1338 		}
1339 		itxg->itxg_txg = txg;
1340 		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1341 
1342 		list_create(&itxs->i_sync_list, sizeof (itx_t),
1343 		    offsetof(itx_t, itx_node));
1344 		avl_create(&itxs->i_async_tree, zil_aitx_compare,
1345 		    sizeof (itx_async_node_t),
1346 		    offsetof(itx_async_node_t, ia_node));
1347 	}
1348 	if (itx->itx_sync) {
1349 		list_insert_tail(&itxs->i_sync_list, itx);
1350 	} else {
1351 		avl_tree_t *t = &itxs->i_async_tree;
1352 		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1353 		itx_async_node_t *ian;
1354 		avl_index_t where;
1355 
1356 		ian = avl_find(t, &foid, &where);
1357 		if (ian == NULL) {
1358 			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1359 			list_create(&ian->ia_list, sizeof (itx_t),
1360 			    offsetof(itx_t, itx_node));
1361 			ian->ia_foid = foid;
1362 			avl_insert(t, ian, where);
1363 		}
1364 		list_insert_tail(&ian->ia_list, itx);
1365 	}
1366 
1367 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1368 	zilog_dirty(zilog, txg);
1369 	mutex_exit(&itxg->itxg_lock);
1370 
1371 	/* Release the old itxs now we've dropped the lock */
1372 	if (clean != NULL)
1373 		zil_itxg_clean(clean);
1374 }
1375 
1376 /*
1377  * If there are any in-memory intent log transactions which have now been
1378  * synced then start up a taskq to free them. We should only do this after we
1379  * have written out the uberblocks (i.e. the txg has been committed) so that
1380  * we don't inadvertently clean out in-memory log records that would be
1381  * required by zil_commit().
1382  */
1383 void
1384 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1385 {
1386 	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1387 	itxs_t *clean_me;
1388 
1389 	mutex_enter(&itxg->itxg_lock);
1390 	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1391 		mutex_exit(&itxg->itxg_lock);
1392 		return;
1393 	}
1394 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1395 	ASSERT(itxg->itxg_txg != 0);
1396 	ASSERT(zilog->zl_clean_taskq != NULL);
1397 	clean_me = itxg->itxg_itxs;
1398 	itxg->itxg_itxs = NULL;
1399 	itxg->itxg_txg = 0;
1400 	mutex_exit(&itxg->itxg_lock);
1401 	/*
1402 	 * Preferably start a task queue to free up the old itxs but
1403 	 * if taskq_dispatch can't allocate resources to do that then
1404 	 * free it in-line. This should be rare. Note, using TQ_SLEEP
1405 	 * created a bad performance problem.
1406 	 */
1407 	if (taskq_dispatch(zilog->zl_clean_taskq,
1408 	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
1409 		zil_itxg_clean(clean_me);
1410 }
1411 
1412 /*
1413  * Get the list of itxs to commit into zl_itx_commit_list.
1414  */
1415 static void
1416 zil_get_commit_list(zilog_t *zilog)
1417 {
1418 	uint64_t otxg, txg;
1419 	list_t *commit_list = &zilog->zl_itx_commit_list;
1420 
1421 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1422 		otxg = ZILTEST_TXG;
1423 	else
1424 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1425 
1426 	/*
1427 	 * This is inherently racy, since there is nothing to prevent
1428 	 * the last synced txg from changing. That's okay since we'll
1429 	 * only commit things in the future.
1430 	 */
1431 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1432 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1433 
1434 		mutex_enter(&itxg->itxg_lock);
1435 		if (itxg->itxg_txg != txg) {
1436 			mutex_exit(&itxg->itxg_lock);
1437 			continue;
1438 		}
1439 
1440 		/*
1441 		 * If we're adding itx records to the zl_itx_commit_list,
1442 		 * then the zil better be dirty in this "txg". We can assert
1443 		 * that here since we're holding the itxg_lock which will
1444 		 * prevent spa_sync from cleaning it. Once we add the itxs
1445 		 * to the zl_itx_commit_list we must commit it to disk even
1446 		 * if it's unnecessary (i.e. the txg was synced).
1447 		 */
1448 		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1449 		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1450 		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1451 
1452 		mutex_exit(&itxg->itxg_lock);
1453 	}
1454 }
1455 
1456 /*
1457  * Move the async itxs for a specified object to commit into sync lists.
1458  */
1459 void
1460 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1461 {
1462 	uint64_t otxg, txg;
1463 	itx_async_node_t *ian;
1464 	avl_tree_t *t;
1465 	avl_index_t where;
1466 
1467 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1468 		otxg = ZILTEST_TXG;
1469 	else
1470 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1471 
1472 	/*
1473 	 * This is inherently racy, since there is nothing to prevent
1474 	 * the last synced txg from changing.
1475 	 */
1476 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1477 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1478 
1479 		mutex_enter(&itxg->itxg_lock);
1480 		if (itxg->itxg_txg != txg) {
1481 			mutex_exit(&itxg->itxg_lock);
1482 			continue;
1483 		}
1484 
1485 		/*
1486 		 * If a foid is specified then find that node and append its
1487 		 * list. Otherwise walk the tree appending all the lists
1488 		 * to the sync list. We add to the end rather than the
1489 		 * beginning to ensure the create has happened.
1490 		 */
1491 		t = &itxg->itxg_itxs->i_async_tree;
1492 		if (foid != 0) {
1493 			ian = avl_find(t, &foid, &where);
1494 			if (ian != NULL) {
1495 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1496 				    &ian->ia_list);
1497 			}
1498 		} else {
1499 			void *cookie = NULL;
1500 
1501 			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1502 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1503 				    &ian->ia_list);
1504 				list_destroy(&ian->ia_list);
1505 				kmem_free(ian, sizeof (itx_async_node_t));
1506 			}
1507 		}
1508 		mutex_exit(&itxg->itxg_lock);
1509 	}
1510 }
1511 
1512 static void
1513 zil_commit_writer(zilog_t *zilog)
1514 {
1515 	uint64_t txg;
1516 	itx_t *itx;
1517 	lwb_t *lwb;
1518 	spa_t *spa = zilog->zl_spa;
1519 	int error = 0;
1520 
1521 	ASSERT(zilog->zl_root_zio == NULL);
1522 
1523 	mutex_exit(&zilog->zl_lock);
1524 
1525 	zil_get_commit_list(zilog);
1526 
1527 	/*
1528 	 * Return if there's nothing to commit before we dirty the fs by
1529 	 * calling zil_create().
1530 	 */
1531 	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1532 		mutex_enter(&zilog->zl_lock);
1533 		return;
1534 	}
1535 
1536 	if (zilog->zl_suspend) {
1537 		lwb = NULL;
1538 	} else {
1539 		lwb = list_tail(&zilog->zl_lwb_list);
1540 		if (lwb == NULL)
1541 			lwb = zil_create(zilog);
1542 	}
1543 
1544 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1545 	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
1546 		txg = itx->itx_lr.lrc_txg;
1547 		ASSERT3U(txg, !=, 0);
1548 
1549 		/*
1550 		 * This is inherently racy and may result in us writing
1551 		 * out a log block for a txg that was just synced. This is
1552 		 * ok since we'll end up cleaning that log block the next
1553 		 * time we call zil_sync().
1554 		 */
1555 		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1556 			lwb = zil_lwb_commit(zilog, itx, lwb);
1557 		list_remove(&zilog->zl_itx_commit_list, itx);
1558 		kmem_free(itx, offsetof(itx_t, itx_lr)
1559 		    + itx->itx_lr.lrc_reclen);
1560 	}
1561 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1562 
1563 	/* write the last block out */
1564 	if (lwb != NULL && lwb->lwb_zio != NULL)
1565 		lwb = zil_lwb_write_start(zilog, lwb, B_TRUE);
1566 
1567 	zilog->zl_cur_used = 0;
1568 
1569 	/*
1570 	 * Wait if necessary for the log blocks to be on stable storage.
1571 	 */
1572 	if (zilog->zl_root_zio) {
1573 		error = zio_wait(zilog->zl_root_zio);
1574 		zilog->zl_root_zio = NULL;
1575 		zil_flush_vdevs(zilog);
1576 	}
1577 
1578 	if (error || lwb == NULL)
1579 		txg_wait_synced(zilog->zl_dmu_pool, 0);
1580 
1581 	mutex_enter(&zilog->zl_lock);
1582 
1583 	/*
1584 	 * Remember the highest committed log sequence number for ztest.
1585 	 * We only update this value when all the log writes succeeded,
1586 	 * because ztest wants to ASSERT that it got the whole log chain.
1587 	 */
1588 	if (error == 0 && lwb != NULL)
1589 		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1590 }
1591 
1592 /*
1593  * Commit zfs transactions to stable storage.
1594  * If foid is 0 push out all transactions, otherwise push only those
1595  * for that object or that might reference that object.
1596  *
1597  * itxs are committed in batches. In a heavily stressed zil there will be
1598  * a commit writer thread who is writing out a bunch of itxs to the log
1599  * for a set of committing threads (cthreads) in the same batch as the writer.
1600  * Those cthreads are all waiting on the same cv for that batch.
1601  *
1602  * There will also be a different and growing batch of threads that are
1603  * waiting to commit (qthreads). When the committing batch completes
1604  * a transition occurs such that the cthreads exit and the qthreads become
1605  * cthreads. One of the new cthreads becomes the writer thread for the
1606  * batch. Any new threads arriving become new qthreads.
1607  *
1608  * Only 2 condition variables are needed and there's no transition
1609  * between the two cvs needed. They just flip-flop between qthreads
1610  * and cthreads.
1611  *
1612  * Using this scheme we can efficiently wake up only those threads
1613  * whose batch has been committed.
1614  */
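
/*
 * Illustrative timeline (added, not from the original source):
 *
 *	batch N   (cthreads): one thread holds zl_writer and runs
 *	          zil_commit_writer(); the rest sleep on zl_cv_batch[N & 1].
 *	batch N+1 (qthreads): later arrivals sleep on zl_cv_batch[(N+1) & 1].
 *
 * When batch N completes, zl_cv_batch[(N+1) & 1] is signalled so that one
 * qthread becomes the next writer, and zl_cv_batch[N & 1] is broadcast to
 * release all of batch N's waiters.
 */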
1615 void
1616 zil_commit(zilog_t *zilog, uint64_t foid)
1617 {
1618 	uint64_t mybatch;
1619 
1620 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1621 		return;
1622 
1623 	/* move the async itxs for the foid to the sync queues */
1624 	zil_async_to_sync(zilog, foid);
1625 
1626 	mutex_enter(&zilog->zl_lock);
1627 	mybatch = zilog->zl_next_batch;
1628 	while (zilog->zl_writer) {
1629 		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1630 		if (mybatch <= zilog->zl_com_batch) {
1631 			mutex_exit(&zilog->zl_lock);
1632 			return;
1633 		}
1634 	}
1635 
1636 	zilog->zl_next_batch++;
1637 	zilog->zl_writer = B_TRUE;
1638 	zil_commit_writer(zilog);
1639 	zilog->zl_com_batch = mybatch;
1640 	zilog->zl_writer = B_FALSE;
1641 	mutex_exit(&zilog->zl_lock);
1642 
1643 	/* wake up one thread to become the next writer */
1644 	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1645 
1646 	/* wake up all threads waiting for this batch to be committed */
1647 	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1648 }
1649 
1650 /*
1651  * Called in syncing context to free committed log blocks and update log header.
1652  */
1653 void
1654 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1655 {
1656 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1657 	uint64_t txg = dmu_tx_get_txg(tx);
1658 	spa_t *spa = zilog->zl_spa;
1659 	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1660 	lwb_t *lwb;
1661 
1662 	/*
1663 	 * We don't zero out zl_destroy_txg, so make sure we don't try
1664 	 * to destroy it twice.
1665 	 */
1666 	if (spa_sync_pass(spa) != 1)
1667 		return;
1668 
1669 	mutex_enter(&zilog->zl_lock);
1670 
1671 	ASSERT(zilog->zl_stop_sync == 0);
1672 
1673 	if (*replayed_seq != 0) {
1674 		ASSERT(zh->zh_replay_seq < *replayed_seq);
1675 		zh->zh_replay_seq = *replayed_seq;
1676 		*replayed_seq = 0;
1677 	}
1678 
1679 	if (zilog->zl_destroy_txg == txg) {
1680 		blkptr_t blk = zh->zh_log;
1681 
1682 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1683 
1684 		bzero(zh, sizeof (zil_header_t));
1685 		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1686 
1687 		if (zilog->zl_keep_first) {
1688 			/*
1689 			 * If this block was part of log chain that couldn't
1690 			 * be claimed because a device was missing during
1691 			 * zil_claim(), but that device later returns,
1692 			 * then this block could erroneously appear valid.
1693 			 * To guard against this, assign a new GUID to the new
1694 			 * log chain so it doesn't matter what blk points to.
1695 			 */
1696 			zil_init_log_chain(zilog, &blk);
1697 			zh->zh_log = blk;
1698 		}
1699 	}
1700 
1701 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1702 		zh->zh_log = lwb->lwb_blk;
1703 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1704 			break;
1705 		list_remove(&zilog->zl_lwb_list, lwb);
1706 		zio_free_zil(spa, txg, &lwb->lwb_blk);
1707 		kmem_cache_free(zil_lwb_cache, lwb);
1708 
1709 		/*
1710 		 * If we don't have anything left in the lwb list then
1711 		 * we've had an allocation failure and we need to zero
1712 		 * out the zil_header blkptr so that we don't end
1713 		 * up freeing the same block twice.
1714 		 */
1715 		if (list_head(&zilog->zl_lwb_list) == NULL)
1716 			BP_ZERO(&zh->zh_log);
1717 	}
1718 	mutex_exit(&zilog->zl_lock);
1719 }
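/*
 * Editorial aside: zl_replayed_seq[] above is a TXG_SIZE-entry ring
 * indexed by (txg & TXG_MASK), so each concurrently open txg owns
 * exactly one slot, and a slot is recycled once its txg has synced
 * (which is why zil_sync() zeroes the slot after consuming it).  A
 * stand-alone demonstration of the indexing; the EX_* names are local
 * to this sketch, mirroring ZFS's TXG_SIZE of 4:
 */
#include <stdio.h>
#include <stdint.h>

#define	EX_TXG_SIZE	4
#define	EX_TXG_MASK	(EX_TXG_SIZE - 1)

int
main(void)
{
	for (uint64_t txg = 8; txg < 13; txg++)
		printf("txg %llu -> slot %llu\n",
		    (unsigned long long)txg,
		    (unsigned long long)(txg & EX_TXG_MASK));
	/* Prints slots 0, 1, 2, 3, 0: txg 12 reuses txg 8's slot. */
	return (0);
}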
1720 
1721 void
1722 zil_init(void)
1723 {
1724 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1725 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1726 }
1727 
1728 void
1729 zil_fini(void)
1730 {
1731 	kmem_cache_destroy(zil_lwb_cache);
1732 }
1733 
1734 void
1735 zil_set_sync(zilog_t *zilog, uint64_t sync)
1736 {
1737 	zilog->zl_sync = sync;
1738 }
1739 
1740 void
1741 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1742 {
1743 	zilog->zl_logbias = logbias;
1744 }
1745 
1746 zilog_t *
1747 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1748 {
1749 	zilog_t *zilog;
1750 
1751 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1752 
1753 	zilog->zl_header = zh_phys;
1754 	zilog->zl_os = os;
1755 	zilog->zl_spa = dmu_objset_spa(os);
1756 	zilog->zl_dmu_pool = dmu_objset_pool(os);
1757 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1758 	zilog->zl_logbias = dmu_objset_logbias(os);
1759 	zilog->zl_sync = dmu_objset_syncprop(os);
1760 	zilog->zl_next_batch = 1;
1761 
1762 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1763 
1764 	for (int i = 0; i < TXG_SIZE; i++) {
1765 		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1766 		    MUTEX_DEFAULT, NULL);
1767 	}
1768 
1769 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1770 	    offsetof(lwb_t, lwb_node));
1771 
1772 	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1773 	    offsetof(itx_t, itx_node));
1774 
1775 	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1776 
1777 	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1778 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1779 
1780 	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1781 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1782 	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1783 	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1784 
1785 	return (zilog);
1786 }
1787 
1788 void
1789 zil_free(zilog_t *zilog)
1790 {
1791 	zilog->zl_stop_sync = 1;
1792 
1793 	ASSERT0(zilog->zl_suspend);
1794 	ASSERT0(zilog->zl_suspending);
1795 
1796 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1797 	list_destroy(&zilog->zl_lwb_list);
1798 
1799 	avl_destroy(&zilog->zl_vdev_tree);
1800 	mutex_destroy(&zilog->zl_vdev_lock);
1801 
1802 	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1803 	list_destroy(&zilog->zl_itx_commit_list);
1804 
1805 	for (int i = 0; i < TXG_SIZE; i++) {
1806 		/*
1807 		 * It's possible for an itx to be generated that doesn't dirty
1808 		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1809 		 * callback to remove the entry. We remove those here.
1810 		 *
1811 		 * Also free up the ziltest itxs.
1812 		 */
1813 		if (zilog->zl_itxg[i].itxg_itxs)
1814 			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1815 		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1816 	}
1817 
1818 	mutex_destroy(&zilog->zl_lock);
1819 
1820 	cv_destroy(&zilog->zl_cv_writer);
1821 	cv_destroy(&zilog->zl_cv_suspend);
1822 	cv_destroy(&zilog->zl_cv_batch[0]);
1823 	cv_destroy(&zilog->zl_cv_batch[1]);
1824 
1825 	kmem_free(zilog, sizeof (zilog_t));
1826 }
1827 
1828 /*
1829  * Open an intent log.
1830  */
1831 zilog_t *
1832 zil_open(objset_t *os, zil_get_data_t *get_data)
1833 {
1834 	zilog_t *zilog = dmu_objset_zil(os);
1835 
1836 	ASSERT(zilog->zl_clean_taskq == NULL);
1837 	ASSERT(zilog->zl_get_data == NULL);
1838 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1839 
1840 	zilog->zl_get_data = get_data;
1841 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1842 	    2, 2, TASKQ_PREPOPULATE);
1843 
1844 	return (zilog);
1845 }
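/*
 * Hypothetical consumer sketch: pairing zil_open() with zil_close()
 * across mount and unmount.  my_fs_t and my_get_data() are invented;
 * my_get_data stands in for the consumer's zil_get_data_t callback
 * (assumed here, per this vintage of zil.h, to be
 * int (*)(void *, lr_write_t *, char *, zio_t *)), which the ZIL uses
 * to fetch write data that was logged by reference rather than copied
 * into the log record.
 */
typedef struct my_fs {
	objset_t	*fs_os;
	zilog_t		*fs_log;
} my_fs_t;

extern int my_get_data(void *, lr_write_t *, char *, zio_t *);

static void
my_mount_zil(my_fs_t *fs)
{
	fs->fs_log = zil_open(fs->fs_os, my_get_data);
}

static void
my_unmount_zil(my_fs_t *fs)
{
	zil_close(fs->fs_log);	/* commits remaining itxs, frees last lwb */
	fs->fs_log = NULL;
}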
1846 
1847 /*
1848  * Close an intent log.
1849  */
1850 void
1851 zil_close(zilog_t *zilog)
1852 {
1853 	lwb_t *lwb;
1854 	uint64_t txg = 0;
1855 
1856 	zil_commit(zilog, 0); /* commit all itx */
1857 
1858 	/*
1859 	 * The lwb_max_txg for the stubby lwb will reflect the last activity
1860 	 * for the zil.  After a txg_wait_synced() on the txg we know all the
1861 	 * callbacks have occurred that may clean the zil.  Only then can we
1862 	 * destroy the zl_clean_taskq.
1863 	 */
1864 	mutex_enter(&zilog->zl_lock);
1865 	lwb = list_tail(&zilog->zl_lwb_list);
1866 	if (lwb != NULL)
1867 		txg = lwb->lwb_max_txg;
1868 	mutex_exit(&zilog->zl_lock);
1869 	if (txg)
1870 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1871 
1872 	if (zilog_is_dirty(zilog))
1873 		zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
1874 	VERIFY(!zilog_is_dirty(zilog));
1875 
1876 	taskq_destroy(zilog->zl_clean_taskq);
1877 	zilog->zl_clean_taskq = NULL;
1878 	zilog->zl_get_data = NULL;
1879 
1880 	/*
1881 	 * We should have only one LWB left on the list; remove it now.
1882 	 */
1883 	mutex_enter(&zilog->zl_lock);
1884 	lwb = list_head(&zilog->zl_lwb_list);
1885 	if (lwb != NULL) {
1886 		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1887 		list_remove(&zilog->zl_lwb_list, lwb);
1888 		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1889 		kmem_cache_free(zil_lwb_cache, lwb);
1890 	}
1891 	mutex_exit(&zilog->zl_lock);
1892 }
1893 
1894 static char *suspend_tag = "zil suspending";
1895 
1896 /*
1897  * Suspend an intent log.  While in suspended mode, we still honor
1898  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1899  * On old version pools, we suspend the log briefly when taking a
1900  * snapshot so that it will have an empty intent log.
1901  *
1902  * Long holds are not really intended to be used the way we do here --
1903  * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
1904  * could fail.  Therefore we take pains to only put a long hold if it is
1905  * actually necessary.  Fortunately, it will only be necessary if the
1906  * objset is currently mounted (or the ZVOL equivalent).  In that case it
1907  * will already have a long hold, so we are not really making things any worse.
1908  *
1909  * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
1910  * zvol_state_t), and use their mechanism to prevent their hold from being
1911  * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
1912  * very little gain.
1913  *
1914  * If cookiep == NULL, this does both the suspend and resume.
1915  * Otherwise, it returns with the dataset "long held", and the cookie
1916  * should be passed into zil_resume().
1917  */
1918 int
1919 zil_suspend(const char *osname, void **cookiep)
1920 {
1921 	objset_t *os;
1922 	zilog_t *zilog;
1923 	const zil_header_t *zh;
1924 	int error;
1925 
1926 	error = dmu_objset_hold(osname, suspend_tag, &os);
1927 	if (error != 0)
1928 		return (error);
1929 	zilog = dmu_objset_zil(os);
1930 
1931 	mutex_enter(&zilog->zl_lock);
1932 	zh = zilog->zl_header;
1933 
1934 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unreplayed log */
1935 		mutex_exit(&zilog->zl_lock);
1936 		dmu_objset_rele(os, suspend_tag);
1937 		return (SET_ERROR(EBUSY));
1938 	}
1939 
1940 	/*
1941 	 * Don't put a long hold in the cases where we can avoid it.  This
1942 	 * is when there is no cookie so we are doing a suspend & resume
1943 	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
1944 	 * for the suspend because it's already suspended, or there's no ZIL.
1945 	 */
1946 	if (cookiep == NULL && !zilog->zl_suspending &&
1947 	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1948 		mutex_exit(&zilog->zl_lock);
1949 		dmu_objset_rele(os, suspend_tag);
1950 		return (0);
1951 	}
1952 
1953 	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
1954 	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
1955 
1956 	zilog->zl_suspend++;
1957 
1958 	if (zilog->zl_suspend > 1) {
1959 		/*
1960 		 * Someone else is already suspending it.
1961 		 * Just wait for them to finish.
1962 		 */
1963 
1964 		while (zilog->zl_suspending)
1965 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1966 		mutex_exit(&zilog->zl_lock);
1967 
1968 		if (cookiep == NULL)
1969 			zil_resume(os);
1970 		else
1971 			*cookiep = os;
1972 		return (0);
1973 	}
1974 
1975 	/*
1976 	 * If there is no pointer to an on-disk block, this ZIL must not
1977 	 * be active (e.g. filesystem not mounted), so there's nothing
1978 	 * to clean up.
1979 	 */
1980 	if (BP_IS_HOLE(&zh->zh_log)) {
1981 		ASSERT(cookiep != NULL); /* fast path already handled */
1982 
1983 		*cookiep = os;
1984 		mutex_exit(&zilog->zl_lock);
1985 		return (0);
1986 	}
1987 
1988 	zilog->zl_suspending = B_TRUE;
1989 	mutex_exit(&zilog->zl_lock);
1990 
1991 	zil_commit(zilog, 0);
1992 
1993 	zil_destroy(zilog, B_FALSE);
1994 
1995 	mutex_enter(&zilog->zl_lock);
1996 	zilog->zl_suspending = B_FALSE;
1997 	cv_broadcast(&zilog->zl_cv_suspend);
1998 	mutex_exit(&zilog->zl_lock);
1999 
2000 	if (cookiep == NULL)
2001 		zil_resume(os);
2002 	else
2003 		*cookiep = os;
2004 	return (0);
2005 }
2006 
2007 void
2008 zil_resume(void *cookie)
2009 {
2010 	objset_t *os = cookie;
2011 	zilog_t *zilog = dmu_objset_zil(os);
2012 
2013 	mutex_enter(&zilog->zl_lock);
2014 	ASSERT(zilog->zl_suspend != 0);
2015 	zilog->zl_suspend--;
2016 	mutex_exit(&zilog->zl_lock);
2017 	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
2018 	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
2019 }
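/*
 * Hypothetical caller sketch for the cookiep contract described above
 * zil_suspend(): suspend, do work while the intent log is guaranteed
 * empty and the dataset is long-held, then resume with the cookie.
 * do_work_while_suspended() is invented.
 */
extern void do_work_while_suspended(const char *osname);

static int
with_zil_suspended(const char *osname)
{
	void *cookie;
	int error;

	error = zil_suspend(osname, &cookie);
	if (error != 0)
		return (error);

	do_work_while_suspended(osname);

	zil_resume(cookie);
	return (0);
}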
2020 
2021 typedef struct zil_replay_arg {
2022 	zil_replay_func_t **zr_replay;
2023 	void		*zr_arg;
2024 	boolean_t	zr_byteswap;
2025 	char		*zr_lr;
2026 } zil_replay_arg_t;
2027 
2028 static int
2029 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
2030 {
2031 	char name[ZFS_MAX_DATASET_NAME_LEN];
2032 
2033 	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
2034 
2035 	dmu_objset_name(zilog->zl_os, name);
2036 
2037 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
2038 	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
2039 	    (u_longlong_t)lr->lrc_seq,
2040 	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
2041 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
2042 
2043 	return (error);
2044 }
2045 
2046 static int
2047 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
2048 {
2049 	zil_replay_arg_t *zr = zra;
2050 	const zil_header_t *zh = zilog->zl_header;
2051 	uint64_t reclen = lr->lrc_reclen;
2052 	uint64_t txtype = lr->lrc_txtype;
2053 	int error = 0;
2054 
2055 	zilog->zl_replaying_seq = lr->lrc_seq;
2056 
2057 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
2058 		return (0);
2059 
2060 	if (lr->lrc_txg < claim_txg)		/* already committed */
2061 		return (0);
2062 
2063 	/* Strip case-insensitive bit, still present in log record */
2064 	txtype &= ~TX_CI;
2065 
2066 	if (txtype == 0 || txtype >= TX_MAX_TYPE)
2067 		return (zil_replay_error(zilog, lr, EINVAL));
2068 
2069 	/*
2070 	 * If this record type can be logged out of order, the object
2071 	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
2072 	 */
2073 	if (TX_OOO(txtype)) {
2074 		error = dmu_object_info(zilog->zl_os,
2075 		    ((lr_ooo_t *)lr)->lr_foid, NULL);
2076 		if (error == ENOENT || error == EEXIST)
2077 			return (0);
2078 	}
2079 
2080 	/*
2081 	 * Make a copy of the data so we can revise and extend it.
2082 	 */
2083 	bcopy(lr, zr->zr_lr, reclen);
2084 
2085 	/*
2086 	 * If this is a TX_WRITE with a blkptr, suck in the data.
2087 	 */
2088 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
2089 		error = zil_read_log_data(zilog, (lr_write_t *)lr,
2090 		    zr->zr_lr + reclen);
2091 		if (error != 0)
2092 			return (zil_replay_error(zilog, lr, error));
2093 	}
2094 
2095 	/*
2096 	 * The log block containing this lr may have been byteswapped
2097 	 * so that we can easily examine common fields like lrc_txtype.
2098 	 * However, the log is a mix of different record types, and only the
2099 	 * replay vectors know how to byteswap their records.  Therefore, if
2100 	 * the lr was byteswapped, undo it before invoking the replay vector.
2101 	 */
2102 	if (zr->zr_byteswap)
2103 		byteswap_uint64_array(zr->zr_lr, reclen);
2104 
2105 	/*
2106 	 * We must now do two things atomically: replay this log record,
2107 	 * and update the log header sequence number to reflect the fact that
2108 	 * we did so. At the end of each replay function the sequence number
2109 	 * is updated if we are in replay mode.
2110 	 */
2111 	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
2112 	if (error != 0) {
2113 		/*
2114 		 * The DMU's dnode layer doesn't see removes until the txg
2115 		 * commits, so a subsequent claim can spuriously fail with
2116 		 * EEXIST. So if we receive any error we try syncing out
2117 		 * any removes then retry the transaction.  Note that we
2118 		 * specify B_FALSE for byteswap now, so we don't do it twice.
2119 		 */
2120 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2121 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
2122 		if (error != 0)
2123 			return (zil_replay_error(zilog, lr, error));
2124 	}
2125 	return (0);
2126 }
2127 
2128 /* ARGSUSED */
2129 static int
2130 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2131 {
2132 	zilog->zl_replay_blks++;
2133 
2134 	return (0);
2135 }
2136 
2137 /*
2138  * If this dataset has a non-empty intent log, replay it and destroy it.
2139  */
2140 void
2141 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
2142 {
2143 	zilog_t *zilog = dmu_objset_zil(os);
2144 	const zil_header_t *zh = zilog->zl_header;
2145 	zil_replay_arg_t zr;
2146 
2147 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
2148 		zil_destroy(zilog, B_TRUE);
2149 		return;
2150 	}
2151 
2152 	zr.zr_replay = replay_func;
2153 	zr.zr_arg = arg;
2154 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
2155 	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
2156 
2157 	/*
2158 	 * Wait for in-progress removes to sync before starting replay.
2159 	 */
2160 	txg_wait_synced(zilog->zl_dmu_pool, 0);
2161 
2162 	zilog->zl_replay = B_TRUE;
2163 	zilog->zl_replay_time = ddi_get_lbolt();
2164 	ASSERT(zilog->zl_replay_blks == 0);
2165 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2166 	    zh->zh_claim_txg);
2167 	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
2168 
2169 	zil_destroy(zilog, B_FALSE);
2170 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2171 	zilog->zl_replay = B_FALSE;
2172 }
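/*
 * Hypothetical consumer sketch: wiring up a replay vector table for
 * zil_replay().  The my_replay_* handlers are invented stand-ins for
 * real per-txtype handlers (e.g. the ZPL's zfs_replay_vector[]).  Slot
 * 0 is never dispatched (txtype 0 is rejected as invalid above), but a
 * real table must populate every txtype it can encounter, since
 * zil_replay_log_record() calls the slot unconditionally.
 */
extern zil_replay_func_t my_replay_create;
extern zil_replay_func_t my_replay_remove;
extern zil_replay_func_t my_replay_write;

static zil_replay_func_t *my_replay_vector[TX_MAX_TYPE] = {
	[TX_CREATE] = my_replay_create,
	[TX_REMOVE] = my_replay_remove,
	[TX_WRITE] = my_replay_write,
};

static void
my_mount_replay(objset_t *os, void *fsdata)
{
	/*
	 * zil_replay() replays iff ZIL_REPLAY_NEEDED is set, and
	 * destroys the log either way.
	 */
	zil_replay(os, fsdata, my_replay_vector);
}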
2173 
2174 boolean_t
2175 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2176 {
2177 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2178 		return (B_TRUE);
2179 
2180 	if (zilog->zl_replay) {
2181 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2182 		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2183 		    zilog->zl_replaying_seq;
2184 		return (B_TRUE);
2185 	}
2186 
2187 	return (B_FALSE);
2188 }
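/*
 * Hypothetical replay handler, sketching the contract the machinery
 * above relies on (the signature is inferred from the call in
 * zil_replay_log_record()): byteswap your own record if asked, apply
 * it inside a DMU transaction, and call zil_replaying() with that tx
 * so zl_replaying_seq lands in this txg's zl_replayed_seq[] slot for
 * zil_sync() to push into the log header.  my_fs_t is the invented
 * type from the zil_open() sketch; the truncate itself is elided.
 */
static int
my_replay_truncate(void *arg, char *lrbuf, boolean_t byteswap)
{
	my_fs_t *fs = arg;
	lr_truncate_t *lr = (lr_truncate_t *)lrbuf;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(fs->fs_os);
	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	/* ... apply the truncate to the object here ... */

	/* Record zl_replaying_seq in this txg's zl_replayed_seq slot. */
	(void) zil_replaying(fs->fs_log, tx);
	dmu_tx_commit(tx);
	return (0);
}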
2189 
2190 /* ARGSUSED */
2191 int
2192 zil_vdev_offline(const char *osname, void *arg)
2193 {
2194 	int error;
2195 
2196 	error = zil_suspend(osname, NULL);
2197 	if (error != 0)
2198 		return (SET_ERROR(EEXIST));
2199 	return (0);
2200 }
2201