/* xref: /illumos-gate/usr/src/uts/common/fs/ufs/lufs_debug.c (revision 03831d35) */

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_log.h>


#ifdef	DEBUG

/*
 * DEBUG ROUTINES
 *	THESE ROUTINES ARE ONLY USED WHEN ASSERTS ARE ENABLED
 */

static	kmutex_t	toptracelock;
static	int		toptraceindex;
int			toptracemax	= 1024;	/* global so it can be set */
struct toptrace {
	enum delta_type	dtyp;
	kthread_t	*thread;
	dev_t		dev;
	long		arg2;
	long		arg3;
	long long	arg1;
} *toptrace;

/*
 * Record one transaction-ops trace entry in the toptrace ring buffer.
 * The buffer is allocated lazily on first use; the slot that will be
 * written next is poisoned with -1 so the current position is easy to
 * spot when the buffer is inspected.
 */
static void
top_trace(enum delta_type dtyp, dev_t dev, long long arg1, long arg2, long arg3)
{
	if (toptrace == NULL) {
		toptraceindex = 0;
		toptrace = kmem_zalloc((size_t)
		    (sizeof (struct toptrace) * toptracemax), KM_SLEEP);
	}
	mutex_enter(&toptracelock);
	toptrace[toptraceindex].dtyp = dtyp;
	toptrace[toptraceindex].thread = curthread;
	toptrace[toptraceindex].dev = dev;
	toptrace[toptraceindex].arg1 = arg1;
	toptrace[toptraceindex].arg2 = arg2;
	toptrace[toptraceindex].arg3 = arg3;
	if (++toptraceindex == toptracemax) {
		toptraceindex = 0;
	} else {
		toptrace[toptraceindex].dtyp = (enum delta_type)-1;
		toptrace[toptraceindex].thread = (kthread_t *)-1;
		toptrace[toptraceindex].dev = (dev_t)-1;
		toptrace[toptraceindex].arg1 = -1;
		toptrace[toptraceindex].arg2 = -1;
		toptrace[toptraceindex].arg3 = -1;
	}

	mutex_exit(&toptracelock);
}
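
/*
 * The trace buffer is only examined by hand (e.g. from a kernel
 * debugger): toptrace points at toptracemax entries, toptraceindex is
 * the slot that will be written next, and that slot's fields are
 * poisoned with -1.  toptracemax is a plain global so it can be tuned
 * before the first trace entry forces the allocation.
 */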

/*
 * add a range into the metadata map
 */
void
top_mataadd(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	deltamap_add(ul->un_matamap, mof, nb, 0, 0, 0, NULL);
}

/*
 * delete a range from the metadata map
 */
void
top_matadel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	ASSERT(!matamap_overlap(ul->un_deltamap, mof, nb));
	deltamap_del(ul->un_matamap, mof, nb);
}

/*
 * clear the entries from the metadata map
 */
void
top_mataclr(ufsvfs_t *ufsvfsp)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	map_free_entries(ul->un_matamap);
	map_free_entries(ul->un_deltamap);
}
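
/*
 * un_matamap records which device ranges are currently known to hold
 * metadata.  It exists only as a cross-check: top_delta_debug() below
 * asserts that every logged delta falls within the map, and
 * top_matadel() asserts that a range being removed has no outstanding
 * delta against it.
 */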

int
top_begin_debug(ml_unit_t *ul, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_BOT, ul->un_dev,
		    (long long)topid, (long)size, (long)0);

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	tp = tsd_get(topkey);
	if (tp == NULL) {
		tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
		(void) tsd_set(topkey, tp);
	}
	tp->topid  = topid;
	tp->esize  = size;
	tp->rsize  = 0;
	tp->dev    = ul->un_dev;
	return (1);
}
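
/*
 * top_begin_debug() and its siblings always return 1 so that callers
 * can wrap them in ASSERT() and have them compile away entirely on
 * non-DEBUG kernels.  A sketch of the expected pairing (illustrative
 * only; the real callers live in the transaction begin/end paths of
 * the UFS logging code):
 *
 *	ASSERT(top_begin_debug(ul, topid, size));
 *	... deltas are registered, each hitting top_delta_debug() ...
 *	ASSERT(top_end_debug(ul, mtm, topid, size));
 */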

int
top_end_debug(ml_unit_t *ul, mt_map_t *mtm, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	ASSERT((tp = (threadtrans_t *)tsd_get(topkey)) != NULL);

	ASSERT((tp->dev == ul->un_dev) && (tp->topid == topid) &&
	    (tp->esize == size));

	ASSERT(((ul->un_debug & MT_SIZE) == 0) || (tp->rsize <= tp->esize));

	mtm->mtm_tops->mtm_top_num[topid]++;
	mtm->mtm_tops->mtm_top_size_etot[topid] += tp->esize;
	mtm->mtm_tops->mtm_top_size_rtot[topid] += tp->rsize;

	if (tp->rsize > mtm->mtm_tops->mtm_top_size_max[topid])
		mtm->mtm_tops->mtm_top_size_max[topid] = tp->rsize;
	if (mtm->mtm_tops->mtm_top_size_min[topid] == 0)
		mtm->mtm_tops->mtm_top_size_min[topid] = tp->rsize;
	else if (tp->rsize < mtm->mtm_tops->mtm_top_size_min[topid])
		mtm->mtm_tops->mtm_top_size_min[topid] = tp->rsize;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_EOT, ul->un_dev, (long long)topid,
		    (long)tp->rsize, (long)0);

	return (1);
}
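
/*
 * mtm_tops accumulates per-transaction-type statistics: mtm_top_num
 * counts transactions, mtm_top_size_etot and mtm_top_size_rtot total
 * the estimated and recorded sizes, and mtm_top_size_max/min track the
 * extremes of the recorded size.  These are only gathered when
 * MT_TRANSACT debugging is enabled, since mtm_tops is only attached to
 * the log map in that case (see map_get_debug() below).  The MT_SIZE
 * assertion above is the check that catches a transaction whose deltas
 * outgrow the size reserved at top_begin time.
 */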

int
top_delta_debug(
	ml_unit_t *ul,
	offset_t mof,
	off_t nb,
	delta_t dtyp)
{
	struct threadtrans	*tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	/*
	 * check for delta contained fully within matamap
	 */
	ASSERT((ul->un_matamap == NULL) ||
	    matamap_within(ul->un_matamap, mof, nb));

	/*
	 * maintain transaction info
	 */
	if (ul->un_debug & MT_TRANSACT)
		ul->un_logmap->mtm_tops->mtm_delta_num[dtyp]++;

	/*
	 * check transaction stuff
	 */
	if (ul->un_debug & MT_TRANSACT) {
		tp = (struct threadtrans *)tsd_get(topkey);
		ASSERT(tp);
		switch (dtyp) {
		case DT_CANCEL:
		case DT_ABZERO:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += sizeof (struct delta);
			break;
		default:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += nb + sizeof (struct delta);
			break;
		}
	} else
		return (1);

	if (ul->un_debug & MT_TRACE)
		top_trace(dtyp, ul->un_dev, mof, (long)nb, (long)0);

	return (1);
}
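
/*
 * The recorded size kept in tp->rsize mirrors what the delta will cost
 * in the log: DT_CANCEL and DT_ABZERO deltas carry no data, so they
 * only add a struct delta header, while every other delta type adds
 * its payload (nb) plus the header.  Deltas already covered by the
 * deltamap are not counted again.
 */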

int
top_roll_debug(ml_unit_t *ul)
{
	logmap_roll_dev(ul);
	return (1);
}

int
top_init_debug(void)
{
	mutex_init(&toptracelock, NULL, MUTEX_DEFAULT, NULL);
	return (1);
}

struct topstats_link {
	struct topstats_link	*ts_next;
	dev_t			ts_dev;
	struct topstats		ts_stats;
};
struct topstats_link *topstats_anchor = NULL;
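
/*
 * The per-device topstats are kept on this list, keyed by dev_t, rather
 * than in the map itself, so that the numbers survive an unmount and
 * can be picked up again by the next mount of the same device (see
 * map_get_debug() and map_put_debug() below).
 */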

/*
 * DEBUG ROUTINES
 *	from debug portion of *_map.c
 */
/*
 * scan test support
 */
int
logmap_logscan_debug(mt_map_t *mtm, mapentry_t *age)
{
	mapentry_t	*me;
	ml_unit_t	*ul;
	off_t		head, trimroll, lof;

	/*
	 * remember location of youngest rolled delta
	 */
	mutex_enter(&mtm->mtm_mutex);
	ul = mtm->mtm_ul;
	head = ul->un_head_lof;
	trimroll = mtm->mtm_trimrlof;
	for (me = age; me; me = me->me_agenext) {
		lof = me->me_lof;
		if (trimroll == 0)
			trimroll = lof;
		if (lof >= head) {
			if (trimroll >= head && trimroll <= lof)
				trimroll = lof;
		} else {
			if (trimroll <= lof || trimroll >= head)
				trimroll = lof;
		}
	}
	mtm->mtm_trimrlof = trimroll;
	mutex_exit(&mtm->mtm_mutex);
	return (1);
}
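
/*
 * The log is circular, so "youngest" cannot be decided by a plain
 * comparison of offsets.  The loop above treats the head offset as the
 * start of the live region: offsets at or beyond the head come earlier
 * in that ordering than offsets that have wrapped around below it,
 * which is why the two branches accept a candidate under different
 * conditions.
 */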

/*
 * scan test support
 */
int
logmap_logscan_commit_debug(off_t lof, mt_map_t *mtm)
{
	off_t	oldtrimc, newtrimc, trimroll;

	trimroll = mtm->mtm_trimrlof;
	oldtrimc = mtm->mtm_trimclof;
	newtrimc = mtm->mtm_trimclof = dbtob(btod(lof));

	/*
	 * can't trim prior to transaction w/rolled delta
	 */
	if (trimroll) {
		if (newtrimc >= oldtrimc) {
			if (trimroll <= newtrimc && trimroll >= oldtrimc)
				mtm->mtm_trimalof = newtrimc;
		} else {
			if (trimroll >= oldtrimc || trimroll <= newtrimc)
				mtm->mtm_trimalof = newtrimc;
		}
	}
	return (1);
}

int
logmap_logscan_add_debug(struct delta *dp, mt_map_t *mtm)
{
	if ((dp->d_typ == DT_AB) || (dp->d_typ == DT_INODE))
		mtm->mtm_trimalof = mtm->mtm_trimclof;
	return (1);
}

/*
 * log-read after log-write
 */
int
map_check_ldl_write(ml_unit_t *ul, caddr_t va, offset_t vamof, mapentry_t *me)
{
	caddr_t		bufp;

	ASSERT(me->me_nb);
	ASSERT((me->me_flags & ME_AGE) == 0);

	/* Alloc a buf */
	bufp = kmem_alloc(me->me_nb, KM_SLEEP);

	/* Do the read */
	me->me_agenext = NULL;
	if (ldl_read(ul, bufp, me->me_mof, me->me_nb, me) == 0) {
		ASSERT(bcmp(bufp, va + (me->me_mof - vamof), me->me_nb) == 0);
	}

	kmem_free(bufp, me->me_nb);
	return (1);
}
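
/*
 * map_check_ldl_write() is a read-after-write check: the delta that was
 * just written to the log is read back through ldl_read() into a
 * scratch buffer and compared against the in-core copy it was written
 * from.  A successful ldl_read() with a mismatching bcmp() means the
 * log device returned different bytes than were written.
 */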

/*
 * Cleanup a map struct
 */
int
map_put_debug(mt_map_t *mtm)
{
	struct topstats_link	*tsl, **ptsl;

	if (mtm->mtm_tops == NULL)
		return (1);

	/* Don't free this, because the next snarf will want it */
	if ((lufs_debug & MT_TRANSACT) != 0)
		return (1);

	ptsl = &topstats_anchor;
	tsl = topstats_anchor;
	while (tsl) {
		if (mtm->mtm_tops == &tsl->ts_stats) {
			mtm->mtm_tops = NULL;
			*ptsl = tsl->ts_next;
			kmem_free(tsl, sizeof (*tsl));
			return (1);
		}
		ptsl = &tsl->ts_next;
		tsl = tsl->ts_next;
	}

	return (1);
}

int
map_get_debug(ml_unit_t *ul, mt_map_t *mtm)
{
	struct topstats_link	*tsl;

	if ((ul->un_debug & MT_TRANSACT) == 0)
		return (1);

	if (mtm->mtm_type != logmaptype)
		return (1);

	tsl = topstats_anchor;
	while (tsl) {
		if (tsl->ts_dev == ul->un_dev) {
			mtm->mtm_tops = &(tsl->ts_stats);
			return (1);
		}
		tsl = tsl->ts_next;
	}

	tsl = kmem_zalloc(sizeof (*tsl), KM_SLEEP);
	tsl->ts_dev = ul->un_dev;
	tsl->ts_next = topstats_anchor;
	topstats_anchor = tsl;
	mtm->mtm_tops = &tsl->ts_stats;
	return (1);
}
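
/*
 * map_get_debug() attaches transaction statistics to a freshly built
 * log map: an existing topstats_link for the same device is reused if
 * one is on the list, otherwise a new zeroed one is allocated and
 * pushed on the front.  Only log maps get statistics, and only when
 * MT_TRANSACT debugging is enabled for the unit.
 */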

/*
 * check a map's list
 */
int
map_check_linkage(mt_map_t *mtm)
{
	int		i;
	int		hashed;
	int		nexted;
	int		preved;
	int		ncancel;
	mapentry_t	*me;
	off_t		olof;
	off_t		firstlof;
	int		wrapped;

	mutex_enter(&mtm->mtm_mutex);

	ASSERT(mtm->mtm_nme >= 0);

	/*
	 * verify the entries on the hash
	 */
	hashed = 0;
	for (i = 0; i < mtm->mtm_nhash; ++i) {
		for (me = *(mtm->mtm_hash+i); me; me = me->me_hash) {
			++hashed;
			ASSERT(me->me_flags & ME_HASH);
			ASSERT((me->me_flags & ME_LIST) == 0);
		}
	}
	ASSERT(hashed >= mtm->mtm_nme);

	/*
	 * verify the doubly linked list of all entries
	 */
	nexted = 0;
	for (me = mtm->mtm_next; me != (mapentry_t *)mtm; me = me->me_next)
		nexted++;
	preved = 0;
	for (me = mtm->mtm_prev; me != (mapentry_t *)mtm; me = me->me_prev)
		preved++;
	ASSERT(nexted == preved);
	ASSERT(nexted == hashed);

	/*
	 * verify the cancel list
	 */
	ncancel = 0;
	for (me = mtm->mtm_cancel; me; me = me->me_cancel) {
		++ncancel;
		ASSERT(me->me_flags & ME_CANCEL);
	}

	/*
	 * verify the logmap's log offsets
	 */
	if (mtm->mtm_type == logmaptype) {
		olof = mtm->mtm_next->me_lof;
		firstlof = olof;
		wrapped = 0;
		/*
		 * Make sure to skip any mapentries whose me_lof == 0
		 * and me_dt == DT_CANCEL; these exist only to mark
		 * user block deletions as not available for allocation
		 * within the same moby transaction, in case we crash
		 * before it is committed.  They are skipped in the
		 * checks below as the offset checks do not apply.
		 */
		for (me = mtm->mtm_next->me_next;
		    me != (mapentry_t *)mtm;
		    me = me->me_next) {

			if (me->me_lof == 0 && me->me_dt == DT_CANCEL)
				continue;
			if (firstlof == 0) {
				olof = me->me_lof;
				firstlof = olof;
				if (me->me_next != (mapentry_t *)mtm)
					me = me->me_next;
				continue;
			}
			ASSERT(me->me_lof != olof);

			if (wrapped) {
				ASSERT(me->me_lof > olof);
				ASSERT(me->me_lof < firstlof);
				olof = me->me_lof;
				continue;
			}
			if (me->me_lof < olof) {
				ASSERT(me->me_lof < firstlof);
				wrapped = 1;
				olof = me->me_lof;
				continue;
			}
			ASSERT(me->me_lof > firstlof);
			ASSERT(me->me_lof < mtm->mtm_ul->un_eol_lof);
			olof = me->me_lof;
		}
	}

	mutex_exit(&mtm->mtm_mutex);
	return (1);
}
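
/*
 * Taken together, map_check_linkage() verifies that every mapentry is
 * reachable through exactly the structures its flags claim: every
 * hashed entry carries ME_HASH and not ME_LIST, the me_next and
 * me_prev chains agree with each other and with the hash population,
 * every entry on the cancel list carries ME_CANCEL, and a log map's
 * offsets increase monotonically around the circular log with at most
 * one wrap and never beyond un_eol_lof.
 */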

/*
 * check for overlap
 */
int
matamap_overlap(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;

	for (hnb = 0; nb; nb -= hnb, mof += hnb) {

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for dup entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAoverlapME(mof, hnb, me))
				break;
		mutex_exit(&mtm->mtm_mutex);

		/*
		 * overlap detected
		 */
		if (me)
			return (1);
	}
	return (0);
}

/*
 * check for within
 */
int
matamap_within(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;
	int		scans	= 0;
	int		withins	= 0;

	for (hnb = 0; nb && scans == withins; nb -= hnb, mof += hnb) {
		scans++;

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for within entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAwithinME(mof, hnb, me)) {
				withins++;
				break;
			}
		mutex_exit(&mtm->mtm_mutex);
	}
	return (scans == withins);
}
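
/*
 * Both checks walk the request a MAPBLOCKSIZE-aligned chunk at a time
 * (map entries are tracked per map block).  matamap_overlap() answers
 * "does any part of [mof, mof+nb) touch an entry?" while
 * matamap_within() answers "is every chunk fully covered by some
 * entry?".  They are used from ASSERTs elsewhere in this file, e.g.:
 *
 *	ASSERT(!matamap_overlap(ul->un_deltamap, mof, nb));
 *	ASSERT(matamap_within(ul->un_matamap, mof, nb));
 */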

int
ldl_sethead_debug(ml_unit_t *ul)
{
	mt_map_t	*mtm	= ul->un_logmap;
	off_t		trimr	= mtm->mtm_trimrlof;
	off_t		head	= ul->un_head_lof;
	off_t		tail	= ul->un_tail_lof;

	if (head <= tail) {
		if (trimr < head || trimr >= tail)
			mtm->mtm_trimrlof = 0;
	} else {
		if (trimr >= tail && trimr < head)
			mtm->mtm_trimrlof = 0;
	}
	return (1);
}
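
/*
 * Called when the log head is moved: once the youngest rolled delta
 * recorded in mtm_trimrlof no longer falls inside the live region of
 * the circular log (between the head and the tail, possibly wrapping
 * through the end), it is stale and gets cleared.
 */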

int
lufs_initialize_debug(ml_odunit_t *ud)
{
	ud->od_debug = lufs_debug;
	return (1);
}

#endif	/* DEBUG */

/*
 * lufs_debug controls the debug level for TSufs, and is only used
 * for a debug kernel. It's referenced by ufs_ioctl() and so is
 * not under #ifdef DEBUG compilation.
 */
uint_t lufs_debug;
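
/*
 * The flag bits tested in this file are MT_TRACE, MT_TRANSACT and
 * MT_SIZE; lufs_initialize_debug() above copies the current value into
 * a new log's on-disk unit structure, so changing it only affects logs
 * initialized afterwards.  An illustrative way to set it on a DEBUG
 * kernel, assuming the symbol lives in the ufs module:
 *
 *	set ufs:lufs_debug = <numeric MT_* mask>	(in /etc/system)
 */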
617