xref: /dragonfly/sbin/hammer/misc.c (revision 017817f0)
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sbin/hammer/misc.c,v 1.5 2008/06/26 04:07:57 dillon Exp $
35  */
36 
37 #include "hammer.h"
38 
39 /*
40  * (taken from /usr/src/sys/vfs/hammer/hammer_btree.c)
41  *
42  * Compare two B-Tree elements, return -N, 0, or +N (e.g. similar to strcmp).
43  *
44  * Note that for this particular function a return value of -1, 0, or +1
45  * can denote a match if delete_tid is otherwise discounted.  A delete_tid
46  * of zero is considered to be 'infinity' in comparisons.
47  *
48  * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
49  */
50 int
51 hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
52 {
53 	if (key1->localization < key2->localization)
54 		return(-5);
55 	if (key1->localization > key2->localization)
56 		return(5);
57 
58 	if (key1->obj_id < key2->obj_id)
59 		return(-4);
60 	if (key1->obj_id > key2->obj_id)
61 		return(4);
62 
63 	if (key1->rec_type < key2->rec_type)
64 		return(-3);
65 	if (key1->rec_type > key2->rec_type)
66 		return(3);
67 
68 	if (key1->key < key2->key)
69 		return(-2);
70 	if (key1->key > key2->key)
71 		return(2);
72 
73 	if (key1->create_tid == 0) {
74 		if (key2->create_tid == 0)
75 			return(0);
76 		return(1);
77 	}
78 	if (key2->create_tid == 0)
79 		return(-1);
80 	if (key1->create_tid < key2->create_tid)
81 		return(-1);
82 	if (key1->create_tid > key2->create_tid)
83 		return(1);
84 	return(0);
85 }
86 
87 void
88 hammer_key_beg_init(hammer_base_elm_t base)
89 {
90 	bzero(base, sizeof(*base));
91 
92 	base->localization = HAMMER_MIN_LOCALIZATION;
93 	base->obj_id = HAMMER_MIN_OBJID;
94 	base->key = HAMMER_MIN_KEY;
95 	base->create_tid = 1;
96 	base->rec_type = HAMMER_MIN_RECTYPE;
97 }
98 
99 void
100 hammer_key_end_init(hammer_base_elm_t base)
101 {
102 	bzero(base, sizeof(*base));
103 
104 	base->localization = HAMMER_MAX_LOCALIZATION;
105 	base->obj_id = HAMMER_MAX_OBJID;
106 	base->key = HAMMER_MAX_KEY;
107 	base->create_tid = HAMMER_MAX_TID;
108 	base->rec_type = HAMMER_MAX_RECTYPE;
109 }
110 
111 int
112 hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
113 {
114 	hammer_crc_t crc;
115 
116 	if (leaf->data_len == 0) {
117 		crc = 0;
118 	} else {
119 		switch(leaf->base.rec_type) {
120 		case HAMMER_RECTYPE_INODE:
121 			if (leaf->data_len != sizeof(struct hammer_inode_data))
122 				return(0);
123 			crc = crc32(data, HAMMER_INODE_CRCSIZE);
124 			break;
125 		default:
126 			crc = crc32(data, leaf->data_len);
127 			break;
128 		}
129 	}
130 	return (leaf->data_crc == crc);
131 }
132 
/*
 * Read one line from stdin and return 1 for an affirmative answer
 * ("y", "yes", "Y" or "YES" after stripping trailing CR/LF), else 0.
 * EOF or a read error also counts as "no".
 */
int
getyn(void)
{
	static const char *affirmative[] = { "y", "yes", "Y", "YES" };
	char line[256];
	size_t n, i;

	if (fgets(line, sizeof(line), stdin) == NULL)
		return(0);

	/* strip trailing newline / carriage-return characters */
	n = strlen(line);
	while (n > 0 && (line[n - 1] == '\n' || line[n - 1] == '\r'))
		line[--n] = 0;

	for (i = 0; i < sizeof(affirmative) / sizeof(affirmative[0]); ++i) {
		if (strcmp(line, affirmative[i]) == 0)
			return(1);
	}
	return(0);
}
153 
/*
 * Format a byte count into a human-readable "%6.2f" string with a
 * B/KB/MB/GB/TB suffix.  A unit is selected once the value reaches
 * half of it (e.g. 512 prints as "  0.50KB").
 *
 * Uses a static buffer: not reentrant, and the result is overwritten
 * by the next call.
 */
const char *
sizetostr(off_t size)
{
	static char buf[32];
	static const struct {
		off_t		limit;	/* use this row while size < limit */
		double		div;	/* divisor for the printed value */
		const char	*suffix;
	} units[] = {
		{ 1024 / 2,			  1.0,			"B"  },
		{ 1024 * 1024 / 2,		  1024.0,		"KB" },
		{ 1024 * 1024 * 1024LL / 2,	  1024.0 * 1024,	"MB" },
		{ 1024 * 1024 * 1024LL * 1024LL / 2, 1024.0 * 1024 * 1024, "GB" },
		{ 0 /* unused */,		  1024.0 * 1024 * 1024 * 1024, "TB" }
	};
	int i;

	/* the last row is the fallback for anything >= half a TB */
	for (i = 0; i < 4; ++i) {
		if (size < units[i].limit)
			break;
	}
	snprintf(buf, sizeof(buf), "%6.2f%s",
		(double)size / units[i].div, units[i].suffix);
	return(buf);
}
176 
177 int
178 hammer_fs_to_vol(const char *fs, struct hammer_ioc_volume_list *p)
179 {
180 	struct hammer_ioc_volume_list ioc;
181 	int fd;
182 
183 	fd = open(fs, O_RDONLY);
184 	if (fd < 0) {
185 		perror("open");
186 		return(-1);
187 	}
188 
189 	bzero(&ioc, sizeof(ioc));
190 	ioc.nvols = HAMMER_MAX_VOLUMES;
191 	ioc.vols = malloc(ioc.nvols * sizeof(*ioc.vols));
192 	if (ioc.vols == NULL) {
193 		perror("malloc");
194 		close(fd);
195 		return(-1);
196 	}
197 
198 	if (ioctl(fd, HAMMERIOC_LIST_VOLUMES, &ioc) < 0) {
199 		perror("ioctl");
200 		close(fd);
201 		free(ioc.vols);
202 		return(-1);
203 	}
204 
205 	bcopy(&ioc, p, sizeof(ioc));
206 	close(fd);
207 
208 	return(0);
209 }
210 
211 int
212 hammer_fs_to_rootvol(const char *fs, char *buf, int len)
213 {
214 	struct hammer_ioc_volume_list ioc;
215 	int i;
216 
217 	if (hammer_fs_to_vol(fs, &ioc) == -1)
218 		return(-1);
219 
220 	for (i = 0; i < ioc.nvols; i++) {
221 		if (ioc.vols[i].vol_no == HAMMER_ROOT_VOLNO) {
222 			strlcpy(buf, ioc.vols[i].device_name, len);
223 			break;
224 		}
225 	}
226 	assert(i != ioc.nvols);  /* root volume must exist */
227 
228 	free(ioc.vols);
229 	return(0);
230 }
231 
/*
 * Functions and data structure for zone statistics
 */
/*
 * Each layer1 needs ((2^19) / 64) = 8192 uint64_t.
 */
#define HAMMER_LAYER1_UINT64 8192
#define HAMMER_LAYER1_BYTES (HAMMER_LAYER1_UINT64 * sizeof(uint64_t))

/*
 * Per-volume bookkeeping for the layer2 "seen" bitmaps:
 * l1_max[vol] is the highest layer1 index seen so far (-1 = none yet),
 * l1_bits[vol] is a bitmap with one bit per layer2 entry, grouped as
 * HAMMER_LAYER1_UINT64 words per layer1 slot.
 */
static int *l1_max = NULL;
static uint64_t **l1_bits = NULL;
243 
/*
 * Mark bit i in the bitmap as seen.  Returns 1 if the bit was newly
 * set, 0 if that block had already been seen.
 */
static __inline
int
hammer_set_layer_bits(uint64_t *bits, int i)
{
	uint64_t mask = (uint64_t)1 << (i & 63);
	uint64_t *word = bits + (i >> 6);

	if (*word & mask)
		return(0);  /* already seen this block */
	*word |= mask;
	return(1);
}
260 
261 static
262 void
263 hammer_extend_layer1_bits(int vol, int newsiz, int oldsiz)
264 {
265 	uint64_t *p;
266 
267 	assert(newsiz > oldsiz);
268 	assert(newsiz > 0 && oldsiz >= 0);
269 
270 	p = l1_bits[vol];
271 	if (p == NULL)
272 		p = malloc(HAMMER_LAYER1_BYTES * newsiz);
273 	else
274 		p = realloc(p, HAMMER_LAYER1_BYTES * newsiz);
275 	if (p == NULL)
276 		err(1, "alloc");
277 	l1_bits[vol] = p;
278 
279 	p += HAMMER_LAYER1_UINT64 * oldsiz;
280 	bzero(p, HAMMER_LAYER1_BYTES * (newsiz - oldsiz));
281 }
282 
283 struct zone_stat*
284 hammer_init_zone_stat(void)
285 {
286 	return(calloc(HAMMER_MAX_ZONES, sizeof(struct zone_stat)));
287 }
288 
289 struct zone_stat*
290 hammer_init_zone_stat_bits(void)
291 {
292 	int i;
293 
294 	l1_max = calloc(HAMMER_MAX_VOLUMES, sizeof(int));
295 	if (l1_max == NULL)
296 		err(1, "calloc");
297 
298 	l1_bits = calloc(HAMMER_MAX_VOLUMES, sizeof(uint64_t*));
299 	if (l1_bits == NULL)
300 		err(1, "calloc");
301 
302 	for (i = 0; i < HAMMER_MAX_VOLUMES; i++) {
303 		l1_max[i] = -1;  /* +1 needs to be 0 */
304 		l1_bits[i] = NULL;
305 	}
306 	return(hammer_init_zone_stat());
307 }
308 
309 void
310 hammer_cleanup_zone_stat(struct zone_stat *stats)
311 {
312 	int i;
313 
314 	if (l1_bits) {
315 		for (i = 0; i < HAMMER_MAX_VOLUMES; i++) {
316 			free(l1_bits[i]);
317 			l1_bits[i] = NULL;
318 		}
319 	}
320 
321 	free(l1_bits);
322 	l1_bits = NULL;
323 
324 	free(l1_max);
325 	l1_max = NULL;
326 
327 	free(stats);
328 }
329 
330 static
331 void
332 _hammer_add_zone_stat(struct zone_stat *stats, int zone,
333 	hammer_off_t bytes, int new_block, int new_item)
334 {
335 	struct zone_stat *sp = stats + zone;
336 
337 	if (new_block)
338 		sp->blocks++;
339 	if (new_item)
340 		sp->items++;
341 	sp->used += bytes;
342 }
343 
344 void
345 hammer_add_zone_stat(struct zone_stat *stats, hammer_off_t offset,
346 	hammer_off_t bytes)
347 {
348 	int zone, vol, i, j, new_block;
349 	uint64_t *p;
350 
351 	offset &= ~HAMMER_BIGBLOCK_MASK64;
352 	zone = HAMMER_ZONE_DECODE(offset);
353 	vol = HAMMER_VOL_DECODE(offset);
354 
355 	offset &= HAMMER_OFF_SHORT_MASK;  /* cut off volume bits from layer1 */
356 	i = HAMMER_BLOCKMAP_LAYER1_INDEX(offset);
357 	j = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
358 
359 	if (i > l1_max[vol]) {
360 		assert(i < 1024);  /* no >1024 layer1 per volume */
361 		hammer_extend_layer1_bits(vol, i + 1, l1_max[vol] + 1);
362 		l1_max[vol] = i;
363 	}
364 
365 	p = l1_bits[vol] + i * HAMMER_LAYER1_UINT64;
366 	new_block = hammer_set_layer_bits(p, j);
367 	_hammer_add_zone_stat(stats, zone, bytes, new_block, 1);
368 }
369 
370 /*
371  * If the same layer2 is used more than once the result will be wrong.
372  */
373 void
374 hammer_add_zone_stat_layer2(struct zone_stat *stats,
375 	hammer_blockmap_layer2_t layer2)
376 {
377 	_hammer_add_zone_stat(stats, layer2->zone,
378 		HAMMER_BIGBLOCK_SIZE - layer2->bytes_free, 1, 0);
379 }
380 
381 static __inline
382 double
383 _calc_used_percentage(hammer_off_t blocks, hammer_off_t used)
384 {
385 	double res;
386 
387 	if (blocks)
388 		res = ((double)(used * 100)) / (blocks << HAMMER_BIGBLOCK_BITS);
389 	else
390 		res = 0;
391 	return(res);
392 }
393 
394 void
395 hammer_print_zone_stat(const struct zone_stat *stats)
396 {
397 	int i;
398 	hammer_off_t total_blocks = 0;
399 	hammer_off_t total_items = 0;
400 	hammer_off_t total_used = 0;
401 	const struct zone_stat *p = stats;
402 #define INDENT ""
403 
404 	printf("HAMMER zone statistics\n");
405 	printf(INDENT"zone #             "
406 		"blocks       items              used[B]             used[%%]\n");
407 	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
408 		printf(INDENT"zone %-2d %-10s %-12ju %-18ju %-19ju %g\n",
409 			i, zone_labels[i], p->blocks, p->items, p->used,
410 			_calc_used_percentage(p->blocks, p->used));
411 		total_blocks += p->blocks;
412 		total_items += p->items;
413 		total_used += p->used;
414 		p++;
415 	}
416 
417 	/*
418 	 * Remember that zone0 is always 0% used and zone15 is
419 	 * always 100% used.
420 	 */
421 	printf(INDENT"--------------------------------------------------------------------------------\n");
422 	printf(INDENT"total              %-12ju %-18ju %-19ju %g\n",
423 		(uintmax_t)total_blocks,
424 		(uintmax_t)total_items,
425 		(uintmax_t)total_used,
426 		_calc_used_percentage(total_blocks, total_used));
427 }
428