/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
 */

#include "hammer_util.h"

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 * We are bootstrapping the freemap itself and cannot update it yet.
 */
hammer_off_t
bootstrap_bigblock(struct volume_info *volume)
{
	hammer_off_t result_offset;

	assert_volume_offset(volume);
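	/*
	 * vol_free_off is this volume's linear append cursor; each call
	 * simply hands out the next big-block and advances the cursor,
	 * deferring freemap bookkeeping until the freemap exists.
	 */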
	result_offset = volume->vol_free_off;

	volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

	return(result_offset);
}

/*
 * Allocate a zone-3 big-block for the UNDO/REDO FIFO.
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
	hammer_blockmap_t freemap;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;

	/* Only root volume needs formatting */
	assert(volume->vol_no == HAMMER_ROOT_VOLNO);

	result_offset = bootstrap_bigblock(volume);
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
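	/*
	 * Consume one free big-block under this layer1 entry and
	 * re-stamp the entry's CRC so it remains self-validating.
	 */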
	--layer1->blocks_free;
	hammer_crc_set_layer1(layer1);
	buffer1->cache.modified = 1;

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
	assert(layer2->zone == 0);
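	/*
	 * Assign the big-block wholly to the UNDO zone.  Setting
	 * append_off to HAMMER_BIGBLOCK_SIZE with zero bytes free
	 * marks it fully consumed so nothing else allocates from it.
	 */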
	layer2->zone = HAMMER_ZONE_UNDO_INDEX;
	layer2->append_off = HAMMER_BIGBLOCK_SIZE;
	layer2->bytes_free = 0;
	hammer_crc_set_layer2(layer2);
	buffer2->cache.modified = 1;

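	/* Keep the volume header's aggregate free-big-block count in sync */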
	--volume->ondisk->vol0_stat_freebigblocks;

	rel_buffer(buffer1);
	rel_buffer(buffer2);

	return(result_offset);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t chunk_offset;
	void *ptr;

	volume = get_root_volume();

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = HAMMER_DATA_DOALIGN(bytes);
	assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
	assert(hammer_is_zone2_mapped_index(zone));

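	/*
	 * Iterate until we land in a big-block owned by our zone; the
	 * assertion guards against next_offset wrapping into the next
	 * zone's encoding when this zone is exhausted.
	 */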
again:
	assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		blockmap->next_offset = (blockmap->next_offset + bytes - 1) &
				        ~HAMMER_BUFMASK64;
	}
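	/*
	 * Offset of this allocation within its big-block.  Zero means
	 * next_offset sits at the start of a fresh big-block.
	 */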
	chunk_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
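	/*
	 * Entering a fresh big-block requires that this layer1 entry
	 * still has a free big-block to hand out.
	 */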
	assert(!(chunk_offset == 0 && layer1->blocks_free == 0));

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: layer2 ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new big-block assign ownership to our
	 * zone.  If the big-block is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		hammer_crc_set_layer1(layer1);
		layer2->zone = zone;
		--volume->ondisk->vol0_stat_freebigblocks;
		assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset =
			HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
		goto again;
	}

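	/*
	 * Commit the allocation: carve 'bytes' out of the big-block,
	 * advance the zone's next_offset, record the new append point,
	 * and re-stamp the layer2 CRC.
	 */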
	assert(layer2->append_off == chunk_offset);
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
	hammer_crc_set_layer2(layer2);

	ptr = get_buffer_data(*result_offp, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;

	rel_buffer(buffer1);
	rel_buffer(buffer2);
	return(ptr);
}

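/*
 * Translate a zone offset to its zone-2 (raw buffer) offset, optionally
 * copying out the governing layer1/layer2 entries.  On failure returns
 * HAMMER_OFF_BAD and sets *errorp to a negative code:
 *
 *	-1, -2	zone out of range
 *	-3	UNDO offset beyond the zone's alloc_offset
 *	-4	blockmap alloc_offset does not match the zone
 *	-5, -7	layer1/layer2 buffer lookup failed
 *	-6	layer1 entry marked unavailable
 *	-8	big-block not assigned to the requested zone
 */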
hammer_off_t
blockmap_lookup(hammer_off_t zone_offset,
		hammer_blockmap_layer1_t save_layer1,
		hammer_blockmap_layer2_t save_layer2,
		int *errorp)
{
	struct volume_info *root_volume = NULL;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t result_offset;
	int zone;
	int i;
	int error = 0;

	if (save_layer1)
		bzero(save_layer1, sizeof(*save_layer1));
	if (save_layer2)
		bzero(save_layer2, sizeof(*save_layer2));

	zone = HAMMER_ZONE_DECODE(zone_offset);

	if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX)
		error = -1;
	if (zone >= HAMMER_MAX_ZONES)
		error = -2;
	if (error) {
		result_offset = HAMMER_OFF_BAD;
		goto done;
	}

	root_volume = get_root_volume();
	blockmap = &root_volume->ondisk->vol0_blockmap[zone];

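	/*
	 * zone-4 offsets are already raw buffer offsets.  zone-3 (UNDO)
	 * translates through the vol0_undo_array big-block table.  All
	 * other zones map 1:1 onto zone-2 by re-encoding the offset.
	 */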
	if (zone == HAMMER_ZONE_RAW_BUFFER_INDEX) {
		result_offset = zone_offset;
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		i = HAMMER_OFF_SHORT_ENCODE(zone_offset) / HAMMER_BIGBLOCK_SIZE;
		if (zone_offset >= blockmap->alloc_offset) {
			error = -3;
			result_offset = HAMMER_OFF_BAD;
			goto done;
		}
		result_offset = root_volume->ondisk->vol0_undo_array[i] +
				(zone_offset & HAMMER_BIGBLOCK_MASK64);
	} else {
		result_offset = hammer_xlate_to_zone2(zone_offset);
	}

	/*
	 * The blockmap should match the requested zone (else the volume
	 * header is mashed).
	 */
	if (HAMMER_ZONE_FREEMAP_INDEX != zone &&
	    HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
		error = -4;
		goto done;
	}

	/*
	 * Validate that the big-block is assigned to the zone.  Also
	 * assign save_layer{1,2}.
	 */
	freemap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Dive layer 1.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	if (layer1 == NULL) {
		error = -5;
		goto done;
	}
	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		error = -6;
		goto done;
	}

	if (save_layer1)
		*save_layer1 = *layer1;

	/*
	 * Dive layer 2, each entry represents a big-block.
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2 == NULL) {
		error = -7;
		goto done;
	}
	if (layer2->zone != zone) {
		error = -8;
		goto done;
	}
	if (save_layer2)
		*save_layer2 = *layer2;

done:
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (errorp)
		*errorp = error;

	return(result_offset);
}