/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.1 2007/10/16 18:30:53 dillon Exp $
 */

#include "newfs_hammer.h"

static void initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head,
                        u_int64_t type);
static void alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
                        u_int64_t type, int32_t nelements);
#if 0
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Except for get_volume(), these functions will create and initialize any
 * missing info structures.
 */
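/*
 * Illustrative usage sketch (not part of newfs_hammer itself): the
 * expected calling pattern is to look up a volume that was set up
 * earlier, pull clusters out of it, allocate elements, and flush
 * everything at the end.  The vol_no/clu_no values are arbitrary
 * examples.
 */
#if 0
static void
example_usage(void)
{
        struct volume_info *vol;
        struct cluster_info *cl;
        int32_t off;
        void *node;

        vol = get_volume(0);            /* root volume, created elsewhere */
        assert(vol != NULL);
        cl = get_cluster(vol, 0);       /* created on first reference */
        node = alloc_btree_element(cl, &off);
        assert(node != NULL);           /* off is relative to the cluster */
        flush_all_volumes();            /* write all tracked structures */
}
#endif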
struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;
        struct hammer_volume_ondisk *ondisk;

        for (vol = VolBase; vol; vol = vol->next) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol && vol->ondisk == NULL) {
                vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                bzero(ondisk, HAMMER_BUFSIZE);
                if (UsingSuperClusters) {
                        vol->alist.config = &Vol_super_alist_config;
                        vol->alist.meta = ondisk->vol_almeta.super;
                        vol->alist.info = vol;
                        hammer_alist_init(&vol->alist);
                } else {
                        vol->alist.config = &Vol_normal_alist_config;
                        vol->alist.meta = ondisk->vol_almeta.normal;
                        hammer_alist_init(&vol->alist);
                }
        }
        return(vol);
}

struct supercl_info *
get_supercl(struct volume_info *vol, int32_t scl_no)
{
        struct hammer_supercl_ondisk *ondisk;
        struct supercl_info *scl;
        int32_t scl_group;
        int64_t scl_group_size;

        assert(UsingSuperClusters);

        for (scl = vol->supercl_base; scl; scl = scl->next) {
                if (scl->scl_no == scl_no)
                        break;
        }
        if (scl == NULL) {
                /*
                 * Allocate the scl
                 */
                scl = malloc(sizeof(*scl));
                bzero(scl, sizeof(*scl));
                scl->scl_no = scl_no;
                scl->next = vol->supercl_base;
                scl->volume = vol;
                vol->supercl_base = scl;

                /*
                 * Calculate the super-cluster's offset in the volume.
                 *
                 * The arrangement is [scl * N][N * 32768 clusters], repeat.
                 * N is typically 16.
                 */
                scl_group = scl_no / HAMMER_VOL_SUPERCLUSTER_GROUP;
                scl_group_size = ((int64_t)HAMMER_BUFSIZE *
                                  HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                 ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                                  ClusterSize * HAMMER_SCL_MAXCLUSTERS);
                scl->scl_offset = vol->vol_cluster_off +
                                  scl_group * scl_group_size +
                                  (scl_no % HAMMER_VOL_SUPERCLUSTER_GROUP) *
                                  HAMMER_BUFSIZE;
        }
        if (scl->ondisk == NULL) {
                scl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                bzero(ondisk, HAMMER_BUFSIZE);
                scl->alist.config = &Supercl_alist_config;
                scl->alist.meta = ondisk->scl_meta;
                hammer_alist_init(&scl->alist);
        }
        return(scl);
}
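/*
 * Worked example for the super-cluster offset math above (purely
 * illustrative; the real values of HAMMER_BUFSIZE, ClusterSize and
 * HAMMER_SCL_MAXCLUSTERS depend on the build and the filesystem
 * parameters).  With N = HAMMER_VOL_SUPERCLUSTER_GROUP = 16 and
 * scl_no = 17:
 *
 *      scl_group  = 17 / 16 = 1
 *      scl_offset = vol_cluster_off
 *                 + 1 * scl_group_size
 *                 + (17 % 16) * HAMMER_BUFSIZE
 *
 * i.e. the second super-cluster header slot of the second group, since
 * each group is laid out as N header buffers followed by the clusters
 * they manage.
 */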
struct cluster_info *
get_cluster(struct volume_info *vol, int32_t clu_no)
{
        struct hammer_cluster_ondisk *ondisk;
        struct cluster_info *cl;
        int32_t scl_group;
        int64_t scl_group_size;

        for (cl = vol->cluster_base; cl; cl = cl->next) {
                if (cl->clu_no == clu_no)
                        break;
        }
        if (cl == NULL) {
                /*
                 * Allocate the cluster
                 */
                cl = malloc(sizeof(*cl));
                bzero(cl, sizeof(*cl));
                cl->clu_no = clu_no;
                cl->next = vol->cluster_base;
                if (UsingSuperClusters) {
                        cl->supercl = get_supercl(vol,
                                        clu_no / HAMMER_SCL_MAXCLUSTERS);
                }
                cl->volume = vol;
                vol->cluster_base = cl;

                /*
                 * Calculate the cluster's offset in the volume.
                 *
                 * The arrangement is [scl * N][N * 32768 clusters], repeat.
                 * N is typically 16.
                 *
                 * Note that the cluster offset calculation is slightly
                 * different from the supercluster offset calculation due
                 * to the way the grouping works.
                 */
                if (UsingSuperClusters) {
                        scl_group = clu_no / HAMMER_VOL_SUPERCLUSTER_GROUP /
                                    HAMMER_SCL_MAXCLUSTERS;
                        scl_group_size =
                                ((int64_t)HAMMER_BUFSIZE *
                                 HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                ((int64_t)HAMMER_VOL_SUPERCLUSTER_GROUP *
                                 ClusterSize * HAMMER_SCL_MAXCLUSTERS);
                        scl_group_size += HAMMER_VOL_SUPERCLUSTER_GROUP *
                                          HAMMER_BUFSIZE;
                        cl->clu_offset =
                                vol->vol_cluster_off +
                                scl_group * scl_group_size +
                                (HAMMER_BUFSIZE *
                                 HAMMER_VOL_SUPERCLUSTER_GROUP) +
                                ((int64_t)clu_no %
                                 ((int64_t)HAMMER_SCL_MAXCLUSTERS *
                                  HAMMER_VOL_SUPERCLUSTER_GROUP)) *
                                HAMMER_BUFSIZE;
                } else {
                        cl->clu_offset = vol->vol_cluster_off +
                                         (int64_t)clu_no * ClusterSize;
                }
        }
        if (cl->ondisk == NULL) {
                cl->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                bzero(ondisk, HAMMER_BUFSIZE);
                cl->alist_master.config = &Clu_master_alist_config;
                cl->alist_master.meta = ondisk->clu_master_meta;
                hammer_alist_init(&cl->alist_master);
                cl->alist_btree.config = &Clu_slave_alist_config;
                cl->alist_btree.meta = ondisk->clu_btree_meta;
                cl->alist_btree.info = cl;
                hammer_alist_init(&cl->alist_btree);
                cl->alist_record.config = &Clu_slave_alist_config;
                cl->alist_record.meta = ondisk->clu_record_meta;
                cl->alist_record.info = cl;
                hammer_alist_init(&cl->alist_record);
                cl->alist_mdata.config = &Clu_slave_alist_config;
                cl->alist_mdata.meta = ondisk->clu_mdata_meta;
                cl->alist_mdata.info = cl;
                hammer_alist_init(&cl->alist_mdata);
        }
        return(cl);
}
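/*
 * Worked example for the simple (non-super-cluster) branch above:
 * cluster 3 lives three ClusterSize strides past the start of the
 * volume's cluster area, i.e.
 *
 *      clu_offset = vol_cluster_off + 3 * ClusterSize
 *
 * With super-clusters enabled, every group of
 * HAMMER_VOL_SUPERCLUSTER_GROUP * HAMMER_SCL_MAXCLUSTERS clusters is
 * additionally preceded by the group's super-cluster header buffers,
 * which is what the extra HAMMER_BUFSIZE terms in the first branch
 * account for.
 */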
struct buffer_info *
get_buffer(struct cluster_info *cl, int32_t buf_no, int64_t buf_type)
{
        hammer_fsbuf_ondisk_t ondisk;
        struct buffer_info *buf;

        /*
         * Find the buffer.  Note that buffer 0 corresponds to the cluster
         * header and should never be requested.
         */
        assert(buf_no != 0);
        for (buf = cl->buffer_base; buf; buf = buf->next) {
                if (buf->buf_no == buf_no)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                buf->buf_no = buf_no;
                buf->buf_offset = cl->clu_offset + buf_no * HAMMER_BUFSIZE;
                buf->cluster = cl;
                buf->volume = cl->volume;
                buf->next = cl->buffer_base;
                cl->buffer_base = buf;
        }
        if (buf->ondisk == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->alist.config = &Buf_alist_config;
                buf->alist.meta = ondisk->head.buf_almeta;
                initbuffer(&buf->alist, &ondisk->head, buf_type);
        }
        return(buf);
}

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 */
void *
alloc_btree_element(struct cluster_info *cluster, int32_t *offp)
{
        struct buffer_info *buf;
        hammer_alist_t live;
        int32_t elm_no;
        void *item;

        live = &cluster->alist_btree;
        elm_no = hammer_alist_alloc_fwd(live, 1, cluster->ondisk->idx_index);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, 1, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_BTREE, HAMMER_BTREE_NODES);
                elm_no = hammer_alist_alloc(live, 1);
                assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
        }
        cluster->ondisk->idx_index = elm_no;
        buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
        assert(buf->ondisk->head.buf_type != 0);
        item = &buf->ondisk->btree.nodes[elm_no & HAMMER_FSBUF_BLKMASK];
        *offp = buf->buf_no * HAMMER_BUFSIZE +
                ((char *)item - (char *)buf->ondisk);
        return(item);
}
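/*
 * Illustrative note on the element numbering used above and below
 * (the concrete value of HAMMER_FSBUF_MAXBLKS here is an assumption):
 * an a-list element number encodes both the buffer and the slot within
 * the buffer.  If HAMMER_FSBUF_MAXBLKS were 256, element 517 would
 * decode as
 *
 *      buf_no = 517 / 256       = 2    (third buffer of the cluster)
 *      slot   = 517 & (256 - 1) = 5    (sixth slot in that buffer)
 *
 * which is exactly the elm_no / HAMMER_FSBUF_MAXBLKS and
 * elm_no & HAMMER_FSBUF_BLKMASK decoding performed by the allocators.
 */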
void *
alloc_data_element(struct cluster_info *cluster, int32_t bytes, int32_t *offp)
{
        struct buffer_info *buf;
        hammer_alist_t live;
        int32_t elm_no;
        int32_t nblks = (bytes + HAMMER_DATA_BLKMASK) & ~HAMMER_DATA_BLKMASK;
        void *item;

        /*
         * Try to allocate the data blocks, starting the forward scan at
         * the cluster's data allocation hint.  If that fails, retry from
         * the beginning, and if the a-list is exhausted format a new
         * data buffer and allocate out of it.
         */
        live = &cluster->alist_mdata;
        elm_no = hammer_alist_alloc_fwd(live, nblks, cluster->ondisk->idx_data);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_fwd(live, nblks, 0);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_DATA, HAMMER_DATA_NODES);
                elm_no = hammer_alist_alloc(live, nblks);
                assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
        }
        cluster->ondisk->idx_data = elm_no;
        buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
        assert(buf->ondisk->head.buf_type != 0);
        item = &buf->ondisk->data.data[elm_no & HAMMER_FSBUF_BLKMASK];
        *offp = buf->buf_no * HAMMER_BUFSIZE +
                ((char *)item - (char *)buf->ondisk);
        return(item);
}

void *
alloc_record_element(struct cluster_info *cluster, int32_t *offp)
{
        struct buffer_info *buf;
        hammer_alist_t live;
        int32_t elm_no;
        void *item;

        live = &cluster->alist_record;
        elm_no = hammer_alist_alloc_rev(live, 1, cluster->ondisk->idx_record);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE)
                elm_no = hammer_alist_alloc_rev(live, 1,
                                                HAMMER_ALIST_BLOCK_MAX);
        if (elm_no == HAMMER_ALIST_BLOCK_NONE) {
                alloc_new_buffer(cluster, live,
                                 HAMMER_FSBUF_RECORDS, HAMMER_RECORD_NODES);
                elm_no = hammer_alist_alloc(live, 1);
                assert(elm_no != HAMMER_ALIST_BLOCK_NONE);
        }
        cluster->ondisk->idx_record = elm_no;
        buf = get_buffer(cluster, elm_no / HAMMER_FSBUF_MAXBLKS, 0);
        assert(buf->ondisk->head.buf_type != 0);
        item = &buf->ondisk->record.recs[elm_no & HAMMER_FSBUF_BLKMASK];
        *offp = buf->buf_no * HAMMER_BUFSIZE +
                ((char *)item - (char *)buf->ondisk);
        return(item);
}

static void
alloc_new_buffer(struct cluster_info *cluster, hammer_alist_t live,
                 u_int64_t type, int32_t nelements)
{
        int32_t buf_no;
        struct buffer_info *buf;

        buf_no = hammer_alist_alloc(&cluster->alist_master, 1);
        assert(buf_no != HAMMER_ALIST_BLOCK_NONE);
        buf = get_buffer(cluster, buf_no, type);
        hammer_alist_free(live, buf_no * HAMMER_FSBUF_MAXBLKS, nelements);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        for (vol = VolBase; vol; vol = vol->next)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *vol)
{
        struct supercl_info *supercl;
        struct cluster_info *cl;

        for (supercl = vol->supercl_base; supercl; supercl = supercl->next)
                flush_supercl(supercl);
        for (cl = vol->cluster_base; cl; cl = cl->next)
                flush_cluster(cl);
        writehammerbuf(vol, vol->ondisk, 0);
}

void
flush_supercl(struct supercl_info *supercl)
{
        int64_t supercl_offset;

        supercl_offset = supercl->scl_offset;
        writehammerbuf(supercl->volume, supercl->ondisk, supercl_offset);
}

void
flush_cluster(struct cluster_info *cl)
{
        struct buffer_info *buf;
        int64_t cluster_offset;

        for (buf = cl->buffer_base; buf; buf = buf->next)
                flush_buffer(buf);
        cluster_offset = cl->clu_offset;
        writehammerbuf(cl->volume, cl->ondisk, cluster_offset);
}

void
flush_buffer(struct buffer_info *buf)
{
        /*
         * buf_offset is already an absolute volume offset (it was
         * computed from clu_offset in get_buffer()), so it is passed
         * to writehammerbuf() directly.
         */
        writehammerbuf(buf->volume, buf->ondisk, buf->buf_offset);
}
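/*
 * Note on ordering in the flush path above: flush_cluster() writes the
 * cluster's buffers before the cluster header, and flush_volume()
 * writes the super-clusters and clusters before the volume header, so
 * the volume header is the last thing to reach the disk.
 */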
/*
 * Generic buffer initialization
 */
static void
initbuffer(hammer_alist_t live, hammer_fsbuf_head_t head, u_int64_t type)
{
        head->buf_type = type;
        hammer_alist_init(live);
}

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}
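#if 0
/*
 * Illustrative read-back check (not used by newfs_hammer): re-read a
 * buffer just written by writehammerbuf() and verify that it round-
 * trips.  Depends on the disabled readhammerbuf() above being enabled.
 */
static void
example_verify(struct volume_info *vol, const void *data, int64_t offset)
{
        char check[HAMMER_BUFSIZE];

        readhammerbuf(vol, check, offset);
        assert(memcmp(data, check, HAMMER_BUFSIZE) == 0);
}
#endif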