1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_disk.h,v 1.4 2007/11/02 00:57:15 dillon Exp $
 */

#ifndef _SYS_UUID_H_
#include <sys/uuid.h>
#endif

/*
 * The structures below represent the on-disk format for a HAMMER
 * filesystem.  Note that all fields for on-disk structures are naturally
 * aligned.  The host endian format is used - compatibility is possible
 * if the implementation detects reversed endian and adjusts data accordingly.
 *
 * Most of HAMMER revolves around the concept of an object identifier.  An
 * obj_id is a 64 bit quantity which uniquely identifies a filesystem object
 * FOR THE ENTIRE LIFE OF THE FILESYSTEM.  This uniqueness allows backups
 * and mirrors to retain varying amounts of filesystem history by removing
 * any possibility of conflict through identifier reuse.
 *
 * A HAMMER filesystem may span multiple volumes.
 *
 * A HAMMER filesystem uses a 16K filesystem buffer size.  All filesystem
 * I/O is done in multiples of 16K.  Most buffer-sized headers such as those
 * used by volumes, super-clusters, clusters, and basic filesystem buffers
 * use fixed-sized A-lists which are heavily dependent on HAMMER_BUFSIZE.
 */
#define HAMMER_BUFSIZE	16384
#define HAMMER_BUFMASK	(HAMMER_BUFSIZE - 1)

/*
 * Hammer transaction ids are 64 bit unsigned integers and are usually
 * synchronized with the time of day in nanoseconds.
 */
typedef u_int64_t hammer_tid_t;

/*
 * Most HAMMER data structures are embedded in 16K filesystem buffers.
 * All filesystem buffers except those designated as pure-data buffers
 * contain this 128-byte header.
 *
 * This structure contains an embedded A-List used to manage space within
 * the filesystem buffer.  It is not used by volume or cluster header
 * buffers, or by pure-data buffers.  The granularity is variable and
 * depends on the type of filesystem buffer.  BLKSIZE is just a minimum.
 */

#define HAMMER_FSBUF_HEAD_SIZE	128
#define HAMMER_FSBUF_MAXBLKS	256
#define HAMMER_FSBUF_BLKMASK	(HAMMER_FSBUF_MAXBLKS - 1)
#define HAMMER_FSBUF_METAELMS	HAMMER_ALIST_METAELMS_256_1LYR	/* 11 */

struct hammer_fsbuf_head {
	u_int64_t buf_type;		/* buffer type id (HAMMER_FSBUF_*) */
	u_int32_t buf_crc;		/* buffer CRC (sanity check) */
	u_int32_t buf_reserved07;
	u_int32_t reserved[6];
	struct hammer_almeta	buf_almeta[HAMMER_FSBUF_METAELMS];
};

typedef struct hammer_fsbuf_head *hammer_fsbuf_head_t;

/*
 * Note: Pure-data buffers contain pure-data and have no buf_type.
 * Piecemeal data buffers do have a header and use HAMMER_FSBUF_DATA.
 *
 * The magic numbers below spell out an 8-character ASCII tag (shown in
 * the trailing comment) with the high bit set on alternating bytes.
 */
#define HAMMER_FSBUF_VOLUME	0xC8414D4DC5523031ULL	/* HAMMER01 */
#define HAMMER_FSBUF_SUPERCL	0xC8414D52C3555052ULL	/* HAMRSUPR */
#define HAMMER_FSBUF_CLUSTER	0xC8414D52C34C5553ULL	/* HAMRCLUS */
#define HAMMER_FSBUF_RECORDS	0xC8414D52D2454353ULL	/* HAMRRECS */
#define HAMMER_FSBUF_BTREE	0xC8414D52C2545245ULL	/* HAMRBTRE */
#define HAMMER_FSBUF_DATA	0xC8414D52C4415441ULL	/* HAMRDATA */

#define HAMMER_FSBUF_VOLUME_REV	0x313052C54D4D41C8ULL	/* (reverse endian) */

/*
 * The B-Tree structures need hammer_fsbuf_head.
 */
#include "hammer_btree.h"

/*
 * HAMMER Volume header
 *
 * A HAMMER filesystem is built from any number of block devices.  Each block
 * device contains a volume header followed by however many super-clusters
 * and clusters fit into the volume.  Clusters cannot be migrated but the
 * data they contain can, so HAMMER can use a truncated cluster for any
 * extra space at the end of the volume.
 *
 * The volume containing the root cluster is designated as the master volume.
 * The root cluster designation can be moved to any volume.
 *
 * The volume header takes up an entire 16K filesystem buffer and includes
 * a one or two-layered A-list to manage the clusters making up the volume.
 * A volume containing up to 32768 clusters (2TB) can be managed with a
 * single-layered A-list.  A two-layer A-list is capable of managing up
 * to 16384 super-clusters with each super-cluster containing 32768 clusters
 * (32768 TB per volume total).  The number of volumes is limited to 32768
 * but it only takes 512 to fill out a 64 bit address space so for all
 * intents and purposes the filesystem has no limits.
 *
 * cluster addressing within a volume depends on whether a single or
 * dual-layer A-list is used.  If a dual-layer A-list is used a 16K
 * super-cluster buffer is needed for every 16384 clusters in the volume.
 * However, because the A-list's hinting is grouped in multiples of 16
 * we group 16 super-cluster buffers together (starting just after the
 * volume header), followed by 16384x16 clusters, and repeat.
 *
 * NOTE: A 32768-element single-layer and 16384-element dual-layer A-list
 * is the same size.
 */
#define HAMMER_VOL_MAXCLUSTERS		32768	/* 1-layer */
#define HAMMER_VOL_MAXSUPERCLUSTERS	16384	/* 2-layer */
#define HAMMER_VOL_SUPERCLUSTER_GROUP	16
#define HAMMER_VOL_METAELMS_1LYR	HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_VOL_METAELMS_2LYR	HAMMER_ALIST_METAELMS_16K_2LYR

struct hammer_volume_ondisk {
	struct hammer_fsbuf_head head;
	int64_t vol_beg;	/* byte offset of first cl/supercl in volume */
	int64_t vol_end;	/* byte offset of volume EOF */
	int64_t vol_locked;	/* reserved clusters are >= this offset */

	uuid_t    vol_fsid;	/* identify filesystem */
	uuid_t    vol_fstype;	/* identify filesystem type */
	char	  vol_name[64];	/* Name of volume */

	int32_t vol_no;		/* volume number within filesystem */
	int32_t vol_count;	/* number of volumes making up FS */

	u_int32_t vol_version;	/* version control information */
	u_int32_t vol_reserved01;
	u_int32_t vol_flags;	/* volume flags (HAMMER_VOLF_*) */
	u_int32_t vol_rootvol;	/* which volume is the root volume? */

	int32_t vol_clsize;	/* cluster size (same for all volumes) */
	int32_t vol_nclusters;
	u_int32_t vol_reserved06;
	u_int32_t vol_reserved07;

	int32_t vol_stat_blocksize;	/* for statfs only */
	int64_t vol_stat_bytes;		/* for statfs only */
	int64_t vol_stat_inodes;	/* for statfs only */

	/*
	 * These fields are initialized and space is reserved in every
	 * volume making up a HAMMER filesystem, but only the master volume
	 * contains valid data.
	 */
	int32_t vol0_root_clu_no;	/* root cluster no (index) in rootvol */
	hammer_tid_t vol0_root_clu_id;	/* root cluster id */
	hammer_tid_t vol0_nexttid;	/* next TID */
	u_int64_t vol0_recid;		/* fs-wide record id allocator */

	char	reserved[1024];

	/*
	 * Meta elements for the volume header's A-list, which is either a
	 * 1-layer A-list capable of managing 32768 clusters, or a 2-layer
	 * A-list capable of managing 16384 super-clusters (each of which
	 * can handle 32768 clusters).
	 */
	union {
		struct hammer_almeta	super[HAMMER_VOL_METAELMS_2LYR];
		struct hammer_almeta	normal[HAMMER_VOL_METAELMS_1LYR];
	} vol_almeta;
	u_int32_t	vol0_bitmap[1024];
};

#define HAMMER_VOLF_VALID		0x0001	/* valid entry */
#define HAMMER_VOLF_OPEN		0x0002	/* volume is open */
#define HAMMER_VOLF_USINGSUPERCL	0x0004	/* using superclusters */

/*
 * HAMMER Super-cluster header
 *
 * A super-cluster is used to increase the maximum size of a volume.
 * HAMMER's volume header can manage up to 32768 direct clusters or
 * 16384 super-clusters.  Each super-cluster (which is basically just
 * a 16K filesystem buffer) can manage up to 32768 clusters.  So adding
 * a super-cluster layer allows a HAMMER volume to be sized upwards of
 * around 32768TB instead of 2TB.
 *
 * Any volume initially formatted to be over 32G reserves space for the layer
 * but the layer is only enabled if the volume exceeds 2TB.
 */
#define HAMMER_SUPERCL_METAELMS		HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_SCL_MAXCLUSTERS		HAMMER_VOL_MAXCLUSTERS

struct hammer_supercl_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t	vol_fsid;	/* identify filesystem - sanity check */
	uuid_t	vol_fstype;	/* identify filesystem type - sanity check */
	int32_t reserved[1024];

	struct hammer_almeta	scl_meta[HAMMER_SUPERCL_METAELMS];
};

/*
 * HAMMER Cluster header
 *
 * A cluster is limited to 64MB and is made up of 4096 16K filesystem
 * buffers.  The cluster header contains four A-lists to manage these
 * buffers.
 *
 * master_alist - This is a non-layered A-list which manages pure-data
 *		  allocations and allocations on behalf of other A-lists.
 *
 * btree_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing B-Tree nodes.
 *
 * record_alist - This is a layered A-list which manages filesystem buffers
 *		  containing records.
 *
 * mdata_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing piecemeal record data.
 *
 * General storage management works like this:  All the A-lists except the
 * master start in an all-allocated state.  Now lets say you wish to allocate
 * a B-Tree node out of the btree_alist.  If the allocation fails you allocate
 * a pure data block out of master_alist and then free that block in
 * btree_alist, thereby assigning more space to the btree_alist, and then
 * retry your allocation out of the btree_alist.  In the reverse direction,
 * filesystem buffers can be garbage collected back to master_alist simply
 * by doing whole-buffer allocations in btree_alist and then freeing the
 * space in master_alist.
The whole-buffer-allocation approach to garbage
 * collection works because A-list allocations are always power-of-2 sized
 * and aligned.
 */
#define HAMMER_CLU_MAXBUFFERS		4096
#define HAMMER_CLU_MASTER_METAELMS	HAMMER_ALIST_METAELMS_4K_1LYR
#define HAMMER_CLU_SLAVE_METAELMS	HAMMER_ALIST_METAELMS_4K_2LYR
#define HAMMER_CLU_MAXBYTES		(HAMMER_CLU_MAXBUFFERS * HAMMER_BUFSIZE)

struct hammer_cluster_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t	vol_fsid;	/* identify filesystem - sanity check */
	uuid_t	vol_fstype;	/* identify filesystem type - sanity check */

	u_int64_t clu_gen;	/* identify generation number of cluster */
	u_int64_t clu_unused01;

	hammer_tid_t clu_id;	/* unique cluster self identification */
	int32_t vol_no;		/* cluster contained in volume (sanity) */
	u_int32_t clu_flags;	/* cluster flags */

	int32_t clu_start;	/* start of data (byte offset) */
	int32_t clu_limit;	/* end of data (byte offset) */
	int32_t clu_no;		/* cluster index in volume (sanity) */
	u_int32_t clu_reserved03;

	u_int32_t clu_reserved04;
	u_int32_t clu_reserved05;
	u_int32_t clu_reserved06;
	u_int32_t clu_reserved07;

	int32_t idx_data;	/* data append point (element no) */
	int32_t idx_index;	/* index append point (element no) */
	int32_t idx_record;	/* record prepend point (element no) */
	u_int32_t idx_reserved03;

	/*
	 * Specify the range of information stored in this cluster as two
	 * btree elements.  These elements exist as separate records that
	 * point to us in the parent cluster's B-Tree.
	 *
	 * Note that clu_btree_end is range-inclusive, not range-exclusive.
	 * i.e. 0-1023 instead of 0,1024.
	 */
	struct hammer_base_elm clu_btree_beg;
	struct hammer_base_elm clu_btree_end;

	/*
	 * The cluster's B-Tree root can change as a side effect of insertion
	 * and deletion operations so store an offset instead of embedding
	 * the root node.
	 */
	int32_t		clu_btree_root;
	int32_t		clu_btree_parent_vol_no;
	int32_t		clu_btree_parent_clu_no;
	hammer_tid_t	clu_btree_parent_clu_id;

	u_int64_t synchronized_rec_id;

	struct hammer_almeta	clu_master_meta[HAMMER_CLU_MASTER_METAELMS];
	struct hammer_almeta	clu_btree_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta	clu_record_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta	clu_mdata_meta[HAMMER_CLU_SLAVE_METAELMS];
};

/*
 * HAMMER records are 96 byte entities encoded into 16K filesystem buffers.
 * Each record has a 64 byte header and a 32 byte extension.  170 records
 * fit into each buffer.  Storage is managed by the buffer's A-List.
 *
 * Each record may have an explicit data reference to a block of data up
 * to 2^31-1 bytes in size within the current cluster.  Note that multiple
 * records may share the same or overlapping data references.
 */

/*
 * All HAMMER records have a common 64-byte base and a 32-byte extension.
 *
 * Many HAMMER record types reference out-of-band data within the cluster.
 * This data can also be stored in-band in the record itself if it is small
 * enough.  Either way, (data_offset, data_len) points to it.
 *
 * Key comparison order: obj_id, rec_type, key, create_tid
 */
struct hammer_base_record {
	/*
	 * 40 byte base element info - same base as used in B-Tree internal
	 * and leaf node element arrays.
	 *
	 * Fields: obj_id, key, create_tid, delete_tid, rec_type, obj_type,
	 * reserved07.
	 */
	struct hammer_base_elm base; /* 00 base element info */

	int32_t data_len;	/* 28 size of data (remainder zero-fill) */
	u_int32_t data_crc;	/* 2C data sanity check */
	u_int64_t rec_id;	/* 30 record id (iterator for recovery) */
	int32_t data_offset;	/* 38 cluster-relative data reference or 0 */
	u_int32_t reserved07;	/* 3C */
				/* 40 */
};

/*
 * Record types are fairly straightforward.  The B-Tree includes the record
 * type in its index sort.
 *
 * In particular please note that it is possible to create a pseudo-
 * filesystem within a HAMMER filesystem by creating a special object
 * type within a directory.  Pseudo-filesystems are used as replication
 * targets and even though they are built within a HAMMER filesystem they
 * get their own obj_id space (and thus can serve as a replication target)
 * and look like a mount point to the system.
 */
#define HAMMER_RECTYPE_UNKNOWN		0
#define HAMMER_RECTYPE_INODE		1	/* inode in obj_id space */
#define HAMMER_RECTYPE_PSEUDO_INODE	2	/* pseudo filesystem */
#define HAMMER_RECTYPE_CLUSTER		3	/* cluster reference */
#define HAMMER_RECTYPE_DATA_CREATE	0x10
#define HAMMER_RECTYPE_DATA_ZEROFILL	0x11
#define HAMMER_RECTYPE_DATA_DELETE	0x12
#define HAMMER_RECTYPE_DATA_UPDATE	0x13
#define HAMMER_RECTYPE_DIR_CREATE	0x20
#define HAMMER_RECTYPE_DIR_DELETE	0x22
#define HAMMER_RECTYPE_DIR_UPDATE	0x23
#define HAMMER_RECTYPE_DB_CREATE	0x30
#define HAMMER_RECTYPE_DB_DELETE	0x32
#define HAMMER_RECTYPE_DB_UPDATE	0x33
#define HAMMER_RECTYPE_EXT_CREATE	0x40	/* ext attributes */
#define HAMMER_RECTYPE_EXT_DELETE	0x42
#define HAMMER_RECTYPE_EXT_UPDATE	0x43

#define HAMMER_OBJTYPE_DIRECTORY	1
#define HAMMER_OBJTYPE_REGFILE		2
#define HAMMER_OBJTYPE_DBFILE		3
#define HAMMER_OBJTYPE_FIFO		4
#define HAMMER_OBJTYPE_CDEV		5
#define HAMMER_OBJTYPE_BDEV		6
#define HAMMER_OBJTYPE_SOFTLINK		7
#define HAMMER_OBJTYPE_PSEUDOFS		8	/* pseudo filesystem obj */

#define HAMMER_OBJTYPE_CLUSTER_FLAG	0x20
#define HAMMER_OBJTYPE_CLUSTER_BEG	0x20
#define HAMMER_OBJTYPE_CLUSTER_END	0x21

/*
 * Generic full-sized record
 */
struct hammer_generic_record {
	struct hammer_base_record base;
	char filler[32];
};

/*
 * A HAMMER inode record.
 *
 * This forms the basis for a filesystem object.  obj_id is the inode number,
 * key1 represents the pseudo filesystem id for security partitioning
 * (preventing cross-links and/or restricting a NFS export and specifying the
 * security policy), and key2 represents the data retention policy id.
 *
 * Inode numbers are 64 bit quantities which uniquely identify a filesystem
 * object for the ENTIRE life of the filesystem, even after the object has
 * been deleted.  For all intents and purposes inode numbers are simply
 * allocated by incrementing a sequence space.
 *
 * There is an important distinction between the data stored in the inode
 * record and the record's data reference.  The record references a
 * hammer_inode_data structure but the filesystem object size and hard link
 * count is stored in the inode record itself.  This allows multiple inodes
 * to share the same hammer_inode_data structure.  This is possible because
 * any modifications will lay out new data.  The HAMMER implementation need
 * not use the data-sharing ability when laying down new records.
 *
 * A HAMMER inode is subject to the same historical storage requirements
 * as any other record.  In particular any change in filesystem object size
 * or hard link count will lay down a new inode record when the filesystem
 * is synced to disk.  This can lead to a lot of junk records which get
 * cleaned up by the data retention policy.
 *
 * The ino_atime and ino_mtime fields are a special case.  Modifications to
 * these fields do NOT lay down a new record by default, though the values
 * are effectively frozen for snapshots which access historical versions
 * of the inode record due to other operations.  This means that atime will
 * not necessarily be accurate in snapshots, backups, or mirrors.  mtime
 * will be accurate in backups and mirrors since it can be regenerated from
 * the mirroring stream.
 *
 * Because nlinks is historically retained the hardlink count will be
 * accurate when accessing a HAMMER filesystem snapshot.
 */
struct hammer_inode_record {
	struct hammer_base_record base;
	u_int64_t ino_atime;	/* last access time (not historical) */
	u_int64_t ino_mtime;	/* last modified time (not historical) */
	u_int64_t ino_size;	/* filesystem object size */
	u_int64_t ino_nlinks;	/* hard links */
};

/*
 * Data records specify the entire contents of a regular file object,
 * including attributes.  Small amounts of data can theoretically be
 * embedded in the record itself but the use of this ability versus using
 * an out-of-band data reference depends on the implementation.
 */
struct hammer_data_record {
	struct hammer_base_record base;
	char filler[32];
};

/*
 * A directory entry specifies the HAMMER filesystem object id, a copy of
 * the file type, and file name (either embedded or as out-of-band data).
 * If the file name is short enough to fit into den_name[] (including a
 * terminating nul) then it will be embedded in the record, otherwise it
 * is stored out-of-band.  The base record's data reference always points
 * to the nul-terminated filename regardless.
 *
 * Directory entries are indexed with a 128 bit namekey rather than an
 * offset.  A portion of the namekey is an iterator or randomizer to deal
 * with collisions.
 */
struct hammer_entry_record {
	struct hammer_base_record base;
	u_int64_t obj_id;		/* object being referenced */
	u_int64_t reserved01;
	u_int8_t  den_type;		/* cached file type */
	char	  den_name[15];		/* short file names fit in record */
};

/*
 * Hammer rollup record - overlays all on-disk record types so a buffer
 * element can be interpreted according to its rec_type.
 */
union hammer_record_ondisk {
	struct hammer_base_record	base;
	struct hammer_generic_record	generic;
	struct hammer_inode_record	inode;
	struct hammer_data_record	data;
	struct hammer_entry_record	entry;
};

typedef union hammer_record_ondisk *hammer_record_ondisk_t;

/*
 * Filesystem buffer for records
 */
#define HAMMER_RECORD_NODES	\
	((HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head)) / \
	sizeof(union hammer_record_ondisk))

struct hammer_fsbuf_recs {
	struct hammer_fsbuf_head	head;
	char				unused[32];
	union hammer_record_ondisk	recs[HAMMER_RECORD_NODES];
};

/*
 * Filesystem buffer for piecemeal data.  Note that this does not apply
 * to dedicated pure-data buffers as such buffers do not have a header.
 */

#define HAMMER_DATA_SIZE	(HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head))
#define HAMMER_DATA_BLKSIZE	64
#define HAMMER_DATA_BLKMASK	(HAMMER_DATA_BLKSIZE-1)
#define HAMMER_DATA_NODES	(HAMMER_DATA_SIZE / HAMMER_DATA_BLKSIZE)

struct hammer_fsbuf_data {
	struct hammer_fsbuf_head head;
	u_int8_t		data[HAMMER_DATA_NODES][HAMMER_DATA_BLKSIZE];
};

/*
 * Filesystem buffer rollup - overlays all buffer types that carry a
 * hammer_fsbuf_head so a buffer can be interpreted via head.buf_type.
 */
union hammer_fsbuf_ondisk {
	struct hammer_fsbuf_head	head;
	struct hammer_fsbuf_btree	btree;
	struct hammer_fsbuf_recs	record;
	struct hammer_fsbuf_data	data;
};

typedef union hammer_fsbuf_ondisk *hammer_fsbuf_ondisk_t;

/*
 * HAMMER UNIX Attribute data
 *
 * The data reference in a HAMMER inode record points to this structure.
Any
 * modifications to the contents of this structure will result in a record
 * replacement operation.
 *
 * state_sum allows a filesystem object to be validated to a degree by
 * generating a checksum of all of its pieces (in no particular order) and
 * checking it against this field.
 */
struct hammer_inode_data {
	u_int16_t version;	/* inode data version (HAMMER_INODE_DATA_VERSION) */
	u_int16_t mode;		/* basic unix permissions */
	u_int32_t uflags;	/* chflags */
	u_int64_t reserved01;
	u_int64_t reserved02;
	u_int64_t state_sum;	/* cumulative checksum */
	uuid_t	uid;
	uuid_t	gid;
};

#define HAMMER_INODE_DATA_VERSION	1

/*
 * Rollup various structures embedded as record data
 */
union hammer_data_ondisk {
	struct hammer_inode_data inode;
};