1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_disk.h,v 1.12 2007/12/14 08:05:39 dillon Exp $
 */

#ifndef _SYS_UUID_H_
#include <sys/uuid.h>
#endif

/*
 * The structures below represent the on-disk format for a HAMMER
 * filesystem.  Note that all fields for on-disk structures are naturally
 * aligned.  The host endian format is used - compatibility is possible
 * if the implementation detects reversed endian and adjusts data accordingly.
 *
 * Most of HAMMER revolves around the concept of an object identifier.  An
 * obj_id is a 64 bit quantity which uniquely identifies a filesystem object
 * FOR THE ENTIRE LIFE OF THE FILESYSTEM.  This uniqueness allows backups
 * and mirrors to retain varying amounts of filesystem history by removing
 * any possibility of conflict through identifier reuse.
 *
 * A HAMMER filesystem may span multiple volumes.
 *
 * A HAMMER filesystem uses a 16K filesystem buffer size.  All filesystem
 * I/O is done in multiples of 16K.  Most buffer-sized headers such as those
 * used by volumes, super-clusters, clusters, and basic filesystem buffers
 * use fixed-sized A-lists which are heavily dependent on HAMMER_BUFSIZE.
 */
#define HAMMER_BUFSIZE	16384
#define HAMMER_BUFMASK	(HAMMER_BUFSIZE - 1)

/*
 * Hammer transaction ids are 64 bit unsigned integers and are usually
 * synchronized with the time of day in nanoseconds.
 */
typedef u_int64_t hammer_tid_t;

#define HAMMER_MAX_TID	0xFFFFFFFFFFFFFFFFULL
#define HAMMER_MIN_KEY	-0x8000000000000000LL
#define HAMMER_MAX_KEY	0x7FFFFFFFFFFFFFFFLL

/*
 * Most HAMMER data structures are embedded in 16K filesystem buffers.
 * All filesystem buffers except those designated as pure-data buffers
 * contain this 128-byte header.
 *
 * This structure contains an embedded A-List used to manage space within
 * the filesystem buffer.  It is not used by volume or cluster header
 * buffers, or by pure-data buffers.
 * The granularity is variable and
 * depends on the type of filesystem buffer.  BLKSIZE is just a minimum.
 */

#define HAMMER_FSBUF_HEAD_SIZE	128
#define HAMMER_FSBUF_MAXBLKS	256
#define HAMMER_FSBUF_BLKMASK	(HAMMER_FSBUF_MAXBLKS - 1)
#define HAMMER_FSBUF_METAELMS	HAMMER_ALIST_METAELMS_256_1LYR	/* 11 */

struct hammer_fsbuf_head {
	u_int64_t buf_type;	/* HAMMER_FSBUF_* magic identifying buffer use */
	u_int32_t buf_crc;	/* buffer sanity check */
	u_int32_t buf_reserved07;
	u_int32_t reserved[6];
	struct hammer_almeta buf_almeta[HAMMER_FSBUF_METAELMS];
};

typedef struct hammer_fsbuf_head *hammer_fsbuf_head_t;

/*
 * Note: Pure-data buffers contain pure-data and have no buf_type.
 * Piecemeal data buffers do have a header and use HAMMER_FSBUF_DATA.
 */
#define HAMMER_FSBUF_VOLUME	0xC8414D4DC5523031ULL	/* HAMMER01 */
#define HAMMER_FSBUF_SUPERCL	0xC8414D52C3555052ULL	/* HAMRSUPR */
#define HAMMER_FSBUF_CLUSTER	0xC8414D52C34C5553ULL	/* HAMRCLUS */
#define HAMMER_FSBUF_RECORDS	0xC8414D52D2454353ULL	/* HAMRRECS */
#define HAMMER_FSBUF_BTREE	0xC8414D52C2545245ULL	/* HAMRBTRE */
#define HAMMER_FSBUF_DATA	0xC8414D52C4415441ULL	/* HAMRDATA */

#define HAMMER_FSBUF_VOLUME_REV	0x313052C54D4D41C8ULL	/* (reverse endian) */

/*
 * The B-Tree structures need hammer_fsbuf_head.
 */
#include "hammer_btree.h"

/*
 * HAMMER Volume header
 *
 * A HAMMER filesystem is built from any number of block devices.  Each block
 * device contains a volume header followed by however many super-clusters
 * and clusters fit into the volume.  Clusters cannot be migrated but the
 * data they contain can, so HAMMER can use a truncated cluster for any
 * extra space at the end of the volume.
 *
 * The volume containing the root cluster is designated as the master volume.
 * The root cluster designation can be moved to any volume.
 *
 * The volume header takes up an entire 16K filesystem buffer and includes
 * a one or two-layered A-list to manage the clusters making up the volume.
 * A volume containing up to 32768 clusters (2TB) can be managed with a
 * single-layered A-list.  A two-layer A-list is capable of managing up
 * to 4096 super-clusters with each super-cluster containing 32768 clusters
 * (8192 TB per volume total).  The number of volumes is limited to 32768
 * but it only takes 512 to fill out a 64 bit address space so for all
 * intents and purposes the filesystem has no limits.
 *
 * Cluster addressing within a volume depends on whether a single or
 * dual-layer A-list is used.  If a dual-layer A-list is used a 16K
 * super-cluster buffer is needed for every 32768 clusters in the volume.
 * However, because the A-list's hinting is grouped in multiples of 16
 * we group 16 super-cluster buffers together (starting just after the
 * volume header), followed by 16384x16 clusters, and repeat.
 *
 * The number of super-clusters is limited to 4096 because the A-list's
 * master radix is stored as a 32 bit signed quantity which will overflow
 * if more than 4096*32768 elements are specified.  XXX
 *
 * NOTE: A 32768-element single-layer and 16384-element dual-layer A-list
 * is the same size.
 *
 * Special field notes:
 *
 *	vol_bot_beg - offset of boot area (mem_beg - bot_beg bytes)
 *	vol_mem_beg - offset of memory log (clu_beg - mem_beg bytes)
 *	vol_clo_beg - offset of cluster #0 in volume
 *
 * The memory log area allows a kernel to cache new records and data
 * in memory without allocating space in the actual filesystem to hold
 * the records and data.  In the event that a filesystem becomes full,
 * any records remaining in memory can be flushed to the memory log
 * area.  This allows the kernel to immediately return success.
 */
#define HAMMER_VOL_MAXCLUSTERS		32768	/* 1-layer */
#define HAMMER_VOL_MAXSUPERCLUSTERS	4096	/* 2-layer */
#define HAMMER_VOL_SUPERCLUSTER_GROUP	16
#define HAMMER_VOL_METAELMS_1LYR	HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_VOL_METAELMS_2LYR	HAMMER_ALIST_METAELMS_16K_2LYR

#define HAMMER_BOOT_MINBYTES		(32*1024)
#define HAMMER_BOOT_NOMBYTES		(64LL*1024*1024)
#define HAMMER_BOOT_MAXBYTES		(256LL*1024*1024)

#define HAMMER_MEM_MINBYTES		(256*1024)
#define HAMMER_MEM_NOMBYTES		(1LL*1024*1024*1024)
#define HAMMER_MEM_MAXBYTES		(64LL*1024*1024*1024)

struct hammer_volume_ondisk {
	struct hammer_fsbuf_head head;
	int64_t vol_bot_beg;	/* byte offset of boot area or 0 */
	int64_t vol_mem_beg;	/* byte offset of memory log or 0 */
	int64_t vol_clo_beg;	/* byte offset of first cl/supercl in volume */
	int64_t vol_clo_end;	/* byte offset of volume EOF */
	int64_t vol_locked;	/* reserved clusters are >= this offset */

	uuid_t vol_fsid;	/* identify filesystem */
	uuid_t vol_fstype;	/* identify filesystem type */
	char vol_name[64];	/* Name of volume */

	int32_t vol_no;		/* volume number within filesystem */
	int32_t vol_count;	/* number of volumes making up FS */

	u_int32_t vol_version;	/* version control information */
	u_int32_t vol_reserved01;
	u_int32_t vol_flags;	/* volume flags (HAMMER_VOLF_*) */
	u_int32_t vol_rootvol;	/* which volume is the root volume? */

	int32_t vol_clsize;	/* cluster size (same for all volumes) */
	int32_t vol_nclusters;
	u_int32_t vol_reserved06;
	u_int32_t vol_reserved07;

	int32_t vol_blocksize;	/* for statfs only */
	int64_t vol_nblocks;	/* total allocatable hammer bufs */

	/*
	 * This statistical information can get out of sync after a crash
	 * and is recovered slowly.
	 */
	int64_t vol_stat_bytes;		/* for statfs only */
	int64_t unused08;		/* for statfs only */
	int64_t vol_stat_data_bufs;	/* hammer bufs allocated to data */
	int64_t vol_stat_rec_bufs;	/* hammer bufs allocated to records */
	int64_t vol_stat_idx_bufs;	/* hammer bufs allocated to B-Tree */

	/*
	 * These fields are initialized and space is reserved in every
	 * volume making up a HAMMER filesystem, but only the master volume
	 * contains valid data.
	 */
	int64_t vol0_stat_bytes;	/* for statfs only */
	int64_t vol0_stat_inodes;	/* for statfs only */
	int64_t vol0_stat_data_bufs;	/* hammer bufs allocated to data */
	int64_t vol0_stat_rec_bufs;	/* hammer bufs allocated to records */
	int64_t vol0_stat_idx_bufs;	/* hammer bufs allocated to B-Tree */

	int32_t vol0_root_clu_no;	/* root cluster no (index) in rootvol */
	hammer_tid_t vol0_root_clu_id;	/* root cluster id */
	hammer_tid_t vol0_nexttid;	/* next TID */
	u_int64_t vol0_recid;		/* fs-wide record id allocator */
	u_int64_t vol0_synchronized_rec_id;	/* XXX */

	char reserved[1024];

	/*
	 * Meta elements for the volume header's A-list, which is either a
	 * 1-layer A-list capable of managing 32768 clusters, or a 2-layer
	 * A-list capable of managing 16384 super-clusters (each of which
	 * can handle 32768 clusters).
	 */
	union {
		struct hammer_almeta super[HAMMER_VOL_METAELMS_2LYR];
		struct hammer_almeta normal[HAMMER_VOL_METAELMS_1LYR];
	} vol_almeta;
	u_int32_t vol0_bitmap[1024];
};

typedef struct hammer_volume_ondisk *hammer_volume_ondisk_t;

#define HAMMER_VOLF_VALID		0x0001	/* valid entry */
#define HAMMER_VOLF_OPEN		0x0002	/* volume is open */
#define HAMMER_VOLF_USINGSUPERCL	0x0004	/* using superclusters */

/*
 * HAMMER Super-cluster header
 *
 * A super-cluster is used to increase the maximum size of a volume.
 * HAMMER's volume header can manage up to 32768 direct clusters or
 * 16384 super-clusters.  Each super-cluster (which is basically just
 * a 16K filesystem buffer) can manage up to 32768 clusters.  So adding
 * a super-cluster layer allows a HAMMER volume to be sized upwards of
 * around 32768TB instead of 2TB.
 *
 * Any volume initially formatted to be over 32G reserves space for the layer
 * but the layer is only enabled if the volume exceeds 2TB.
 */
#define HAMMER_SUPERCL_METAELMS		HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_SCL_MAXCLUSTERS		HAMMER_VOL_MAXCLUSTERS

struct hammer_supercl_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t vol_fsid;	/* identify filesystem - sanity check */
	uuid_t vol_fstype;	/* identify filesystem type - sanity check */
	int32_t reserved[1024];

	struct hammer_almeta scl_meta[HAMMER_SUPERCL_METAELMS];
};

typedef struct hammer_supercl_ondisk *hammer_supercl_ondisk_t;

/*
 * HAMMER Cluster header
 *
 * A cluster is limited to 64MB and is made up of 4096 16K filesystem
 * buffers.  The cluster header contains four A-lists to manage these
 * buffers.
 *
 * master_alist - This is a non-layered A-list which manages pure-data
 *		  allocations and allocations on behalf of other A-lists.
 *
 * btree_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing B-Tree nodes.
 *
 * record_alist - This is a layered A-list which manages filesystem buffers
 *		  containing records.
 *
 * mdata_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing piecemeal record data.
 *
 * General storage management works like this:  All the A-lists except the
 * master start in an all-allocated state.  Now lets say you wish to allocate
 * a B-Tree node out the btree_alist.
 * If the allocation fails you allocate
 * a pure data block out of master_alist and then free that block in
 * btree_alist, thereby assigning more space to the btree_alist, and then
 * retry your allocation out of the btree_alist.  In the reverse direction,
 * filesystem buffers can be garbage collected back to master_alist simply
 * by doing whole-buffer allocations in btree_alist and then freeing the
 * space in master_alist.  The whole-buffer-allocation approach to garbage
 * collection works because A-list allocations are always power-of-2 sized
 * and aligned.
 */
#define HAMMER_CLU_MAXBUFFERS		4096
#define HAMMER_CLU_MASTER_METAELMS	HAMMER_ALIST_METAELMS_4K_1LYR
#define HAMMER_CLU_SLAVE_METAELMS	HAMMER_ALIST_METAELMS_4K_2LYR
#define HAMMER_CLU_MAXBYTES		(HAMMER_CLU_MAXBUFFERS * HAMMER_BUFSIZE)

struct hammer_cluster_ondisk {
	struct hammer_fsbuf_head head;
	uuid_t vol_fsid;	/* identify filesystem - sanity check */
	uuid_t vol_fstype;	/* identify filesystem type - sanity check */

	hammer_tid_t clu_id;	/* unique cluster self identification */
	hammer_tid_t clu_gen;	/* generation number */
	int32_t vol_no;		/* cluster contained in volume (sanity) */
	u_int32_t clu_flags;	/* cluster flags (HAMMER_CLUF_*) */

	int32_t clu_start;	/* start of data (byte offset) */
	int32_t clu_limit;	/* end of data (byte offset) */
	int32_t clu_no;		/* cluster index in volume (sanity) */
	u_int32_t clu_reserved03;

	u_int32_t clu_reserved04;
	u_int32_t clu_reserved05;
	u_int32_t clu_reserved06;
	u_int32_t clu_reserved07;

	/*
	 * These fields are heuristics to aid in locality of reference
	 * allocations.
	 */
	int32_t idx_data;	/* data append point (element no) */
	int32_t idx_index;	/* index append point (element no) */
	int32_t idx_record;	/* record prepend point (element no) */
	int32_t idx_ldata;	/* large block data append pt (buf_no) */

	/*
	 * These fields can become out of sync after a filesystem crash
	 * and are cleaned up in the background.  They are used for
	 * reporting only.
	 */
	int32_t stat_inodes;	/* number of inodes in cluster */
	int32_t stat_data_bufs;	/* hammer bufs allocated to data */
	int32_t stat_rec_bufs;	/* hammer bufs allocated to records */
	int32_t stat_idx_bufs;	/* hammer bufs allocated to B-Tree */

	/*
	 * Specify the range of information stored in this cluster as two
	 * btree elements.  These elements match the left and right
	 * boundary elements in the internal B-Tree node of the parent
	 * cluster that points to the root of our cluster.  Because these
	 * are boundary elements, the right boundary is range-NONinclusive.
	 */
	struct hammer_base_elm clu_btree_beg;
	struct hammer_base_elm clu_btree_end;

	/*
	 * The cluster's B-Tree root can change as a side effect of insertion
	 * and deletion operations so store an offset instead of embedding
	 * the root node.  The parent_offset is stale if the generation number
	 * does not match.
	 *
	 * Parent linkages are explicit.
	 */
	int32_t clu_btree_root;
	int32_t clu_btree_parent_vol_no;
	int32_t clu_btree_parent_clu_no;
	int32_t clu_btree_parent_offset;
	hammer_tid_t clu_btree_parent_clu_gen;

	u_int64_t synchronized_rec_id;

	struct hammer_almeta clu_master_meta[HAMMER_CLU_MASTER_METAELMS];
	struct hammer_almeta clu_btree_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta clu_record_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta clu_mdata_meta[HAMMER_CLU_SLAVE_METAELMS];
};

typedef struct hammer_cluster_ondisk *hammer_cluster_ondisk_t;

#define HAMMER_CLUF_OPEN	0x0001	/* cluster is dirty */

/*
 * HAMMER records are 96 byte entities encoded into 16K filesystem buffers.
 * Each record has a 64 byte header and a 32 byte extension.  170 records
 * fit into each buffer.  Storage is managed by the buffer's A-List.
 *
 * Each record may have an explicit data reference to a block of data up
 * to 2^31-1 bytes in size within the current cluster.  Note that multiple
 * records may share the same or overlapping data references.
 */

/*
 * All HAMMER records have a common 64-byte base and a 32-byte extension.
 *
 * Many HAMMER record types reference out-of-band data within the cluster.
 * This data can also be stored in-band in the record itself if it is small
 * enough.  Either way, (data_offset, data_len) points to it.
 *
 * Key comparison order: obj_id, rec_type, key, create_tid
 */
struct hammer_base_record {
	/*
	 * 40 byte base element info - same base as used in B-Tree internal
	 * and leaf node element arrays.
	 *
	 * Fields: obj_id, key, create_tid, delete_tid, rec_type, obj_type,
	 *	   reserved07.
	 */
	struct hammer_base_elm base;	/* 00 base element info */

	int32_t data_len;	/* 28 size of data (remainder zero-fill) */
	u_int32_t data_crc;	/* 2C data sanity check */
	u_int64_t rec_id;	/* 30 record id (iterator for recovery) */
	int32_t data_offset;	/* 38 cluster-relative data reference or 0 */
	u_int32_t reserved07;	/* 3C */
				/* 40 */
};

/*
 * Record types are fairly straightforward.  The B-Tree includes the record
 * type in its index sort.
 *
 * In particular please note that it is possible to create a pseudo-
 * filesystem within a HAMMER filesystem by creating a special object
 * type within a directory.  Pseudo-filesystems are used as replication
 * targets and even though they are built within a HAMMER filesystem they
 * get their own obj_id space (and thus can serve as a replication target)
 * and look like a mount point to the system.
 *
 * Inter-cluster records are special-cased in the B-Tree.  These records
 * are referenced from a B-Tree INTERNAL node, NOT A LEAF.  This means
 * that the element in the B-Tree node is actually a boundary element whose
 * base element fields, including rec_type, reflect the boundary, NOT
 * the inter-cluster record type.
 *
 * HAMMER_RECTYPE_CLUSTER - only set in the actual inter-cluster record,
 * not set in the left or right boundary elements around the inter-cluster
 * reference of an internal node in the B-Tree (because doing so would
 * interfere with the boundary tests).
 */
#define HAMMER_RECTYPE_UNKNOWN		0
#define HAMMER_RECTYPE_LOWEST		1	/* lowest record type avail */
#define HAMMER_RECTYPE_INODE		1	/* inode in obj_id space */
#define HAMMER_RECTYPE_PSEUDO_INODE	2	/* pseudo filesystem */
#define HAMMER_RECTYPE_CLUSTER		3	/* inter-cluster reference */
#define HAMMER_RECTYPE_DATA		0x10
#define HAMMER_RECTYPE_DIRENTRY		0x11
#define HAMMER_RECTYPE_DB		0x12
#define HAMMER_RECTYPE_EXT		0x13	/* ext attributes */

#define HAMMER_OBJTYPE_UNKNOWN		0	/* (never exists on-disk) */
#define HAMMER_OBJTYPE_DIRECTORY	1
#define HAMMER_OBJTYPE_REGFILE		2
#define HAMMER_OBJTYPE_DBFILE		3
#define HAMMER_OBJTYPE_FIFO		4
#define HAMMER_OBJTYPE_CDEV		5
#define HAMMER_OBJTYPE_BDEV		6
#define HAMMER_OBJTYPE_SOFTLINK		7
#define HAMMER_OBJTYPE_PSEUDOFS		8	/* pseudo filesystem obj */

/*
 * Generic full-sized record
 */
struct hammer_generic_record {
	struct hammer_base_record base;
	char filler[32];
};

/*
 * A HAMMER inode record.
 *
 * This forms the basis for a filesystem object.  obj_id is the inode number,
 * key1 represents the pseudo filesystem id for security partitioning
 * (preventing cross-links and/or restricting a NFS export and specifying the
 * security policy), and key2 represents the data retention policy id.
 *
 * Inode numbers are 64 bit quantities which uniquely identify a filesystem
 * object for the ENTIRE life of the filesystem, even after the object has
 * been deleted.  For all intents and purposes inode numbers are simply
 * allocated by incrementing a sequence space.
 *
 * There is an important distinction between the data stored in the inode
 * record and the record's data reference.  The record references a
 * hammer_inode_data structure but the filesystem object size and hard link
 * count is stored in the inode record itself.
 * This allows multiple inodes
 * to share the same hammer_inode_data structure.  This is possible because
 * any modifications will lay out new data.  The HAMMER implementation need
 * not use the data-sharing ability when laying down new records.
 *
 * A HAMMER inode is subject to the same historical storage requirements
 * as any other record.  In particular any change in filesystem or hard link
 * count will lay down a new inode record when the filesystem is synced to
 * disk.  This can lead to a lot of junk records which get cleaned up by
 * the data retention policy.
 *
 * The ino_atime and ino_mtime fields are a special case.  Modifications to
 * these fields do NOT lay down a new record by default, though the values
 * are effectively frozen for snapshots which access historical versions
 * of the inode record due to other operations.  This means that atime will
 * not necessarily be accurate in snapshots, backups, or mirrors.  mtime
 * will be accurate in backups and mirrors since it can be regenerated from
 * the mirroring stream.
 *
 * Because nlinks is historically retained the hardlink count will be
 * accurate when accessing a HAMMER filesystem snapshot.
 */
struct hammer_inode_record {
	struct hammer_base_record base;
	u_int64_t ino_atime;	/* last access time (not historical) */
	u_int64_t ino_mtime;	/* last modified time (not historical) */
	u_int64_t ino_size;	/* filesystem object size */
	u_int64_t ino_nlinks;	/* hard links */
};

/*
 * Data records specify the entire contents of a regular file object,
 * including attributes.  Small amounts of data can theoretically be
 * embedded in the record itself but the use of this ability versus using
 * an out-of-band data reference depends on the implementation.
 */
struct hammer_data_record {
	struct hammer_base_record base;
	char filler[32];
};

/*
 * A directory entry specifies the HAMMER filesystem object id, a copy of
 * the file type, and file name (either embedded or as out-of-band data).
 * If the file name is short enough to fit into den_name[] (including a
 * terminating nul) then it will be embedded in the record, otherwise it
 * is stored out-of-band.  The base record's data reference always points
 * to the nul-terminated filename regardless.
 *
 * Directory entries are indexed with a 128 bit namekey rather than an
 * offset.  A portion of the namekey is an iterator or randomizer to deal
 * with collisions.
 *
 * NOTE: base.base.obj_type holds the filesystem object type of obj_id,
 *	 e.g. a den_type equivalent.
 *
 * NOTE: den_name / the filename data reference is NOT terminated with \0.
 *
 */
struct hammer_entry_record {
	struct hammer_base_record base;
	u_int64_t obj_id;	/* object being referenced */
	u_int64_t reserved01;
	char den_name[16];	/* short file names fit in record */
};

/*
 * Hammer rollup record
 */
union hammer_record_ondisk {
	struct hammer_base_record base;
	struct hammer_generic_record generic;
	struct hammer_inode_record inode;
	struct hammer_data_record data;
	struct hammer_entry_record entry;
};

typedef union hammer_record_ondisk *hammer_record_ondisk_t;

/*
 * Filesystem buffer for records
 */
#define HAMMER_RECORD_NODES	\
	((HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head) - 32) / \
	sizeof(union hammer_record_ondisk))

#define HAMMER_RECORD_SIZE	(64+32)

struct hammer_fsbuf_recs {
	struct hammer_fsbuf_head head;
	char unused[32];
	union hammer_record_ondisk recs[HAMMER_RECORD_NODES];
};

/*
 * Filesystem buffer for piecemeal data.
 * Note that this does not apply
 * to dedicated pure-data buffers as such buffers do not have a header.
 */

#define HAMMER_DATA_SIZE	(HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head))
#define HAMMER_DATA_BLKSIZE	64
#define HAMMER_DATA_BLKMASK	(HAMMER_DATA_BLKSIZE-1)
#define HAMMER_DATA_NODES	(HAMMER_DATA_SIZE / HAMMER_DATA_BLKSIZE)

struct hammer_fsbuf_data {
	struct hammer_fsbuf_head head;
	u_int8_t data[HAMMER_DATA_NODES][HAMMER_DATA_BLKSIZE];
};

/*
 * Filesystem buffer rollup
 */
union hammer_fsbuf_ondisk {
	struct hammer_fsbuf_head head;
	struct hammer_fsbuf_btree btree;
	struct hammer_fsbuf_recs record;
	struct hammer_fsbuf_data data;
};

typedef union hammer_fsbuf_ondisk *hammer_fsbuf_ondisk_t;

/*
 * HAMMER UNIX Attribute data
 *
 * The data reference in a HAMMER inode record points to this structure.  Any
 * modifications to the contents of this structure will result in a record
 * replacement operation.
 *
 * state_sum allows a filesystem object to be validated to a degree by
 * generating a checksum of all of its pieces (in no particular order) and
 * checking it against this field.
 *
 * short_data_off allows a small amount of data to be embedded in the
 * hammer_inode_data structure.  HAMMER typically uses this to represent
 * up to 64 bytes of data, or to hold symlinks.  Remember that allocations
 * are in powers of 2 so 64, 192, 448, or 960 bytes of embedded data is
 * supported (64+64, 64+192, 64+448, 64+960).
 *
 * parent_obj_id is only valid for directories (which cannot be hard-linked),
 * and specifies the parent directory obj_id.  This field will also be set
 * for non-directory inodes as a recovery aid, but can wind up specifying
 * stale information.  However, since object id's are not reused, the worst
 * that happens is that the recovery code is unable to use it.
 */
struct hammer_inode_data {
	u_int16_t version;	/* inode data version (HAMMER_INODE_DATA_VERSION) */
	u_int16_t mode;		/* basic unix permissions */
	u_int32_t uflags;	/* chflags */
	u_int16_t short_data_off;	/* degenerate data case */
	u_int16_t short_data_len;
	u_int32_t state_sum;
	u_int64_t ctime;
	u_int64_t parent_obj_id;	/* parent directory obj_id */
	uuid_t uid;
	uuid_t gid;
	/* XXX device, softlink extension */
};

#define HAMMER_INODE_DATA_VERSION	1

/*
 * Rollup various structures embedded as record data
 */
union hammer_data_ondisk {
	struct hammer_inode_data inode;
};
