xref: /dragonfly/sys/vfs/hammer/hammer_disk.h (revision 70705abf)
1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_disk.h,v 1.6 2007/11/19 00:53:40 dillon Exp $
35  */
36 
37 #ifndef _SYS_UUID_H_
38 #include <sys/uuid.h>
39 #endif
40 
/*
 * The structures below represent the on-disk format for a HAMMER
 * filesystem.  Note that all fields for on-disk structures are naturally
 * aligned.  The host endian format is used - compatibility is possible
 * if the implementation detects reversed endian and adjusts data accordingly.
 *
 * Most of HAMMER revolves around the concept of an object identifier.  An
 * obj_id is a 64 bit quantity which uniquely identifies a filesystem object
 * FOR THE ENTIRE LIFE OF THE FILESYSTEM.  This uniqueness allows backups
 * and mirrors to retain varying amounts of filesystem history by removing
 * any possibility of conflict through identifier reuse.
 *
 * A HAMMER filesystem may span multiple volumes.
 *
 * A HAMMER filesystem uses a 16K filesystem buffer size.  All filesystem
 * I/O is done in multiples of 16K.  Most buffer-sized headers such as those
 * used by volumes, super-clusters, clusters, and basic filesystem buffers
 * use fixed-sized A-lists which are heavily dependent on HAMMER_BUFSIZE.
 */
#define HAMMER_BUFSIZE	16384
#define HAMMER_BUFMASK	(HAMMER_BUFSIZE - 1)
62 
/*
 * HAMMER transaction ids are 64 bit unsigned integers and are usually
 * synchronized with the time of day in nanoseconds.
 */
typedef u_int64_t hammer_tid_t;

#define HAMMER_MAX_TID	0xFFFFFFFFFFFFFFFFULL	/* highest possible TID */
70 
/*
 * Most HAMMER data structures are embedded in 16K filesystem buffers.
 * All filesystem buffers except those designated as pure-data buffers
 * contain this 128-byte header.
 *
 * This structure contains an embedded A-List used to manage space within
 * the filesystem buffer.  It is not used by volume or cluster header
 * buffers, or by pure-data buffers.  The granularity is variable and
 * depends on the type of filesystem buffer.  BLKSIZE is just a minimum.
 */

#define HAMMER_FSBUF_HEAD_SIZE	128
#define HAMMER_FSBUF_MAXBLKS	256
#define HAMMER_FSBUF_BLKMASK	(HAMMER_FSBUF_MAXBLKS - 1)
#define HAMMER_FSBUF_METAELMS	HAMMER_ALIST_METAELMS_256_1LYR	/* 11 */

struct hammer_fsbuf_head {
	u_int64_t buf_type;	/* HAMMER_FSBUF_* magic identifying buffer type */
	u_int32_t buf_crc;	/* buffer CRC - sanity check */
	u_int32_t buf_reserved07;
	u_int32_t reserved[6];	/* pad header out to 128 bytes */
	struct hammer_almeta buf_almeta[HAMMER_FSBUF_METAELMS];	/* embedded A-list meta elements */
};

typedef struct hammer_fsbuf_head *hammer_fsbuf_head_t;
96 
/*
 * Note: Pure-data buffers contain pure-data and have no buf_type.
 * Piecemeal data buffers do have a header and use HAMMER_FSBUF_DATA.
 *
 * Each constant encodes the 8-character ASCII tag shown in its comment
 * (with the high bit set on some bytes), so buffer types are recognizable
 * in a raw dump and a byte-swapped header can be detected, e.g. via
 * HAMMER_FSBUF_VOLUME_REV which is HAMMER_FSBUF_VOLUME byte-reversed.
 */
#define HAMMER_FSBUF_VOLUME	0xC8414D4DC5523031ULL	/* HAMMER01 */
#define HAMMER_FSBUF_SUPERCL	0xC8414D52C3555052ULL	/* HAMRSUPR */
#define HAMMER_FSBUF_CLUSTER	0xC8414D52C34C5553ULL	/* HAMRCLUS */
#define HAMMER_FSBUF_RECORDS	0xC8414D52D2454353ULL	/* HAMRRECS */
#define HAMMER_FSBUF_BTREE	0xC8414D52C2545245ULL	/* HAMRBTRE */
#define HAMMER_FSBUF_DATA	0xC8414D52C4415441ULL	/* HAMRDATA */

#define HAMMER_FSBUF_VOLUME_REV	0x313052C54D4D41C8ULL	/* (reverse endian) */
109 
110 /*
111  * The B-Tree structures need hammer_fsbuf_head.
112  */
113 #include "hammer_btree.h"
114 
/*
 * HAMMER Volume header
 *
 * A HAMMER filesystem is built from any number of block devices.  Each block
 * device contains a volume header followed by however many super-clusters
 * and clusters fit into the volume.  Clusters cannot be migrated but the
 * data they contain can, so HAMMER can use a truncated cluster for any
 * extra space at the end of the volume.
 *
 * The volume containing the root cluster is designated as the master volume.
 * The root cluster designation can be moved to any volume.
 *
 * The volume header takes up an entire 16K filesystem buffer and includes
 * a one or two-layered A-list to manage the clusters making up the volume.
 * A volume containing up to 32768 clusters (2TB) can be managed with a
 * single-layered A-list.  A two-layer A-list is capable of managing up
 * to 16384 super-clusters with each super-cluster containing 32768 clusters
 * (32768 TB per volume total).  The number of volumes is limited to 32768
 * but it only takes 512 to fill out a 64 bit address space so for all
 * intents and purposes the filesystem has no limits.
 *
 * Cluster addressing within a volume depends on whether a single or
 * dual-layer A-list is used.  If a dual-layer A-list is used a 16K
 * super-cluster buffer is needed for every 16384 clusters in the volume.
 * However, because the A-list's hinting is grouped in multiples of 16
 * we group 16 super-cluster buffers together (starting just after the
 * volume header), followed by 16384x16 clusters, and repeat.
 *
 * NOTE: A 32768-element single-layer and 16384-element dual-layer A-list
 * is the same size.
 */
#define HAMMER_VOL_MAXCLUSTERS		32768	/* 1-layer */
#define HAMMER_VOL_MAXSUPERCLUSTERS	16384	/* 2-layer */
#define HAMMER_VOL_SUPERCLUSTER_GROUP	16
#define HAMMER_VOL_METAELMS_1LYR	HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_VOL_METAELMS_2LYR	HAMMER_ALIST_METAELMS_16K_2LYR
151 
struct hammer_volume_ondisk {
	struct hammer_fsbuf_head head;	/* standard fsbuf header (HAMMER_FSBUF_VOLUME) */
	int64_t vol_beg;	/* byte offset of first cl/supercl in volume */
	int64_t vol_end;	/* byte offset of volume EOF */
	int64_t vol_locked;	/* reserved clusters are >= this offset */

	uuid_t    vol_fsid;	/* identify filesystem */
	uuid_t    vol_fstype;	/* identify filesystem type */
	char	  vol_name[64];	/* Name of volume */

	int32_t vol_no;		/* volume number within filesystem */
	int32_t vol_count;	/* number of volumes making up FS */

	u_int32_t vol_version;	/* version control information */
	u_int32_t vol_reserved01;
	u_int32_t vol_flags;	/* volume flags (HAMMER_VOLF_*) */
	u_int32_t vol_rootvol;	/* which volume is the root volume? */

	int32_t vol_clsize;	/* cluster size (same for all volumes) */
	int32_t vol_nclusters;	/* number of clusters in this volume */
	u_int32_t vol_reserved06;
	u_int32_t vol_reserved07;

	int32_t vol_stat_blocksize;	/* for statfs only */
	int64_t	vol_stat_bytes;		/* for statfs only */
	int64_t vol_stat_inodes;	/* for statfs only */

	/*
	 * These fields are initialized and space is reserved in every
	 * volume making up a HAMMER filesystem, but only the master volume
	 * contains valid data.
	 */
	int32_t vol0_root_clu_no;	/* root cluster no (index) in rootvol */
	hammer_tid_t vol0_root_clu_id;	/* root cluster id */
	hammer_tid_t vol0_nexttid;	/* next TID */
	u_int64_t vol0_recid;		/* fs-wide record id allocator */
	u_int64_t vol0_synchronized_rec_id; /* XXX */

	char	reserved[1024];

	/*
	 * Meta elements for the volume header's A-list, which is either a
	 * 1-layer A-list capable of managing 32768 clusters, or a 2-layer
	 * A-list capable of managing 16384 super-clusters (each of which
	 * can handle 32768 clusters).
	 */
	union {
		struct hammer_almeta	super[HAMMER_VOL_METAELMS_2LYR];
		struct hammer_almeta	normal[HAMMER_VOL_METAELMS_1LYR];
	} vol_almeta;
	u_int32_t	vol0_bitmap[1024];
};

typedef struct hammer_volume_ondisk *hammer_volume_ondisk_t;

#define HAMMER_VOLF_VALID		0x0001	/* valid entry */
#define HAMMER_VOLF_OPEN		0x0002	/* volume is open */
#define HAMMER_VOLF_USINGSUPERCL	0x0004	/* using superclusters */
210 
/*
 * HAMMER Super-cluster header
 *
 * A super-cluster is used to increase the maximum size of a volume.
 * HAMMER's volume header can manage up to 32768 direct clusters or
 * 16384 super-clusters.  Each super-cluster (which is basically just
 * a 16K filesystem buffer) can manage up to 32768 clusters.  So adding
 * a super-cluster layer allows a HAMMER volume to be sized upwards of
 * around 32768TB instead of 2TB.
 *
 * Any volume initially formatted to be over 32G reserves space for the layer
 * but the layer is only enabled if the volume exceeds 2TB.
 */
#define HAMMER_SUPERCL_METAELMS		HAMMER_ALIST_METAELMS_32K_1LYR
#define HAMMER_SCL_MAXCLUSTERS		HAMMER_VOL_MAXCLUSTERS

struct hammer_supercl_ondisk {
	struct hammer_fsbuf_head head;	/* standard fsbuf header (HAMMER_FSBUF_SUPERCL) */
	uuid_t	vol_fsid;	/* identify filesystem - sanity check */
	uuid_t	vol_fstype;	/* identify filesystem type - sanity check */
	int32_t reserved[1024];

	struct hammer_almeta	scl_meta[HAMMER_SUPERCL_METAELMS];	/* cluster A-list meta elements */
};

typedef struct hammer_supercl_ondisk *hammer_supercl_ondisk_t;
237 
/*
 * HAMMER Cluster header
 *
 * A cluster is limited to 64MB and is made up of 4096 16K filesystem
 * buffers.  The cluster header contains four A-lists to manage these
 * buffers.
 *
 * master_alist - This is a non-layered A-list which manages pure-data
 *		  allocations and allocations on behalf of other A-lists.
 *
 * btree_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing B-Tree nodes.
 *
 * record_alist - This is a layered A-list which manages filesystem buffers
 *		  containing records.
 *
 * mdata_alist  - This is a layered A-list which manages filesystem buffers
 *		  containing piecemeal record data.
 *
 * General storage management works like this:  All the A-lists except the
 * master start in an all-allocated state.  Now lets say you wish to allocate
 * a B-Tree node out of the btree_alist.  If the allocation fails you allocate
 * a pure data block out of master_alist and then free that block in
 * btree_alist, thereby assigning more space to the btree_alist, and then
 * retry your allocation out of the btree_alist.  In the reverse direction,
 * filesystem buffers can be garbage collected back to master_alist simply
 * by doing whole-buffer allocations in btree_alist and then freeing the
 * space in master_alist.  The whole-buffer-allocation approach to garbage
 * collection works because A-list allocations are always power-of-2 sized
 * and aligned.
 */
#define HAMMER_CLU_MAXBUFFERS		4096
#define HAMMER_CLU_MASTER_METAELMS	HAMMER_ALIST_METAELMS_4K_1LYR
#define HAMMER_CLU_SLAVE_METAELMS	HAMMER_ALIST_METAELMS_4K_2LYR
#define HAMMER_CLU_MAXBYTES		(HAMMER_CLU_MAXBUFFERS * HAMMER_BUFSIZE)
273 
struct hammer_cluster_ondisk {
	struct hammer_fsbuf_head head;	/* standard fsbuf header (HAMMER_FSBUF_CLUSTER) */
	uuid_t	vol_fsid;	/* identify filesystem - sanity check */
	uuid_t	vol_fstype;	/* identify filesystem type - sanity check */

	hammer_tid_t clu_id;	/* unique cluster self identification */
	hammer_tid_t clu_gen;	/* generation number */
	int32_t vol_no;		/* cluster contained in volume (sanity) */
	u_int32_t clu_flags;	/* cluster flags */

	int32_t clu_start;	/* start of data (byte offset) */
	int32_t clu_limit;	/* end of data (byte offset) */
	int32_t clu_no;		/* cluster index in volume (sanity) */
	u_int32_t clu_reserved03;

	u_int32_t clu_reserved04;
	u_int32_t clu_reserved05;
	u_int32_t clu_reserved06;
	u_int32_t clu_reserved07;

	int32_t idx_data;	/* data append point (element no) */
	int32_t idx_index;	/* index append point (element no) */
	int32_t idx_record;	/* record prepend point (element no) */
	u_int32_t idx_reserved03;

	/*
	 * Specify the range of information stored in this cluster as two
	 * btree elements.   These elements match the left and right
	 * boundary elements in the internal B-Tree node of the parent
	 * cluster that points to the root of our cluster.  Because these
	 * are boundary elements, the right boundary is range-NONinclusive.
	 */
	struct hammer_base_elm clu_btree_beg;
	struct hammer_base_elm clu_btree_end;

	/*
	 * The cluster's B-Tree root can change as a side effect of insertion
	 * and deletion operations so store an offset instead of embedding
	 * the root node.  The parent_offset is stale if the generation number
	 * does not match.
	 *
	 * Parent linkages are explicit.
	 */
	int32_t		clu_btree_root;		/* offset of root B-Tree node */
	int32_t		clu_btree_parent_vol_no;	/* parent linkage: volume */
	int32_t		clu_btree_parent_clu_no;	/* parent linkage: cluster index */
	int32_t		clu_btree_parent_offset;	/* parent linkage: element offset */
	hammer_tid_t	clu_btree_parent_clu_gen;	/* parent linkage: generation */

	u_int64_t synchronized_rec_id;	/* XXX */

	struct hammer_almeta	clu_master_meta[HAMMER_CLU_MASTER_METAELMS];
	struct hammer_almeta	clu_btree_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta	clu_record_meta[HAMMER_CLU_SLAVE_METAELMS];
	struct hammer_almeta	clu_mdata_meta[HAMMER_CLU_SLAVE_METAELMS];
};

typedef struct hammer_cluster_ondisk *hammer_cluster_ondisk_t;
332 
333 /*
334  * HAMMER records are 96 byte entities encoded into 16K filesystem buffers.
 * Each record has a 64 byte header and a 32 byte extension.  169 records
 * fit into each buffer.  Storage is managed by the buffer's A-List.
337  *
338  * Each record may have an explicit data reference to a block of data up
339  * to 2^31-1 bytes in size within the current cluster.  Note that multiple
340  * records may share the same or overlapping data references.
341  */
342 
/*
 * All HAMMER records have a common 64-byte base and a 32-byte extension.
 *
 * Many HAMMER record types reference out-of-band data within the cluster.
 * This data can also be stored in-band in the record itself if it is small
 * enough.  Either way, (data_offset, data_len) points to it.
 *
 * Key comparison order:  obj_id, rec_type, key, create_tid
 */
struct hammer_base_record {
	/*
	 * 40 byte base element info - same base as used in B-Tree internal
	 * and leaf node element arrays.
	 *
	 * Fields: obj_id, key, create_tid, delete_tid, rec_type, obj_type,
	 *	   reserved07.
	 */
	struct hammer_base_elm base; /* 00 base element info */

	int32_t data_len;	/* 28 size of data (remainder zero-fill) */
	u_int32_t data_crc;	/* 2C data sanity check */
	u_int64_t rec_id;	/* 30 record id (iterator for recovery) */
	int32_t	  data_offset;	/* 38 cluster-relative data reference or 0 */
	u_int32_t reserved07;	/* 3C */
				/* 40 - structure is 64 bytes */
};
369 
/*
 * Record types are fairly straightforward.  The B-Tree includes the record
 * type in its index sort.
 *
 * In particular please note that it is possible to create a pseudo-
 * filesystem within a HAMMER filesystem by creating a special object
 * type within a directory.  Pseudo-filesystems are used as replication
 * targets and even though they are built within a HAMMER filesystem they
 * get their own obj_id space (and thus can serve as a replication target)
 * and look like a mount point to the system.
 *
 * Inter-cluster records are special-cased in the B-Tree.  These records
 * are referenced from a B-Tree INTERNAL node, NOT A LEAF.  This means
 * that the element in the B-Tree node is actually a boundary element whose
 * base element fields, including rec_type, reflect the boundary, NOT
 * the inter-cluster record type.
 *
 * HAMMER_RECTYPE_CLUSTER - only set in the actual inter-cluster record,
 * not set in the left or right boundary elements around the inter-cluster
 * reference of an internal node in the B-Tree (because doing so would
 * interfere with the boundary tests).
 */
#define HAMMER_RECTYPE_UNKNOWN		0
#define HAMMER_RECTYPE_LOWEST		1	/* lowest record type avail */
#define HAMMER_RECTYPE_INODE		1	/* inode in obj_id space */
#define HAMMER_RECTYPE_PSEUDO_INODE	2	/* pseudo filesystem */
#define HAMMER_RECTYPE_CLUSTER		3	/* inter-cluster reference */
#define HAMMER_RECTYPE_DATA		0x10
#define HAMMER_RECTYPE_DIRENTRY		0x11
#define HAMMER_RECTYPE_DB		0x12
#define HAMMER_RECTYPE_EXT		0x13	/* ext attributes */

#define HAMMER_OBJTYPE_UNKNOWN		0	/* (never exists on-disk) */
#define HAMMER_OBJTYPE_DIRECTORY	1
#define HAMMER_OBJTYPE_REGFILE		2
#define HAMMER_OBJTYPE_DBFILE		3
#define HAMMER_OBJTYPE_FIFO		4
#define HAMMER_OBJTYPE_CDEV		5	/* character device */
#define HAMMER_OBJTYPE_BDEV		6	/* block device */
#define HAMMER_OBJTYPE_SOFTLINK		7
#define HAMMER_OBJTYPE_PSEUDOFS		8	/* pseudo filesystem obj */
411 
/*
 * Generic full-sized record
 *
 * The filler pads the 64-byte base out to the full 96-byte record size.
 */
struct hammer_generic_record {
	struct hammer_base_record base;
	char filler[32];
};
419 
/*
 * A HAMMER inode record.
 *
 * This forms the basis for a filesystem object.  obj_id is the inode number,
 * key1 represents the pseudo filesystem id for security partitioning
 * (preventing cross-links and/or restricting a NFS export and specifying the
 * security policy), and key2 represents the data retention policy id.
 *
 * Inode numbers are 64 bit quantities which uniquely identify a filesystem
 * object for the ENTIRE life of the filesystem, even after the object has
 * been deleted.  For all intents and purposes inode numbers are simply
 * allocated by incrementing a sequence space.
 *
 * There is an important distinction between the data stored in the inode
 * record and the record's data reference.  The record references a
 * hammer_inode_data structure but the filesystem object size and hard link
 * count is stored in the inode record itself.  This allows multiple inodes
 * to share the same hammer_inode_data structure.  This is possible because
 * any modifications will lay out new data.  The HAMMER implementation need
 * not use the data-sharing ability when laying down new records.
 *
 * A HAMMER inode is subject to the same historical storage requirements
 * as any other record.  In particular any change in file size or hard link
 * count will lay down a new inode record when the filesystem is synced to
 * disk.  This can lead to a lot of junk records which get cleaned up by
 * the data retention policy.
 *
 * The ino_atime and ino_mtime fields are a special case.  Modifications to
 * these fields do NOT lay down a new record by default, though the values
 * are effectively frozen for snapshots which access historical versions
 * of the inode record due to other operations.  This means that atime will
 * not necessarily be accurate in snapshots, backups, or mirrors.  mtime
 * will be accurate in backups and mirrors since it can be regenerated from
 * the mirroring stream.
 *
 * Because nlinks is historically retained the hardlink count will be
 * accurate when accessing a HAMMER filesystem snapshot.
 */
struct hammer_inode_record {
	struct hammer_base_record base;
	u_int64_t ino_atime;	/* last access time (not historical) */
	u_int64_t ino_mtime;	/* last modified time (not historical) */
	u_int64_t ino_size;	/* filesystem object size */
	u_int64_t ino_nlinks;	/* hard links */
};
465 
/*
 * Data records specify the entire contents of a regular file object,
 * including attributes.  Small amounts of data can theoretically be
 * embedded in the record itself but the use of this ability versus using
 * an out-of-band data reference depends on the implementation.
 */
struct hammer_data_record {
	struct hammer_base_record base;
	char filler[32];	/* pads the base out to the full record size */
};
476 
/*
 * A directory entry specifies the HAMMER filesystem object id, a copy of
 * the file type, and file name (either embedded or as out-of-band data).
 * If the file name is short enough to fit into den_name[] (including a
 * terminating nul) then it will be embedded in the record, otherwise it
 * is stored out-of-band.  The base record's data reference always points
 * to the nul-terminated filename regardless.
 *
 * Directory entries are indexed with a 128 bit namekey rather than an
 * offset.  A portion of the namekey is an iterator or randomizer to deal
 * with collisions.
 *
 * Note that base.base.obj_type holds the filesystem object type of obj_id,
 * e.g. a den_type equivalent.
 */
struct hammer_entry_record {
	struct hammer_base_record base;
	u_int64_t obj_id;		/* object being referenced */
	u_int64_t reserved01;
	char	  den_name[16];		/* short file names fit in record */
};
499 
/*
 * Hammer rollup record: every record variant overlaid in a single union
 * so records can be examined generically via the common base.
 */
union hammer_record_ondisk {
	struct hammer_base_record	base;
	struct hammer_generic_record	generic;
	struct hammer_inode_record	inode;
	struct hammer_data_record	data;
	struct hammer_entry_record	entry;
};

typedef union hammer_record_ondisk *hammer_record_ondisk_t;
512 
/*
 * Filesystem buffer for records
 *
 * HAMMER_RECORD_NODES is the number of whole records that fit in a 16K
 * buffer after the standard fsbuf header.
 */
#define HAMMER_RECORD_NODES	\
	((HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head)) / \
	sizeof(union hammer_record_ondisk))

struct hammer_fsbuf_recs {
	struct hammer_fsbuf_head	head;	/* standard fsbuf header (HAMMER_FSBUF_RECORDS) */
	char				unused[32];	/* absorbs the remainder so recs[] fills the buffer */
	union hammer_record_ondisk	recs[HAMMER_RECORD_NODES];
};
525 
/*
 * Filesystem buffer for piecemeal data.  Note that this does not apply
 * to dedicated pure-data buffers as such buffers do not have a header.
 */

#define HAMMER_DATA_SIZE	(HAMMER_BUFSIZE - sizeof(struct hammer_fsbuf_head))
#define HAMMER_DATA_BLKSIZE	64
#define HAMMER_DATA_BLKMASK	(HAMMER_DATA_BLKSIZE-1)
#define HAMMER_DATA_NODES	(HAMMER_DATA_SIZE / HAMMER_DATA_BLKSIZE)

struct hammer_fsbuf_data {
	struct hammer_fsbuf_head head;	/* standard fsbuf header (HAMMER_FSBUF_DATA) */
	u_int8_t		data[HAMMER_DATA_NODES][HAMMER_DATA_BLKSIZE];	/* 64-byte allocation blocks */
};
540 
/*
 * Filesystem buffer rollup: any headered 16K buffer viewed by type.
 * The common head (buf_type) selects which view is valid.
 */
union hammer_fsbuf_ondisk {
	struct hammer_fsbuf_head	head;	/* common header */
	struct hammer_fsbuf_btree	btree;	/* HAMMER_FSBUF_BTREE */
	struct hammer_fsbuf_recs	record;	/* HAMMER_FSBUF_RECORDS */
	struct hammer_fsbuf_data	data;	/* HAMMER_FSBUF_DATA */
};

typedef union hammer_fsbuf_ondisk *hammer_fsbuf_ondisk_t;
552 
/*
 * HAMMER UNIX Attribute data
 *
 * The data reference in a HAMMER inode record points to this structure.  Any
 * modifications to the contents of this structure will result in a record
 * replacement operation.
 *
 * state_sum allows a filesystem object to be validated to a degree by
 * generating a checksum of all of its pieces (in no particular order) and
 * checking it against this field.
 *
 * short_data_off allows a small amount of data to be embedded in the
 * hammer_inode_data structure.  HAMMER typically uses this to represent
 * up to 64 bytes of data, or to hold symlinks.  Remember that allocations
 * are in powers of 2 so 64, 192, 448, or 960 bytes of embedded data is
 * supported (64+64, 64+192, 64+448, 64+960).
 *
 * parent_obj_id is only valid for directories (which cannot be hard-linked),
 * and specifies the parent directory obj_id.  This field will also be set
 * for non-directory inodes as a recovery aid, but can wind up specifying
 * stale information.  However, since object id's are not reused, the worst
 * that happens is that the recovery code is unable to use it.
 */
struct hammer_inode_data {
	u_int16_t version;	/* inode data version (HAMMER_INODE_DATA_VERSION) */
	u_int16_t mode;		/* basic unix permissions */
	u_int32_t uflags;	/* chflags */
	u_int16_t short_data_off; /* degenerate data case */
	u_int16_t short_data_len;	/* length of embedded short data */
	u_int32_t state_sum;	/* object checksum (see above) */
	u_int64_t ctime;
	u_int64_t parent_obj_id;/* parent directory obj_id */
	uuid_t	uid;
	uuid_t	gid;
	/* XXX device, softlink extension */
};

#define HAMMER_INODE_DATA_VERSION	1
591 
/*
 * Rollup of the various structures embedded as record data.
 */
union hammer_data_ondisk {
	struct hammer_inode_data inode;
};
598 
599