xref: /dragonfly/sys/vfs/hammer/hammer_object.c (revision 0cfebe3d)
1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.34 2008/02/24 19:48:45 dillon Exp $
35  */
36 
37 #include "hammer.h"
38 
39 static int hammer_mem_add(hammer_transaction_t trans, hammer_record_t record);
40 static int hammer_mem_lookup(hammer_cursor_t cursor, hammer_inode_t ip);
41 static int hammer_mem_first(hammer_cursor_t cursor, hammer_inode_t ip);
42 
43 /*
44  * Red-black tree support.
45  */
46 static int
47 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
48 {
49 	if (rec1->rec.base.base.rec_type < rec2->rec.base.base.rec_type)
50 		return(-1);
51 	if (rec1->rec.base.base.rec_type > rec2->rec.base.base.rec_type)
52 		return(1);
53 
54 	if (rec1->rec.base.base.key < rec2->rec.base.base.key)
55 		return(-1);
56 	if (rec1->rec.base.base.key > rec2->rec.base.base.key)
57 		return(1);
58 
59 	if (rec1->rec.base.base.create_tid == 0) {
60 		if (rec2->rec.base.base.create_tid == 0)
61 			return(0);
62 		return(1);
63 	}
64 	if (rec2->rec.base.base.create_tid == 0)
65 		return(-1);
66 
67 	if (rec1->rec.base.base.create_tid < rec2->rec.base.base.create_tid)
68 		return(-1);
69 	if (rec1->rec.base.base.create_tid > rec2->rec.base.base.create_tid)
70 		return(1);
71 	return(0);
72 }
73 
74 static int
75 hammer_rec_compare(hammer_base_elm_t info, hammer_record_t rec)
76 {
77 	if (info->rec_type < rec->rec.base.base.rec_type)
78 		return(-3);
79 	if (info->rec_type > rec->rec.base.base.rec_type)
80 		return(3);
81 
82 	if (info->key < rec->rec.base.base.key)
83 		return(-2);
84 	if (info->key > rec->rec.base.base.key)
85 		return(2);
86 
87 	if (info->create_tid == 0) {
88 		if (rec->rec.base.base.create_tid == 0)
89 			return(0);
90 		return(1);
91 	}
92 	if (rec->rec.base.base.create_tid == 0)
93 		return(-1);
94 	if (info->create_tid < rec->rec.base.base.create_tid)
95 		return(-1);
96 	if (info->create_tid > rec->rec.base.base.create_tid)
97 		return(1);
98 	return(0);
99 }
100 
101 /*
102  * RB_SCAN comparison code for hammer_mem_first().  The argument order
103  * is reversed so the comparison result has to be negated.  key_beg and
104  * key_end are both range-inclusive.
105  *
106  * The creation timestamp can cause hammer_rec_compare() to return -1 or +1.
107  * These do not stop the scan.
108  *
109  * Localized deletions are not cached in-memory.
110  */
111 static
112 int
113 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
114 {
115 	hammer_cursor_t cursor = data;
116 	int r;
117 
118 	r = hammer_rec_compare(&cursor->key_beg, rec);
119 	if (r > 1)
120 		return(-1);
121 	r = hammer_rec_compare(&cursor->key_end, rec);
122 	if (r < -1)
123 		return(1);
124 	return(0);
125 }
126 
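/*
 * Worked example of the comparator bands above: with key_beg.key = 100
 * and key_end.key = 200 (same rec_type), a record with key 50 compares
 * as +2 against key_beg, so hammer_rec_scan_cmp() returns -1 and the
 * scan keeps moving right; a record with key 250 compares as -2 against
 * key_end and the scan stops.  Records differing only in create_tid
 * compare as -1/0/+1 and are always passed through to the callback.
 */
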
127 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
128 RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
129 		    hammer_rec_compare, hammer_base_elm_t);
130 
131 /*
132  * Allocate a record for the caller to finish filling in.  The record is
133  * returned referenced.
134  */
135 hammer_record_t
136 hammer_alloc_mem_record(hammer_inode_t ip)
137 {
138 	hammer_record_t record;
139 
140 	++hammer_count_records;
141 	record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK|M_ZERO);
142 	record->ip = ip;
143 	record->rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
144 	hammer_ref(&record->lock);
145 	return (record);
146 }
147 
148 /*
149  * Release a memory record.  Records marked for deletion are immediately
150  * removed from the RB-Tree but otherwise left intact until the last ref
151  * goes away.
152  */
153 void
154 hammer_rel_mem_record(struct hammer_record *record)
155 {
156 	hammer_unref(&record->lock);
157 
158 	if (record->flags & HAMMER_RECF_DELETED) {
159 		if (record->flags & HAMMER_RECF_ONRBTREE) {
160 			RB_REMOVE(hammer_rec_rb_tree, &record->ip->rec_tree,
161 				  record);
162 			record->flags &= ~HAMMER_RECF_ONRBTREE;
163 		}
164 		if (record->lock.refs == 0) {
165 			if (record->flags & HAMMER_RECF_ALLOCDATA) {
166 				--hammer_count_record_datas;
167 				kfree(record->data, M_HAMMER);
168 				record->flags &= ~HAMMER_RECF_ALLOCDATA;
169 			}
170 			record->data = NULL;
171 			--hammer_count_records;
172 			kfree(record, M_HAMMER);
173 			return;
174 		}
175 	}
176 
177 	/*
178 	 * If someone wanted the record, wake them up.
179 	 */
180 	if (record->flags & HAMMER_RECF_WANTED) {
181 		record->flags &= ~HAMMER_RECF_WANTED;
182 		wakeup(record);
183 	}
184 }
185 
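/*
 * Hypothetical caller-side sketch of the reference protocol above;
 * records come back referenced from hammer_alloc_mem_record() and each
 * path must balance the reference with hammer_rel_mem_record():
 *
 *	record = hammer_alloc_mem_record(ip);
 *	...fill in record->rec (and record->data)...
 *	if (error) {
 *		record->flags |= HAMMER_RECF_DELETED;
 *		hammer_rel_mem_record(record);	(frees once refs reach 0)
 *	}
 */
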
186 /*
187  * Lookup an in-memory record given the key specified in the cursor.  Works
188  * just like hammer_btree_lookup() but operates on an inode's in-memory
189  * record list.
190  *
191  * The lookup must fail if the record is marked for deferred deletion.
192  */
193 static
194 int
195 hammer_mem_lookup(hammer_cursor_t cursor, hammer_inode_t ip)
196 {
197 	int error;
198 
199 	if (cursor->iprec) {
200 		hammer_rel_mem_record(cursor->iprec);
201 		cursor->iprec = NULL;
202 	}
203 	if (cursor->ip) {
204 		hammer_rec_rb_tree_scan_info_done(&cursor->scan,
205 						  &cursor->ip->rec_tree);
206 	}
207 	cursor->ip = ip;
208 	hammer_rec_rb_tree_scan_info_link(&cursor->scan, &ip->rec_tree);
209 	cursor->scan.node = NULL;
210 	cursor->iprec = hammer_rec_rb_tree_RB_LOOKUP_INFO(
211 				&ip->rec_tree, &cursor->key_beg);
212 	if (cursor->iprec == NULL) {
213 		error = ENOENT;
214 	} else {
215 		hammer_ref(&cursor->iprec->lock);
216 		error = 0;
217 	}
218 	return(error);
219 }
220 
221 /*
222  * hammer_mem_first() - locate the first in-memory record matching the
223  * cursor.
224  *
225  * The RB_SCAN function we use is designed as a callback.  We terminate it
226  * (return -1) as soon as we get a match.
227  */
228 static
229 int
230 hammer_rec_scan_callback(hammer_record_t rec, void *data)
231 {
232 	hammer_cursor_t cursor = data;
233 
234 	/*
235 	 * We terminate on success, so this should be NULL on entry.
236 	 */
237 	KKASSERT(cursor->iprec == NULL);
238 
239 	/*
240 	 * Skip if the record was marked deleted
241 	 */
242 	if (rec->flags & HAMMER_RECF_DELETED)
243 		return(0);
244 
245 	/*
246 	 * Skip if not visible due to our as-of TID
247 	 */
248 	if (cursor->flags & HAMMER_CURSOR_ASOF) {
249 		if (cursor->asof < rec->rec.base.base.create_tid)
250 			return(0);
251 		if (rec->rec.base.base.delete_tid &&
252 		    cursor->asof >= rec->rec.base.base.delete_tid) {
253 			return(0);
254 		}
255 	}
256 
257 	/*
258 	 * Block if currently being synchronized to disk, otherwise we
259 	 * may get a duplicate.  Wake up the syncer if it's stuck on
260 	 * the record.
261 	 */
262 	hammer_ref(&rec->lock);
263 	++rec->blocked;
264 	while (rec->flags & HAMMER_RECF_SYNCING) {
265 		rec->flags |= HAMMER_RECF_WANTED;
266 		tsleep(rec, 0, "hmrrc2", 0);
267 	}
268 	--rec->blocked;
269 
270 	/*
271 	 * The record may have been deleted while we were blocked.
272 	 */
273 	if (rec->flags & HAMMER_RECF_DELETED) {
274 		hammer_rel_mem_record(rec);	/* drop the ref we acquired above */
275 		return(0);
276 	}
277 
278 	/*
279 	 * Set the matching record and stop the scan.
280 	 */
281 	cursor->iprec = rec;
282 	return(-1);
283 }
284 
285 static
286 int
287 hammer_mem_first(hammer_cursor_t cursor, hammer_inode_t ip)
288 {
289 	if (cursor->iprec) {
290 		hammer_rel_mem_record(cursor->iprec);
291 		cursor->iprec = NULL;
292 	}
293 	if (cursor->ip) {
294 		hammer_rec_rb_tree_scan_info_done(&cursor->scan,
295 						  &cursor->ip->rec_tree);
296 	}
297 	cursor->ip = ip;
298 	hammer_rec_rb_tree_scan_info_link(&cursor->scan, &ip->rec_tree);
299 
300 	cursor->scan.node = NULL;
301 	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
302 				   hammer_rec_scan_callback, cursor);
303 
304 	/*
305 	 * Adjust scan.node and keep it linked into the RB-tree so we can
306 	 * hold the cursor through third party modifications of the RB-tree.
307 	 */
308 	if (cursor->iprec) {
309 		cursor->scan.node = hammer_rec_rb_tree_RB_NEXT(cursor->iprec);
310 		return(0);
311 	}
312 	return(ENOENT);
313 }
314 
315 void
316 hammer_mem_done(hammer_cursor_t cursor)
317 {
318 	if (cursor->ip) {
319 		hammer_rec_rb_tree_scan_info_done(&cursor->scan,
320 						  &cursor->ip->rec_tree);
321 		cursor->ip = NULL;
322 	}
323 	if (cursor->iprec) {
324 		hammer_rel_mem_record(cursor->iprec);
325 		cursor->iprec = NULL;
326 	}
327 }
328 
329 /************************************************************************
330  *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
331  ************************************************************************
332  *
333  * These functions manipulate in-memory records.  Such records typically
334  * exist prior to being committed to disk or indexed via the on-disk B-Tree.
335  */
336 
337 /*
338  * Add a directory entry (dip,ncp) which references inode (ip).
339  *
340  * Note that the low 32 bits of the namekey are set temporarily to create
341  * a unique in-memory record, and may be modified a second time when the
342  * record is synchronized to disk.  In particular, the low 32 bits cannot be
343  * all 0's when synching to disk, which is not handled here.
344  */
345 int
346 hammer_ip_add_directory(struct hammer_transaction *trans,
347 		     struct hammer_inode *dip, struct namecache *ncp,
348 		     struct hammer_inode *ip)
349 {
350 	hammer_record_t record;
351 	int error;
352 	int bytes;
353 
354 	record = hammer_alloc_mem_record(dip);
355 
356 	bytes = ncp->nc_nlen;	/* NOTE: terminating \0 is NOT included */
357 	if (++trans->hmp->namekey_iterator == 0)
358 		++trans->hmp->namekey_iterator;
359 
360 	record->rec.entry.base.base.obj_id = dip->obj_id;
361 	record->rec.entry.base.base.key =
362 		hammer_directory_namekey(ncp->nc_name, bytes);
363 	record->rec.entry.base.base.key += trans->hmp->namekey_iterator;
364 	record->rec.entry.base.base.create_tid = trans->tid;
365 	record->rec.entry.base.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
366 	record->rec.entry.base.base.obj_type = ip->ino_rec.base.base.obj_type;
367 	record->rec.entry.obj_id = ip->obj_id;
368 	record->data = (void *)ncp->nc_name;
369 	record->rec.entry.base.data_len = bytes;
370 	++ip->ino_rec.ino_nlinks;
371 	hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
372 	/* NOTE: copies record->data */
373 	error = hammer_mem_add(trans, record);
374 	return(error);
375 }
376 
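/*
 * Sketch of the directory key layout assumed above: the upper bits of
 * the key come from hammer_directory_namekey() (a hash of the name) and
 * the low 32 bits are filled in from namekey_iterator to make the
 * in-memory record unique:
 *
 *	key  = hammer_directory_namekey(ncp->nc_name, bytes);
 *	key += hmp->namekey_iterator;	(iterator skips 0)
 *
 * Because the iterator skips 0, the low 32 bits are never all 0's,
 * which matters again when the record is synced to disk.
 */
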
377 /*
378  * Delete the directory entry and update the inode link count.  The
379  * cursor must be positioned on the directory entry record being deleted.
380  *
381  * NOTE: HAMMER_CURSOR_DELETE may not have been set.  XXX remove flag.
382  *
383  * This function can return EDEADLK requiring the caller to terminate
384  * the cursor and retry.
385  */
386 int
387 hammer_ip_del_directory(struct hammer_transaction *trans,
388 		     hammer_cursor_t cursor, struct hammer_inode *dip,
389 		     struct hammer_inode *ip)
390 {
391 	int error;
392 
393 	error = hammer_ip_delete_record(cursor, trans->tid);
394 
395 	/*
396 	 * One less link.  The file may still be open in the OS even after
397 	 * all links have gone away so we only try to sync if the OS has
398 	 * no references and nlinks falls to 0.
399 	 *
400 	 * We have to terminate the cursor before syncing the inode to
401 	 * avoid deadlocking against ourselves.
402 	 */
403 	if (error == 0) {
404 		--ip->ino_rec.ino_nlinks;
405 		hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
406 		if (ip->ino_rec.ino_nlinks == 0 &&
407 		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
408 			hammer_done_cursor(cursor);
409 			hammer_sync_inode(ip, MNT_NOWAIT, 1);
410 		}
411 
412 	}
413 	return(error);
414 }
415 
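/*
 * Minimal sketch of the EDEADLK retry protocol mentioned above, as a
 * hypothetical caller might structure it (hammer_ip_sync_data() below
 * uses the same pattern internally):
 *
 *	retry:
 *		error = hammer_init_cursor_hmp(&cursor, &ip->cache[0],
 *					       ip->hmp);
 *		...position the cursor and perform the deletion...
 *		hammer_done_cursor(&cursor);
 *		if (error == EDEADLK)
 *			goto retry;
 */
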
416 /*
417  * Add a record to an inode.
418  *
419  * The caller must allocate the record with hammer_alloc_mem_record(ip) and
420  * initialize the following additional fields:
421  *
422  * record->rec.entry.base.base.key
423  * record->rec.entry.base.base.rec_type
424  * record->rec.entry.base.data_len
425  * record->data		(a copy will be kmalloc'd if it cannot be embedded)
426  */
427 int
428 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
429 {
430 	hammer_inode_t ip = record->ip;
431 	int error;
432 
433 	record->rec.base.base.obj_id = ip->obj_id;
434 	record->rec.base.base.create_tid = trans->tid;
435 	record->rec.base.base.obj_type = ip->ino_rec.base.base.obj_type;
436 
437 	hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
438 	/* NOTE: copies record->data */
439 	error = hammer_mem_add(trans, record);
440 	return(error);
441 }
442 
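/*
 * Hypothetical usage sketch for hammer_ip_add_record(); the field names
 * follow the comment above and the values are placeholders:
 *
 *	record = hammer_alloc_mem_record(ip);
 *	record->rec.base.base.key = key;
 *	record->rec.base.base.rec_type = HAMMER_RECTYPE_DATA;
 *	record->rec.base.data_len = bytes;
 *	record->data = data;		(copied by hammer_mem_add())
 *	error = hammer_ip_add_record(trans, record);
 */
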
443 /*
444  * Sync data from a buffer cache buffer (typically) to the filesystem.  This
445  * is called via the strategy code from a cached data source.  This code
446  * is responsible for actually writing a data record out to the disk.
447  *
448  * This can only occur non-historically (i.e. 'current' data only).
449  *
450  * The file offset must be HAMMER_BUFSIZE aligned but the data length
451  * can be truncated.  The record (currently) always represents a BUFSIZE
452  * swath of space whether the data is truncated or not.
453  */
454 int
455 hammer_ip_sync_data(hammer_transaction_t trans, hammer_inode_t ip,
456 		       int64_t offset, void *data, int bytes)
457 {
458 	struct hammer_cursor cursor;
459 	hammer_record_ondisk_t rec;
460 	union hammer_btree_elm elm;
461 	hammer_off_t rec_offset;
462 	void *bdata;
463 	int error;
464 
465 	KKASSERT((offset & HAMMER_BUFMASK) == 0);
466 retry:
467 	error = hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
468 	if (error)
469 		return(error);
470 	cursor.key_beg.obj_id = ip->obj_id;
471 	cursor.key_beg.key = offset + bytes;
472 	cursor.key_beg.create_tid = trans->tid;
473 	cursor.key_beg.delete_tid = 0;
474 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
475 	cursor.asof = trans->tid;
476 	cursor.flags |= HAMMER_CURSOR_INSERT;
477 
478 	/*
479 	 * Issue a lookup to position the cursor.
480 	 */
481 	error = hammer_btree_lookup(&cursor);
482 	if (error == 0) {
483 		kprintf("hammer_ip_sync_data: duplicate data at "
484 			"(%lld,%d) tid %016llx\n",
485 			offset, bytes, trans->tid);
486 		hammer_print_btree_elm(&cursor.node->ondisk->elms[cursor.index],
487 				       HAMMER_BTREE_TYPE_LEAF, cursor.index);
488 		panic("Duplicate data");
489 		error = EIO;
490 	}
491 	if (error != ENOENT)
492 		goto done;
493 
494 	/*
495 	 * Allocate record and data space.  HAMMER_RECTYPE_DATA records
496 	 * can cross buffer boundaries so we may have to split our bcopy.
497 	 */
498 	rec = hammer_alloc_record(ip->hmp, &rec_offset, HAMMER_RECTYPE_DATA,
499 				  &cursor.record_buffer,
500 				  bytes, &bdata,
501 				  &cursor.data_buffer, &error);
502 	if (rec == NULL)
503 		goto done;
504 	if (hammer_debug_general & 0x1000)
505 		kprintf("OOB RECORD DATA REC %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, rec->base.data_len);
506 
507 	/*
508 	 * Fill everything in and insert our B-Tree node.
509 	 *
510 	 * NOTE: hammer_alloc_record() has already marked the related
511 	 * buffers as modified.  If we do it again we will generate
512 	 * unnecessary undo elements.
513 	 */
514 	rec->base.base.btype = HAMMER_BTREE_TYPE_RECORD;
515 	rec->base.base.obj_id = ip->obj_id;
516 	rec->base.base.key = offset + bytes;
517 	rec->base.base.create_tid = trans->tid;
518 	rec->base.base.delete_tid = 0;
519 	rec->base.base.rec_type = HAMMER_RECTYPE_DATA;
520 	rec->base.data_crc = crc32(data, bytes);
521 	KKASSERT(rec->base.data_len == bytes);
522 
523 	bcopy(data, bdata, bytes);
524 
525 	elm.leaf.base = rec->base.base;
526 	elm.leaf.rec_offset = rec_offset;
527 	elm.leaf.data_offset = rec->base.data_off;
528 	elm.leaf.data_len = bytes;
529 	elm.leaf.data_crc = rec->base.data_crc;
530 
531 	/*
532 	 * Data records can wind up on-disk before the inode itself is
533 	 * on-disk.  One must assume data records may be on-disk if either
534 	 * HAMMER_INODE_DONDISK or HAMMER_INODE_ONDISK is set.
535 	 */
536 	ip->flags |= HAMMER_INODE_DONDISK;
537 
538 	error = hammer_btree_insert(&cursor, &elm);
539 	if (error == 0)
540 		goto done;
541 
542 	hammer_blockmap_free(ip->hmp, rec_offset, HAMMER_RECORD_SIZE);
543 done:
544 	hammer_done_cursor(&cursor);
545 	if (error == EDEADLK)
546 		goto retry;
547 	return(error);
548 }
549 
550 /*
551  * Sync an in-memory record to the disk.  This is typically called via fsync
552  * from a cached record source.  This code is responsible for actually
553  * writing a record out to the disk.
554  */
555 int
556 hammer_ip_sync_record(hammer_record_t record)
557 {
558 	struct hammer_cursor cursor;
559 	hammer_record_ondisk_t rec;
560 	hammer_mount_t hmp;
561 	union hammer_btree_elm elm;
562 	hammer_off_t rec_offset;
563 	void *bdata;
564 	int error;
565 
566 	hmp = record->ip->hmp;
567 retry:
568 	/*
569 	 * If the record has been deleted or is being synchronized, stop.
570 	 * Interlock with the syncing flag.
571 	 */
572 	if (record->flags & (HAMMER_RECF_DELETED | HAMMER_RECF_SYNCING))
573 		return(0);
574 	record->flags |= HAMMER_RECF_SYNCING;
575 
576 	/*
577 	 * If someone other than us is referencing the record and not
578 	 * blocking waiting for us, we have to wait until they finish.
579 	 *
580 	 * It is possible the record got destroyed while we were blocked.
581 	 */
582 	if (record->lock.refs > record->blocked + 1) {
583 		while (record->lock.refs > record->blocked + 1) {
584 			record->flags |= HAMMER_RECF_WANTED;
585 			tsleep(record, 0, "hmrrc1", 0);
586 		}
587 		if (record->flags & HAMMER_RECF_DELETED) {
588 			record->flags &= ~HAMMER_RECF_SYNCING;
			return(0);
		}
589 	}
590 
591 	/*
592 	 * Get a cursor
593 	 */
594 	error = hammer_init_cursor_hmp(&cursor, &record->ip->cache[0], hmp);
595 	if (error) {
596 		record->flags &= ~HAMMER_RECF_SYNCING;
		return(error);
	}
597 	cursor.key_beg = record->rec.base.base;
598 	cursor.flags |= HAMMER_CURSOR_INSERT;
599 
600 	/*
601 	 * Issue a lookup to position the cursor.  The
602 	 * target key should not exist.  If we are creating a directory entry
603 	 * we may have to iterate the low 32 bits of the key to find an unused
604 	 * key.
605 	 */
606 	for (;;) {
607 		error = hammer_btree_lookup(&cursor);
608 		if (error)
609 			break;
610 		if (record->rec.base.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
611 			kprintf("hammer_ip_sync_record: duplicate rec "
612 				"at (%016llx)\n", record->rec.base.base.key);
613 			Debugger("duplicate record1");
614 			error = EIO;
615 			break;
616 		}
617 		if (++hmp->namekey_iterator == 0)
618 			++hmp->namekey_iterator;
619 		record->rec.base.base.key &= ~(0xFFFFFFFFLL);
620 		record->rec.base.base.key |= hmp->namekey_iterator;
621 		cursor.key_beg.key = record->rec.base.base.key;
622 	}
623 	if (error != ENOENT)
624 		goto done;
625 
626 	/*
627 	 * Mark the record as undergoing synchronization.  Our cursor is
628 	 * holding a locked B-Tree node for the insertion which interlocks
629 	 * anyone trying to access this record.
630 	 *
631 	 * XXX There is still a race present related to iterations.  An
632 	 * iteration may process the record, a sync may occur, and then
633 	 * later process the B-Tree element for the same record.
634 	 *
635 	 * We do not try to synchronize a deleted record.
636 	 */
637 	if (record->flags & HAMMER_RECF_DELETED) {
638 		error = 0;
639 		goto done;
640 	}
641 
642 	/*
643 	 * Allocate the record and data.  The result buffers will be
644 	 * marked as being modified and further calls to
645 	 * hammer_modify_buffer() will result in unneeded UNDO records.
646 	 *
647 	 * Support zero-fill records (data == NULL and data_len != 0)
648 	 */
649 	if (record->data == NULL) {
650 		rec = hammer_alloc_record(hmp, &rec_offset,
651 					  record->rec.base.base.rec_type,
652 					  &cursor.record_buffer,
653 					  0, &bdata,
654 					  NULL, &error);
655 		if (hammer_debug_general & 0x1000)
656 			kprintf("NULL RECORD DATA\n");
657 	} else if (record->flags & HAMMER_RECF_INBAND) {
658 		rec = hammer_alloc_record(hmp, &rec_offset,
659 					  record->rec.base.base.rec_type,
660 					  &cursor.record_buffer,
661 					  record->rec.base.data_len, &bdata,
662 					  NULL, &error);
663 			kprintf("INBAND RECORD REC %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, record->rec.base.data_len);
664 			kprintf("INBAND RECORD DATA %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, record->rec.base.data_len);
665 	} else {
666 		rec = hammer_alloc_record(hmp, &rec_offset,
667 					  record->rec.base.base.rec_type,
668 					  &cursor.record_buffer,
669 					  record->rec.base.data_len, &bdata,
670 					  &cursor.data_buffer, &error);
671 		if (hammer_debug_general & 0x1000)
672 			kprintf("OOB RECORD DATA REC %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, record->rec.base.data_len);
673 	}
674 
675 	if (rec == NULL)
676 		goto done;
677 
678 	/*
679 	 * Fill in the remaining fields and insert our B-Tree node.
680 	 */
681 	rec->base.base = record->rec.base.base;
682 	bcopy(&record->rec.base + 1, &rec->base + 1,
683 	      HAMMER_RECORD_SIZE - sizeof(record->rec.base));
684 
685 	/*
686 	 * Copy the data and deal with zero-fill support.
687 	 */
688 	if (record->data) {
689 		rec->base.data_crc = crc32(record->data, rec->base.data_len);
690 		bcopy(record->data, bdata, rec->base.data_len);
691 	} else {
692 		rec->base.data_len = record->rec.base.data_len;
693 	}
694 
695 	elm.leaf.base = record->rec.base.base;
696 	elm.leaf.rec_offset = rec_offset;
697 	elm.leaf.data_offset = rec->base.data_off;
698 	elm.leaf.data_len = rec->base.data_len;
699 	elm.leaf.data_crc = rec->base.data_crc;
700 
701 	error = hammer_btree_insert(&cursor, &elm);
702 
703 	/*
704 	 * Clean up on success, or fall through on error.
705 	 */
706 	if (error == 0) {
707 		record->flags |= HAMMER_RECF_DELETED;
708 		goto done;
709 	}
710 
711 	/*
712 	 * Try to unwind the allocation
713 	 */
714 	hammer_blockmap_free(hmp, rec_offset, HAMMER_RECORD_SIZE);
715 done:
716 	record->flags &= ~HAMMER_RECF_SYNCING;
717 	hammer_done_cursor(&cursor);
718 	if (error == EDEADLK)
719 		goto retry;
720 	return(error);
721 }
722 
723 /*
724  * Add the record to the inode's rec_tree.  The low 32 bits of a directory
725  * entry's key is used to deal with hash collisions in the upper 32 bits.
726  * A unique 64 bit key is generated in-memory and may be regenerated a
727  * second time when the directory record is flushed to the on-disk B-Tree.
728  *
729  * A referenced record is passed to this function.  This function
730  * eats the reference.  If an error occurs the record will be deleted.
731  *
732  * A copy of the temporary record->data pointer provided by the caller
733  * will be made.
734  */
735 static
736 int
737 hammer_mem_add(struct hammer_transaction *trans, hammer_record_t record)
738 {
739 	void *data;
740 	int bytes;
741 	int reclen;
742 
743 	/*
744 	 * Make a private copy of record->data
745 	 */
746 	if (record->data) {
747 		/*
748 		 * Try to embed the data in extra space in the record
749 		 * union, otherwise allocate a copy.
750 		 */
751 		bytes = record->rec.base.data_len;
752 		switch(record->rec.base.base.rec_type) {
753 		case HAMMER_RECTYPE_DIRENTRY:
754 			reclen = offsetof(struct hammer_entry_record, name[0]);
755 			break;
756 		case HAMMER_RECTYPE_DATA:
757 			reclen = offsetof(struct hammer_data_record, data[0]);
758 			break;
759 		default:
760 			reclen = sizeof(record->rec);
761 			break;
762 		}
763 		if (reclen + bytes <= HAMMER_RECORD_SIZE) {
764 			bcopy(record->data, (char *)&record->rec + reclen,
765 			      bytes);
766 			record->data = (void *)((char *)&record->rec + reclen);
767 			record->flags |= HAMMER_RECF_INBAND;
768 		} else {
769 			++hammer_count_record_datas;
770 			data = kmalloc(bytes, M_HAMMER, M_WAITOK);
771 			record->flags |= HAMMER_RECF_ALLOCDATA;
772 			bcopy(record->data, data, bytes);
773 			record->data = data;
774 		}
775 	}
776 
777 	/*
778 	 * Insert into the RB tree, find an unused iterator if this is
779 	 * a directory entry.
780 	 */
781 	while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
782 		if (record->rec.base.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
783 			record->flags |= HAMMER_RECF_DELETED;
784 			hammer_rel_mem_record(record);
785 			return (EEXIST);
786 		}
787 		if (++trans->hmp->namekey_iterator == 0)
788 			++trans->hmp->namekey_iterator;
789 		record->rec.base.base.key &= ~(0xFFFFFFFFLL);
790 		record->rec.base.base.key |= trans->hmp->namekey_iterator;
791 	}
792 	record->flags |= HAMMER_RECF_ONRBTREE;
793 	hammer_modify_inode(trans, record->ip, HAMMER_INODE_XDIRTY);
794 	hammer_rel_mem_record(record);
795 	return(0);
796 }
797 
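/*
 * Worked example of the in-band test in hammer_mem_add(): a directory
 * entry embeds its name when
 * offsetof(struct hammer_entry_record, name[0]) + nc_nlen fits within
 * HAMMER_RECORD_SIZE (HAMMER_RECF_INBAND); larger payloads get a
 * kmalloc'd copy (HAMMER_RECF_ALLOCDATA) which hammer_rel_mem_record()
 * frees when the last reference goes away.
 */
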
798 /************************************************************************
799  *		     HAMMER INODE MERGED-RECORD FUNCTIONS		*
800  ************************************************************************
801  *
802  * These functions augment the B-Tree scanning functions in hammer_btree.c
803  * by merging in-memory records with on-disk records.
804  */
805 
806 /*
807  * Locate a particular record either in-memory or on-disk.
808  *
809  * NOTE: This is basically a standalone routine, hammer_ip_next() may
810  * NOT be called to iterate results.
811  */
812 int
813 hammer_ip_lookup(hammer_cursor_t cursor, struct hammer_inode *ip)
814 {
815 	int error;
816 
817 	/*
818 	 * If the element is in-memory return it without searching the
819 	 * on-disk B-Tree
820 	 */
821 	error = hammer_mem_lookup(cursor, ip);
822 	if (error == 0) {
823 		cursor->record = &cursor->iprec->rec;
824 		return(error);
825 	}
826 	if (error != ENOENT)
827 		return(error);
828 
829 	/*
830 	 * If the inode has on-disk components search the on-disk B-Tree.
831 	 */
832 	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
833 		return(error);
834 	error = hammer_btree_lookup(cursor);
835 	if (error == 0)
836 		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
837 	return(error);
838 }
839 
840 /*
841  * Locate the first record within the cursor's key_beg/key_end range,
842  * restricted to a particular inode.  0 is returned on success, ENOENT
843  * if no records matched the requested range, or some other error.
844  *
845  * When 0 is returned hammer_ip_next() may be used to iterate additional
846  * records within the requested range.
847  *
848  * This function can return EDEADLK, requiring the caller to terminate
849  * the cursor and try again.
850  */
851 int
852 hammer_ip_first(hammer_cursor_t cursor, struct hammer_inode *ip)
853 {
854 	int error;
855 
856 	/*
857 	 * Clean up fields and setup for merged scan
858 	 */
859 	cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
860 	cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
861 	cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
862 	if (cursor->iprec) {
863 		hammer_rel_mem_record(cursor->iprec);
864 		cursor->iprec = NULL;
865 	}
866 
867 	/*
868 	 * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
869 	 * exact lookup so if we get ENOENT we have to call the iterate
870 	 * function to validate the first record after the begin key.
871 	 *
872 	 * The ATEDISK flag is used by hammer_btree_iterate to determine
873 	 * whether it must index forwards or not.  It is also used here
874 	 * to select the next record from in-memory or on-disk.
875 	 *
876 	 * EDEADLK can only occur if the lookup hit an empty internal
877 	 * element and couldn't delete it.  Since this could only occur
878 	 * in-range, we can just iterate from the failure point.
879 	 */
880 	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
881 		error = hammer_btree_lookup(cursor);
882 		if (error == ENOENT || error == EDEADLK) {
883 			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
884 			error = hammer_btree_iterate(cursor);
885 		}
886 		if (error && error != ENOENT)
887 			return(error);
888 		if (error == 0) {
889 			cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
890 			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
891 		} else {
892 			cursor->flags |= HAMMER_CURSOR_ATEDISK;
893 		}
894 	}
895 
896 	/*
897 	 * Search the in-memory record list (Red-Black tree).  Unlike the
898 	 * B-Tree search, mem_first checks for records in the range.
899 	 */
900 	error = hammer_mem_first(cursor, ip);
901 	if (error && error != ENOENT)
902 		return(error);
903 	if (error == 0) {
904 		cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
905 		cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
906 	}
907 
908 	/*
909 	 * This will return the first matching record.
910 	 */
911 	return(hammer_ip_next(cursor));
912 }
913 
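/*
 * Typical iteration sketch combining hammer_ip_first() and
 * hammer_ip_next(), mirroring the loops in hammer_ip_delete_range()
 * below:
 *
 *	error = hammer_ip_first(&cursor, ip);
 *	while (error == 0) {
 *		rec = cursor.record;
 *		...process rec...
 *		error = hammer_ip_next(&cursor);
 *	}
 *	hammer_done_cursor(&cursor);
 *	if (error == ENOENT)
 *		error = 0;
 */
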
914 /*
915  * Retrieve the next record in a merged iteration within the bounds of the
916  * cursor.  This call may be made multiple times after the cursor has been
917  * initially searched with hammer_ip_first().
918  *
919  * 0 is returned on success, ENOENT if no further records match the
920  * requested range, or some other error code is returned.
921  */
922 int
923 hammer_ip_next(hammer_cursor_t cursor)
924 {
925 	hammer_btree_elm_t elm;
926 	hammer_record_t rec;
927 	int error;
928 	int r;
929 
930 	/*
931 	 * Load the current on-disk and in-memory record.  If we ate any
932 	 * records we have to get the next one.
933 	 *
934 	 * If we deleted the last on-disk record we had scanned ATEDISK will
935 	 * be clear and DELBTREE will be set, forcing a call to iterate. The
936 	 * fact that ATEDISK is clear causes iterate to re-test the 'current'
937 	 * element.  If ATEDISK is set, iterate will skip the 'current'
938 	 * element.
939 	 *
940 	 * Get the next on-disk record
941 	 */
942 	if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
943 		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
944 			error = hammer_btree_iterate(cursor);
945 			cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
946 			if (error == 0)
947 				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
948 			else
949 				cursor->flags |= HAMMER_CURSOR_DISKEOF |
950 						 HAMMER_CURSOR_ATEDISK;
951 		}
952 	}
953 
954 	/*
955 	 * Get the next in-memory record.  The record can be ripped out
956 	 * of the RB tree so we maintain a scan_info structure to track
957 	 * the next node.
958 	 *
959 	 * hammer_rec_scan_cmp:  Is the record still in our general range,
960 	 *			 (non-inclusive of snapshot exclusions)?
961 	 * hammer_rec_scan_callback: Is the record in our snapshot?
962 	 */
963 	if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
964 		if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
965 			if (cursor->iprec) {
966 				hammer_rel_mem_record(cursor->iprec);
967 				cursor->iprec = NULL;
968 			}
969 			rec = cursor->scan.node;	/* next node */
970 			while (rec) {
971 				if (hammer_rec_scan_cmp(rec, cursor) != 0)
972 					break;
973 				if (hammer_rec_scan_callback(rec, cursor) != 0)
974 					break;
975 				rec = hammer_rec_rb_tree_RB_NEXT(rec);
976 			}
977 			if (cursor->iprec) {
978 				KKASSERT(cursor->iprec == rec);
979 				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
980 				cursor->scan.node =
981 					hammer_rec_rb_tree_RB_NEXT(rec);
982 			} else {
983 				cursor->flags |= HAMMER_CURSOR_MEMEOF;
984 			}
985 		}
986 	}
987 
988 	/*
989 	 * Extract either the disk or memory record depending on their
990 	 * relative position.
991 	 */
992 	error = 0;
993 	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
994 	case 0:
995 		/*
996 		 * Both entries valid
997 		 */
998 		elm = &cursor->node->ondisk->elms[cursor->index];
999 		r = hammer_btree_cmp(&elm->base, &cursor->iprec->rec.base.base);
1000 		if (r < 0) {
1001 			error = hammer_btree_extract(cursor,
1002 						     HAMMER_CURSOR_GET_RECORD);
1003 			cursor->flags |= HAMMER_CURSOR_ATEDISK;
1004 			break;
1005 		}
1006 		/* fall through to the memory entry */
1007 	case HAMMER_CURSOR_ATEDISK:
1008 		/*
1009 		 * Only the memory entry is valid
1010 		 */
1011 		cursor->record = &cursor->iprec->rec;
1012 		cursor->flags |= HAMMER_CURSOR_ATEMEM;
1013 		break;
1014 	case HAMMER_CURSOR_ATEMEM:
1015 		/*
1016 		 * Only the disk entry is valid
1017 		 */
1018 		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
1019 		cursor->flags |= HAMMER_CURSOR_ATEDISK;
1020 		break;
1021 	default:
1022 		/*
1023 		 * Neither entry is valid
1024 		 *
1025 		 * XXX error not set properly
1026 		 */
1027 		cursor->record = NULL;
1028 		error = ENOENT;
1029 		break;
1030 	}
1031 	return(error);
1032 }
1033 
1034 /*
1035  * Resolve the cursor->data pointer for the current cursor position in
1036  * a merged iteration.
1037  */
1038 int
1039 hammer_ip_resolve_data(hammer_cursor_t cursor)
1040 {
1041 	int error;
1042 
1043 	if (cursor->iprec && cursor->record == &cursor->iprec->rec) {
1044 		cursor->data = cursor->iprec->data;
1045 		error = 0;
1046 	} else {
1047 		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1048 	}
1049 	return(error);
1050 }
1051 
1052 int
1053 hammer_ip_resolve_record_and_data(hammer_cursor_t cursor)
1054 {
1055 	int error;
1056 
1057 	if (cursor->iprec && cursor->record == &cursor->iprec->rec) {
1058 		cursor->data = cursor->iprec->data;
1059 		error = 0;
1060 	} else {
1061 		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
1062 						     HAMMER_CURSOR_GET_RECORD);
1063 	}
1064 	return(error);
1065 }
1066 
1067 /*
1068  * Delete all records within the specified range for inode ip.
1069  *
1070  * NOTE: An unaligned range will cause new records to be added to cover
1071  * the edge cases. (XXX not implemented yet).
1072  *
1073  * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1074  *
1075  * NOTE: Record keys for regular file data have to be special-cased since
1076  * they indicate the end of the range (key = base + bytes).
1077  */
1078 int
1079 hammer_ip_delete_range(hammer_transaction_t trans, hammer_inode_t ip,
1080 		       int64_t ran_beg, int64_t ran_end)
1081 {
1082 	struct hammer_cursor cursor;
1083 	hammer_record_ondisk_t rec;
1084 	hammer_base_elm_t base;
1085 	int error;
1086 	int64_t off;
1087 
1088 retry:
1089 	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
1090 
1091 	cursor.key_beg.obj_id = ip->obj_id;
1092 	cursor.key_beg.create_tid = 0;
1093 	cursor.key_beg.delete_tid = 0;
1094 	cursor.key_beg.obj_type = 0;
1095 	cursor.asof = ip->obj_asof;
1096 	cursor.flags |= HAMMER_CURSOR_ASOF;
1097 
1098 	cursor.key_end = cursor.key_beg;
1099 	if (ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
1100 		cursor.key_beg.key = ran_beg;
1101 		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1102 		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1103 		cursor.key_end.key = ran_end;
1104 	} else {
1105 		/*
1106 		 * The key in the B-Tree is (base+bytes), so the first possible
1107 		 * matching key is ran_beg + 1.
1108 		 */
1109 		int64_t tmp64;
1110 
1111 		cursor.key_beg.key = ran_beg + 1;
1112 		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1113 		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
1114 
1115 		tmp64 = ran_end + MAXPHYS + 1;	/* work around GCC-4 bug */
1116 		if (tmp64 < ran_end)
1117 			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1118 		else
1119 			cursor.key_end.key = ran_end + MAXPHYS + 1;
1120 	}
1121 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
1122 
1123 	error = hammer_ip_first(&cursor, ip);
1124 
1125 	/*
1126 	 * Iterate through matching records and mark them as deleted.
1127 	 */
1128 	while (error == 0) {
1129 		rec = cursor.record;
1130 		base = &rec->base.base;
1131 
1132 		KKASSERT(base->delete_tid == 0);
1133 
1134 		/*
1135 		 * There may be overlap cases for regular file data.  Also
1136 		 * remember the key for a regular file record is the offset
1137 		 * of the last byte of the record (base + len - 1), NOT the
1138 		 * base offset.
1139 		 */
1140 #if 0
1141 		kprintf("delete_range rec_type %02x\n", base->rec_type);
1142 #endif
1143 		if (base->rec_type == HAMMER_RECTYPE_DATA) {
1144 #if 0
1145 			kprintf("delete_range loop key %016llx\n",
1146 				base->key - rec->base.data_len);
1147 #endif
1148 			off = base->key - rec->base.data_len;
1149 			/*
1150 			 * Check the left edge case.  We currently do not
1151 			 * split existing records.
1152 			 */
1153 			if (off < ran_beg) {
1154 				panic("hammer left edge case %016llx %d\n",
1155 					base->key, rec->base.data_len);
1156 			}
1157 
1158 			/*
1159 			 * Check the right edge case.  Note that the
1160 			 * record can be completely out of bounds, which
1161 			 * terminates the search.
1162 			 *
1163 			 * base->key is exclusive of the right edge while
1164 			 * ran_end is inclusive of the right edge.  The
1165 			 * (key - data_len) left boundary is inclusive.
1166 			 *
1167 			 * XXX theory-check this test at some point, are
1168 			 * we missing a + 1 somewhere?  Note that ran_end
1169 			 * could overflow.
1170 			 */
1171 			if (base->key - 1 > ran_end) {
1172 				if (base->key - rec->base.data_len > ran_end)
1173 					break;
1174 				panic("hammer right edge case\n");
1175 			}
1176 		}
1177 
1178 		/*
1179 		 * Mark the record and B-Tree entry as deleted.  This will
1180 		 * also physically delete the B-Tree entry, record, and
1181 		 * data if the retention policy dictates.  The function
1182 		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1183 		 * uses to perform a fixup.
1184 		 */
1185 		error = hammer_ip_delete_record(&cursor, trans->tid);
1186 		if (error)
1187 			break;
1188 		error = hammer_ip_next(&cursor);
1189 	}
1190 	hammer_done_cursor(&cursor);
1191 	if (error == EDEADLK)
1192 		goto retry;
1193 	if (error == ENOENT)
1194 		error = 0;
1195 	return(error);
1196 }
1197 
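/*
 * Worked example of the (base+bytes) key convention handled above,
 * assuming HAMMER_BUFSIZE is 16384: a data record covering file offsets
 * [0, 16383] is indexed under key 16384, so deleting the byte range
 * ran_beg=0, ran_end=16383 scans keys starting at ran_beg + 1 and
 * recovers the record's left edge as key - data_len = 0.
 */
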
1198 /*
1199  * Delete all records associated with an inode except the inode record
1200  * itself.
1201  */
1202 int
1203 hammer_ip_delete_range_all(hammer_transaction_t trans, hammer_inode_t ip)
1204 {
1205 	struct hammer_cursor cursor;
1206 	hammer_record_ondisk_t rec;
1207 	hammer_base_elm_t base;
1208 	int error;
1209 
1210 retry:
1211 	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
1212 
1213 	cursor.key_beg.obj_id = ip->obj_id;
1214 	cursor.key_beg.create_tid = 0;
1215 	cursor.key_beg.delete_tid = 0;
1216 	cursor.key_beg.obj_type = 0;
1217 	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1218 	cursor.key_beg.key = HAMMER_MIN_KEY;
1219 
1220 	cursor.key_end = cursor.key_beg;
1221 	cursor.key_end.rec_type = 0xFFFF;
1222 	cursor.key_end.key = HAMMER_MAX_KEY;
1223 
1224 	cursor.asof = ip->obj_asof;
1225 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1226 
1227 	error = hammer_ip_first(&cursor, ip);
1228 
1229 	/*
1230 	 * Iterate through matching records and mark them as deleted.
1231 	 */
1232 	while (error == 0) {
1233 		rec = cursor.record;
1234 		base = &rec->base.base;
1235 
1236 		KKASSERT(base->delete_tid == 0);
1237 
1238 		/*
1239 		 * Mark the record and B-Tree entry as deleted.  This will
1240 		 * also physically delete the B-Tree entry, record, and
1241 		 * data if the retention policy dictates.  The function
1242 		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1243 		 * uses to perform a fixup.
1244 		 */
1245 		error = hammer_ip_delete_record(&cursor, trans->tid);
1246 		if (error)
1247 			break;
1248 		error = hammer_ip_next(&cursor);
1249 	}
1250 	hammer_done_cursor(&cursor);
1251 	if (error == EDEADLK)
1252 		goto retry;
1253 	if (error == ENOENT)
1254 		error = 0;
1255 	return(error);
1256 }
1257 
1258 /*
1259  * Delete the record at the current cursor.  On success the cursor will
1260  * be positioned appropriately for an iteration but may no longer be at
1261  * a leaf node.
1262  *
1263  * NOTE: This can return EDEADLK, requiring the caller to terminate the
1264  * cursor and retry.
1265  */
1266 int
1267 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_tid_t tid)
1268 {
1269 	hammer_btree_elm_t elm;
1270 	hammer_mount_t hmp;
1271 	int error;
1272 	int dodelete;
1273 
1274 	/*
1275 	 * In-memory (unsynchronized) records can simply be freed.
1276 	 */
1277 	if (cursor->record == &cursor->iprec->rec) {
1278 		cursor->iprec->flags |= HAMMER_RECF_DELETED;
1279 		return(0);
1280 	}
1281 
1282 	/*
1283 	 * On-disk records are marked as deleted by updating their delete_tid.
1284 	 * This does not affect their position in the B-Tree (which is based
1285 	 * on their create_tid).
1286 	 */
1287 	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
1288 	elm = NULL;
1289 	hmp = cursor->node->hmp;
1290 
1291 	dodelete = 0;
1292 	if (error == 0) {
1293 		error = hammer_cursor_upgrade(cursor);
1294 		if (error == 0) {
1295 			hammer_modify_node(cursor->node);
1296 			elm = &cursor->node->ondisk->elms[cursor->index];
1297 			elm->leaf.base.delete_tid = tid;
1298 			hammer_modify_buffer(cursor->record_buffer, &cursor->record->base.base.delete_tid, sizeof(hammer_tid_t));
1299 			cursor->record->base.base.delete_tid = tid;
1300 		}
1301 	}
1302 
1303 	/*
1304 	 * If we were mounted with the nohistory option, we physically
1305 	 * delete the record.
1306 	 */
1307 	if (hmp->hflags & HMNT_NOHISTORY)
1308 		dodelete = 1;
1309 
1310 	if (error == 0 && dodelete) {
1311 		error = hammer_delete_at_cursor(cursor, NULL);
1312 		if (error) {
1313 			panic("hammer_ip_delete_record: unable to physically delete the record!\n");
1314 			error = 0;
1315 		}
1316 	}
1317 	return(error);
1318 }
1319 
1320 int
1321 hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
1322 {
1323 	hammer_btree_elm_t elm;
1324 	hammer_off_t rec_offset;
1325 	hammer_off_t data_offset;
1326 	int32_t data_len;
1327 	u_int8_t rec_type;
1328 	int error;
1329 
1330 	elm = &cursor->node->ondisk->elms[cursor->index];
1331 	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1332 
1333 	rec_offset = elm->leaf.rec_offset;
1334 	data_offset = elm->leaf.data_offset;
1335 	data_len = elm->leaf.data_len;
1336 	rec_type = elm->leaf.base.rec_type;
1337 
1338 	error = hammer_btree_delete(cursor);
1339 	if (error == 0) {
1340 		/*
1341 		 * This forces a fixup for the iteration because
1342 		 * the cursor is now either sitting at the 'next'
1343 		 * element or sitting at the end of a leaf.
1344 		 */
1345 		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1346 			cursor->flags |= HAMMER_CURSOR_DELBTREE;
1347 			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1348 		}
1349 	}
1350 	if (error == 0) {
1351 		hammer_blockmap_free(cursor->node->hmp, rec_offset,
1352 				     sizeof(union hammer_record_ondisk));
1353 	}
1354 	if (error == 0) {
1355 		switch(data_offset & HAMMER_OFF_ZONE_MASK) {
1356 		case HAMMER_ZONE_LARGE_DATA:
1357 		case HAMMER_ZONE_SMALL_DATA:
1358 			hammer_blockmap_free(cursor->node->hmp,
1359 					     data_offset, data_len);
1360 			break;
1361 		default:
1362 			break;
1363 		}
1364 	}
1365 #if 0
1366 	kprintf("hammer_delete_at_cursor: %d:%d:%08x %08x/%d "
1367 		"(%d remain in cluster)\n",
1368 		cluster->volume->vol_no, cluster->clu_no,
1369 		rec_offset, data_offset, data_len,
1370 		cluster->ondisk->stat_records);
1371 #endif
1372 	return (error);
1373 }
1374 
1375 /*
1376  * Determine whether a directory is empty or not.  Returns 0 if the directory
1377  * is empty, ENOTEMPTY if it isn't, plus other possible errors.
1378  */
1379 int
1380 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
1381 {
1382 	struct hammer_cursor cursor;
1383 	int error;
1384 
1385 	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
1386 
1387 	cursor.key_beg.obj_id = ip->obj_id;
1388 	cursor.key_beg.create_tid = 0;
1389 	cursor.key_beg.delete_tid = 0;
1390 	cursor.key_beg.obj_type = 0;
1391 	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1392 	cursor.key_beg.key = HAMMER_MIN_KEY;
1393 
1394 	cursor.key_end = cursor.key_beg;
1395 	cursor.key_end.rec_type = 0xFFFF;
1396 	cursor.key_end.key = HAMMER_MAX_KEY;
1397 
1398 	cursor.asof = ip->obj_asof;
1399 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1400 
1401 	error = hammer_ip_first(&cursor, ip);
1402 	if (error == ENOENT)
1403 		error = 0;
1404 	else if (error == 0)
1405 		error = ENOTEMPTY;
1406 	hammer_done_cursor(&cursor);
1407 	return(error);
1408 }
1409 
1410