/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.12 2008/07/04 07:25:36 dillon Exp $
 */

#include "hammer.h"

/*
 * Iterate through the specified range of object ids and remove any
 * deleted records that fall entirely within a prune modulo.
 *
 * A reverse iteration is used to prevent overlapping records from being
 * created during the iteration due to alignments.  This also allows us
 * to adjust alignments without blowing up the B-Tree.
 */
static int prune_should_delete(struct hammer_ioc_prune *prune,
			hammer_btree_leaf_elm_t elm);
static void prune_check_nlinks(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm);

/*
 * Ioctl backend for the HAMMER prune operation.
 *
 * Scans the B-Tree backwards over the key range given in the ioctl
 * structure (prune->key_beg .. prune->key_end, localized to ip's PFS)
 * and destroys any deleted record that prune_should_delete() approves.
 *
 * Returns 0 on success or a kernel errno.  EDEADLK and EWOULDBLOCK are
 * consumed internally via the retry: loop; EINTR is converted into the
 * HAMMER_IOC_HEAD_INTR flag and a 0 return.  prune->key_cur tracks the
 * last element examined, so userland can observe scan progress through
 * the copied-out ioctl structure (presumably for resume/progress
 * reporting — the copy-out happens in the caller, not here).
 */
int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_prune *prune)
{
	struct hammer_cursor cursor;
	hammer_btree_leaf_elm_t elm;
	struct hammer_ioc_prune_elm *copy_elms;	/* kernel copy of elm array */
	struct hammer_ioc_prune_elm *user_elms;	/* original userland pointer */
	int error;
	int isdir;
	int elm_array_size;

	/*
	 * Validate the untrusted ioctl arguments: element count bounds,
	 * no pseudofs bits in the localization fields, and a properly
	 * ordered (beg <= end) key range.  PRUNE_ALL and an element list
	 * are mutually exclusive.
	 */
	if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
		return(EINVAL);
	if ((prune->key_beg.localization | prune->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (prune->key_beg.localization > prune->key_end.localization)
		return(EINVAL);
	if (prune->key_beg.localization == prune->key_end.localization) {
		if (prune->key_beg.obj_id > prune->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}
	if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
		return(EINVAL);

	/*
	 * Start the (reverse) scan at the end of the range.  The
	 * localization is offset by the inode's obj_localization —
	 * presumably to confine the scan to ip's PFS; the offset is
	 * masked back off at 'failed:' before returning to userland.
	 */
	prune->key_cur.localization = prune->key_end.localization +
				      ip->obj_localization;
	prune->key_cur.obj_id = prune->key_end.obj_id;
	prune->key_cur.key = HAMMER_MAX_KEY;

	/*
	 * Copy element array from userland
	 */
	elm_array_size = sizeof(*copy_elms) * prune->nelms;
	user_elms = prune->elms;
	copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
	if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
		goto failed;
	/* prune->elms temporarily points at the kernel copy; restored at failed: */
	prune->elms = copy_elms;

	/*
	 * Scan backwards.  Retries typically occur if a deadlock is detected.
	 */
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = prune->key_beg.localization +
				      ip->obj_localization;
	cursor.key_beg.obj_id = prune->key_beg.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	/*
	 * key_end is re-seeded from key_cur each retry so a restarted
	 * scan resumes where the previous pass left off.
	 */
	cursor.key_end.localization = prune->key_cur.localization;
	cursor.key_end.obj_id = prune->key_cur.obj_id;
	cursor.key_end.key = prune->key_cur.key;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag allows the B-Tree code to clean up loose ends.
	 */
	cursor.flags |= HAMMER_CURSOR_PRUNING;

	hammer_sync_lock_sh(trans);
	error = hammer_btree_last(&cursor);

	while (error == 0) {
		/*
		 * Check for work
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		prune->key_cur = elm->base;	/* record progress */

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;
		if (trans->hmp->sync_lock.wanted) {
			/* someone wants the sync lock exclusively; back off */
			hammer_sync_unlock(trans);
			tsleep(trans, 0, "hmrslo", hz / 10);
			hammer_sync_lock_sh(trans);
		}
		if (hammer_flusher_meta_limit(trans->hmp) ||
		    hammer_flusher_undo_exhausted(trans, 2)) {
			/* EWOULDBLOCK triggers a flusher sync + retry below */
			error = EWOULDBLOCK;
			break;
		}

		/* track the oldest create_tid seen, reported via the ioctl */
		if (prune->stat_oldest_tid > elm->base.create_tid)
			prune->stat_oldest_tid = elm->base.create_tid;

		if (hammer_debug_general & 0x0200) {
			/*
			 * NOTE(review): %016llx against 64-bit key fields
			 * assumes they are long-long compatible here —
			 * confirm kprintf format/casts on this platform.
			 */
			kprintf("check %016llx %016llx cre=%016llx del=%016llx\n",
				elm->base.obj_id,
				elm->base.key,
				elm->base.create_tid,
				elm->base.delete_tid);
		}

		if (prune_should_delete(prune, elm)) {
			if (hammer_debug_general & 0x0200) {
				kprintf("check %016llx %016llx: DELETE\n",
					elm->base.obj_id, elm->base.key);
			}

			/*
			 * NOTE: This can return EDEADLK
			 *
			 * Acquiring the sync lock guarantees that the
			 * operation will not cross a synchronization
			 * boundary (see the flusher).
			 */
			isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

			error = hammer_delete_at_cursor(&cursor,
							HAMMER_DELETE_DESTROY,
							&prune->stat_bytes);
			if (error)
				break;

			if (isdir)
				++prune->stat_dirrecords;
			else
				++prune->stat_rawrecords;

			/*
			 * The current record might now be the one after
			 * the one we deleted, set ATEDISK to force us
			 * to skip it (since we are iterating backwards).
			 */
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
		} else {
			/*
			 * Nothing to delete, but we may have to check other
			 * things.
			 */
			prune_check_nlinks(&cursor, elm);
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x0100) {
				kprintf("check %016llx %016llx: SKIP\n",
					elm->base.obj_id, elm->base.key);
			}
		}
		++prune->stat_scanrecords;
		error = hammer_btree_iterate_reverse(&cursor);
	}
	hammer_sync_unlock(trans);
	if (error == ENOENT)
		error = 0;	/* ran off the start of the range: success */
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		/* let the flusher catch up, then resume from key_cur */
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		prune->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	/* strip the PFS offset so userland sees a relative localization */
	prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	/* restore the userland pointer and free the kernel copy */
	prune->elms = user_elms;
	kfree(copy_elms, M_TEMP);
	return(error);
}

/*
 * Check pruning list.  The list must be sorted in descending order.
 *
 * Return non-zero if the record should be deleted.
 *
 * A record is prunable when it has been deleted (delete_tid != 0), its
 * whole lifetime [create_tid, delete_tid] lies inside a prune element's
 * [beg_tid, end_tid] window, and both tids fall in the same mod_tid
 * slot — i.e. the record never existed on a retained snapshot boundary.
 */
static int
prune_should_delete(struct hammer_ioc_prune *prune, hammer_btree_leaf_elm_t elm)
{
	struct hammer_ioc_prune_elm *scan;
	int i;

	/*
	 * If pruning everything remove all records with a non-zero
	 * delete_tid.
	 */
	if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
		if (elm->base.delete_tid != 0)
			return(1);
		return(0);
	}

	for (i = 0; i < prune->nelms; ++i) {
		scan = &prune->elms[i];

		/*
		 * Check for loop termination.  Because the list is sorted
		 * in descending order, no later element can cover this
		 * record either.
		 */
		if (elm->base.create_tid >= scan->end_tid ||
		    elm->base.delete_tid > scan->end_tid) {
			break;
		}

		/*
		 * Determine if we can delete the record.
		 */
		if (elm->base.delete_tid &&
		    elm->base.create_tid >= scan->beg_tid &&
		    elm->base.delete_tid <= scan->end_tid &&
		    (elm->base.create_tid - scan->beg_tid) / scan->mod_tid ==
		    (elm->base.delete_tid - scan->beg_tid) / scan->mod_tid) {
			return(1);
		}
	}
	return(0);
}

/*
 * Dangling inodes can occur if processes are holding open descriptors on
 * deleted files as-of when a machine crashes.  When we find one simply
 * acquire the inode and release it.  The inode handling code will then
 * do the right thing.
 *
 * Only live (delete_tid == 0) INODE records with nlinks == 0 qualify.
 * The sync lock is dropped around hammer_get_inode() and re-acquired
 * shared before returning, restoring the caller's locking state.
 */
static
void
prune_check_nlinks(hammer_cursor_t cursor, hammer_btree_leaf_elm_t elm)
{
	hammer_inode_t ip;
	int error;

	if (elm->base.rec_type != HAMMER_RECTYPE_INODE)
		return;
	if (elm->base.delete_tid != 0)
		return;
	if (hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA))
		return;
	if (cursor->data->inode.nlinks)
		return;
	hammer_cursor_downgrade(cursor);
	hammer_sync_unlock(cursor->trans);
	ip = hammer_get_inode(cursor->trans, NULL, elm->base.obj_id,
		      HAMMER_MAX_TID,
		      elm->base.localization & HAMMER_LOCALIZE_PSEUDOFS_MASK,
		      0, &error);
	if (ip) {
		kprintf("pruning disconnected inode %016llx\n",
			elm->base.obj_id);
		hammer_rel_inode(ip, 0);
	} else {
		kprintf("unable to prune disconnected inode %016llx\n",
			elm->base.obj_id);
	}
	hammer_sync_lock_sh(cursor->trans);
}

#if 0

/*
 * NOTE: THIS CODE HAS BEEN REMOVED!  Pruning no longer attempts to realign
 *	 adjacent records because it seriously interferes with every
 *	 mirroring algorithm I could come up with.
 *
 *	 This means that historical accesses beyond the first snapshot
 *	 softlink should be on snapshot boundaries only.  Historical
 *	 accesses from "now" to the first snapshot softlink continue to
 *	 be fine-grained.
 *
 * NOTE: It also looks like there's a bug in the removed code.
	 It is believed
 *	 that create_tid can sometimes get set to 0xffffffffffffffff.  Just as
 *	 well we no longer try to do this fancy shit.  Probably the attempt to
 *	 correct the rhb is blowing up the cursor's indexing or addressing
 *	 mapping.
 *
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependancies for record buffers to guarantee consistency
 * during recovery.
 */
static int
realign_prune(struct hammer_ioc_prune *prune,
	      hammer_cursor_t cursor, int realign_cre, int realign_del)
{
	/*
	 * DEAD CODE — compiled out by the enclosing #if 0 (see the NOTE
	 * above).  Retained for historical reference only.
	 *
	 * realign_cre / realign_del: indices into prune->elms selecting
	 * the prune element whose mod_tid to align against, or a
	 * negative value to skip that side (inferred from the >= 0
	 * checks below).
	 */
	struct hammer_ioc_prune_elm *scan;
	hammer_btree_elm_t elm;
	hammer_tid_t delta;
	hammer_tid_t tid;
	int error;

	hammer_cursor_downgrade(cursor);

	elm = &cursor->node->ondisk->elms[cursor->index];
	++prune->stat_realignments;

	/*
	 * Align the create_tid.  By doing a reverse iteration we guarantee
	 * that all records after our current record have already been
	 * aligned, allowing us to safely correct the right-hand-boundary
	 * (because no record to our right is otherwise exactly matching
	 * will have a create_tid to the left of our aligned create_tid).
	 */
	error = 0;
	if (realign_cre >= 0) {
		scan = &prune->elms[realign_cre];

		/* round create_tid up to the next mod_tid boundary */
		delta = (elm->leaf.base.create_tid - scan->beg_tid) %
			scan->mod_tid;
		if (delta) {
			tid = elm->leaf.base.create_tid - delta + scan->mod_tid;

			/* can EDEADLK */
			error = hammer_btree_correct_rhb(cursor, tid + 1);
			if (error == 0) {
				error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			}
			if (error == 0) {
				/* can EDEADLK */
				error = hammer_cursor_upgrade(cursor);
			}
			if (error == 0) {
				hammer_modify_node(cursor->trans, cursor->node,
					    &elm->leaf.base.create_tid,
					    sizeof(elm->leaf.base.create_tid));
				elm->leaf.base.create_tid = tid;
				hammer_modify_node_done(cursor->node);
			}
		}
	}

	/*
	 * Align the delete_tid.  This only occurs if the record is historical
	 * was deleted at some point.  Realigning the delete_tid does not
	 * move the record within the B-Tree but may cause it to temporarily
	 * overlap a record that has not yet been pruned.
	 */
	if (error == 0 && realign_del >= 0) {
		scan = &prune->elms[realign_del];

		/* round delete_tid up to the next mod_tid boundary */
		delta = (elm->leaf.base.delete_tid - scan->beg_tid) %
			scan->mod_tid;
		if (delta) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			if (error == 0) {
				hammer_modify_node(cursor->trans, cursor->node,
					    &elm->leaf.base.delete_tid,
					    sizeof(elm->leaf.base.delete_tid));
				elm->leaf.base.delete_tid =
					elm->leaf.base.delete_tid -
					delta + scan->mod_tid;
				hammer_modify_node_done(cursor->node);
			}
		}
	}
	return (error);
}

#endif