1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/vfs/hammer/hammer_transaction.c,v 1.25 2008/09/23 21:03:52 dillon Exp $ 35 */ 36 37 #include "hammer.h" 38 39 static u_int32_t ocp_allocbit(hammer_objid_cache_t ocp, u_int32_t n); 40 41 42 /* 43 * Start a standard transaction. 44 */ 45 void 46 hammer_start_transaction(struct hammer_transaction *trans, 47 struct hammer_mount *hmp) 48 { 49 struct timeval tv; 50 int error; 51 52 trans->type = HAMMER_TRANS_STD; 53 trans->hmp = hmp; 54 trans->rootvol = hammer_get_root_volume(hmp, &error); 55 KKASSERT(error == 0); 56 trans->tid = 0; 57 trans->sync_lock_refs = 0; 58 trans->flags = 0; 59 60 getmicrotime(&tv); 61 trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec; 62 trans->time32 = (u_int32_t)tv.tv_sec; 63 } 64 65 /* 66 * Start a simple read-only transaction. This will not stall. 67 */ 68 void 69 hammer_simple_transaction(struct hammer_transaction *trans, 70 struct hammer_mount *hmp) 71 { 72 struct timeval tv; 73 int error; 74 75 trans->type = HAMMER_TRANS_RO; 76 trans->hmp = hmp; 77 trans->rootvol = hammer_get_root_volume(hmp, &error); 78 KKASSERT(error == 0); 79 trans->tid = 0; 80 trans->sync_lock_refs = 0; 81 trans->flags = 0; 82 83 getmicrotime(&tv); 84 trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec; 85 trans->time32 = (u_int32_t)tv.tv_sec; 86 } 87 88 /* 89 * Start a transaction using a particular TID. Used by the sync code. 90 * This does not stall. 91 * 92 * This routine may only be called from the flusher thread. We predispose 93 * sync_lock_refs, implying serialization against the synchronization stage 94 * (which the flusher is responsible for). 
95 */ 96 void 97 hammer_start_transaction_fls(struct hammer_transaction *trans, 98 struct hammer_mount *hmp) 99 { 100 struct timeval tv; 101 int error; 102 103 bzero(trans, sizeof(*trans)); 104 105 trans->type = HAMMER_TRANS_FLS; 106 trans->hmp = hmp; 107 trans->rootvol = hammer_get_root_volume(hmp, &error); 108 KKASSERT(error == 0); 109 trans->tid = hammer_alloc_tid(hmp, 1); 110 trans->sync_lock_refs = 1; 111 trans->flags = 0; 112 113 getmicrotime(&tv); 114 trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec; 115 trans->time32 = (u_int32_t)tv.tv_sec; 116 } 117 118 void 119 hammer_done_transaction(struct hammer_transaction *trans) 120 { 121 int expected_lock_refs; 122 123 hammer_rel_volume(trans->rootvol, 0); 124 trans->rootvol = NULL; 125 expected_lock_refs = (trans->type == HAMMER_TRANS_FLS) ? 1 : 0; 126 KKASSERT(trans->sync_lock_refs == expected_lock_refs); 127 trans->sync_lock_refs = 0; 128 if (trans->type != HAMMER_TRANS_FLS) { 129 if (trans->flags & HAMMER_TRANSF_NEWINODE) 130 hammer_inode_waitreclaims(trans); 131 /* 132 else if (trans->flags & HAMMER_TRANSF_DIDIO) 133 hammer_inode_waitreclaims(trans); 134 */ 135 } 136 } 137 138 /* 139 * Allocate (count) TIDs. If running in multi-master mode the returned 140 * base will be aligned to a 16-count plus the master id (0-15). 141 * Multi-master mode allows non-conflicting to run and new objects to be 142 * created on multiple masters in parallel. The transaction id identifies 143 * the original master. The object_id is also subject to this rule in 144 * order to allow objects to be created on multiple masters in parallel. 145 * 146 * Directories may pre-allocate a large number of object ids (100,000). 147 * 148 * NOTE: There is no longer a requirement that successive transaction 149 * ids be 2 apart for separator generation. 
 *
 * NOTE: When called by pseudo-backends such as ioctls the allocated
 *	 TID will be larger than the current flush TID, if a flush is
 *	 running, so any mirroring will pick the records up on a later
 *	 flush.
 */
hammer_tid_t
hammer_alloc_tid(hammer_mount_t hmp, int count)
{
	hammer_tid_t tid;

	if (hmp->master_id < 0) {
		/*
		 * Single-master (or no-master) mode: simple sequential
		 * allocation starting one past the current next_tid.
		 */
		tid = hmp->next_tid + 1;
		hmp->next_tid = tid + count;
	} else {
		/*
		 * Multi-master mode: round up to the next 16-TID
		 * boundary and encode the master id (0-15) in the low
		 * bits.  Each of the (count) TIDs is spaced
		 * HAMMER_MAX_MASTERS apart so every master can allocate
		 * the same slots without colliding.
		 */
		tid = (hmp->next_tid + HAMMER_MAX_MASTERS) &
		      ~(hammer_tid_t)(HAMMER_MAX_MASTERS - 1);
		hmp->next_tid = tid + count * HAMMER_MAX_MASTERS;
		tid |= hmp->master_id;
	}
	/* TIDs at or above this value are reserved; treat as fatal */
	if (tid >= 0xFFFFFFFFFF000000ULL)
		panic("hammer_start_transaction: Ran out of TIDs!");
	if (hammer_debug_tid)
		kprintf("alloc_tid %016llx\n", (long long)tid);
	return(tid);
}

/*
 * Allocate an object id.
 *
 * We use the upper OBJID_CACHE_BITS bits of the namekey to try to match
 * the low bits of the objid we allocate.
 */
hammer_tid_t
hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, int64_t namekey)
{
	hammer_objid_cache_t ocp;
	hammer_tid_t tid;
	u_int32_t n;

	/*
	 * Ensure the directory has an objid cache attached, either by
	 * allocating a fresh one or by stealing a lightly-used one from
	 * another directory.  Loop because the allocation path can
	 * block and lose a race.
	 */
	while ((ocp = dip->objid_cache) == NULL) {
		if (hmp->objid_cache_count < OBJID_CACHE_SIZE) {
			ocp = kmalloc(sizeof(*ocp), hmp->m_misc,
				      M_WAITOK|M_ZERO);
			/*
			 * Allocate twice the bulk so we can align
			 * base_tid up to an OBJID_CACHE_BULK boundary
			 * and still have a full bulk range available.
			 */
			ocp->base_tid = hammer_alloc_tid(hmp,
							OBJID_CACHE_BULK * 2);
			ocp->base_tid += OBJID_CACHE_BULK_MASK64;
			ocp->base_tid &= ~OBJID_CACHE_BULK_MASK64;
			/* may have blocked, recheck */
			if (dip->objid_cache == NULL) {
				TAILQ_INSERT_TAIL(&hmp->objid_cache_list,
						  ocp, entry);
				++hmp->objid_cache_count;
				dip->objid_cache = ocp;
				ocp->dip = dip;
			} else {
				kfree(ocp, hmp->m_misc);
			}
		} else {
			/*
			 * Steal one from another directory?
			 *
			 * Throw away ocp's that are more than half full,
			 * they aren't worth stealing.
			 *
			 * NOTE(review): assumes the cache list is
			 * non-empty whenever objid_cache_count has hit
			 * OBJID_CACHE_SIZE — TAILQ_FIRST() is not
			 * checked for NULL here.
			 */
			ocp = TAILQ_FIRST(&hmp->objid_cache_list);
			if (ocp->dip)
				ocp->dip->objid_cache = NULL;
			if (ocp->count >= OBJID_CACHE_BULK / 2) {
				TAILQ_REMOVE(&hmp->objid_cache_list,
					     ocp, entry);
				--hmp->objid_cache_count;
				kfree(ocp, hmp->m_misc);
			} else {
				dip->objid_cache = ocp;
				ocp->dip = dip;
			}
		}
	}
	/* temporarily off the list; reinserted (or freed) below */
	TAILQ_REMOVE(&hmp->objid_cache_list, ocp, entry);

	/*
	 * Allocate inode numbers uniformly.
	 */

	n = (namekey >> (63 - OBJID_CACHE_BULK_BITS)) & OBJID_CACHE_BULK_MASK;
	n = ocp_allocbit(ocp, n);
	tid = ocp->base_tid + n;

#if 0
	/*
	 * The TID is incremented by 1 or by 16 depending what mode the
	 * mount is operating in.
	 */
	ocp->next_tid += (hmp->master_id < 0) ? 1 : HAMMER_MAX_MASTERS;
#endif
	/*
	 * Retire caches that are 3/4 used rather than reinserting them;
	 * nearly-full caches are poor allocation candidates.
	 */
	if (ocp->count >= OBJID_CACHE_BULK * 3 / 4) {
		dip->objid_cache = NULL;
		--hmp->objid_cache_count;
		ocp->dip = NULL;
		kfree(ocp, hmp->m_misc);
	} else {
		TAILQ_INSERT_TAIL(&hmp->objid_cache_list, ocp, entry);
	}
	return(tid);
}

/*
 * Allocate a bit starting with bit n.  Wrap if necessary.
 *
 * This routine is only ever called if a bit is available somewhere
 * in the bitmap.
261 */ 262 static u_int32_t 263 ocp_allocbit(hammer_objid_cache_t ocp, u_int32_t n) 264 { 265 u_int32_t n0; 266 267 n0 = (n >> 5) & 31; 268 n &= 31; 269 270 while (ocp->bm1[n0] & (1 << n)) { 271 if (ocp->bm0 & (1 << n0)) { 272 n0 = (n0 + 1) & 31; 273 n = 0; 274 } else if (++n == 32) { 275 n0 = (n0 + 1) & 31; 276 n = 0; 277 } 278 } 279 ++ocp->count; 280 ocp->bm1[n0] |= 1 << n; 281 if (ocp->bm1[n0] == 0xFFFFFFFFU) 282 ocp->bm0 |= 1 << n0; 283 return((n0 << 5) + n); 284 } 285 286 void 287 hammer_clear_objid(hammer_inode_t dip) 288 { 289 hammer_objid_cache_t ocp; 290 291 if ((ocp = dip->objid_cache) != NULL) { 292 dip->objid_cache = NULL; 293 ocp->dip = NULL; 294 TAILQ_REMOVE(&dip->hmp->objid_cache_list, ocp, entry); 295 TAILQ_INSERT_HEAD(&dip->hmp->objid_cache_list, ocp, entry); 296 } 297 } 298 299 void 300 hammer_destroy_objid_cache(hammer_mount_t hmp) 301 { 302 hammer_objid_cache_t ocp; 303 304 while ((ocp = TAILQ_FIRST(&hmp->objid_cache_list)) != NULL) { 305 TAILQ_REMOVE(&hmp->objid_cache_list, ocp, entry); 306 if (ocp->dip) 307 ocp->dip->objid_cache = NULL; 308 kfree(ocp, hmp->m_misc); 309 --hmp->objid_cache_count; 310 } 311 KKASSERT(hmp->objid_cache_count == 0); 312 } 313 314