/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

static uint32_t ocp_allocbit(hammer_objid_cache_t ocp, uint32_t n);


/*
 * Start a standard transaction.
 *
 * May be called without fs_token
 */
void
hammer_start_transaction(hammer_transaction_t trans, hammer_mount_t hmp)
{
	struct timeval tv;
	int error;

	trans->type = HAMMER_TRANS_STD;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = 0;
	trans->sync_lock_refs = 0;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (uint32_t)tv.tv_sec;
}

/*
 * Start a simple read-only transaction.  This will not stall.
 *
 * May be called without fs_token
 */
void
hammer_simple_transaction(hammer_transaction_t trans, hammer_mount_t hmp)
{
	struct timeval tv;
	int error;

	trans->type = HAMMER_TRANS_RO;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = 0;
	trans->sync_lock_refs = 0;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (uint32_t)tv.tv_sec;
}

/*
 * Start a transaction using a particular TID.  Used by the sync code.
 * This does not stall.
 *
 * This routine may only be called from the flusher thread.  We predispose
 * sync_lock_refs, implying serialization against the synchronization stage
 * (which the flusher is responsible for).
 */
void
hammer_start_transaction_fls(hammer_transaction_t trans, hammer_mount_t hmp)
{
	struct timeval tv;
	int error;

	bzero(trans, sizeof(*trans));

	trans->type = HAMMER_TRANS_FLS;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = hammer_alloc_tid(hmp, 1);
	trans->sync_lock_refs = 1;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (uint32_t)tv.tv_sec;
}

/*
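 * Descriptive note (added, derived from the code below): releases the
 * transaction's reference on the root volume and, for non-flusher
 * transactions that created new inodes, may also wait for inode reclaims
 * to catch up (taking fs_token around the wait).
 *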
 * May be called without fs_token
 */
void
hammer_done_transaction(hammer_transaction_t trans)
{
	int expected_lock_refs __debugvar;

	hammer_rel_volume(trans->rootvol, 0);
	trans->rootvol = NULL;
	expected_lock_refs = (trans->type == HAMMER_TRANS_FLS) ? 1 : 0;
	KKASSERT(trans->sync_lock_refs == expected_lock_refs);
	trans->sync_lock_refs = 0;
	if (trans->type != HAMMER_TRANS_FLS) {
		if (trans->flags & HAMMER_TRANSF_NEWINODE) {
			lwkt_gettoken(&trans->hmp->fs_token);
			hammer_inode_waitreclaims(trans);
			lwkt_reltoken(&trans->hmp->fs_token);
		}
	}
}

/*
 * Allocate (count) TIDs.  If running in multi-master mode the returned
 * base will be aligned to a multiple of 16 plus the master id (0-15).
 * Multi-master mode allows non-conflicting operations to run and new
 * objects to be created on multiple masters in parallel.  The transaction
 * id identifies the original master.  The object_id is subject to the
 * same rule so that objects, too, can be created on multiple masters in
 * parallel.
 *
 * Directories may pre-allocate a large number of object ids (100,000).
 *
 * NOTE: There is no longer a requirement that successive transaction
 *	 ids be 2 apart for separator generation.
 *
 * NOTE: When called by pseudo-backends such as ioctls, the allocated
 *	 TID will be larger than the current flush TID if a flush is
 *	 running, so any mirroring will pick the records up on a later
 *	 flush.
 *
 * NOTE: HAMMER1 does not support multi-master clustering as of 2015.
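 *
 * NOTE: Illustrative sketch only (assuming HAMMER_MAX_MASTERS is 16 and
 *	 a master_id of 3): with next_tid at 0x1234 the base rounds up to
 *	 0x1240, the returned TID becomes 0x1243, and next_tid advances
 *	 to 0x1250 for a single-TID allocation.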
 */
hammer_tid_t
hammer_alloc_tid(hammer_mount_t hmp, int count)
{
	hammer_tid_t tid;

	if (hmp->master_id < 0) {
		tid = hmp->next_tid + 1;
		hmp->next_tid = tid + count;
	} else {
		tid = (hmp->next_tid + HAMMER_MAX_MASTERS) &
		      ~(hammer_tid_t)(HAMMER_MAX_MASTERS - 1);
		hmp->next_tid = tid + count * HAMMER_MAX_MASTERS;
		tid |= hmp->master_id;
	}
	if (tid >= 0xFFFFFFFFFF000000ULL)
		hpanic("Ran out of TIDs!");
	if (hammer_debug_tid)
		hdkprintf("%016jx\n", (intmax_t)tid);
	return(tid);
}

/*
 * Allocate an object id.
 *
 * We use the upper OBJID_CACHE_BITS bits of the namekey to try to match
 * the low bits of the objid we allocate.
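 *
 * That is, the top bits of the namekey select the preferred starting bit
 * handed to ocp_allocbit() below, so the low bits of the returned objid
 * tend to track the directory hash key.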
 */
hammer_tid_t
hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip, int64_t namekey)
{
	hammer_objid_cache_t ocp;
	hammer_tid_t tid;
	uint32_t n;

	while ((ocp = dip->objid_cache) == NULL) {
		if (hmp->objid_cache_count < OBJID_CACHE_SIZE) {
			ocp = kmalloc(sizeof(*ocp), hmp->m_misc,
				      M_WAITOK|M_ZERO);
			ocp->base_tid = hammer_alloc_tid(hmp,
							OBJID_CACHE_BULK * 2);
			ocp->base_tid += OBJID_CACHE_BULK_MASK64;
			ocp->base_tid &= ~OBJID_CACHE_BULK_MASK64;
			/* may have blocked, recheck */
			if (dip->objid_cache == NULL) {
				TAILQ_INSERT_TAIL(&hmp->objid_cache_list,
						  ocp, entry);
				++hmp->objid_cache_count;
				dip->objid_cache = ocp;
				ocp->dip = dip;
			} else {
				kfree(ocp, hmp->m_misc);
			}
		} else {
			/*
			 * Steal one from another directory?
			 *
			 * Throw away ocp's that are more than half full;
			 * they aren't worth stealing.
			 */
			ocp = TAILQ_FIRST(&hmp->objid_cache_list);
			if (ocp->dip)
				ocp->dip->objid_cache = NULL;
			if (ocp->count >= OBJID_CACHE_BULK / 2) {
				TAILQ_REMOVE(&hmp->objid_cache_list,
					     ocp, entry);
				--hmp->objid_cache_count;
				kfree(ocp, hmp->m_misc);
			} else {
				dip->objid_cache = ocp;
				ocp->dip = dip;
			}
		}
	}
	TAILQ_REMOVE(&hmp->objid_cache_list, ocp, entry);

	/*
	 * Allocate inode numbers uniformly.
	 */

	n = (namekey >> (63 - OBJID_CACHE_BULK_BITS)) & OBJID_CACHE_BULK_MASK;
	n = ocp_allocbit(ocp, n);
	tid = ocp->base_tid + n;

#if 0
	/*
	 * The TID is incremented by 1 or by 16 depending on which mode
	 * the mount is operating in.
	 */
	ocp->next_tid += (hmp->master_id < 0) ? 1 : HAMMER_MAX_MASTERS;
#endif
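	/*
	 * Once the cache block is three-quarters consumed, retire it
	 * rather than returning it to the list; a mostly-used block is
	 * not worth keeping around or stealing.
	 */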
	if (ocp->count >= OBJID_CACHE_BULK * 3 / 4) {
		dip->objid_cache = NULL;
		--hmp->objid_cache_count;
		ocp->dip = NULL;
		kfree(ocp, hmp->m_misc);
	} else {
		TAILQ_INSERT_TAIL(&hmp->objid_cache_list, ocp, entry);
	}
	return(tid);
}

/*
 * Allocate a bit starting with bit n.  Wrap if necessary.
 *
 * This routine is only ever called if a bit is available somewhere
 * in the bitmap.
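 *
 * The allocation state is a two-level bitmap: bm1[] holds one bit per
 * objid in 32-bit words, while bm0 carries one summary bit per bm1[]
 * word that is set once the word is completely full, letting the search
 * skip over exhausted words.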
 */
static uint32_t
ocp_allocbit(hammer_objid_cache_t ocp, uint32_t n)
{
	uint32_t n0;

	n0 = (n >> 5) & 31;
	n &= 31;

	while (ocp->bm1[n0] & (1 << n)) {
		if (ocp->bm0 & (1 << n0)) {
			n0 = (n0 + 1) & 31;
			n = 0;
		} else if (++n == 32) {
			n0 = (n0 + 1) & 31;
			n = 0;
		}
	}
	++ocp->count;
	ocp->bm1[n0] |= 1 << n;
	if (ocp->bm1[n0] == 0xFFFFFFFFU)
		ocp->bm0 |= 1 << n0;
	return((n0 << 5) + n);
}

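/*
 * Detach the directory's objid cache without freeing it and move it to
 * the head of the LRU list, making it the first candidate to be stolen
 * or reused by another directory.
 */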
void
hammer_clear_objid(hammer_inode_t dip)
{
	hammer_objid_cache_t ocp;

	if ((ocp = dip->objid_cache) != NULL) {
		dip->objid_cache = NULL;
		ocp->dip = NULL;
		TAILQ_REMOVE(&dip->hmp->objid_cache_list, ocp, entry);
		TAILQ_INSERT_HEAD(&dip->hmp->objid_cache_list, ocp, entry);
	}
}

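/*
 * Release every cached objid structure, clearing any directory
 * back-pointers (typically during teardown of the mount).  The cache
 * count must drop to zero.
 */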
void
hammer_destroy_objid_cache(hammer_mount_t hmp)
{
	hammer_objid_cache_t ocp;

	while ((ocp = TAILQ_FIRST(&hmp->objid_cache_list)) != NULL) {
		TAILQ_REMOVE(&hmp->objid_cache_list, ocp, entry);
		if (ocp->dip)
			ocp->dip->objid_cache = NULL;
		kfree(ocp, hmp->m_misc);
		--hmp->objid_cache_count;
	}
	KKASSERT(hmp->objid_cache_count == 0);
}
317