/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */
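
/*
 * A hedged usage sketch (hypothetical caller, not a real callsite): take a
 * lock, record it in the path's bookkeeping, and later drop it:
 *
 *	ret = btree_node_lock(trans, path, &b->c, level, SIX_LOCK_intent, ip);
 *	if (ret)
 *		return ret;
 *	mark_btree_node_locked(trans, path, level, BTREE_NODE_INTENT_LOCKED);
 *	...
 *	btree_node_unlock(trans, path, level);
 */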

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

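/* The stats slot for this transaction's fn, or NULL if fn_idx is out of range: */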
static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

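/*
 * path->nodes_locked packs the lock state of every level into a bitfield,
 * two bits per level: the stored value is the six lock type + 1, so zero
 * means unlocked. btree_node_locked_type() recovers the enum with just a
 * shift and a mask:
 */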
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

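/*
 * The lock type we want at @level: nothing below the path's own level,
 * intent locks up to locks_want, a plain read lock at the level the path
 * is positioned at, and nothing above that:
 */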
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(lock_type == BTREE_NODE_WRITE_LOCKED);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

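/* nodes_locked has two bits per level, hence the >> 1: */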
static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;
	unsigned i;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked, i)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

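/*
 * While a transaction holds btree locks, memory reclaim must not recurse
 * back into the filesystem: set PF_MEMALLOC_NOFS for the duration, saving
 * the caller's flag so trans_set_unlocked() can restore it:
 */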
static inline void trans_set_locked(struct btree_trans *trans)
{
	if (!trans->locked) {
		lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
		trans->locked = true;
		trans->last_unlock_ip = 0;

		trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
		current->flags |= PF_MEMALLOC_NOFS;
	}
}

static inline void trans_set_unlocked(struct btree_trans *trans)
{
	if (trans->locked) {
		lock_release(&trans->dep_map, _THIS_IP_);
		trans->locked = false;
		trans->last_unlock_ip = _RET_IP_;

		if (!trans->pf_memalloc_nofs)
			current->flags &= ~PF_MEMALLOC_NOFS;
	}
}

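/*
 * The core lock path: publish what we're blocking on in trans->locking so
 * bch2_six_check_for_deadlock() can walk the wait graph, then clear it once
 * the lock attempt completes:
 */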
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type,
					 bool lock_may_not_fail,
					 unsigned long ip)
{
	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				     bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);

	if (!ret)
		trace_btree_path_lock(trans, _THIS_IP_, b);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

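/*
 * Lock a node, in order of preference: try the uncontended fast path, then
 * bump the reference count if another path in this transaction already has
 * the node locked, and only then fall back to the full path with deadlock
 * detection:
 */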
static inline int btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree_bkey_cached_common *b,
			unsigned level,
			enum six_lock_type type,
			unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	bch2_trans_verify_not_unlocked(trans);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
				struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

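/*
 * If we hold a lock at @level it must be a write lock or the type
 * __btree_lock_want() says we should have; otherwise, try to retake the
 * lock via the saved lock sequence number:
 */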
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

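/*
 * Raise locks_want and take the additional intent locks; returns 0 on
 * success, or a transaction restart error if the locks couldn't be taken:
 */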
static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f = {};
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->nodes_locked)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
	trace_btree_path_should_be_locked(trans, path);
}

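/*
 * Drop the lock at @l and poison the node pointer, so is_btree_node() and
 * relocking fail until the path is retraversed:
 */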
static inline void __btree_path_set_level_up(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
				    struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
				struct btree_path *,
				struct btree_bkey_cached_common *b,
				unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */