// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

/**
 * gfs2_update_stats - Update time based stats
 * @s: The stats to update (local or global)
 * @index: The index inside @s
 * @sample: New data to include
 */
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	/*
	 * @delta is the difference between the current rtt sample and the
	 * running average srtt. We add 1/8 of that to the srtt in order to
	 * update the current srtt estimate. The variance estimate is a bit
	 * more complicated. We subtract the current variance estimate from
	 * the abs value of the @delta and add 1/4 of that to the running
	 * total. That's equivalent to 3/4 of the current variance
	 * estimate plus 1/4 of the abs of @delta.
	 *
	 * Note that the index points at the array entry containing the
	 * smoothed mean value, and the variance is always in the following
	 * entry.
	 *
	 * Reference: TCP/IP Illustrated, vol 2, p. 831,832
	 * All times are in units of integer nanoseconds. Unlike the TCP/IP
	 * case, they are not scaled fixed point.
	 */

	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}
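
/*
 * Worked example of the update above (illustrative numbers only): with a
 * current smoothed rtt of 800ns and a new sample of 1600ns, delta = 800,
 * so the mean becomes 800 + (800 >> 3) = 900ns. If the variance estimate
 * in the following slot was 200ns, it becomes
 * 200 + ((|800| - 200) >> 2) = 350ns, i.e. 3/4 * 200 + 1/4 * 800.
 */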

/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, where the
 * requested state is null (or unlocked), or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}
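
/*
 * For example (following the classification in the comment above): an
 * unlock request or a request with LM_FLAG_TRY set counts as non-blocking
 * and updates the GFS2_LKS_SRTT pair, while a plain convert from shared
 * to exclusive may have to wait for other nodes and therefore updates the
 * GFS2_LKS_SRTTB pair instead (the variance slot follows each mean, as
 * noted in gfs2_update_stats()).
 */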

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}
static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK:		/* Unlocked, so glock can be freed */
		if (gl->gl_ops->go_free)
			gl->gl_ops->go_free(gl);
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL:		/* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN:			/* Try lock fails */
	case -EDEADLK:			/* Deadlock detected */
		goto out;
	case -ETIMEDOUT:		/* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0:				/* Success */
		break;
	default:			/* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}
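
/*
 * Flag composition example (illustrative, see make_flags() above): a
 * LM_FLAG_TRY_1CB request for a glock that already holds a DLM lock
 * (sb_lkid != 0) yields DLM_LKF_NOQUEUE | DLM_LKF_NOQUEUEBAST |
 * DLM_LKF_CONVERT; since TRY requests are classified as non-blocking
 * (see gfs2_update_reply_times()), GLF_BLOCKING should be clear and
 * DLM_LKF_QUECVT is not added. If the glock carries an lvb,
 * DLM_LKF_VALBLK is set as well.
 */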

static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}
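
/*
 * The resulting resource names are fixed-width, space-padded hex strings.
 * For example (illustrative values), gdlm_lock() below would format a
 * glock of type 0x2 with number 0x1234 as a 24-character name: the type
 * right-justified in the first 8 characters and the number right-justified
 * in the following 16:
 *
 *   "       2            1234"
 */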

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to skip dlm_unlock writing the lvb when lock has one */

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !gl->gl_lksb.sb_lvbptr) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  0. gfs2 checks for another cluster node withdraw, needing journal replay
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure. gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15. This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9). This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start. So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set. (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again); if so it does nothing, otherwise, if recover_start >
 *     recover_block, it clears BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs. The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning. The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs. (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.) This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 *    Each node holds the mounted_lock in PR while it's mounted.
 *    Each node tries to acquire the mounted_lock in EX when it mounts.
 *    If a node is granted the mounted_lock EX it means there are no
 *    other mounted nodes (no PR locks exist), and it is the first mounter.
 *    The mounted_lock is demoted to PR when first recovery is done, so
 *    others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 *    mounter is doing first mount recovery of all journals.
 *    A mounting node needs to acquire control_lock in EX mode before
 *    it can proceed. The first mounter holds control_lock in EX while doing
 *    the first mount recovery, blocking mounts from other nodes, then demotes
 *    control_lock to NL when it's done (others_may_mount/first_done),
 *    allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks. It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback. Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */
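
/*
 * Illustrative timeline of the recover_block/recover_start logic above
 * (the generation numbers are made up): recover_done(gen 5) sets
 * recover_start = 5; recover_prep() then sets recover_block = 5 and
 * BLOCK_LOCKS. If recovery completes normally, recover_done(gen 6) sets
 * recover_start = 6, and gfs2_control sees recover_start (6) >
 * recover_block (5) and may clear BLOCK_LOCKS. If another node fails
 * first, recover_prep() runs again and sets recover_block = recover_start,
 * so the two stay equal and BLOCK_LOCKS stays set.
 */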

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */
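
/*
 * Illustrative control_lock lvb layout as described above, assuming a
 * GDLM_LVB_SIZE of 32 bytes (the actual value is defined in lock_dlm.h):
 *
 *   bytes  0..3   __le32 lockspace generation number
 *   bytes  4..7   unused
 *   bytes  8..31  jid bitmap; bit N set (little-endian bit order, see
 *                 test_bit_le()/__set_bit_le() below) means jid N needs
 *                 recovery
 */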

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

/**
 * remote_withdraw - react to a node withdrawing from the file system
 * @sdp: The superblock
 */
static void remote_withdraw(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int ret = 0, count = 0;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
			continue;
		ret = gfs2_recover_journal(jd, true);
		if (ret)
			break;
		count++;
	}

	/* Report how many journals we checked and the final status. */
	fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	/* First check for other nodes that may have done a withdraw. */
	if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
		remote_withdraw(sdp);
		clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
		return;
	}

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred, blocking locks
	 * again while we were working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	/*
	 * If we're a spectator, we don't want to take the lock in EX because
	 * we cannot do the first-mount responsibility it implies: recovery.
	 */
	if (sdp->sd_args.ar_spectator)
		goto locks_done;

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, "control_mount wait1 block %u start %u "
				"mount %u lvb %u flags %lx\n", block_gen,
				start_gen, mount_gen, lvb_gen,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery. We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}

/*
 * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
 * to accommodate the largest slot number.  (NB dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

#define RECOVER_SIZE_INC 16
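
/*
 * Sizing example (hypothetical slot set): with slots {1, 2, 19},
 * max_jid = 18, so a previous ls_recover_size of 16 is grown in
 * RECOVER_SIZE_INC steps until new_size (32) covers max_jid + 1.
 */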

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
	new_size = old_size;
	while (new_size < max_jid + 1)
		new_size += RECOVER_SIZE_INC;
	if (new_size == old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_prep ignored due to withdraw.\n");
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_done ignored due to withdraw.\n");
		return;
	}
	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* we don't care about the recovery of our own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */
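	/*
	 * The table name has the form "cluster:fsname"; e.g. a hypothetical
	 * "mycluster:myfs" splits into cluster name "mycluster" and
	 * lockspace name "myfs" for dlm_new_lockspace() below.
	 */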
	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};