// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

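/*
 * gfs2_ail_error - report an unexpectedly busy AIL buffer and withdraw
 * @gl: the glock the buffer belongs to
 * @bh: the buffer found in an unexpected (dirty/pinned/locked) state
 *
 * Dumps the buffer and glock state to the log and then withdraws the
 * filesystem, since a busy buffer on the AIL at this point indicates an
 * inconsistency in the journaling state.
 */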
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

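/*
 * gfs2_ail_empty_gl - revoke and flush all AIL buffers of a glock
 * @gl: the glock
 *
 * Queues revokes for every buffer on the glock's AIL list inside a
 * transaction, then flushes the log so that the revokes reach the
 * journal before the glock is released.
 */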
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

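	/*
	 * Use an on-stack transaction sized for the revokes we are about
	 * to queue; __gfs2_ail_flush() adds one revoke per AIL buffer.
	 */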
	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

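/*
 * gfs2_ail_flush - revoke the AIL buffers of a glock
 * @gl: the glock
 * @fsync: set when called from fsync (some buffers may still be busy)
 *
 * Like gfs2_ail_empty_gl(), but callable while the glock stays held;
 * when @fsync is set, busy buffers are skipped instead of being treated
 * as an error.
 */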
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

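/*
 * gfs2_rgrp_go_dump - print resource group state for glock dumps
 * @seq: the seq_file to print into (may be NULL)
 * @gl: the glock protecting the resource group
 * @fs_id_buf: file system id prefix (may be empty)
 */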
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

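/*
 * gfs2_glock2inode - safely dereference a glock's inode back pointer
 * @gl: the glock
 *
 * Sets GIF_GLOP_PENDING under gl_lockref.lock so that the inode cannot
 * be torn down while a glock operation is using it; the operation must
 * call gfs2_clear_glop_pending() when it is done with the inode.
 */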
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

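/*
 * gfs2_glock2rgrp - safely dereference a glock's resource group pointer
 * @gl: the glock
 *
 * Unlike gfs2_glock2inode(), no pending flag is needed here: resource
 * groups stay around for the lifetime of the mount.
 */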
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

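/*
 * gfs2_clear_glop_pending - release an inode pinned by gfs2_glock2inode()
 * @ip: the inode (may be NULL)
 *
 * Clears GIF_GLOP_PENDING and wakes up anyone (such as inode eviction)
 * waiting for the glock operation to finish.
 */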
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

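	/*
	 * For regular files, zap any writable shared mappings so that
	 * future page faults re-acquire the glock, and wait for direct
	 * I/O still in flight before syncing.
	 */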
	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

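/*
 * gfs2_dinode_in - copy an on-disk dinode into its in-core inode
 * @ip: the in-core inode
 * @buf: the on-disk dinode buffer
 *
 * Validates the block number, inode type, metadata height and directory
 * depth before accepting the fields.  Returns: 0, or -EIO if the dinode
 * is inconsistent.
 */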
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_flags & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: The glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

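	/*
	 * A truncate that was interrupted (for example by a crash) is
	 * finished lazily: queue the inode on the superblock's truncate
	 * list and wake the quota daemon, which completes the truncate.
	 */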
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

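/*
 * iopen_go_demote_ok - don't demote an iopen glock while delete work
 * is still queued for it
 * @gl: the glock
 */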
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live"
	 * glock we were called from. So we queue it to the control work
	 * queue in lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};