// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
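	/*
	 * Transfer the whole journal list to the local list head: list_add()
	 * links the local head into the chain, and list_del_init() then
	 * detaches the original sd_jindex_list head, all under the spinlock.
	 */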
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

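	/*
	 * The on-disk journal size must lie between 8 MiB (8 << 20) and
	 * 1 GiB (BIT(30)).
	 */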
	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp))
		return -EIO;

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))
		return error;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/*  Initialize the head of the log  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && !gfs2_withdrawn(sdp))
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
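	/*
	 * Wake up the statfs syncer once the accumulated local change in
	 * free blocks reaches ar_statfs_percent percent of the master
	 * free-block count, in either direction.
	 */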
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
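	/*
	 * Fold the local change file into the master counters, then zero
	 * the local change both in core and in its on-disk buffer.
	 */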
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

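	/*
	 * With every journal glock held in the shared state, take the
	 * freeze glock in the exclusive state to stop all writes to the
	 * filesystem cluster-wide.
	 */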
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_freeze_unlock(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);

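	/*
	 * The in-core inode tracks a single allocation goal (i_goal), so
	 * the same value is written to both on-disk goal fields.
	 */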
	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

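	/*
	 * For journaled data and synchronous writeback, flush the log
	 * first so the inode's metadata reaches the journal before the
	 * metamapping pages are written back.
	 */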
	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

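	/*
	 * If this context has no open transaction, start a minimal one
	 * that only reserves the dinode block, and end it again below.
	 */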
	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_flush_delete_work(sdp);
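	/*
	 * On a withdraw, this function can run in the context of quotad or
	 * logd itself, in which case the daemon must not be stopped here.
	 */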
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	} else {
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
	}
	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		gfs2_make_fs_ro(sdp);
	}
	WARN_ON(gfs2_withdrawing(sdp));

	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

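	/*
	 * Runs after the freeze glock has been demoted; once the glock can
	 * be reacquired here (the cluster-wide freeze has ended), thaw the
	 * locally frozen superblock.
	 */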
	atomic_inc(&sb->s_active);
	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
	if (error) {
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		gfs2_freeze_unlock(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
	return;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
		error = -EBUSY;
		goto out;
	}

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EINVAL;
	}

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

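	/*
	 * Keep a window of up to "slots" asynchronous rgrp glock requests
	 * in flight; as each lock is granted, add that rgrp's counts to
	 * the running totals and reuse the slot for the next rgrp.
	 */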
	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

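	/*
	 * Other nodes may not have synced their local change files into
	 * the master yet, so the combined counters can be transiently
	 * inconsistent; clamp them to sane values.
	 */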
	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * If an inode's link count has dropped to zero while under memory
	 * pressure, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return 0;
	}

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	if (unlikely(!gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they don't have the inode open, they'll
	 * evict the cached inode and release the lock.  Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done.  As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible.  Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return gfs2_glock_holder_ready(gh) == 0;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
		goto should_delete;

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns.  The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};