xref: /linux/fs/gfs2/quota.c (revision edd13270)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
5  */
6 
7 /*
8  * Quota change tags are associated with each transaction that allocates or
9  * deallocates space.  Those changes are accumulated locally to each node (in a
10  * per-node file) and then are periodically synced to the quota file.  This
11  * avoids the bottleneck of constantly touching the quota file, but introduces
12  * fuzziness in the current usage value of IDs that are being used on different
13  * nodes in the cluster simultaneously.  So, it is possible for a user on
14  * multiple nodes to overrun their quota, but that overrun is controllable.
15  * Since quota tags are part of transactions, there is no need for a quota check
16  * program to be run after node crashes or anything like that.
17  *
18  * There are a couple of knobs that let the administrator manage the quota
19  * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
20  * sitting on one node before being synced to the quota file.  (The default is
21  * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
22  * of quota file syncs increases as the user moves closer to their limit.  The
23  * more frequent the syncs, the more accurate the quota enforcement, but that
24  * means that there is more contention between the nodes for the quota file.
25  * The default value is one.  This sets the maximum theoretical quota overrun
26  * (with an infinite number of nodes and infinite bandwidth) to twice the user's limit.  (In
27  * practice, the maximum overrun you see should be much less.)  A "quota_scale"
28  * number greater than one makes quota syncs more frequent and reduces the
29  * maximum overrun.  Numbers less than one (but greater than zero) make quota
30  * syncs less frequent.
31  *
32  * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
33  * the quota file, so it is not being constantly read.
34  */
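
/*
 * A condensed sketch (illustrative pseudo-C, not compiled or called) of
 * the "quota_scale" decision that need_sync() makes below; the real code
 * reads these values under the appropriate locks:
 *
 *	// synced_value and hard_limit come from the per-ID quota LVB
 *	s64 projected = synced_value +
 *		div_s64(local_change * n_journals * scale_num, scale_den);
 *	bool sync_now = local_change > 0 && projected >= hard_limit;
 *
 * With the default scale of 1/1, a node starts syncing once its local
 * pending change, multiplied by the number of journals in the cluster,
 * would push the globally synced value over the hard limit.
 */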
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/mm.h>
41 #include <linux/spinlock.h>
42 #include <linux/completion.h>
43 #include <linux/buffer_head.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/bio.h>
47 #include <linux/gfs2_ondisk.h>
48 #include <linux/kthread.h>
49 #include <linux/freezer.h>
50 #include <linux/quota.h>
51 #include <linux/dqblk_xfs.h>
52 #include <linux/lockref.h>
53 #include <linux/list_lru.h>
54 #include <linux/rcupdate.h>
55 #include <linux/rculist_bl.h>
56 #include <linux/bit_spinlock.h>
57 #include <linux/jhash.h>
58 #include <linux/vmalloc.h>
59 
60 #include "gfs2.h"
61 #include "incore.h"
62 #include "bmap.h"
63 #include "glock.h"
64 #include "glops.h"
65 #include "log.h"
66 #include "meta_io.h"
67 #include "quota.h"
68 #include "rgrp.h"
69 #include "super.h"
70 #include "trans.h"
71 #include "inode.h"
72 #include "util.h"
73 
74 #define GFS2_QD_HASH_SHIFT      12
75 #define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
76 #define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
77 
78 #define QC_CHANGE 0
79 #define QC_SYNC 1
80 
81 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
82 /*                     -> sd_bitmap_lock                              */
83 static DEFINE_SPINLOCK(qd_lock);
84 struct list_lru gfs2_qd_lru;
85 
86 static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
87 
88 static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
89 				 const struct kqid qid)
90 {
91 	unsigned int h;
92 
93 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
94 	h = jhash(&qid, sizeof(struct kqid), h);
95 
96 	return h & GFS2_QD_HASH_MASK;
97 }
98 
99 static inline void spin_lock_bucket(unsigned int hash)
100 {
101         hlist_bl_lock(&qd_hash_table[hash]);
102 }
103 
104 static inline void spin_unlock_bucket(unsigned int hash)
105 {
106         hlist_bl_unlock(&qd_hash_table[hash]);
107 }
108 
109 static void gfs2_qd_dealloc(struct rcu_head *rcu)
110 {
111 	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
112 	struct gfs2_sbd *sdp = qd->qd_sbd;
113 
114 	kmem_cache_free(gfs2_quotad_cachep, qd);
115 	if (atomic_dec_and_test(&sdp->sd_quota_count))
116 		wake_up(&sdp->sd_kill_wait);
117 }
118 
119 static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
120 {
121 	struct gfs2_sbd *sdp = qd->qd_sbd;
122 
123 	spin_lock(&qd_lock);
124 	list_del(&qd->qd_list);
125 	spin_unlock(&qd_lock);
126 
127 	spin_lock_bucket(qd->qd_hash);
128 	hlist_bl_del_rcu(&qd->qd_hlist);
129 	spin_unlock_bucket(qd->qd_hash);
130 
131 	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
132 		gfs2_assert_warn(sdp, !qd->qd_change);
133 		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
134 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
135 	}
136 
137 	gfs2_glock_put(qd->qd_gl);
138 	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
139 }
140 
141 static void gfs2_qd_list_dispose(struct list_head *list)
142 {
143 	struct gfs2_quota_data *qd;
144 
145 	while (!list_empty(list)) {
146 		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
147 		list_del(&qd->qd_lru);
148 
149 		gfs2_qd_dispose(qd);
150 	}
151 }
152 
153 
154 static enum lru_status gfs2_qd_isolate(struct list_head *item,
155 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
156 {
157 	struct list_head *dispose = arg;
158 	struct gfs2_quota_data *qd =
159 		list_entry(item, struct gfs2_quota_data, qd_lru);
160 	enum lru_status status;
161 
162 	if (!spin_trylock(&qd->qd_lockref.lock))
163 		return LRU_SKIP;
164 
165 	status = LRU_SKIP;
166 	if (qd->qd_lockref.count == 0) {
167 		lockref_mark_dead(&qd->qd_lockref);
168 		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
169 		status = LRU_REMOVED;
170 	}
171 
172 	spin_unlock(&qd->qd_lockref.lock);
173 	return status;
174 }
175 
176 static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
177 					 struct shrink_control *sc)
178 {
179 	LIST_HEAD(dispose);
180 	unsigned long freed;
181 
182 	if (!(sc->gfp_mask & __GFP_FS))
183 		return SHRINK_STOP;
184 
185 	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
186 				     gfs2_qd_isolate, &dispose);
187 
188 	gfs2_qd_list_dispose(&dispose);
189 
190 	return freed;
191 }
192 
193 static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
194 					  struct shrink_control *sc)
195 {
196 	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
197 }
198 
199 static struct shrinker *gfs2_qd_shrinker;
200 
201 int __init gfs2_qd_shrinker_init(void)
202 {
203 	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
204 	if (!gfs2_qd_shrinker)
205 		return -ENOMEM;
206 
207 	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
208 	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
209 
210 	shrinker_register(gfs2_qd_shrinker);
211 
212 	return 0;
213 }
214 
215 void gfs2_qd_shrinker_exit(void)
216 {
217 	shrinker_free(gfs2_qd_shrinker);
218 }
219 
220 static u64 qd2index(struct gfs2_quota_data *qd)
221 {
222 	struct kqid qid = qd->qd_id;
223 	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
224 		((qid.type == USRQUOTA) ? 0 : 1);
225 }
226 
227 static u64 qd2offset(struct gfs2_quota_data *qd)
228 {
229 	return qd2index(qd) * sizeof(struct gfs2_quota);
230 }
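
/*
 * Worked example of the index math above: user and group quotas
 * interleave in the quota file, so for ID 1000,
 *
 *	qd2index(user 1000)  = 2 * 1000 + 0 = 2000
 *	qd2index(group 1000) = 2 * 1000 + 1 = 2001
 *	qd2offset            = index * sizeof(struct gfs2_quota)
 *
 * which keeps the user and group records for an ID adjacent on disk.
 */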
231 
232 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
233 {
234 	struct gfs2_quota_data *qd;
235 	int error;
236 
237 	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
238 	if (!qd)
239 		return NULL;
240 
241 	qd->qd_sbd = sdp;
242 	qd->qd_lockref.count = 0;
243 	spin_lock_init(&qd->qd_lockref.lock);
244 	qd->qd_id = qid;
245 	qd->qd_slot = -1;
246 	INIT_LIST_HEAD(&qd->qd_lru);
247 	qd->qd_hash = hash;
248 
249 	error = gfs2_glock_get(sdp, qd2index(qd),
250 			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
251 	if (error)
252 		goto fail;
253 
254 	return qd;
255 
256 fail:
257 	kmem_cache_free(gfs2_quotad_cachep, qd);
258 	return NULL;
259 }
260 
261 static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
262 						     const struct gfs2_sbd *sdp,
263 						     struct kqid qid)
264 {
265 	struct gfs2_quota_data *qd;
266 	struct hlist_bl_node *h;
267 
268 	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
269 		if (!qid_eq(qd->qd_id, qid))
270 			continue;
271 		if (qd->qd_sbd != sdp)
272 			continue;
273 		if (lockref_get_not_dead(&qd->qd_lockref)) {
274 			list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
275 			return qd;
276 		}
277 	}
278 
279 	return NULL;
280 }
281 
282 
283 static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
284 		  struct gfs2_quota_data **qdp)
285 {
286 	struct gfs2_quota_data *qd, *new_qd;
287 	unsigned int hash = gfs2_qd_hash(sdp, qid);
288 
289 	rcu_read_lock();
290 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
291 	rcu_read_unlock();
292 
293 	if (qd)
294 		return 0;
295 
296 	new_qd = qd_alloc(hash, sdp, qid);
297 	if (!new_qd)
298 		return -ENOMEM;
299 
300 	spin_lock(&qd_lock);
301 	spin_lock_bucket(hash);
302 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
303 	if (qd == NULL) {
304 		new_qd->qd_lockref.count++;
305 		*qdp = new_qd;
306 		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
307 		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
308 		atomic_inc(&sdp->sd_quota_count);
309 	}
310 	spin_unlock_bucket(hash);
311 	spin_unlock(&qd_lock);
312 
313 	if (qd) {
314 		gfs2_glock_put(new_qd->qd_gl);
315 		kmem_cache_free(gfs2_quotad_cachep, new_qd);
316 	}
317 
318 	return 0;
319 }
320 
321 
322 static void qd_hold(struct gfs2_quota_data *qd)
323 {
324 	struct gfs2_sbd *sdp = qd->qd_sbd;
325 	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
326 	lockref_get(&qd->qd_lockref);
327 }
328 
329 static void qd_put(struct gfs2_quota_data *qd)
330 {
331 	struct gfs2_sbd *sdp;
332 
333 	if (lockref_put_or_lock(&qd->qd_lockref))
334 		return;
335 
336 	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
337 	sdp = qd->qd_sbd;
338 	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
339 		lockref_mark_dead(&qd->qd_lockref);
340 		spin_unlock(&qd->qd_lockref.lock);
341 
342 		gfs2_qd_dispose(qd);
343 		return;
344 	}
345 
346 	qd->qd_lockref.count = 0;
347 	list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
348 	spin_unlock(&qd->qd_lockref.lock);
349 }
350 
351 static int slot_get(struct gfs2_quota_data *qd)
352 {
353 	struct gfs2_sbd *sdp = qd->qd_sbd;
354 	unsigned int bit;
355 	int error = 0;
356 
357 	spin_lock(&sdp->sd_bitmap_lock);
358 	if (qd->qd_slot_ref == 0) {
359 		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
360 					  sdp->sd_quota_slots);
361 		if (bit >= sdp->sd_quota_slots) {
362 			error = -ENOSPC;
363 			goto out;
364 		}
365 		set_bit(bit, sdp->sd_quota_bitmap);
366 		qd->qd_slot = bit;
367 	}
368 	qd->qd_slot_ref++;
369 out:
370 	spin_unlock(&sdp->sd_bitmap_lock);
371 	return error;
372 }
373 
374 static void slot_hold(struct gfs2_quota_data *qd)
375 {
376 	struct gfs2_sbd *sdp = qd->qd_sbd;
377 
378 	spin_lock(&sdp->sd_bitmap_lock);
379 	gfs2_assert(sdp, qd->qd_slot_ref);
380 	qd->qd_slot_ref++;
381 	spin_unlock(&sdp->sd_bitmap_lock);
382 }
383 
384 static void slot_put(struct gfs2_quota_data *qd)
385 {
386 	struct gfs2_sbd *sdp = qd->qd_sbd;
387 
388 	spin_lock(&sdp->sd_bitmap_lock);
389 	gfs2_assert(sdp, qd->qd_slot_ref);
390 	if (!--qd->qd_slot_ref) {
391 		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
392 		qd->qd_slot = -1;
393 	}
394 	spin_unlock(&sdp->sd_bitmap_lock);
395 }
396 
397 static int bh_get(struct gfs2_quota_data *qd)
398 {
399 	struct gfs2_sbd *sdp = qd->qd_sbd;
400 	struct inode *inode = sdp->sd_qc_inode;
401 	struct gfs2_inode *ip = GFS2_I(inode);
402 	unsigned int block, offset;
403 	struct buffer_head *bh;
404 	struct iomap iomap = { };
405 	int error;
406 
407 	mutex_lock(&sdp->sd_quota_mutex);
408 
409 	if (qd->qd_bh_count++) {
410 		mutex_unlock(&sdp->sd_quota_mutex);
411 		return 0;
412 	}
413 
414 	block = qd->qd_slot / sdp->sd_qc_per_block;
415 	offset = qd->qd_slot % sdp->sd_qc_per_block;
416 
417 	error = gfs2_iomap_get(inode,
418 			       (loff_t)block << inode->i_blkbits,
419 			       i_blocksize(inode), &iomap);
420 	if (error)
421 		goto fail;
422 	error = -ENOENT;
423 	if (iomap.type != IOMAP_MAPPED)
424 		goto fail;
425 
426 	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
427 			       DIO_WAIT, 0, &bh);
428 	if (error)
429 		goto fail;
430 	error = -EIO;
431 	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
432 		goto fail_brelse;
433 
434 	qd->qd_bh = bh;
435 	qd->qd_bh_qc = (struct gfs2_quota_change *)
436 		(bh->b_data + sizeof(struct gfs2_meta_header) +
437 		 offset * sizeof(struct gfs2_quota_change));
438 
439 	mutex_unlock(&sdp->sd_quota_mutex);
440 
441 	return 0;
442 
443 fail_brelse:
444 	brelse(bh);
445 fail:
446 	qd->qd_bh_count--;
447 	mutex_unlock(&sdp->sd_quota_mutex);
448 	return error;
449 }
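
/*
 * Worked example of the slot arithmetic in bh_get(): sd_qc_per_block is
 * derived at mount time from the filesystem block size (roughly
 * (bsize - sizeof(struct gfs2_meta_header)) /
 * sizeof(struct gfs2_quota_change)), so a quota change slot maps into
 * the per-node quota change file as
 *
 *	block  = qd_slot / sd_qc_per_block
 *	offset = qd_slot % sd_qc_per_block
 *
 * e.g. with 99 changes per block (hypothetical), slot 250 lives at
 * offset 52 within block 2.
 */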
450 
451 static void bh_put(struct gfs2_quota_data *qd)
452 {
453 	struct gfs2_sbd *sdp = qd->qd_sbd;
454 
455 	mutex_lock(&sdp->sd_quota_mutex);
456 	gfs2_assert(sdp, qd->qd_bh_count);
457 	if (!--qd->qd_bh_count) {
458 		brelse(qd->qd_bh);
459 		qd->qd_bh = NULL;
460 		qd->qd_bh_qc = NULL;
461 	}
462 	mutex_unlock(&sdp->sd_quota_mutex);
463 }
464 
465 static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
466 			 u64 *sync_gen)
467 {
468 	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
469 	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
470 	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
471 		return 0;
472 
473 	/*
474 	 * If qd_change is 0 it means a pending quota change was negated.
475 	 * We should not sync it, but we still have a qd reference and slot
476 	 * reference taken by gfs2_quota_change -> do_qc that need to be put.
477 	 */
478 	if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
479 		slot_put(qd);
480 		qd_put(qd);
481 		return 0;
482 	}
483 
484 	if (!lockref_get_not_dead(&qd->qd_lockref))
485 		return 0;
486 
487 	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
488 	set_bit(QDF_LOCKED, &qd->qd_flags);
489 	qd->qd_change_sync = qd->qd_change;
490 	slot_hold(qd);
491 	return 1;
492 }
493 
494 static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
495 {
496 	int error;
497 
498 	error = bh_get(qd);
499 	if (!error)
500 		return 0;
501 
502 	clear_bit(QDF_LOCKED, &qd->qd_flags);
503 	slot_put(qd);
504 	qd_put(qd);
505 	return error;
506 }
507 
508 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
509 {
510 	struct gfs2_quota_data *qd = NULL, *iter;
511 	int error;
512 
513 	*qdp = NULL;
514 
515 	if (sb_rdonly(sdp->sd_vfs))
516 		return 0;
517 
518 	spin_lock(&qd_lock);
519 
520 	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
521 		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
522 			qd = iter;
523 			break;
524 		}
525 	}
526 
527 	spin_unlock(&qd_lock);
528 
529 	if (qd) {
530 		error = qd_bh_get_or_undo(sdp, qd);
531 		if (error)
532 			return error;
533 		*qdp = qd;
534 	}
535 
536 	return 0;
537 }
538 
539 static void qdsb_put(struct gfs2_quota_data *qd)
540 {
541 	bh_put(qd);
542 	slot_put(qd);
543 	qd_put(qd);
544 }
545 
546 static void qd_unlock(struct gfs2_quota_data *qd)
547 {
548 	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
549 	clear_bit(QDF_LOCKED, &qd->qd_flags);
550 	qdsb_put(qd);
551 }
552 
553 static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
554 		    struct gfs2_quota_data **qdp)
555 {
556 	int error;
557 
558 	error = qd_get(sdp, qid, qdp);
559 	if (error)
560 		return error;
561 
562 	error = slot_get(*qdp);
563 	if (error)
564 		goto fail;
565 
566 	error = bh_get(*qdp);
567 	if (error)
568 		goto fail_slot;
569 
570 	return 0;
571 
572 fail_slot:
573 	slot_put(*qdp);
574 fail:
575 	qd_put(*qdp);
576 	return error;
577 }
578 
579 /**
580  * gfs2_qa_get - make sure we have a quota allocation data structure,
581  *               if necessary
582  * @ip: the inode for this reservation
583  */
584 int gfs2_qa_get(struct gfs2_inode *ip)
585 {
586 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
587 	struct inode *inode = &ip->i_inode;
588 
589 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
590 		return 0;
591 
592 	spin_lock(&inode->i_lock);
593 	if (ip->i_qadata == NULL) {
594 		struct gfs2_qadata *tmp;
595 
596 		spin_unlock(&inode->i_lock);
597 		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
598 		if (!tmp)
599 			return -ENOMEM;
600 
601 		spin_lock(&inode->i_lock);
602 		if (ip->i_qadata == NULL)
603 			ip->i_qadata = tmp;
604 		else
605 			kmem_cache_free(gfs2_qadata_cachep, tmp);
606 	}
607 	ip->i_qadata->qa_ref++;
608 	spin_unlock(&inode->i_lock);
609 	return 0;
610 }
611 
612 void gfs2_qa_put(struct gfs2_inode *ip)
613 {
614 	struct inode *inode = &ip->i_inode;
615 
616 	spin_lock(&inode->i_lock);
617 	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
618 		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
619 		ip->i_qadata = NULL;
620 	}
621 	spin_unlock(&inode->i_lock);
622 }
623 
624 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
625 {
626 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
627 	struct gfs2_quota_data **qd;
628 	int error;
629 
630 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
631 		return 0;
632 
633 	error = gfs2_qa_get(ip);
634 	if (error)
635 		return error;
636 
637 	qd = ip->i_qadata->qa_qd;
638 
639 	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
640 	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
641 		error = -EIO;
642 		gfs2_qa_put(ip);
643 		goto out;
644 	}
645 
646 	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
647 	if (error)
648 		goto out_unhold;
649 	ip->i_qadata->qa_qd_num++;
650 	qd++;
651 
652 	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
653 	if (error)
654 		goto out_unhold;
655 	ip->i_qadata->qa_qd_num++;
656 	qd++;
657 
658 	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
659 	    !uid_eq(uid, ip->i_inode.i_uid)) {
660 		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
661 		if (error)
662 			goto out_unhold;
663 		ip->i_qadata->qa_qd_num++;
664 		qd++;
665 	}
666 
667 	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
668 	    !gid_eq(gid, ip->i_inode.i_gid)) {
669 		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
670 		if (error)
671 			goto out_unhold;
672 		ip->i_qadata->qa_qd_num++;
673 		qd++;
674 	}
675 
676 out_unhold:
677 	if (error)
678 		gfs2_quota_unhold(ip);
679 out:
680 	return error;
681 }
682 
683 void gfs2_quota_unhold(struct gfs2_inode *ip)
684 {
685 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
686 	u32 x;
687 
688 	if (ip->i_qadata == NULL)
689 		return;
690 
691 	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
692 
693 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
694 		qdsb_put(ip->i_qadata->qa_qd[x]);
695 		ip->i_qadata->qa_qd[x] = NULL;
696 	}
697 	ip->i_qadata->qa_qd_num = 0;
698 	gfs2_qa_put(ip);
699 }
700 
701 static int sort_qd(const void *a, const void *b)
702 {
703 	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
704 	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
705 
706 	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
707 		return -1;
708 	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
709 		return 1;
710 	return 0;
711 }
712 
713 static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
714 {
715 	struct gfs2_sbd *sdp = qd->qd_sbd;
716 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
717 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
718 	s64 x;
719 
720 	mutex_lock(&sdp->sd_quota_mutex);
721 	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
722 
723 	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
724 		qc->qc_change = 0;
725 		qc->qc_flags = 0;
726 		if (qd->qd_id.type == USRQUOTA)
727 			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
728 		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
729 	}
730 
731 	x = be64_to_cpu(qc->qc_change) + change;
732 	qc->qc_change = cpu_to_be64(x);
733 
734 	spin_lock(&qd_lock);
735 	qd->qd_change = x;
736 	spin_unlock(&qd_lock);
737 
738 	if (qc_type == QC_CHANGE) {
739 		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
740 			qd_hold(qd);
741 			slot_hold(qd);
742 		}
743 	} else {
744 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
745 		clear_bit(QDF_CHANGE, &qd->qd_flags);
746 		qc->qc_flags = 0;
747 		qc->qc_id = 0;
748 		slot_put(qd);
749 		qd_put(qd);
750 	}
751 
752 	if (change < 0) /* Reset quiet flag if we freed some blocks */
753 		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
754 	mutex_unlock(&sdp->sd_quota_mutex);
755 }
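
/*
 * Illustrative accounting sequence for do_qc() (hypothetical numbers):
 * three transactions calling do_qc(qd, +10, QC_CHANGE), do_qc(qd, +5,
 * QC_CHANGE) and do_qc(qd, -3, QC_CHANGE) leave both the on-disk change
 * tag and qd->qd_change at +12, with a qd and slot reference taken when
 * the tag first became active.  A later sync then calls
 * do_qc(qd, -12, QC_SYNC), which zeroes the tag and drops those
 * references again.
 */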
756 
757 static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
758 				  unsigned off, void *buf, unsigned bytes)
759 {
760 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
761 	struct inode *inode = &ip->i_inode;
762 	struct address_space *mapping = inode->i_mapping;
763 	struct folio *folio;
764 	struct buffer_head *bh;
765 	u64 blk;
766 	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
767 	unsigned to_write = bytes, pg_off = off;
768 
769 	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
770 	boff = off % bsize;
771 
772 	folio = filemap_grab_folio(mapping, index);
773 	if (IS_ERR(folio))
774 		return PTR_ERR(folio);
775 	bh = folio_buffers(folio);
776 	if (!bh)
777 		bh = create_empty_buffers(folio, bsize, 0);
778 
779 	for (;;) {
780 		/* Find the beginning block within the folio */
781 		if (pg_off >= ((bnum * bsize) + bsize)) {
782 			bh = bh->b_this_page;
783 			bnum++;
784 			blk++;
785 			continue;
786 		}
787 		if (!buffer_mapped(bh)) {
788 			gfs2_block_map(inode, blk, bh, 1);
789 			if (!buffer_mapped(bh))
790 				goto unlock_out;
791 			/* If it's a newly allocated disk block, zero it */
792 			if (buffer_new(bh))
793 				folio_zero_range(folio, bnum * bsize,
794 						bh->b_size);
795 		}
796 		if (folio_test_uptodate(folio))
797 			set_buffer_uptodate(bh);
798 		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
799 			goto unlock_out;
800 		gfs2_trans_add_data(ip->i_gl, bh);
801 
802 		/* If we need to write to the next block as well */
803 		if (to_write > (bsize - boff)) {
804 			pg_off += (bsize - boff);
805 			to_write -= (bsize - boff);
806 			boff = pg_off % bsize;
807 			continue;
808 		}
809 		break;
810 	}
811 
812 	/* Write to the folio, now that we have setup the buffer(s) */
813 	memcpy_to_folio(folio, off, buf, bytes);
814 	flush_dcache_folio(folio);
815 	folio_unlock(folio);
816 	folio_put(folio);
817 
818 	return 0;
819 
820 unlock_out:
821 	folio_unlock(folio);
822 	folio_put(folio);
823 	return -EIO;
824 }
825 
826 static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
827 				 loff_t loc)
828 {
829 	unsigned long pg_beg;
830 	unsigned pg_off, nbytes, overflow = 0;
831 	int error;
832 	void *ptr;
833 
834 	nbytes = sizeof(struct gfs2_quota);
835 
836 	pg_beg = loc >> PAGE_SHIFT;
837 	pg_off = offset_in_page(loc);
838 
839 	/* If the quota straddles a page boundary, split the write in two */
840 	if ((pg_off + nbytes) > PAGE_SIZE)
841 		overflow = (pg_off + nbytes) - PAGE_SIZE;
842 
843 	ptr = qp;
844 	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
845 				       nbytes - overflow);
846 	/* If there's an overflow, write the remaining bytes to the next page */
847 	if (!error && overflow)
848 		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
849 					       ptr + nbytes - overflow,
850 					       overflow);
851 	return error;
852 }
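
/*
 * Worked example of the split above (illustrative sizes: a PAGE_SIZE of
 * 4096 and an 88-byte quota entry): for loc == 4060, pg_off is 4060 and
 * pg_off + nbytes == 4148 > PAGE_SIZE, so overflow == 52.  The first
 * gfs2_write_buf_to_page() call writes the leading 36 bytes at the end
 * of the page; the second writes the remaining 52 bytes at offset 0 of
 * the following page.
 */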
853 
854 /**
855  * gfs2_adjust_quota - adjust record of current block usage
856  * @sdp: The superblock
857  * @loc: Offset of the entry in the quota file
858  * @change: The amount of usage change to record
859  * @qd: The quota data
860  * @fdq: The updated limits to record
861  *
862  * This function was mostly borrowed from gfs2_block_truncate_page which was
863  * in turn mostly borrowed from ext3.
864  *
865  * Returns: 0 or -ve on error
866  */
867 
868 static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
869 			     s64 change, struct gfs2_quota_data *qd,
870 			     struct qc_dqblk *fdq)
871 {
872 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
873 	struct inode *inode = &ip->i_inode;
874 	struct gfs2_quota q;
875 	int err;
876 	u64 size;
877 
878 	if (gfs2_is_stuffed(ip)) {
879 		err = gfs2_unstuff_dinode(ip);
880 		if (err)
881 			return err;
882 	}
883 
884 	memset(&q, 0, sizeof(struct gfs2_quota));
885 	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
886 	if (err < 0)
887 		return err;
888 
889 	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
890 	be64_add_cpu(&q.qu_value, change);
891 	if (((s64)be64_to_cpu(q.qu_value)) < 0)
892 		q.qu_value = 0; /* Never go negative on quota usage */
893 	qd->qd_qb.qb_value = q.qu_value;
894 	if (fdq) {
895 		if (fdq->d_fieldmask & QC_SPC_SOFT) {
896 			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
897 			qd->qd_qb.qb_warn = q.qu_warn;
898 		}
899 		if (fdq->d_fieldmask & QC_SPC_HARD) {
900 			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
901 			qd->qd_qb.qb_limit = q.qu_limit;
902 		}
903 		if (fdq->d_fieldmask & QC_SPACE) {
904 			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
905 			qd->qd_qb.qb_value = q.qu_value;
906 		}
907 	}
908 
909 	err = gfs2_write_disk_quota(sdp, &q, loc);
910 	if (!err) {
911 		size = loc + sizeof(struct gfs2_quota);
912 		if (size > inode->i_size)
913 			i_size_write(inode, size);
914 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
915 		mark_inode_dirty(inode);
916 		set_bit(QDF_REFRESH, &qd->qd_flags);
917 	}
918 
919 	return err;
920 }
921 
922 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
923 {
924 	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
925 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
926 	struct gfs2_alloc_parms ap = {};
927 	unsigned int data_blocks, ind_blocks;
928 	struct gfs2_holder *ghs, i_gh;
929 	unsigned int qx, x;
930 	struct gfs2_quota_data *qd;
931 	unsigned reserved;
932 	loff_t offset;
933 	unsigned int nalloc = 0, blocks;
934 	int error;
935 
936 	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
937 			      &data_blocks, &ind_blocks);
938 
939 	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
940 	if (!ghs)
941 		return -ENOMEM;
942 
943 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
944 	inode_lock(&ip->i_inode);
945 	for (qx = 0; qx < num_qd; qx++) {
946 		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
947 					   GL_NOCACHE, &ghs[qx]);
948 		if (error)
949 			goto out_dq;
950 	}
951 
952 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
953 	if (error)
954 		goto out_dq;
955 
956 	for (x = 0; x < num_qd; x++) {
957 		offset = qd2offset(qda[x]);
958 		if (gfs2_write_alloc_required(ip, offset,
959 					      sizeof(struct gfs2_quota)))
960 			nalloc++;
961 	}
962 
963 	/*
964 	 * 1 blk for unstuffing inode if stuffed. We add this extra
965 	 * block to the reservation unconditionally. If the inode
966 	 * doesn't need unstuffing, the block will be released to the
967 	 * rgrp since it won't be allocated during the transaction
968 	 */
969 	/* +3 in the end for unstuffing block, inode size update block
970 	 * and another block in case the quota straddles a page boundary and
971 	 * two blocks need to be updated instead of one */
972 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
973 
974 	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
975 	ap.target = reserved;
976 	error = gfs2_inplace_reserve(ip, &ap);
977 	if (error)
978 		goto out_alloc;
979 
980 	if (nalloc)
981 		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
982 
983 	error = gfs2_trans_begin(sdp, blocks, 0);
984 	if (error)
985 		goto out_ipres;
986 
987 	for (x = 0; x < num_qd; x++) {
988 		qd = qda[x];
989 		offset = qd2offset(qd);
990 		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
991 							NULL);
992 		if (error)
993 			goto out_end_trans;
994 
995 		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
996 		set_bit(QDF_REFRESH, &qd->qd_flags);
997 	}
998 
999 out_end_trans:
1000 	gfs2_trans_end(sdp);
1001 out_ipres:
1002 	gfs2_inplace_release(ip);
1003 out_alloc:
1004 	gfs2_glock_dq_uninit(&i_gh);
1005 out_dq:
1006 	while (qx--)
1007 		gfs2_glock_dq_uninit(&ghs[qx]);
1008 	inode_unlock(&ip->i_inode);
1009 	kfree(ghs);
1010 	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
1011 		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
1012 	if (!error) {
1013 		for (x = 0; x < num_qd; x++)
1014 			qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
1015 	}
1016 	return error;
1017 }
1018 
1019 static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
1020 {
1021 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1022 	struct gfs2_quota q;
1023 	struct gfs2_quota_lvb *qlvb;
1024 	loff_t pos;
1025 	int error;
1026 
1027 	memset(&q, 0, sizeof(struct gfs2_quota));
1028 	pos = qd2offset(qd);
1029 	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
1030 	if (error < 0)
1031 		return error;
1032 
1033 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1034 	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1035 	qlvb->__pad = 0;
1036 	qlvb->qb_limit = q.qu_limit;
1037 	qlvb->qb_warn = q.qu_warn;
1038 	qlvb->qb_value = q.qu_value;
1039 	qd->qd_qb = *qlvb;
1040 
1041 	return 0;
1042 }
1043 
1044 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1045 		    struct gfs2_holder *q_gh)
1046 {
1047 	struct gfs2_sbd *sdp = qd->qd_sbd;
1048 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1049 	struct gfs2_holder i_gh;
1050 	int error;
1051 
1052 	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
1053 restart:
1054 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1055 	if (error)
1056 		return error;
1057 
1058 	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1059 		force_refresh = FORCE;
1060 
1061 	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1062 
1063 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1064 		gfs2_glock_dq_uninit(q_gh);
1065 		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1066 					   GL_NOCACHE, q_gh);
1067 		if (error)
1068 			return error;
1069 
1070 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1071 		if (error)
1072 			goto fail;
1073 
1074 		error = update_qd(sdp, qd);
1075 		if (error)
1076 			goto fail_gunlock;
1077 
1078 		gfs2_glock_dq_uninit(&i_gh);
1079 		gfs2_glock_dq_uninit(q_gh);
1080 		force_refresh = 0;
1081 		goto restart;
1082 	}
1083 
1084 	return 0;
1085 
1086 fail_gunlock:
1087 	gfs2_glock_dq_uninit(&i_gh);
1088 fail:
1089 	gfs2_glock_dq_uninit(q_gh);
1090 	return error;
1091 }
1092 
1093 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1094 {
1095 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1096 	struct gfs2_quota_data *qd;
1097 	u32 x;
1098 	int error;
1099 
1100 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1101 		return 0;
1102 
1103 	error = gfs2_quota_hold(ip, uid, gid);
1104 	if (error)
1105 		return error;
1106 
1107 	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1108 	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1109 
1110 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1111 		qd = ip->i_qadata->qa_qd[x];
1112 		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1113 		if (error)
1114 			break;
1115 	}
1116 
1117 	if (!error)
1118 		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1119 	else {
1120 		while (x--)
1121 			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1122 		gfs2_quota_unhold(ip);
1123 	}
1124 
1125 	return error;
1126 }
1127 
1128 static bool need_sync(struct gfs2_quota_data *qd)
1129 {
1130 	struct gfs2_sbd *sdp = qd->qd_sbd;
1131 	struct gfs2_tune *gt = &sdp->sd_tune;
1132 	s64 value;
1133 	unsigned int num, den;
1134 
1135 	if (!qd->qd_qb.qb_limit)
1136 		return false;
1137 
1138 	spin_lock(&qd_lock);
1139 	value = qd->qd_change;
1140 	spin_unlock(&qd_lock);
1141 
1142 	spin_lock(&gt->gt_spin);
1143 	num = gt->gt_quota_scale_num;
1144 	den = gt->gt_quota_scale_den;
1145 	spin_unlock(&gt->gt_spin);
1146 
1147 	if (value <= 0)
1148 		return false;
1149 	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1150 		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1151 		return false;
1152 	else {
1153 		value *= gfs2_jindex_size(sdp) * num;
1154 		value = div_s64(value, den);
1155 		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1156 		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1157 			return false;
1158 	}
1159 
1160 	return true;
1161 }
1162 
1163 void gfs2_quota_unlock(struct gfs2_inode *ip)
1164 {
1165 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1166 	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
1167 	unsigned int count = 0;
1168 	u32 x;
1169 	int found;
1170 
1171 	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1172 		return;
1173 
1174 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1175 		struct gfs2_quota_data *qd;
1176 		bool sync;
1177 
1178 		qd = ip->i_qadata->qa_qd[x];
1179 		sync = need_sync(qd);
1180 
1181 		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1182 		if (!sync)
1183 			continue;
1184 
1185 		spin_lock(&qd_lock);
1186 		found = qd_check_sync(sdp, qd, NULL);
1187 		spin_unlock(&qd_lock);
1188 
1189 		if (!found)
1190 			continue;
1191 
1192 		if (!qd_bh_get_or_undo(sdp, qd))
1193 			qda[count++] = qd;
1194 	}
1195 
1196 	if (count) {
1197 		do_sync(count, qda);
1198 		for (x = 0; x < count; x++)
1199 			qd_unlock(qda[x]);
1200 	}
1201 
1202 	gfs2_quota_unhold(ip);
1203 }
1204 
1205 #define MAX_LINE 256
1206 
1207 static void print_message(struct gfs2_quota_data *qd, char *type)
1208 {
1209 	struct gfs2_sbd *sdp = qd->qd_sbd;
1210 
1211 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
1212 		fs_info(sdp, "quota %s for %s %u\n",
1213 			type,
1214 			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1215 			from_kqid(&init_user_ns, qd->qd_id));
1216 	}
1217 }
1218 
1219 /**
1220  * gfs2_quota_check - check if allocating new blocks will exceed quota
1221  * @ip:  The inode for which this check is being performed
1222  * @uid: The uid to check against
1223  * @gid: The gid to check against
1224  * @ap:  The allocation parameters. ap->target contains the requested
1225  *       blocks. ap->min_target, if set, contains the minimum blks
1226  *       requested.
1227  *
1228  * Returns: 0 on success.
1229  *                  min_req = ap->min_target ? ap->min_target : ap->target;
1230  *                  quota must allow at least min_req blks for success and
1231  *                  ap->allowed is set to the number of blocks allowed
1232  *
1233  *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1234  *                  of blocks available.
1235  */
1236 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1237 		     struct gfs2_alloc_parms *ap)
1238 {
1239 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1240 	struct gfs2_quota_data *qd;
1241 	s64 value, warn, limit;
1242 	u32 x;
1243 	int error = 0;
1244 
1245 	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1246 	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1247 		return 0;
1248 
1249 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1250 		qd = ip->i_qadata->qa_qd[x];
1251 
1252 		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1253 		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1254 			continue;
1255 
1256 		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1257 		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1258 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1259 		spin_lock(&qd_lock);
1260 		value += qd->qd_change;
1261 		spin_unlock(&qd_lock);
1262 
1263 		if (limit > 0 && (limit - value) < ap->allowed)
1264 			ap->allowed = limit - value;
1265 		/* If we can't meet the target */
1266 		if (limit && limit < (value + (s64)ap->target)) {
1267 			/* If no min_target specified or we don't meet
1268 			 * min_target, return -EDQUOT */
1269 			if (!ap->min_target || ap->min_target > ap->allowed) {
1270 				if (!test_and_set_bit(QDF_QMSG_QUIET,
1271 						      &qd->qd_flags)) {
1272 					print_message(qd, "exceeded");
1273 					quota_send_warning(qd->qd_id,
1274 							   sdp->sd_vfs->s_dev,
1275 							   QUOTA_NL_BHARDWARN);
1276 				}
1277 				error = -EDQUOT;
1278 				break;
1279 			}
1280 		} else if (warn && warn < value &&
1281 			   time_after_eq(jiffies, qd->qd_last_warn +
1282 					 gfs2_tune_get(sdp, gt_quota_warn_period)
1283 					 * HZ)) {
1284 			quota_send_warning(qd->qd_id,
1285 					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1286 			print_message(qd, "warning");
1287 			error = 0;
1288 			qd->qd_last_warn = jiffies;
1289 		}
1290 	}
1291 	return error;
1292 }
1293 
1294 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1295 		       kuid_t uid, kgid_t gid)
1296 {
1297 	struct gfs2_quota_data *qd;
1298 	u32 x;
1299 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1300 
1301 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
1302 	    gfs2_assert_warn(sdp, change))
1303 		return;
1304 	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1305 		return;
1306 
1307 	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1308 				 ip->i_qadata->qa_ref > 0))
1309 		return;
1310 	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1311 		qd = ip->i_qadata->qa_qd[x];
1312 
1313 		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1314 		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1315 			do_qc(qd, change, QC_CHANGE);
1316 		}
1317 	}
1318 }
1319 
1320 static bool qd_changed(struct gfs2_sbd *sdp)
1321 {
1322 	struct gfs2_quota_data *qd;
1323 	bool changed = false;
1324 
1325 	spin_lock(&qd_lock);
1326 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1327 		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
1328 		    !test_bit(QDF_CHANGE, &qd->qd_flags))
1329 			continue;
1330 
1331 		changed = true;
1332 		break;
1333 	}
1334 	spin_unlock(&qd_lock);
1335 	return changed;
1336 }
1337 
1338 int gfs2_quota_sync(struct super_block *sb, int type)
1339 {
1340 	struct gfs2_sbd *sdp = sb->s_fs_info;
1341 	struct gfs2_quota_data **qda;
1342 	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1343 	unsigned int num_qd;
1344 	unsigned int x;
1345 	int error = 0;
1346 
1347 	if (!qd_changed(sdp))
1348 		return 0;
1349 
1350 	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1351 	if (!qda)
1352 		return -ENOMEM;
1353 
1354 	mutex_lock(&sdp->sd_quota_sync_mutex);
1355 	sdp->sd_quota_sync_gen++;
1356 
1357 	do {
1358 		num_qd = 0;
1359 
1360 		for (;;) {
1361 			error = qd_fish(sdp, qda + num_qd);
1362 			if (error || !qda[num_qd])
1363 				break;
1364 			if (++num_qd == max_qd)
1365 				break;
1366 		}
1367 
1368 		if (num_qd) {
1369 			if (!error)
1370 				error = do_sync(num_qd, qda);
1371 
1372 			for (x = 0; x < num_qd; x++)
1373 				qd_unlock(qda[x]);
1374 		}
1375 	} while (!error && num_qd == max_qd);
1376 
1377 	mutex_unlock(&sdp->sd_quota_sync_mutex);
1378 	kfree(qda);
1379 
1380 	return error;
1381 }
1382 
1383 int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1384 {
1385 	struct gfs2_quota_data *qd;
1386 	struct gfs2_holder q_gh;
1387 	int error;
1388 
1389 	error = qd_get(sdp, qid, &qd);
1390 	if (error)
1391 		return error;
1392 
1393 	error = do_glock(qd, FORCE, &q_gh);
1394 	if (!error)
1395 		gfs2_glock_dq_uninit(&q_gh);
1396 
1397 	qd_put(qd);
1398 	return error;
1399 }
1400 
1401 int gfs2_quota_init(struct gfs2_sbd *sdp)
1402 {
1403 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1404 	u64 size = i_size_read(sdp->sd_qc_inode);
1405 	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1406 	unsigned int x, slot = 0;
1407 	unsigned int found = 0;
1408 	unsigned int hash;
1409 	unsigned int bm_size;
1410 	u64 dblock;
1411 	u32 extlen = 0;
1412 	int error;
1413 
1414 	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1415 		return -EIO;
1416 
1417 	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1418 	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1419 	bm_size *= sizeof(unsigned long);
1420 	error = -ENOMEM;
1421 	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1422 	if (sdp->sd_quota_bitmap == NULL)
1423 		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1424 						 __GFP_ZERO);
1425 	if (!sdp->sd_quota_bitmap)
1426 		return error;
1427 
1428 	for (x = 0; x < blocks; x++) {
1429 		struct buffer_head *bh;
1430 		const struct gfs2_quota_change *qc;
1431 		unsigned int y;
1432 
1433 		if (!extlen) {
1434 			extlen = 32;
1435 			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1436 			if (error)
1437 				goto fail;
1438 		}
1439 		error = -EIO;
1440 		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1441 		if (!bh)
1442 			goto fail;
1443 		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1444 			brelse(bh);
1445 			goto fail;
1446 		}
1447 
1448 		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1449 		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1450 		     y++, slot++) {
1451 			struct gfs2_quota_data *qd;
1452 			s64 qc_change = be64_to_cpu(qc->qc_change);
1453 			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1454 			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1455 						USRQUOTA : GRPQUOTA;
1456 			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1457 						      be32_to_cpu(qc->qc_id));
1458 			qc++;
1459 			if (!qc_change)
1460 				continue;
1461 
1462 			hash = gfs2_qd_hash(sdp, qc_id);
1463 			qd = qd_alloc(hash, sdp, qc_id);
1464 			if (qd == NULL) {
1465 				brelse(bh);
1466 				goto fail;
1467 			}
1468 
1469 			set_bit(QDF_CHANGE, &qd->qd_flags);
1470 			qd->qd_change = qc_change;
1471 			qd->qd_slot = slot;
1472 			qd->qd_slot_ref = 1;
1473 
1474 			spin_lock(&qd_lock);
1475 			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1476 			list_add(&qd->qd_list, &sdp->sd_quota_list);
1477 			atomic_inc(&sdp->sd_quota_count);
1478 			spin_unlock(&qd_lock);
1479 
1480 			spin_lock_bucket(hash);
1481 			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1482 			spin_unlock_bucket(hash);
1483 
1484 			found++;
1485 		}
1486 
1487 		brelse(bh);
1488 		dblock++;
1489 		extlen--;
1490 	}
1491 
1492 	if (found)
1493 		fs_info(sdp, "found %u quota changes\n", found);
1494 
1495 	return 0;
1496 
1497 fail:
1498 	gfs2_quota_cleanup(sdp);
1499 	return error;
1500 }
1501 
1502 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1503 {
1504 	struct gfs2_quota_data *qd;
1505 	LIST_HEAD(dispose);
1506 	int count;
1507 
1508 	BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
1509 		test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
1510 
1511 	spin_lock(&qd_lock);
1512 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1513 		spin_lock(&qd->qd_lockref.lock);
1514 		if (qd->qd_lockref.count != 0) {
1515 			spin_unlock(&qd->qd_lockref.lock);
1516 			continue;
1517 		}
1518 		lockref_mark_dead(&qd->qd_lockref);
1519 		spin_unlock(&qd->qd_lockref.lock);
1520 
1521 		list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
1522 		list_add(&qd->qd_lru, &dispose);
1523 	}
1524 	spin_unlock(&qd_lock);
1525 
1526 	gfs2_qd_list_dispose(&dispose);
1527 
1528 	wait_event_timeout(sdp->sd_kill_wait,
1529 		(count = atomic_read(&sdp->sd_quota_count)) == 0,
1530 		HZ * 60);
1531 
1532 	if (count != 0)
1533 		fs_err(sdp, "%d left-over quota data objects\n", count);
1534 
1535 	kvfree(sdp->sd_quota_bitmap);
1536 	sdp->sd_quota_bitmap = NULL;
1537 }
1538 
1539 static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1540 {
1541 	if (error == 0 || error == -EROFS)
1542 		return;
1543 	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
1544 		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1545 			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1546 		wake_up(&sdp->sd_logd_waitq);
1547 	}
1548 }
1549 
1550 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1551 			       int (*fxn)(struct super_block *sb, int type),
1552 			       unsigned long t, unsigned long *timeo,
1553 			       unsigned int *new_timeo)
1554 {
1555 	if (t >= *timeo) {
1556 		int error = fxn(sdp->sd_vfs, 0);
1557 		quotad_error(sdp, msg, error);
1558 		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1559 	} else {
1560 		*timeo -= t;
1561 	}
1562 }
1563 
1564 void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1565 	if (!sdp->sd_statfs_force_sync) {
1566 		sdp->sd_statfs_force_sync = 1;
1567 		wake_up(&sdp->sd_quota_wait);
1568 	}
1569 }
1570 
1571 
1572 /**
1573  * gfs2_quotad - Write cached quota changes into the quota file
1574  * @data: Pointer to GFS2 superblock
1575  *
1576  */
1577 
1578 int gfs2_quotad(void *data)
1579 {
1580 	struct gfs2_sbd *sdp = data;
1581 	struct gfs2_tune *tune = &sdp->sd_tune;
1582 	unsigned long statfs_timeo = 0;
1583 	unsigned long quotad_timeo = 0;
1584 	unsigned long t = 0;
1585 
1586 	set_freezable();
1587 	while (!kthread_should_stop()) {
1588 		if (gfs2_withdrawing_or_withdrawn(sdp))
1589 			break;
1590 
1591 		/* Update the master statfs file */
1592 		if (sdp->sd_statfs_force_sync) {
1593 			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1594 			quotad_error(sdp, "statfs", error);
1595 			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1596 		}
1597 		else
1598 			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1599 				   	   &statfs_timeo,
1600 					   &tune->gt_statfs_quantum);
1601 
1602 		/* Update quota file */
1603 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1604 				   &quotad_timeo, &tune->gt_quota_quantum);
1605 
1606 		t = min(quotad_timeo, statfs_timeo);
1607 
1608 		t = wait_event_freezable_timeout(sdp->sd_quota_wait,
1609 				sdp->sd_statfs_force_sync ||
1610 				gfs2_withdrawing_or_withdrawn(sdp) ||
1611 				kthread_should_stop(),
1612 				t);
1613 
1614 		if (sdp->sd_statfs_force_sync)
1615 			t = 0;
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1622 {
1623 	struct gfs2_sbd *sdp = sb->s_fs_info;
1624 
1625 	memset(state, 0, sizeof(*state));
1626 
1627 	switch (sdp->sd_args.ar_quota) {
1628 	case GFS2_QUOTA_QUIET:
1629 		fallthrough;
1630 	case GFS2_QUOTA_ON:
1631 		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1632 		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1633 		fallthrough;
1634 	case GFS2_QUOTA_ACCOUNT:
1635 		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1636 						  QCI_SYSFILE;
1637 		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1638 						  QCI_SYSFILE;
1639 		break;
1640 	case GFS2_QUOTA_OFF:
1641 		break;
1642 	}
1643 	if (sdp->sd_quota_inode) {
1644 		state->s_state[USRQUOTA].ino =
1645 					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1646 		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1647 	}
1648 	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1649 	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1650 	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1651 	return 0;
1652 }
1653 
1654 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1655 			  struct qc_dqblk *fdq)
1656 {
1657 	struct gfs2_sbd *sdp = sb->s_fs_info;
1658 	struct gfs2_quota_lvb *qlvb;
1659 	struct gfs2_quota_data *qd;
1660 	struct gfs2_holder q_gh;
1661 	int error;
1662 
1663 	memset(fdq, 0, sizeof(*fdq));
1664 
1665 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1666 		return -ESRCH; /* Crazy XFS error code */
1667 
1668 	if ((qid.type != USRQUOTA) &&
1669 	    (qid.type != GRPQUOTA))
1670 		return -EINVAL;
1671 
1672 	error = qd_get(sdp, qid, &qd);
1673 	if (error)
1674 		return error;
1675 	error = do_glock(qd, FORCE, &q_gh);
1676 	if (error)
1677 		goto out;
1678 
1679 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1680 	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1681 	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1682 	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1683 
1684 	gfs2_glock_dq_uninit(&q_gh);
1685 out:
1686 	qd_put(qd);
1687 	return error;
1688 }
1689 
1690 /* GFS2 only supports a subset of the XFS fields */
1691 #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1692 
1693 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1694 			  struct qc_dqblk *fdq)
1695 {
1696 	struct gfs2_sbd *sdp = sb->s_fs_info;
1697 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1698 	struct gfs2_quota_data *qd;
1699 	struct gfs2_holder q_gh, i_gh;
1700 	unsigned int data_blocks, ind_blocks;
1701 	unsigned int blocks = 0;
1702 	int alloc_required;
1703 	loff_t offset;
1704 	int error;
1705 
1706 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1707 		return -ESRCH; /* Crazy XFS error code */
1708 
1709 	if ((qid.type != USRQUOTA) &&
1710 	    (qid.type != GRPQUOTA))
1711 		return -EINVAL;
1712 
1713 	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1714 		return -EINVAL;
1715 
1716 	error = qd_get(sdp, qid, &qd);
1717 	if (error)
1718 		return error;
1719 
1720 	error = gfs2_qa_get(ip);
1721 	if (error)
1722 		goto out_put;
1723 
1724 	inode_lock(&ip->i_inode);
1725 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1726 	if (error)
1727 		goto out_unlockput;
1728 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1729 	if (error)
1730 		goto out_q;
1731 
1732 	/* Check for existing entry, if none then alloc new blocks */
1733 	error = update_qd(sdp, qd);
1734 	if (error)
1735 		goto out_i;
1736 
1737 	/* If nothing has changed, this is a no-op */
1738 	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1739 	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1740 		fdq->d_fieldmask ^= QC_SPC_SOFT;
1741 
1742 	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1743 	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1744 		fdq->d_fieldmask ^= QC_SPC_HARD;
1745 
1746 	if ((fdq->d_fieldmask & QC_SPACE) &&
1747 	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1748 		fdq->d_fieldmask ^= QC_SPACE;
1749 
1750 	if (fdq->d_fieldmask == 0)
1751 		goto out_i;
1752 
1753 	offset = qd2offset(qd);
1754 	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1755 	if (gfs2_is_stuffed(ip))
1756 		alloc_required = 1;
1757 	if (alloc_required) {
1758 		struct gfs2_alloc_parms ap = {};
1759 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1760 				       &data_blocks, &ind_blocks);
1761 		blocks = 1 + data_blocks + ind_blocks;
1762 		ap.target = blocks;
1763 		error = gfs2_inplace_reserve(ip, &ap);
1764 		if (error)
1765 			goto out_i;
1766 		blocks += gfs2_rg_blocks(ip, blocks);
1767 	}
1768 
1769 	/* Some quotas span block boundaries and can update two blocks,
1770 	   adding an extra block to the transaction to handle such quotas */
1771 	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1772 	if (error)
1773 		goto out_release;
1774 
1775 	/* Apply changes */
1776 	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
1777 	if (!error)
1778 		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1779 
1780 	gfs2_trans_end(sdp);
1781 out_release:
1782 	if (alloc_required)
1783 		gfs2_inplace_release(ip);
1784 out_i:
1785 	gfs2_glock_dq_uninit(&i_gh);
1786 out_q:
1787 	gfs2_glock_dq_uninit(&q_gh);
1788 out_unlockput:
1789 	gfs2_qa_put(ip);
1790 	inode_unlock(&ip->i_inode);
1791 out_put:
1792 	qd_put(qd);
1793 	return error;
1794 }
1795 
1796 const struct quotactl_ops gfs2_quotactl_ops = {
1797 	.quota_sync     = gfs2_quota_sync,
1798 	.get_state	= gfs2_quota_get_state,
1799 	.get_dqblk	= gfs2_get_dqblk,
1800 	.set_dqblk	= gfs2_set_dqblk,
1801 };
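
/*
 * Example (userspace, illustrative only; device path and uid are
 * hypothetical): the ops table above is reached through the generic
 * quotactl(2) interface using the XFS-style commands, e.g.:
 *
 *	#include <sys/quota.h>
 *	#include <linux/dqblk_xfs.h>
 *	#include <stdio.h>
 *
 *	int print_user_quota(const char *bdev, int uid)
 *	{
 *		struct fs_disk_quota fdq;
 *
 *		// Q_XGETQUOTA ends up in gfs2_get_dqblk() above
 *		if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), bdev, uid,
 *			     (void *)&fdq))
 *			return -1;
 *		// limits and counts are in 512-byte basic blocks
 *		printf("used %llu soft %llu hard %llu\n",
 *		       (unsigned long long)fdq.d_bcount,
 *		       (unsigned long long)fdq.d_blk_softlimit,
 *		       (unsigned long long)fdq.d_blk_hardlimit);
 *		return 0;
 *	}
 */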
1802 
1803 void __init gfs2_quota_hash_init(void)
1804 {
1805 	unsigned i;
1806 
1807 	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1808 		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1809 }
1810