1 /*	$NetBSD: ulfs_quota2.c,v 1.28 2016/07/07 06:55:44 msaitoh Exp $	*/
2 /*  from NetBSD: ufs_quota2.c,v 1.40 2015/03/28 19:24:05 maxv Exp  */
3 /*  from NetBSD: ffs_quota2.c,v 1.5 2015/02/22 14:12:48 maxv Exp  */
4 
5 /*-
6   * Copyright (c) 2010 Manuel Bouyer
7   * All rights reserved.
8   *
9   * Redistribution and use in source and binary forms, with or without
10   * modification, are permitted provided that the following conditions
11   * are met:
12   * 1. Redistributions of source code must retain the above copyright
13   *    notice, this list of conditions and the following disclaimer.
14   * 2. Redistributions in binary form must reproduce the above copyright
15   *    notice, this list of conditions and the following disclaimer in the
16   *    documentation and/or other materials provided with the distribution.
17   *
18   * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19   * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20   * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21   * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25   * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26   * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28   * POSSIBILITY OF SUCH DAMAGE.
29   */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.28 2016/07/07 06:55:44 msaitoh Exp $");
33 
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/fstrans.h>
44 #include <sys/kauth.h>
45 #include <sys/quota.h>
46 #include <sys/quotactl.h>
47 #include <sys/timevar.h>
48 
49 #include <ufs/lfs/lfs.h>
50 #include <ufs/lfs/lfs_accessors.h>
51 #include <ufs/lfs/lfs_extern.h>
52 
53 #include <ufs/lfs/ulfs_quota2.h>
54 #include <ufs/lfs/ulfs_inode.h>
55 #include <ufs/lfs/ulfsmount.h>
56 #include <ufs/lfs/ulfs_bswap.h>
57 #include <ufs/lfs/ulfs_extern.h>
58 #include <ufs/lfs/ulfs_quota.h>
59 
60 /*
61  * LOCKING:
62  * Data in the entries are protected by the associated struct dquot's
63  * dq_interlock (this means we can't read or change a quota entry without
64  * grabbing a dquot for it).
65  * The header and lists (including pointers in the data entries, and q2e_uid)
66  * are protected by the global dqlock.
67  * The locking order is dq_interlock -> dqlock.
68  */
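
/*
 * ON-DISK LAYOUT (as assumed by the code below):
 * Logical block 0 of each quota file holds the quota2_header, which
 * contains the default entry, the head of a free list of unused entries
 * (q2h_free) and an array of hash chain heads (q2h_entries[]).  Entries
 * are struct quota2_entry, linked through q2e_next; list pointers are
 * byte offsets into the quota file, split into a logical block number
 * (off >> mnt_fs_bshift) and an offset within that block
 * (off & umq2_bmask).
 */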
69 
70 static int quota2_bwrite(struct mount *, struct buf *);
71 static int getinoquota2(struct inode *, bool, bool, struct buf **,
72     struct quota2_entry **);
73 static int getq2h(struct ulfsmount *, int, struct buf **,
74     struct quota2_header **, int);
75 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
76     struct quota2_entry **, int);
77 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
78     uint64_t *, int, void *,
79     int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
80       uint64_t, void *));
81 
82 static const char *limnames[] = INITQLNAMES;
83 
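/*
 * Copy the limits from an FS-independent quotaval into the on-disk
 * entry for the given object type (blocks or files).
 */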
84 static void
85 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
86     struct quota2_entry *q2e)
87 {
88 	/* make sure we can index q2e_val[] by the fs-independent objtype */
89 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
90 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
91 
92 	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
93 	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
94 	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
95 }
96 
97 /*
98  * Convert internal representation to FS-independent representation.
99  * (Note that while the two types are currently identical, the
100  * internal representation is an on-disk struct and the FS-independent
101  * representation is not, and they might diverge in the future.)
102  */
103 static void
104 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
105 {
106 	qv->qv_softlimit = q2v->q2v_softlimit;
107 	qv->qv_hardlimit = q2v->q2v_hardlimit;
108 	qv->qv_usage = q2v->q2v_cur;
109 	qv->qv_expiretime = q2v->q2v_time;
110 	qv->qv_grace = q2v->q2v_grace;
111 }
112 
113 /*
114  * Convert a quota2entry and default-flag to the FS-independent
115  * representation.
116  */
117 static void
118 q2e_to_quotaval(struct quota2_entry *q2e, int def,
119 	       id_t *id, int objtype, struct quotaval *ret)
120 {
121 	if (def) {
122 		*id = QUOTA_DEFAULTID;
123 	} else {
124 		*id = q2e->q2e_uid;
125 	}
126 
127 	KASSERT(objtype >= 0 && objtype < N_QL);
128 	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
129 }
130 
131 
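/*
 * Write a quota buffer: synchronously if the file system is mounted
 * MNT_SYNCHRONOUS, otherwise as a delayed write.
 */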
132 static int
133 quota2_bwrite(struct mount *mp, struct buf *bp)
134 {
135 	if (mp->mnt_flag & MNT_SYNCHRONOUS)
136 		return bwrite(bp);
137 	else {
138 		bdwrite(bp);
139 		return 0;
140 	}
141 }
142 
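/*
 * Read and validate the header block of the quota file for the given
 * quota type.  Must be called with lfs_dqlock held; returns the buffer
 * and a pointer to the header within it.
 */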
143 static int
144 getq2h(struct ulfsmount *ump, int type,
145     struct buf **bpp, struct quota2_header **q2hp, int flags)
146 {
147 	struct lfs *fs = ump->um_lfs;
148 	const int needswap = ULFS_MPNEEDSWAP(fs);
149 	int error;
150 	struct buf *bp;
151 	struct quota2_header *q2h;
152 
153 	KASSERT(mutex_owned(&lfs_dqlock));
154 	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
155 	if (error)
156 		return error;
157 	if (bp->b_resid != 0)
158 		panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
159 
160 	q2h = (void *)bp->b_data;
161 	if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
162 	    q2h->q2h_type != type)
163 		panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
164 	*bpp = bp;
165 	*q2hp = q2h;
166 	return 0;
167 }
168 
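/*
 * Read the quota file block containing the entry at the given logical
 * block number and byte offset, and return a pointer to the entry.
 */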
169 static int
170 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
171     struct buf **bpp, struct quota2_entry **q2ep, int flags)
172 {
173 	int error;
174 	struct buf *bp;
175 
176 	if (blkoffset & (sizeof(uint64_t) - 1)) {
177 		panic("dq2get: %s quota file corrupted",
178 		    lfs_quotatypes[type]);
179 	}
180 	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
181 	if (error)
182 		return error;
183 	if (bp->b_resid != 0) {
184 		panic("dq2get: %s quota file corrupted",
185 		    lfs_quotatypes[type]);
186 	}
187 	*q2ep = (void *)((char *)bp->b_data + blkoffset);
188 	*bpp = bp;
189 	return 0;
190 }
191 
192 /* walk a quota entry list, calling the callback for each entry */
193 #define Q2WL_ABORT 0x10000000
194 
195 static int
196 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
197     uint64_t *offp, int flags, void *a,
198     int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
199 {
200 	struct lfs *fs = ump->um_lfs;
201 	const int needswap = ULFS_MPNEEDSWAP(fs);
202 	daddr_t off = ulfs_rw64(*offp, needswap);
203 	struct buf *bp, *obp = hbp;
204 	int ret = 0, ret2 = 0;
205 	struct quota2_entry *q2e;
206 	daddr_t lblkno, blkoff, olblkno = 0;
207 
208 	KASSERT(mutex_owned(&lfs_dqlock));
209 
210 	while (off != 0) {
211 		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
212 		blkoff = (off & ump->umq2_bmask);
213 		if (lblkno == 0) {
214 			/* in the header block */
215 			bp = hbp;
216 		} else if (lblkno == olblkno) {
217 			/* still in the same buf */
218 			bp = obp;
219 		} else {
220 			ret = bread(ump->um_quotas[type], lblkno,
221 			    ump->umq2_bsize, flags, &bp);
222 			if (ret)
223 				return ret;
224 			if (bp->b_resid != 0) {
225 				panic("quota2_walk_list: %s quota file corrupted",
226 				    lfs_quotatypes[type]);
227 			}
228 		}
229 		q2e = (void *)((char *)(bp->b_data) + blkoff);
230 		ret = (*func)(ump, offp, q2e, off, a);
231 		if (off != ulfs_rw64(*offp, needswap)) {
232 			/* callback changed parent's pointer, redo */
233 			off = ulfs_rw64(*offp, needswap);
234 			if (bp != hbp && bp != obp)
235 				ret2 = bwrite(bp);
236 		} else {
237 			/* parent is now current */
238 			if (obp != bp && obp != hbp) {
239 				if (flags & B_MODIFY)
240 					ret2 = bwrite(obp);
241 				else
242 					brelse(obp, 0);
243 			}
244 			obp = bp;
245 			olblkno = lblkno;
246 			offp = &(q2e->q2e_next);
247 			off = ulfs_rw64(*offp, needswap);
248 		}
249 		if (ret)
250 			break;
251 		if (ret2) {
252 			ret = ret2;
253 			break;
254 		}
255 	}
256 	if (obp != hbp) {
257 		if (flags & B_MODIFY)
258 			ret2 = bwrite(obp);
259 		else
260 			brelse(obp, 0);
261 	}
262 	if (ret & Q2WL_ABORT)
263 		return 0;
264 	if (ret == 0)
265 		return ret2;
266 	return ret;
267 }
268 
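/*
 * Close the quota files when the file system is unmounted.
 */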
269 int
270 lfsquota2_umount(struct mount *mp, int flags)
271 {
272 	int i, error;
273 	struct ulfsmount *ump = VFSTOULFS(mp);
274 	struct lfs *fs = ump->um_lfs;
275 
276 	if ((fs->um_flags & ULFS_QUOTA2) == 0)
277 		return 0;
278 
279 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
280 		if (ump->um_quotas[i] != NULLVP) {
281 			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
282 			    ump->um_cred[i]);
283 			if (error) {
284 				printf("quota2_umount failed: close(%p) %d\n",
285 				    ump->um_quotas[i], error);
286 				return error;
287 			}
288 		}
289 		ump->um_quotas[i] = NULLVP;
290 	}
291 	return 0;
292 }
293 
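/*
 * Allocate an on-disk quota entry for the given id: take one from the
 * free list (growing the quota file by one block if the list is empty),
 * initialize it from the default entry and link it into the id's hash
 * chain.  Called with both dq_interlock and lfs_dqlock held.
 */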
294 static int
295 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
296 {
297 	int error, error2;
298 	struct buf *hbp, *bp;
299 	struct quota2_header *q2h;
300 	struct quota2_entry *q2e;
301 	daddr_t offset;
302 	u_long hash_mask;
303 	struct lfs *fs = ump->um_lfs;
304 	const int needswap = ULFS_MPNEEDSWAP(fs);
305 
306 	KASSERT(mutex_owned(&dq->dq_interlock));
307 	KASSERT(mutex_owned(&lfs_dqlock));
308 	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
309 	if (error)
310 		return error;
311 	offset = ulfs_rw64(q2h->q2h_free, needswap);
312 	if (offset == 0) {
313 		struct vnode *vp = ump->um_quotas[type];
314 		struct inode *ip = VTOI(vp);
315 		uint64_t size = ip->i_size;
316 		/* need to allocate a new disk block */
317 		error = lfs_balloc(vp, size, ump->umq2_bsize,
318 		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
319 		if (error) {
320 			brelse(hbp, 0);
321 			return error;
322 		}
323 		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
324 		ip->i_size += ump->umq2_bsize;
325 		DIP_ASSIGN(ip, size, ip->i_size);
326 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
327 		uvm_vnp_setsize(vp, ip->i_size);
328 		lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
329 		    needswap);
330 		error = bwrite(bp);
331 		error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
332 		if (error || error2) {
333 			brelse(hbp, 0);
334 			if (error)
335 				return error;
336 			return error2;
337 		}
338 		offset = ulfs_rw64(q2h->q2h_free, needswap);
339 		KASSERT(offset != 0);
340 	}
341 	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
342 	dq->dq2_blkoff = (offset & ump->umq2_bmask);
343 	if (dq->dq2_lblkno == 0) {
344 		bp = hbp;
345 		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
346 	} else {
347 		error = getq2e(ump, type, dq->dq2_lblkno,
348 		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
349 		if (error) {
350 			brelse(hbp, 0);
351 			return error;
352 		}
353 	}
354 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
355 	/* remove from free list */
356 	q2h->q2h_free = q2e->q2e_next;
357 
358 	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
359 	q2e->q2e_uid = ulfs_rw32(uid, needswap);
360 	/* insert in hash list */
361 	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
362 	q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
363 	if (hbp != bp) {
364 		bwrite(hbp);
365 	}
366 	bwrite(bp);
367 	return 0;
368 }
369 
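/*
 * Look up (and optionally allocate) the on-disk quota entries for all
 * quota types of an inode.  On success the dq_interlock of each valid
 * dquot is held and bpp[]/q2ep[] point into the buffers holding the
 * entries.
 */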
370 static int
371 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
372     struct quota2_entry **q2ep)
373 {
374 	int error;
375 	int i;
376 	struct dquot *dq;
377 	struct ulfsmount *ump = ip->i_ump;
378 	u_int32_t ino_ids[ULFS_MAXQUOTAS];
379 
380 	error = lfs_getinoquota(ip);
381 	if (error)
382 		return error;
383 
384         ino_ids[ULFS_USRQUOTA] = ip->i_uid;
385         ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
386 	/* first get the interlock for all dquots */
387 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
388 		dq = ip->i_dquot[i];
389 		if (dq == NODQUOT)
390 			continue;
391 		mutex_enter(&dq->dq_interlock);
392 	}
393 	/* now get the corresponding quota entry */
394 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
395 		bpp[i] = NULL;
396 		q2ep[i] = NULL;
397 		dq = ip->i_dquot[i];
398 		if (dq == NODQUOT)
399 			continue;
400 		if (__predict_false(ump->um_quotas[i] == NULL)) {
401 			/*
402 			 * quotas have been turned off. This can happen
403 			 * at umount time.
404 			 */
405 			mutex_exit(&dq->dq_interlock);
406 			lfs_dqrele(NULLVP, dq);
407 			ip->i_dquot[i] = NULL;
408 			continue;
409 		}
410 
411 		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
412 			if (!alloc) {
413 				continue;
414 			}
415 			/* need to allocate a new on-disk quota entry */
416 			mutex_enter(&lfs_dqlock);
417 			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
418 			mutex_exit(&lfs_dqlock);
419 			if (error)
420 				return error;
421 		}
422 		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
423 		error = getq2e(ump, i, dq->dq2_lblkno,
424 		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
425 		    modify ? B_MODIFY : 0);
426 		if (error)
427 			return error;
428 	}
429 	return 0;
430 }
431 
432 __inline static int __unused
433 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
434 {
435 	return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
436 	    q2v->q2v_hardlimit, q2v->q2v_time, now);
437 }
438 
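/*
 * Common code for lfs_chkdq2() and lfs_chkiq2(): adjust the current
 * usage of every applicable quota by 'change' and, for allocations,
 * enforce the limits first (unless FORCE is set or the caller is
 * allowed to bypass quotas), starting the grace timer when a soft
 * limit is crossed.
 */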
439 static int
440 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
441     int flags)
442 {
443 	int error;
444 	struct buf *bp[ULFS_MAXQUOTAS];
445 	struct quota2_entry *q2e[ULFS_MAXQUOTAS];
446 	struct quota2_val *q2vp;
447 	struct dquot *dq;
448 	uint64_t ncurblks;
449 	struct ulfsmount *ump = ip->i_ump;
450 	struct lfs *fs = ip->i_lfs;
451 	struct mount *mp = ump->um_mountp;
452 	const int needswap = ULFS_MPNEEDSWAP(fs);
453 	int i;
454 
455 	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
456 		return error;
457 	if (change == 0) {
458 		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
459 			dq = ip->i_dquot[i];
460 			if (dq == NODQUOT)
461 				continue;
462 			if (bp[i])
463 				brelse(bp[i], 0);
464 			mutex_exit(&dq->dq_interlock);
465 		}
466 		return 0;
467 	}
468 	if (change < 0) {
469 		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
470 			dq = ip->i_dquot[i];
471 			if (dq == NODQUOT)
472 				continue;
473 			if (q2e[i] == NULL) {
474 				mutex_exit(&dq->dq_interlock);
475 				continue;
476 			}
477 			q2vp = &q2e[i]->q2e_val[vtype];
478 			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
479 			if (ncurblks < -change)
480 				ncurblks = 0;
481 			else
482 				ncurblks += change;
483 			q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
484 			quota2_bwrite(mp, bp[i]);
485 			mutex_exit(&dq->dq_interlock);
486 		}
487 		return 0;
488 	}
489 	/* see if the allocation is allowed */
490 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
491 		struct quota2_val q2v;
492 		int ql_stat;
493 		dq = ip->i_dquot[i];
494 		if (dq == NODQUOT)
495 			continue;
496 		KASSERT(q2e[i] != NULL);
497 		lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
498 		ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
499 
500 		if ((flags & FORCE) == 0 &&
501 		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
502 		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
503 		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
504 			/* enforce this limit */
505 			switch(QL_STATUS(ql_stat)) {
506 			case QL_S_DENY_HARD:
507 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
508 					uprintf("\n%s: write failed, %s %s "
509 					    "limit reached\n",
510 					    mp->mnt_stat.f_mntonname,
511 					    lfs_quotatypes[i], limnames[vtype]);
512 					dq->dq_flags |= DQ_WARN(vtype);
513 				}
514 				error = EDQUOT;
515 				break;
516 			case QL_S_DENY_GRACE:
517 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
518 					uprintf("\n%s: write failed, %s %s "
519 					    "limit reached\n",
520 					    mp->mnt_stat.f_mntonname,
521 					    lfs_quotatypes[i], limnames[vtype]);
522 					dq->dq_flags |= DQ_WARN(vtype);
523 				}
524 				error = EDQUOT;
525 				break;
526 			case QL_S_ALLOW_SOFT:
527 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
528 					uprintf("\n%s: warning, %s %s "
529 					    "quota exceeded\n",
530 					    mp->mnt_stat.f_mntonname,
531 					    lfs_quotatypes[i], limnames[vtype]);
532 					dq->dq_flags |= DQ_WARN(vtype);
533 				}
534 				break;
535 			}
536 		}
537 		/*
538 		 * Always do this; we don't know if the allocation will
539 		 * succeed or not in the end. If we don't do the allocation,
540 		 * q2v_time will be ignored anyway.
541 		 */
542 		if (ql_stat & QL_F_CROSS) {
543 			q2v.q2v_time = time_second + q2v.q2v_grace;
544 			lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
545 			    needswap);
546 		}
547 	}
548 
549 	/* now do the allocation if allowed */
550 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
551 		dq = ip->i_dquot[i];
552 		if (dq == NODQUOT)
553 			continue;
554 		KASSERT(q2e[i] != NULL);
555 		if (error == 0) {
556 			q2vp = &q2e[i]->q2e_val[vtype];
557 			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
558 			q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
559 			quota2_bwrite(mp, bp[i]);
560 		} else
561 			brelse(bp[i], 0);
562 		mutex_exit(&dq->dq_interlock);
563 	}
564 	return error;
565 }
566 
567 int
568 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
569 {
570 	return quota2_check(ip, QL_BLOCK, change, cred, flags);
571 }
572 
573 int
574 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
575 {
576 	return quota2_check(ip, QL_FILE, change, cred, flags);
577 }
578 
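/*
 * quotactl "put" handler: set the limits for an id (or the defaults if
 * the id is QUOTA_DEFAULTID), allocating an on-disk entry if needed.
 */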
579 int
580 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
581     const struct quotaval *val)
582 {
583 	int error;
584 	struct dquot *dq;
585 	struct quota2_header *q2h;
586 	struct quota2_entry q2e, *q2ep;
587 	struct buf *bp;
588 	struct lfs *fs = ump->um_lfs;
589 	const int needswap = ULFS_MPNEEDSWAP(fs);
590 
591 	/* make sure we can index by the fs-independent idtype */
592 	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
593 	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
594 
595 	if (ump->um_quotas[key->qk_idtype] == NULLVP)
596 		return ENODEV;
597 
598 	if (key->qk_id == QUOTA_DEFAULTID) {
599 		mutex_enter(&lfs_dqlock);
600 		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
601 		if (error) {
602 			mutex_exit(&lfs_dqlock);
603 			goto out_error;
604 		}
605 		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
606 		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
607 		lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
608 		mutex_exit(&lfs_dqlock);
609 		quota2_bwrite(ump->um_mountp, bp);
610 		goto out_error;
611 	}
612 
613 	error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
614 	if (error)
615 		goto out_error;
616 
617 	mutex_enter(&dq->dq_interlock);
618 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
619 		/* need to allocate a new on-disk quota entry */
620 		mutex_enter(&lfs_dqlock);
621 		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
622 		mutex_exit(&lfs_dqlock);
623 		if (error)
624 			goto out_il;
625 	}
626 	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
627 	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
628 	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
629 	if (error)
630 		goto out_il;
631 
632 	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
633 	/*
634 	 * Reset time limit if previously had no soft limit or were
635 	 * under it, but now have a soft limit and are over it.
636 	 */
637 	if (val->qv_softlimit &&
638 	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
639 	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
640 	     q2e.q2e_val[key->qk_objtype].q2v_cur < q2e.q2e_val[key->qk_objtype].q2v_softlimit))
641 		q2e.q2e_val[key->qk_objtype].q2v_time = time_second + val->qv_grace;
642 	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
643 	lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
644 	quota2_bwrite(ump->um_mountp, bp);
645 
646 out_il:
647 	mutex_exit(&dq->dq_interlock);
648 	lfs_dqrele(NULLVP, dq);
649 out_error:
650 	return error;
651 }
652 
653 struct dq2clear_callback {
654 	uid_t id;
655 	struct dquot *dq;
656 	struct quota2_header *q2h;
657 };
658 
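/*
 * quota2_walk_list() callback for "del": when the entry for the target
 * id is found, unlink it from its hash chain, put it on the free list
 * and clear the dquot's on-disk location.
 */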
659 static int
660 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
661     uint64_t off, void *v)
662 {
663 	struct dq2clear_callback *c = v;
664 	struct lfs *fs = ump->um_lfs;
665 	const int needswap = ULFS_MPNEEDSWAP(fs);
666 	uint64_t myoff;
667 
668 	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
669 		KASSERT(mutex_owned(&c->dq->dq_interlock));
670 		c->dq->dq2_lblkno = 0;
671 		c->dq->dq2_blkoff = 0;
672 		myoff = *offp;
673 		/* remove from hash list */
674 		*offp = q2e->q2e_next;
675 		/* add to free list */
676 		q2e->q2e_next = c->q2h->q2h_free;
677 		c->q2h->q2h_free = myoff;
678 		return Q2WL_ABORT;
679 	}
680 	return 0;
681 }
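
/*
 * quotactl "del" handler: reset the limits of one object type of an id
 * to the defaults and, if the entry then carries no information at all,
 * return it to the free list.
 */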
682 int
683 lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
684 {
685 	int idtype;
686 	id_t id;
687 	int objtype;
688 	int error, i, canfree;
689 	struct dquot *dq;
690 	struct quota2_header *q2h;
691 	struct quota2_entry q2e, *q2ep;
692 	struct buf *hbp, *bp;
693 	u_long hash_mask;
694 	struct dq2clear_callback c;
695 
696 	idtype = qk->qk_idtype;
697 	id = qk->qk_id;
698 	objtype = qk->qk_objtype;
699 
700 	if (ump->um_quotas[idtype] == NULLVP)
701 		return ENODEV;
702 	if (id == QUOTA_DEFAULTID)
703 		return EOPNOTSUPP;
704 
705 	/* get the default entry before locking the entry's buffer */
706 	mutex_enter(&lfs_dqlock);
707 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
708 	if (error) {
709 		mutex_exit(&lfs_dqlock);
710 		return error;
711 	}
712 	/* we'll copy to another disk entry, so no need to swap */
713 	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
714 	mutex_exit(&lfs_dqlock);
715 	brelse(hbp, 0);
716 
717 	error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
718 	if (error)
719 		return error;
720 
721 	mutex_enter(&dq->dq_interlock);
722 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
723 		/* already clear, nothing to do */
724 		error = ENOENT;
725 		goto out_il;
726 	}
727 
728 	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
729 	    &bp, &q2ep, B_MODIFY);
730 	if (error)
731 		goto out_error;
732 
733 	/* make sure we can index by the objtype passed in */
734 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
735 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
736 
737 	/* clear the requested objtype by copying from the default entry */
738 	q2ep->q2e_val[objtype].q2v_softlimit =
739 		q2e.q2e_val[objtype].q2v_softlimit;
740 	q2ep->q2e_val[objtype].q2v_hardlimit =
741 		q2e.q2e_val[objtype].q2v_hardlimit;
742 	q2ep->q2e_val[objtype].q2v_grace =
743 		q2e.q2e_val[objtype].q2v_grace;
744 	q2ep->q2e_val[objtype].q2v_time = 0;
745 
746 	/* if this entry now contains no information, we can free it */
747 	canfree = 1;
748 	for (i = 0; i < N_QL; i++) {
749 		if (q2ep->q2e_val[i].q2v_cur != 0 ||
750 		    (q2ep->q2e_val[i].q2v_softlimit !=
751 		     q2e.q2e_val[i].q2v_softlimit) ||
752 		    (q2ep->q2e_val[i].q2v_hardlimit !=
753 		     q2e.q2e_val[i].q2v_hardlimit) ||
754 		    (q2ep->q2e_val[i].q2v_grace !=
755 		     q2e.q2e_val[i].q2v_grace)) {
756 			canfree = 0;
757 			break;
758 		}
759 		/* note: do not need to check q2v_time */
760 	}
761 
762 	if (canfree == 0) {
763 		quota2_bwrite(ump->um_mountp, bp);
764 		goto out_error;
765 	}
766 	/* we can free it. release bp so we can walk the list */
767 	brelse(bp, 0);
768 	mutex_enter(&lfs_dqlock);
769 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
770 	if (error)
771 		goto out_dqlock;
772 
773 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
774 	c.dq = dq;
775 	c.id = id;
776 	c.q2h = q2h;
777 	error = quota2_walk_list(ump, hbp, idtype,
778 	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
779 	    dq2clear_callback);
780 
781 	bwrite(hbp);
782 
783 out_dqlock:
784 	mutex_exit(&lfs_dqlock);
785 out_error:
786 out_il:
787 	mutex_exit(&dq->dq_interlock);
788 	lfs_dqrele(NULLVP, dq);
789 	return error;
790 }
791 
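/*
 * Fetch the on-disk entry for a key, converted to host byte order.
 * Returns ENOENT if the id has no on-disk entry.
 */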
792 static int
793 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
794     struct quota2_entry *ret)
795 {
796 	struct dquot *dq;
797 	int error;
798 	struct quota2_entry *q2ep;
799 	struct buf *bp;
800 	struct lfs *fs = ump->um_lfs;
801 	const int needswap = ULFS_MPNEEDSWAP(fs);
802 
803 	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
804 	if (error)
805 		return error;
806 
807 	mutex_enter(&dq->dq_interlock);
808 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
809 		mutex_exit(&dq->dq_interlock);
810 		lfs_dqrele(NULLVP, dq);
811 		return ENOENT;
812 	}
813 	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
814 	    &bp, &q2ep, 0);
815 	if (error) {
816 		mutex_exit(&dq->dq_interlock);
817 		lfs_dqrele(NULLVP, dq);
818 		return error;
819 	}
820 	lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
821 	brelse(bp, 0);
822 	mutex_exit(&dq->dq_interlock);
823 	lfs_dqrele(NULLVP, dq);
824 
825 	return 0;
826 }
827 
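/*
 * Fetch a single quotaval for a key by reading the id's on-disk entry.
 */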
828 static int
829 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
830     struct quotaval *ret)
831 {
832 	struct dquot *dq;
833 	int error;
834 	struct quota2_entry *q2ep, q2e;
835 	struct buf  *bp;
836 	struct lfs *fs = ump->um_lfs;
837 	const int needswap = ULFS_MPNEEDSWAP(fs);
838 	id_t id2;
839 
840 	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
841 	if (error)
842 		return error;
843 
844 	mutex_enter(&dq->dq_interlock);
845 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
846 		mutex_exit(&dq->dq_interlock);
847 		lfs_dqrele(NULLVP, dq);
848 		return ENOENT;
849 	}
850 	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
851 	    &bp, &q2ep, 0);
852 	if (error) {
853 		mutex_exit(&dq->dq_interlock);
854 		lfs_dqrele(NULLVP, dq);
855 		return error;
856 	}
857 	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
858 	brelse(bp, 0);
859 	mutex_exit(&dq->dq_interlock);
860 	lfs_dqrele(NULLVP, dq);
861 
862 	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
863 	KASSERT(id2 == qk->qk_id);
864 	return 0;
865 }
866 
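/*
 * quotactl "get" handler: return the quotaval for a key, taking the
 * default entry from the header when the id is QUOTA_DEFAULTID.
 */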
867 int
868 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
869     struct quotaval *qv)
870 {
871 	int error;
872 	struct quota2_header *q2h;
873 	struct quota2_entry q2e;
874 	struct buf *bp;
875 	struct lfs *fs = ump->um_lfs;
876 	const int needswap = ULFS_MPNEEDSWAP(fs);
877 	id_t id2;
878 
879 	/*
880 	 * Make sure the FS-independent codes match the internal ones,
881 	 * so we can use the passed-in objtype without having to
882 	 * convert it explicitly to QL_BLOCK/QL_FILE.
883 	 */
884 	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
885 	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
886 	CTASSERT(N_QL == 2);
887 
888 	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
889 		return EINVAL;
890 	}
891 
892 	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
893 		return ENODEV;
894 	if (qk->qk_id == QUOTA_DEFAULTID) {
895 		mutex_enter(&lfs_dqlock);
896 		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
897 		if (error) {
898 			mutex_exit(&lfs_dqlock);
899 			return error;
900 		}
901 		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
902 		mutex_exit(&lfs_dqlock);
903 		brelse(bp, 0);
904 		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
905 				qk->qk_objtype, qv);
906 		(void)id2;
907 	} else
908 		error = quota2_fetch_quotaval(ump, qk, qv);
909 
910 	return error;
911 }
912 
913 /*
914  * Cursor structure we use.
915  *
916  * This will get stored in userland between calls so we must not assume
917  * it isn't arbitrarily corrupted.
918  */
919 struct ulfsq2_cursor {
920 	uint32_t q2c_magic;	/* magic number */
921 	int q2c_hashsize;	/* size of hash table at last go */
922 
923 	int q2c_users_done;	/* true if we've returned all user data */
924 	int q2c_groups_done;	/* true if we've returned all group data */
925 	int q2c_defaults_done;	/* true if we've returned the default values */
926 	int q2c_hashpos;	/* slot to start at in hash table */
927 	int q2c_uidpos;		/* number of ids we've handled */
928 	int q2c_blocks_done;	/* true if we've returned the blocks value */
929 };
930 
931 /*
932  * State of a single cursorget call, or at least the part of it that
933  * needs to be passed around.
934  */
935 struct q2cursor_state {
936 	/* data return pointers */
937 	struct quotakey *keys;
938 	struct quotaval *vals;
939 
940 	/* key/value counters */
941 	unsigned maxkeyvals;
942 	unsigned numkeys;	/* number of keys assigned */
943 
944 	/* ID to key/value conversion state */
945 	int skipfirst;		/* if true skip first key/value */
946 	int skiplast;		/* if true skip last key/value */
947 
948 	/* ID counters */
949 	unsigned maxids;	/* maximum number of IDs to handle */
950 	unsigned numids;	/* number of IDs handled */
951 };
952 
953 /*
954  * Additional structure for getids callback.
955  */
956 struct q2cursor_getids {
957 	struct q2cursor_state *state;
958 	int idtype;
959 	unsigned skip;		/* number of ids to skip over */
960 	unsigned new_skip;	/* number of ids to skip over next time */
961 	unsigned skipped;	/* number skipped so far */
962 	int stopped;		/* true if we stopped quota_walk_list early */
963 };
964 
965 /*
966  * Cursor-related functions
967  */
968 
969 /* magic number */
970 #define Q2C_MAGIC (0xbeebe111)
971 
972 /* extract cursor from caller form */
973 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
974 
975 /*
976  * Check that a cursor we're handed is something like valid. If
977  * someone munges it and it still passes these checks, they'll get
978  * partial or odd results back but won't break anything.
979  */
980 static int
981 q2cursor_check(struct ulfsq2_cursor *cursor)
982 {
983 	if (cursor->q2c_magic != Q2C_MAGIC) {
984 		return EINVAL;
985 	}
986 	if (cursor->q2c_hashsize < 0) {
987 		return EINVAL;
988 	}
989 
990 	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
991 		return EINVAL;
992 	}
993 	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
994 		return EINVAL;
995 	}
996 	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
997 		return EINVAL;
998 	}
999 	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
1000 		return EINVAL;
1001 	}
1002 	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
1003 		return EINVAL;
1004 	}
1005 	return 0;
1006 }
1007 
1008 /*
1009  * Set up the q2cursor state.
1010  */
1011 static void
1012 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1013     struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1014 {
1015 	state->keys = keys;
1016 	state->vals = vals;
1017 
1018 	state->maxkeyvals = maxkeyvals;
1019 	state->numkeys = 0;
1020 
1021 	/*
1022 	 * For each ID there are two quotavals to return. If the
1023 	 * maximum number of entries to return is odd, we might want
1024 	 * to skip the first quotaval of the first ID, or the last
1025 	 * quotaval of the last ID, but not both. So the number of IDs
1026 	 * we want is (up to) half the number of return slots we have,
1027 	 * rounded up.
1028 	 */
1029 
1030 	state->maxids = (state->maxkeyvals + 1) / 2;
1031 	state->numids = 0;
1032 	if (state->maxkeyvals % 2) {
1033 		if (blocks_done) {
1034 			state->skipfirst = 1;
1035 			state->skiplast = 0;
1036 		} else {
1037 			state->skipfirst = 0;
1038 			state->skiplast = 1;
1039 		}
1040 	} else {
1041 		state->skipfirst = 0;
1042 		state->skiplast = 0;
1043 	}
1044 }
1045 
1046 /*
1047  * Choose which idtype we're going to work on. If doing a full
1048  * iteration, we do users first, then groups, but either might be
1049  * disabled or marked to skip via cursorsetidtype(), so don't make
1050  * silly assumptions.
1051  */
1052 static int
1053 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1054 {
1055 	if (cursor->q2c_users_done == 0) {
1056 		*idtype_ret = QUOTA_IDTYPE_USER;
1057 	} else if (cursor->q2c_groups_done == 0) {
1058 		*idtype_ret = QUOTA_IDTYPE_GROUP;
1059 	} else {
1060 		return EAGAIN;
1061 	}
1062 	return 0;
1063 }
1064 
1065 /*
1066  * Add an ID to the current state. Sets up either one or two keys to
1067  * refer to it, depending on whether it's first/last and the setting
1068  * of skipfirst. (skiplast does not need to be explicitly tested)
1069  */
1070 static void
1071 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1072 {
1073 	KASSERT(state->numids < state->maxids);
1074 	KASSERT(state->numkeys < state->maxkeyvals);
1075 
1076 	if (!state->skipfirst || state->numkeys > 0) {
1077 		state->keys[state->numkeys].qk_idtype = idtype;
1078 		state->keys[state->numkeys].qk_id = id;
1079 		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1080 		state->numkeys++;
1081 	}
1082 	if (state->numkeys < state->maxkeyvals) {
1083 		state->keys[state->numkeys].qk_idtype = idtype;
1084 		state->keys[state->numkeys].qk_id = id;
1085 		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1086 		state->numkeys++;
1087 	} else {
1088 		KASSERT(state->skiplast);
1089 	}
1090 	state->numids++;
1091 }
1092 
1093 /*
1094  * Callback function for getting IDs. Update counting and call addid.
1095  */
1096 static int
1097 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1098     struct quota2_entry *q2ep, uint64_t off, void *v)
1099 {
1100 	struct q2cursor_getids *gi = v;
1101 	id_t id;
1102 	struct lfs *fs = ump->um_lfs;
1103 	const int needswap = ULFS_MPNEEDSWAP(fs);
1104 
1105 	if (gi->skipped < gi->skip) {
1106 		gi->skipped++;
1107 		return 0;
1108 	}
1109 	id = ulfs_rw32(q2ep->q2e_uid, needswap);
1110 	q2cursor_addid(gi->state, gi->idtype, id);
1111 	gi->new_skip++;
1112 	if (gi->state->numids >= gi->state->maxids) {
1113 		/* got enough ids, stop now */
1114 		gi->stopped = 1;
1115 		return Q2WL_ABORT;
1116 	}
1117 	return 0;
1118 }
1119 
1120 /*
1121  * Fill in a batch of quotakeys by scanning one or more hash chains.
1122  */
1123 static int
1124 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1125     struct q2cursor_state *state,
1126     int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1127 {
1128 	struct lfs *fs = ump->um_lfs;
1129 	const int needswap = ULFS_MPNEEDSWAP(fs);
1130 	struct buf *hbp;
1131 	struct quota2_header *q2h;
1132 	int quota2_hash_size;
1133 	struct q2cursor_getids gi;
1134 	uint64_t offset;
1135 	int error;
1136 
1137 	/*
1138 	 * Read the header block.
1139 	 */
1140 
1141 	mutex_enter(&lfs_dqlock);
1142 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
1143 	if (error) {
1144 		mutex_exit(&lfs_dqlock);
1145 		return error;
1146 	}
1147 
1148 	/* if the table size has changed, make the caller start over */
1149 	quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1150 	if (cursor->q2c_hashsize == 0) {
1151 		cursor->q2c_hashsize = quota2_hash_size;
1152 	} else if (cursor->q2c_hashsize != quota2_hash_size) {
1153 		error = EDEADLK;
1154 		goto scanfail;
1155 	}
1156 
1157 	/* grab the entry with the default values out of the header */
1158 	lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1159 
1160 	/* If we haven't done the defaults yet, that goes first. */
1161 	if (cursor->q2c_defaults_done == 0) {
1162 		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1163 		/* if we read both halves, mark it done */
1164 		if (state->numids < state->maxids || !state->skiplast) {
1165 			cursor->q2c_defaults_done = 1;
1166 		}
1167 	}
1168 
1169 	gi.state = state;
1170 	gi.idtype = idtype;
1171 
1172 	while (state->numids < state->maxids) {
1173 		if (cursor->q2c_hashpos >= quota2_hash_size) {
1174 			/* nothing more left */
1175 			break;
1176 		}
1177 
1178 		/* scan this hash chain */
1179 		gi.skip = cursor->q2c_uidpos;
1180 		gi.new_skip = gi.skip;
1181 		gi.skipped = 0;
1182 		gi.stopped = 0;
1183 		offset = q2h->q2h_entries[cursor->q2c_hashpos];
1184 
1185 		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1186 		    q2cursor_getids_callback);
1187 		KASSERT(error != Q2WL_ABORT);
1188 		if (error) {
1189 			break;
1190 		}
1191 		if (gi.stopped) {
1192 			/* callback stopped before reading whole chain */
1193 			cursor->q2c_uidpos = gi.new_skip;
1194 			/* if we didn't get both halves, back up */
1195 			if (state->numids == state->maxids && state->skiplast){
1196 				KASSERT(cursor->q2c_uidpos > 0);
1197 				cursor->q2c_uidpos--;
1198 			}
1199 		} else {
1200 			/* read whole chain */
1201 			/* if we got both halves of the last id, advance */
1202 			if (state->numids < state->maxids || !state->skiplast){
1203 				cursor->q2c_uidpos = 0;
1204 				cursor->q2c_hashpos++;
1205 			}
1206 		}
1207 	}
1208 
1209 scanfail:
1210 	mutex_exit(&lfs_dqlock);
1211 	brelse(hbp, 0);
1212 	if (error)
1213 		return error;
1214 
1215 	*hashsize_ret = quota2_hash_size;
1216 	return 0;
1217 }
1218 
1219 /*
1220  * Fetch the quotavals for the quotakeys.
1221  */
1222 static int
1223 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1224     const struct quota2_entry *default_q2e)
1225 {
1226 	int hasid;
1227 	id_t loadedid, id;
1228 	unsigned pos;
1229 	struct quota2_entry q2e;
1230 	int objtype;
1231 	int error;
1232 
1233 	hasid = 0;
1234 	loadedid = 0;
1235 	for (pos = 0; pos < state->numkeys; pos++) {
1236 		id = state->keys[pos].qk_id;
1237 		if (!hasid || id != loadedid) {
1238 			hasid = 1;
1239 			loadedid = id;
1240 			if (id == QUOTA_DEFAULTID) {
1241 				q2e = *default_q2e;
1242 			} else {
1243 				error = quota2_fetch_q2e(ump,
1244 							 &state->keys[pos],
1245 							 &q2e);
1246 				if (error == ENOENT) {
1247 					/* something changed - start over */
1248 					error = EDEADLK;
1249 				}
1250 				if (error) {
1251 					return error;
1252 				}
1253  			}
1254 		}
1255 
1256 
1257 		objtype = state->keys[pos].qk_objtype;
1258 		KASSERT(objtype >= 0 && objtype < N_QL);
1259 		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 /*
1266  * Handle cursorget.
1267  *
1268  * We can't just read keys and values directly, because we can't walk
1269  * the list with dqlock and grab dq_interlock to read the entries at
1270  * the same time. So we're going to do two passes: one to figure out
1271  * which IDs we want and fill in the keys, and then a second to use
1272  * the keys to fetch the values.
1273  */
1274 int
1275 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1276     struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1277     unsigned *ret)
1278 {
1279 	int error;
1280 	struct ulfsq2_cursor *cursor;
1281 	struct ulfsq2_cursor newcursor;
1282 	struct q2cursor_state state;
1283 	struct quota2_entry default_q2e;
1284 	int idtype;
1285 	int quota2_hash_size = 0; /* XXXuninit */
1286 
1287 	/*
1288 	 * Convert and validate the cursor.
1289 	 */
1290 	cursor = Q2CURSOR(qkc);
1291 	error = q2cursor_check(cursor);
1292 	if (error) {
1293 		return error;
1294 	}
1295 
1296 	/*
1297 	 * Make sure our on-disk codes match the values of the
1298 	 * FS-independent ones. This avoids the need for explicit
1299 	 * conversion (which would be a NOP anyway and thus easily
1300 	 * left out or called in the wrong places...)
1301 	 */
1302 	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1303 	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1304 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1305 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1306 
1307 	/*
1308 	 * If some of the idtypes aren't configured/enabled, arrange
1309 	 * to skip over them.
1310 	 */
1311 	if (cursor->q2c_users_done == 0 &&
1312 	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1313 		cursor->q2c_users_done = 1;
1314 	}
1315 	if (cursor->q2c_groups_done == 0 &&
1316 	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1317 		cursor->q2c_groups_done = 1;
1318 	}
1319 
1320 	/* Loop over, potentially, both idtypes */
1321 	while (1) {
1322 
1323 		/* Choose id type */
1324 		error = q2cursor_pickidtype(cursor, &idtype);
1325 		if (error == EAGAIN) {
1326 			/* nothing more to do, return 0 */
1327 			*ret = 0;
1328 			return 0;
1329 		}
1330 		KASSERT(ump->um_quotas[idtype] != NULLVP);
1331 
1332 		/*
1333 		 * Initialize the per-call iteration state. Copy the
1334 		 * cursor state so we can update it in place but back
1335 		 * out on error.
1336 		 */
1337 		q2cursor_initstate(&state, keys, vals, maxreturn,
1338 				   cursor->q2c_blocks_done);
1339 		newcursor = *cursor;
1340 
1341 		/* Assign keys */
1342 		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1343 					 &quota2_hash_size, &default_q2e);
1344 		if (error) {
1345 			return error;
1346 		}
1347 
1348 		/* Now fill in the values. */
1349 		error = q2cursor_getvals(ump, &state, &default_q2e);
1350 		if (error) {
1351 			return error;
1352 		}
1353 
1354 		/*
1355 		 * Now that we aren't going to fail and lose what we
1356 		 * did so far, we can update the cursor state.
1357 		 */
1358 
1359 		if (newcursor.q2c_hashpos >= quota2_hash_size) {
1360 			if (idtype == QUOTA_IDTYPE_USER)
1361 				cursor->q2c_users_done = 1;
1362 			else
1363 				cursor->q2c_groups_done = 1;
1364 
1365 			/* start over on another id type */
1366 			cursor->q2c_hashsize = 0;
1367 			cursor->q2c_defaults_done = 0;
1368 			cursor->q2c_hashpos = 0;
1369 			cursor->q2c_uidpos = 0;
1370 			cursor->q2c_blocks_done = 0;
1371 		} else {
1372 			*cursor = newcursor;
1373 			cursor->q2c_blocks_done = state.skiplast;
1374 		}
1375 
1376 		/*
1377 		 * If we have something to return, return it.
1378 		 * Otherwise, continue to the other idtype, if any,
1379 		 * and only return zero at end of iteration.
1380 		 */
1381 		if (state.numkeys > 0) {
1382 			break;
1383 		}
1384 	}
1385 
1386 	*ret = state.numkeys;
1387 	return 0;
1388 }
1389 
1390 int
1391 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1392 {
1393 	struct ulfsq2_cursor *cursor;
1394 
1395 	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1396 	cursor = Q2CURSOR(qkc);
1397 
1398 	cursor->q2c_magic = Q2C_MAGIC;
1399 	cursor->q2c_hashsize = 0;
1400 
1401 	cursor->q2c_users_done = 0;
1402 	cursor->q2c_groups_done = 0;
1403 	cursor->q2c_defaults_done = 0;
1404 	cursor->q2c_hashpos = 0;
1405 	cursor->q2c_uidpos = 0;
1406 	cursor->q2c_blocks_done = 0;
1407 	return 0;
1408 }
1409 
1410 int
1411 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1412 {
1413 	struct ulfsq2_cursor *cursor;
1414 	int error;
1415 
1416 	cursor = Q2CURSOR(qkc);
1417 	error = q2cursor_check(cursor);
1418 	if (error) {
1419 		return error;
1420 	}
1421 
1422 	/* nothing to do */
1423 
1424 	return 0;
1425 }
1426 
1427 int
1428 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1429     struct quotakcursor *qkc, int idtype)
1430 {
1431 	struct ulfsq2_cursor *cursor;
1432 	int error;
1433 
1434 	cursor = Q2CURSOR(qkc);
1435 	error = q2cursor_check(cursor);
1436 	if (error) {
1437 		return error;
1438 	}
1439 
1440 	switch (idtype) {
1441 	    case QUOTA_IDTYPE_USER:
1442 		cursor->q2c_users_done = 1;
1443 		break;
1444 	    case QUOTA_IDTYPE_GROUP:
1445 		cursor->q2c_groups_done = 1;
1446 		break;
1447 	    default:
1448 		return EINVAL;
1449 	}
1450 
1451 	return 0;
1452 }
1453 
1454 int
1455 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1456     int *ret)
1457 {
1458 	struct ulfsq2_cursor *cursor;
1459 	int error;
1460 
1461 	cursor = Q2CURSOR(qkc);
1462 	error = q2cursor_check(cursor);
1463 	if (error) {
1464 		return error;
1465 	}
1466 
1467 	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1468 	return 0;
1469 }
1470 
1471 int
1472 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1473 {
1474 	struct ulfsq2_cursor *cursor;
1475 	int error;
1476 
1477 	cursor = Q2CURSOR(qkc);
1478 	error = q2cursor_check(cursor);
1479 	if (error) {
1480 		return error;
1481 	}
1482 
1483 	cursor->q2c_hashsize = 0;
1484 
1485 	cursor->q2c_users_done = 0;
1486 	cursor->q2c_groups_done = 0;
1487 	cursor->q2c_defaults_done = 0;
1488 	cursor->q2c_hashpos = 0;
1489 	cursor->q2c_uidpos = 0;
1490 	cursor->q2c_blocks_done = 0;
1491 
1492 	return 0;
1493 }
1494 
1495 int
1496 lfs_q2sync(struct mount *mp)
1497 {
1498 	return 0;
1499 }
1500 
1501 struct dq2get_callback {
1502 	uid_t id;
1503 	struct dquot *dq;
1504 };
1505 
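/*
 * quota2_walk_list() callback for lfs_dq2get(): when the entry for the
 * target id is found, record its location in the dquot and stop the
 * walk.
 */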
1506 static int
1507 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1508     uint64_t off, void *v)
1509 {
1510 	struct dq2get_callback *c = v;
1511 	daddr_t lblkno;
1512 	int blkoff;
1513 	struct lfs *fs = ump->um_lfs;
1514 	const int needswap = ULFS_MPNEEDSWAP(fs);
1515 
1516 	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1517 		KASSERT(mutex_owned(&c->dq->dq_interlock));
1518 		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1519 		blkoff = (off & ump->umq2_bmask);
1520 		c->dq->dq2_lblkno = lblkno;
1521 		c->dq->dq2_blkoff = blkoff;
1522 		return Q2WL_ABORT;
1523 	}
1524 	return 0;
1525 }
1526 
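/*
 * Locate the on-disk entry for an id by walking its hash chain and
 * remember its location in the dquot.  Not finding one is not an
 * error; the dquot then keeps a zero location and an entry is
 * allocated lazily on first use.
 */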
1527 int
1528 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1529     struct dquot *dq)
1530 {
1531 	struct buf *bp;
1532 	struct quota2_header *q2h;
1533 	int error;
1534 	daddr_t offset;
1535 	u_long hash_mask;
1536 	struct dq2get_callback c = {
1537 		.id = id,
1538 		.dq = dq
1539 	};
1540 
1541 	KASSERT(mutex_owned(&dq->dq_interlock));
1542 	mutex_enter(&lfs_dqlock);
1543 	error = getq2h(ump, type, &bp, &q2h, 0);
1544 	if (error)
1545 		goto out_mutex;
1546 	/* look for our entry */
1547 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1548 	offset = q2h->q2h_entries[id & hash_mask];
1549 	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1550 	    dq2get_callback);
1551 	brelse(bp, 0);
1552 out_mutex:
1553 	mutex_exit(&lfs_dqlock);
1554 	return error;
1555 }
1556 
1557 int
1558 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1559 {
1560 	return 0;
1561 }
1562 
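/*
 * Mount-time setup: if the superblock says quota2 is in use, validate
 * the quota magic number and quota inode numbers, vget() the quota
 * file vnodes and mark the mount as having quotas enabled.
 */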
1563 int
1564 lfs_quota2_mount(struct mount *mp)
1565 {
1566 	struct ulfsmount *ump = VFSTOULFS(mp);
1567 	struct lfs *fs = ump->um_lfs;
1568 	int error;
1569 	struct vnode *vp;
1570 	struct lwp *l = curlwp;
1571 
1572 	if ((fs->lfs_use_quota2) == 0)
1573 		return 0;
1574 
1575 	fs->um_flags |= ULFS_QUOTA2;
1576 	ump->umq2_bsize = lfs_sb_getbsize(fs);
1577 	ump->umq2_bmask = lfs_sb_getbmask(fs);
1578 	if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1579 		printf("%s: Invalid quota magic number\n",
1580 		    mp->mnt_stat.f_mntonname);
1581 		return EINVAL;
1582 	}
1583 
1584 	error = 0;
1585         if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1586             fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1587                 printf("%s: No user quota inode\n",
1588 		    mp->mnt_stat.f_mntonname);
1589                 error = EINVAL;
1590         }
1591         if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1592             fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1593                 printf("%s: No group quota inode\n",
1594 		    mp->mnt_stat.f_mntonname);
1595                 error = EINVAL;
1596         }
1597 	if (error)
1598 		return error;
1599 
1600         if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1601 	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1602 		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
1603 		if (error) {
1604 			printf("%s: can't vget() user quota inode: %d\n",
1605 			    mp->mnt_stat.f_mntonname, error);
1606 			return error;
1607 		}
1608 		ump->um_quotas[ULFS_USRQUOTA] = vp;
1609 		ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1610 		mutex_enter(vp->v_interlock);
1611 		vp->v_writecount++;
1612 		mutex_exit(vp->v_interlock);
1613 		VOP_UNLOCK(vp);
1614 	}
1615         if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1616 	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1617 		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
1618 		if (error) {
1619 			vn_close(ump->um_quotas[ULFS_USRQUOTA],
1620 			    FREAD|FWRITE, l->l_cred);
1621 			printf("%s: can't vget() group quota inode: %d\n",
1622 			    mp->mnt_stat.f_mntonname, error);
1623 			return error;
1624 		}
1625 		ump->um_quotas[ULFS_GRPQUOTA] = vp;
1626 		ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1627 		mutex_enter(vp->v_interlock);
1628 		vp->v_vflag |= VV_SYSTEM;
1629 		vp->v_writecount++;
1630 		mutex_exit(vp->v_interlock);
1631 		VOP_UNLOCK(vp);
1632 	}
1633 
1634 	mp->mnt_flag |= MNT_QUOTA;
1635 	return 0;
1636 }
1637