/*	$NetBSD: ulfs_quota1.c,v 1.6 2013/07/28 01:10:49 dholland Exp $	*/
/*  from NetBSD: ufs_quota1.c,v 1.18 2012/02/02 03:00:48 matt Exp  */

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_quota1.c,v 1.6 2013/07/28 01:10:49 dholland Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_quota1.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#include <ufs/lfs/ulfs_quota.h>

static int chkdqchg(struct inode *, int64_t, kauth_cred_t, int);
static int chkiqchg(struct inode *, int32_t, kauth_cred_t, int);

/*
 * Update disk usage, and take corrective action.
 */
int
lfs_chkdq1(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

	if ((error = lfs_getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}
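	/*
	 * Positive change: first check every quota type with usage
	 * untouched, then apply the change only after all checks pass.
	 */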
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT, KAUTH_ARG(i),
		    KAUTH_ARG(QL_BLOCK), NULL) != 0) {
			mutex_enter(&dq->dq_interlock);
			error = chkdqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
static int
chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	KASSERT(mutex_owned(&dq->dq_interlock));
	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    lfs_quotatypes[type]);
			dq->dq_flags |= DQ_WARN(QL_BLOCK);
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime =
			    time_second + ip->i_ump->umq1_btime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_WARN(QL_BLOCK);
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
lfs_chkiq1(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

	if ((error = lfs_getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_WARN(QL_FILE);
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}
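	/* Same two-pass scheme as lfs_chkdq1: check all types, then apply. */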
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		if ((flags & FORCE) == 0 && kauth_authorize_system(cred,
		    KAUTH_SYSTEM_FS_QUOTA, KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(QL_FILE), NULL) != 0) {
			mutex_enter(&dq->dq_interlock);
			error = chkiqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
static int
chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	KASSERT(mutex_owned(&dq->dq_interlock));
	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    lfs_quotatypes[type]);
			dq->dq_flags |= DQ_WARN(QL_FILE);
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime =
			    time_second + ip->i_ump->umq1_itime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_WARN(QL_FILE);
			}
			return (EDQUOT);
		}
	}
	return (0);
}

int
lfsquota1_umount(struct mount *mp, int flags)
{
	int i, error;
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct lfs *fs = ump->um_lfs;
	struct lwp *l = curlwp;

	if ((fs->um_flags & ULFS_QUOTA) == 0)
		return 0;

	if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
		return (error);

	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if (ump->um_quotas[i] != NULLVP) {
			lfsquota1_handle_cmd_quotaoff(l, ump, i);
		}
	}
	return 0;
}

/*
 * Code to process quotactl commands.
 */

/*
 * set up a quota file for a particular file system.
 */
int
lfsquota1_handle_cmd_quotaon(struct lwp *l, struct ulfsmount *ump, int type,
    const char *fname)
{
	struct mount *mp = ump->um_mountp;
	struct lfs *fs = ump->um_lfs;
	struct vnode *vp, **vpp, *mvp;
	struct dquot *dq;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;

	if (fs->um_flags & ULFS_QUOTA2) {
		uprintf("%s: quotas v2 already enabled\n",
		    mp->mnt_stat.f_mntonname);
		return (EBUSY);
	}

	vpp = &ump->um_quotas[type];

	pb = pathbuf_create(fname);
	if (pb == NULL) {
		return ENOMEM;
	}
	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}
	vp = nd.ni_vp;
	pathbuf_destroy(pb);

	VOP_UNLOCK(vp);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
		return (EACCES);
	}
	if (*vpp != vp)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);
	mutex_enter(&lfs_dqlock);
	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&lfs_dqcv, &lfs_dqlock);
	ump->umq1_qflags[type] |= QTF_OPENING;
	mutex_exit(&lfs_dqlock);
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->umq1_btime[type] = MAX_DQ_TIME;
	ump->umq1_itime[type] = MAX_IQ_TIME;
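	/* The id 0 record, if present, overrides the default grace periods. */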
	if (lfs_dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->umq1_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->umq1_itime[type] = dq->dq_itime;
		lfs_dqrele(NULLVP, dq);
	}
	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_mount != mp || vismarker(vp) ||
		    vp->v_type == VNON || vp->v_writecount == 0 ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		if ((error = lfs_getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			break;
		}
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);

	mutex_enter(&lfs_dqlock);
	ump->umq1_qflags[type] &= ~QTF_OPENING;
	cv_broadcast(&lfs_dqcv);
	if (error == 0)
		fs->um_flags |= ULFS_QUOTA;
	mutex_exit(&lfs_dqlock);
	if (error)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);
	return (error);
}

/*
 * turn off disk quotas for a filesystem.
 */
int
lfsquota1_handle_cmd_quotaoff(struct lwp *l, struct ulfsmount *ump, int type)
{
	struct mount *mp = ump->um_mountp;
	struct lfs *fs = ump->um_lfs;
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct dquot *dq;
	struct inode *ip;
	kauth_cred_t cred;
	int i, error;

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);

	mutex_enter(&lfs_dqlock);
	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&lfs_dqcv, &lfs_dqlock);
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		mutex_exit(&lfs_dqlock);
		vnfree(mvp);
		return (0);
	}
	ump->umq1_qflags[type] |= QTF_CLOSING;
	fs->um_flags &= ~ULFS_QUOTA;
	mutex_exit(&lfs_dqlock);
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_mount != mp || vismarker(vp) ||
		    vp->v_type == VNON ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		lfs_dqrele(vp, dq);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
#ifdef DIAGNOSTIC
	lfs_dqflush(qvp);
#endif
	qvp->v_vflag &= ~VV_SYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred);
	mutex_enter(&lfs_dqlock);
	ump->um_quotas[type] = NULLVP;
	cred = ump->um_cred[type];
	ump->um_cred[type] = NOCRED;
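	/* Check whether any quota file is still open; if not, MNT_QUOTA is cleared below. */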
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	ump->umq1_qflags[type] &= ~QTF_CLOSING;
	cv_broadcast(&lfs_dqcv);
	mutex_exit(&lfs_dqlock);
	kauth_cred_free(cred);
	if (i == ULFS_MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

int
lfsquota1_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	struct dquot *dq;
	int error;
	struct quotaval blocks, files;
	int idtype;
	id_t id;

	idtype = qk->qk_idtype;
	id = qk->qk_id;

	if (ump->um_quotas[idtype] == NULLVP)
		return ENODEV;

	if (id == QUOTA_DEFAULTID) { /* we want the grace period of id 0 */
		if ((error = lfs_dqget(NULLVP, 0, ump, idtype, &dq)) != 0)
			return error;

	} else {
		if ((error = lfs_dqget(NULLVP, id, ump, idtype, &dq)) != 0)
			return error;
	}
	lfs_dqblk_to_quotavals(&dq->dq_un.dq1_dqb, &blocks, &files);
	lfs_dqrele(NULLVP, dq);
	if (id == QUOTA_DEFAULTID) {
		if (blocks.qv_expiretime > 0)
			blocks.qv_grace = blocks.qv_expiretime;
		else
			blocks.qv_grace = MAX_DQ_TIME;
		if (files.qv_expiretime > 0)
			files.qv_grace = files.qv_expiretime;
		else
			files.qv_grace = MAX_DQ_TIME;
	}

	switch (qk->qk_objtype) {
	    case QUOTA_OBJTYPE_BLOCKS:
		*qv = blocks;
		break;
	    case QUOTA_OBJTYPE_FILES:
		*qv = files;
		break;
	    default:
		return EINVAL;
	}

	return 0;
}

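/*
 * Quota1 stores limits as 32-bit values and uses 0 to mean "no limit",
 * so QUOTA_NOLIMIT and values too large to represent are encoded as 0.
 */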
static uint32_t
quota1_encode_limit(uint64_t lim)
{
	if (lim == QUOTA_NOLIMIT || lim >= 0xffffffff) {
		return 0;
	}
	return lim;
}

int
lfsquota1_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	struct dquot *dq;
	struct dqblk dqb;
	int error;

	switch (key->qk_idtype) {
	    case QUOTA_IDTYPE_USER:
	    case QUOTA_IDTYPE_GROUP:
		break;
	    default:
		return EINVAL;
	}

	switch (key->qk_objtype) {
	    case QUOTA_OBJTYPE_BLOCKS:
	    case QUOTA_OBJTYPE_FILES:
		break;
	    default:
		return EINVAL;
	}

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return ENODEV;

	if (key->qk_id == QUOTA_DEFAULTID) {
		/* just update grace times */
		id_t id = 0;

		if ((error = lfs_dqget(NULLVP, id, ump, key->qk_idtype, &dq)) != 0)
			return error;
		mutex_enter(&dq->dq_interlock);
		if (val->qv_grace != QUOTA_NOTIME) {
			if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS)
				ump->umq1_btime[key->qk_idtype] = dq->dq_btime =
					val->qv_grace;
			if (key->qk_objtype == QUOTA_OBJTYPE_FILES)
				ump->umq1_itime[key->qk_idtype] = dq->dq_itime =
					val->qv_grace;
		}
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return 0;
	}

	if ((error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq)) != 0)
		return (error);
	mutex_enter(&dq->dq_interlock);
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	dqb.dqb_curblocks = dq->dq_curblocks;
	dqb.dqb_curinodes = dq->dq_curinodes;
	dqb.dqb_btime = dq->dq_btime;
	dqb.dqb_itime = dq->dq_itime;
	if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
		dqb.dqb_bsoftlimit = quota1_encode_limit(val->qv_softlimit);
		dqb.dqb_bhardlimit = quota1_encode_limit(val->qv_hardlimit);
		dqb.dqb_isoftlimit = dq->dq_isoftlimit;
		dqb.dqb_ihardlimit = dq->dq_ihardlimit;
	} else {
		KASSERT(key->qk_objtype == QUOTA_OBJTYPE_FILES);
		dqb.dqb_bsoftlimit = dq->dq_bsoftlimit;
		dqb.dqb_bhardlimit = dq->dq_bhardlimit;
		dqb.dqb_isoftlimit = quota1_encode_limit(val->qv_softlimit);
		dqb.dqb_ihardlimit = quota1_encode_limit(val->qv_hardlimit);
	}
	if (dq->dq_id == 0 && val->qv_grace != QUOTA_NOTIME) {
		/* also update grace time if available */
		if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
			ump->umq1_btime[key->qk_idtype] = dqb.dqb_btime =
				val->qv_grace;
		}
		if (key->qk_objtype == QUOTA_OBJTYPE_FILES) {
			ump->umq1_itime[key->qk_idtype] = dqb.dqb_itime =
				val->qv_grace;
		}
	}
	if (dqb.dqb_bsoftlimit &&
	    dq->dq_curblocks >= dqb.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		dqb.dqb_btime = time_second + ump->umq1_btime[key->qk_idtype];
	if (dqb.dqb_isoftlimit &&
	    dq->dq_curinodes >= dqb.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		dqb.dqb_itime = time_second + ump->umq1_itime[key->qk_idtype];
	dq->dq_un.dq1_dqb = dqb;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}


#if 0
/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota1(struct mount *mp, u_long id, int type, struct dqblk *dqb)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ulfsmount *ump = VFSTOULFS(mp);
	int error;

	if ((error = lfs_dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	dqb->dqb_curblocks = dq->dq_curblocks;
	dqb->dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		dqb->dqb_btime = dq->dq_btime;
		dqb->dqb_itime = dq->dq_itime;
	}
	if (dqb->dqb_bsoftlimit &&
	    dq->dq_curblocks >= dqb->dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		dqb->dqb_btime = time_second + ump->umq1_btime[type];
	if (dqb->dqb_isoftlimit &&
	    dq->dq_curinodes >= dqb->dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		dqb->dqb_itime = time_second + ump->umq1_itime[type];
	dq->dq_un.dq1_dqb = *dqb;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(struct mount *mp, u_long id, int type, void *addr)
{
	struct dquot *dq;
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = lfs_dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->umq1_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->umq1_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}
#endif

/*
 * Q_SYNC - sync quota files to disk.
 */
int
lfs_q1sync(struct mount *mp)
{
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct vnode *vp, *mvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == ULFS_MAXQUOTAS)
		return (0);

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);

	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	mutex_enter(&mntvnode_lock);
 again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_mount != mp || vismarker(vp) ||
		    vp->v_type == VNON ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				(void)vunmark(mvp);
				goto again;
			}
			continue;
		}
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			if (dq->dq_flags & DQ_MOD)
				lfs_dq1sync(vp, dq);
			mutex_exit(&dq->dq_interlock);
		}
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (0);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
lfs_dq1get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
    struct dquot *dq)
{
	struct iovec aiov;
	struct uio auio;
	int error;

	KASSERT(mutex_owned(&dq->dq_interlock));
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
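	/*
	 * A read that returns nothing (the id's slot lies beyond EOF)
	 * means no record exists yet; start from an all-zero dqblk.
	 */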
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_un.dq1_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error)
		return (error);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->umq1_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->umq1_itime[type];
	}
	return (0);
}

/*
 * Update the disk quota in the quota file.
 */
int
lfs_dq1sync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dq1sync: dquot");
	KASSERT(mutex_owned(&dq->dq_interlock));
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dq1sync: file");
	KASSERT(dqvp != vp);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	dq->dq_flags &= ~DQ_MOD;
	VOP_UNLOCK(dqvp);
	return (error);
}