/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 05/20/95
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
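/*
 * INITQFNAMES (defined in <ufs/ufs/quota.h>) supplies the printable
 * per-type names (e.g. "user", "group") used by the uprintf() messages
 * below to identify which quota was exceeded.
 */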
static char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(ip)
	register struct inode *ip;
{
	struct ufsmount *ump;
	struct vnode *vp = ITOV(ip);
	int error;

	ump = VFSTOUFS(vp->v_mount);
	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(ip, change, cred, flags)
	register struct inode *ip;
	long change;
	struct ucred *cred;
	int flags;
{
	register struct dquot *dq;
	register int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
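			/*
			 * DQ_LOCK is held while the dquot is being read in
			 * (dqget()) or written back (dqsync()); set DQ_WANT
			 * and sleep until the holder wakes us before
			 * touching the usage counts.
			 */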
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				sleep((caddr_t)dq, PINOD+1);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if (error = chkdqchg(ip, change, cred, i))
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			sleep((caddr_t)dq, PINOD+1);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(ip, change, cred, type)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int type;
{
	register struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(ip, change, cred, flags)
	register struct inode *ip;
	long change;
	struct ucred *cred;
	int flags;
{
	register struct dquot *dq;
	register int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				sleep((caddr_t)dq, PINOD+1);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if (error = chkiqchg(ip, change, cred, i))
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			sleep((caddr_t)dq, PINOD+1);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(ip, change, cred, type)
	struct inode *ip;
	long change;
	struct ucred *cred;
	int type;
{
	register struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(ip)
	register struct inode *ip;
{
	struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
	register int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */
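/*
 * These routines are reached through the filesystem's quotactl entry
 * point (ufs_quotactl()) on behalf of the quotactl(2) system call.
 */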

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(p, mp, type, fname)
	struct proc *p;
	struct mount *mp;
	register int type;
	caddr_t fname;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p);
	if (error = vn_open(&nd, FREAD|FWRITE, 0))
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0, p);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(p, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	crhold(p->p_ucred);
	ump->um_cred[type] = p->p_ucred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquots for inodes being modified.
	 * If the vnode list changes while the scan is blocked,
	 * the scan is restarted from the beginning.
	 */
again:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
		nextvp = vp->v_mntvnodes.le_next;
		if (vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE, p))
			goto again;
		if (error = getinoquota(VTOI(vp))) {
			vput(vp);
			break;
		}
		vput(vp);
		if (vp->v_mntvnodes.le_next != nextvp || vp->v_mount != mp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(p, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(p, mp, type)
	struct proc *p;
	struct mount *mp;
	register int type;
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
		nextvp = vp->v_mntvnodes.le_next;
		if (vget(vp, LK_EXCLUSIVE, p))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		if (vp->v_mntvnodes.le_next != nextvp || vp->v_mount != mp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	int error;

	if (error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq))
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	register struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	if (error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk)))
		return (error);
	if (error = dqget(NULLVP, id, ump, type, &ndq))
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		sleep((caddr_t)dq, PINOD+1);
	}
	/*
	 * Copy all but the current values.
	 * Reset the time limit if the user previously had no soft limit
	 * or was under it, but now has a soft limit and is over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time.tv_sec + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	register struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	if (error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk)))
		return (error);
	if (error = dqget(NULLVP, id, ump, type, &ndq))
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		sleep((caddr_t)dq, PINOD+1);
	}
	/*
	 * Reset the time limit if there is a soft limit and the user
	 * was previously under it, but is now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time.tv_sec + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct proc *p = curproc;		/* XXX */
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	simple_lock(&mntvnode_slock);
again:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = vp->v_mntvnodes.le_next;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
		if (error) {
			simple_lock(&mntvnode_slock);
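			/*
			 * ENOENT from vget() means the vnode was reclaimed
			 * while we were acquiring it, so the vnode list may
			 * have changed underneath us; restart the scan.
			 */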
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		if (vp->v_mntvnodes.le_next != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
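/*
 * Hash table of in-core dquots, keyed on the quota-file vnode pointer
 * and the id; dqhash is the mask returned by hashinit() in dqinit().
 */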
#define	DQHASH(dqvp, id) \
	(&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;

/*
 * Dquot free list.
 */
#define	DQUOTINC	5	/* minimum free dquots desired */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;

/*
 * Initialize the quota system.
 */
void
dqinit()
{

	dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	register struct ufsmount *ump;
	register int type;
	struct dquot **dqp;
{
	struct proc *p = curproc;		/* XXX */
	struct dquot *dq;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		bzero((char *)dq, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = (struct proc *)0;
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
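	/*
	 * A read entirely beyond the end of the quota file succeeds but
	 * transfers nothing; treat the missing entry as all zeroes
	 * (no usage, no limits).
	 */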
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		bzero((caddr_t)&dq->dq_dqb, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0, p);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time.tv_sec + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time.tv_sec + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(dq)
	struct dquot *dq;
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	register struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
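	/*
	 * Last reference: write the dquot back if it has been modified.
	 * dqsync() may sleep, during which time dqget() can hand out a
	 * new reference, so re-check the count before putting the dquot
	 * on the free list.
	 */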
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct proc *p = curproc;		/* XXX */
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p);
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		sleep((caddr_t)dq, PINOD+2);
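		/*
		 * Another process may have written this dquot back (and
		 * cleared DQ_MOD) while we slept; if so there is nothing
		 * left to do.
		 */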
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0, p);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = (struct proc *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0, p);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
	register struct vnode *vp;
{
	register struct dquot *dq, *nextdq;
	struct dqhash *dqh;

	/*
	 * Move all dquots that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = dqh->lh_first; dq; dq = nextdq) {
			nextdq = dq->dq_hash.le_next;
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = (struct ufsmount *)0;
		}
	}
}