/*	$NetBSD: ufs_quota.c,v 1.30 2003/11/05 10:18:38 hannken Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota.c,v 1.30 2003/11/05 10:18:38 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * Quota name to error message mapping.
 */
static char *quotatypes[] = INITQFNAMES;

/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quota.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(ip)
	struct inode *ip;
{
	struct ufsmount *ump;
	struct vnode *vp = ITOV(ip);
	int error;

	ump = VFSTOUFS(vp->v_mount);
	/*
	 * Set up the user quota based on file uid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
	    error != EINVAL)
		return (error);
	/*
	 * Set up the group quota based on file gid.
	 * EINVAL means that quotas are not enabled.
	 */
	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
	    (error =
		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
	    error != EINVAL)
		return (error);
	return (0);
}

/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(ip, change, cred, flags)
	struct inode *ip;
	int64_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
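			/*
			 * Another thread has this entry locked for I/O
			 * (see dqsync()); request a wakeup and wait for
			 * it to be released before touching the counts.
			 */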
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkdq", 0);
			}
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 &&
	    (cred != NOCRED && cred->cr_uid != 0)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkdqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
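	/*
	 * The change was approved above (or is being forced); charge it
	 * against every quota this inode is subject to.
	 */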
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkdq", 0);
		}
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkdqchg(ip, change, cred, type)
	struct inode *ip;
	int64_t change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_BLKS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_BLKS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(ip, change, cred, flags)
	struct inode *ip;
	int32_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			while (dq->dq_flags & DQ_LOCK) {
				dq->dq_flags |= DQ_WANT;
				(void) tsleep(dq, PINOD+1, "chkiq", 0);
			}
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
		}
		return (0);
	}
	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			if ((error = chkiqchg(ip, change, cred, i)) != 0)
				return (error);
		}
	}
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		while (dq->dq_flags & DQ_LOCK) {
			dq->dq_flags |= DQ_WANT;
			(void) tsleep(dq, PINOD+1, "chkiq", 0);
		}
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
int
chkiqchg(ip, change, cred, type)
	struct inode *ip;
	int32_t change;
	struct ucred *cred;
	int type;
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			dq->dq_flags |= DQ_INODS;
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time.tv_sec +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type];
			if (ip->i_uid == cred->cr_uid)
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time.tv_sec > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_INODS;
			}
			return (EDQUOT);
		}
	}
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
void
chkdquot(ip)
	struct inode *ip;
{
	struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] == NULLVP ||
		    (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
			continue;
		if (ip->i_dquot[i] == NODQUOT) {
			vprint("chkdquot: missing dquot", ITOV(ip));
			panic("missing dquot");
		}
	}
}
#endif

/*
 * Code to process quotactl commands.
 */

/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(p, mp, type, fname)
	struct proc *p;
	struct mount *mp;
	int type;
	caddr_t fname;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp;
	struct vnode *nextvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(p, mp, type);
	ump->um_qflags[type] |= QTF_OPENING;
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_flag |= VSYSTEM;
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	crhold(p->p_ucred);
	ump->um_cred[type] = p->p_ucred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON || vp->v_writecount == 0)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
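		/*
		 * If the vnode list changed while we slept in vget()
		 * or while setting up the dquots, rescan from the start.
		 */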
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	ump->um_qflags[type] &= ~QTF_OPENING;
	if (error)
		quotaoff(p, mp, type);
	return (error);
}

/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(p, mp, type)
	struct proc *p;
	struct mount *mp;
	int type;
{
	struct vnode *vp;
	struct vnode *qvp, *nextvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	int error;

	if ((qvp = ump->um_quotas[type]) == NULLVP)
		return (0);
	ump->um_qflags[type] |= QTF_CLOSING;
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto again;
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp || vp->v_mount != mp)
			goto again;
	}
	dqflush(qvp);
	qvp->v_flag &= ~VSYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p);
	ump->um_quotas[type] = NULLVP;
	crfree(ump->um_cred[type]);
	ump->um_cred[type] = NOCRED;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	for (type = 0; type < MAXQUOTAS; type++)
		if (ump->um_quotas[type] != NULLVP)
			break;
	if (type == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}

/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setquota", 0);
	}
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time.tv_sec + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(mp, id, type, addr)
	struct mount *mp;
	u_long id;
	int type;
	caddr_t addr;
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+1, "setuse", 0);
	}
	/*
	 * Reset the time limit if there is a soft limit, usage was
	 * previously under it, and the new usage is over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time.tv_sec + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time.tv_sec + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SYNC - sync quota files to disk.
 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *nextvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	simple_lock(&mntvnode_slock);
again:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) {
		if (vp->v_mount != mp)
			goto again;
		nextvp = LIST_NEXT(vp, v_mntvnodes);
		if (vp->v_type == VNON)
			continue;
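		/*
		 * Take the vnode's interlock before dropping the
		 * mount-vnode list lock so the vnode is not recycled
		 * in between; vget() takes over the interlock via
		 * LK_INTERLOCK below.
		 */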
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto again;
			continue;
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
				dqsync(vp, dq);
		}
		vput(vp);
		simple_lock(&mntvnode_slock);
		if (LIST_NEXT(vp, v_mntvnodes) != nextvp)
			goto again;
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
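/*
 * In-core dquots are kept in a hash table keyed on the quota file
 * vnode and the id within that file; dqhash is the mask for the
 * table allocated in dqinit()/dqreinit().
 */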
#define DQHASH(dqvp, id) \
	(((((long)(dqvp)) >> 8) + id) & dqhash)
LIST_HEAD(dqhashhead, dquot) *dqhashtbl;
u_long dqhash;

/*
 * Dquot free list.
 */
#define DQUOTINC	5	/* minimum free dquots desired */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;

MALLOC_DEFINE(M_DQUOT, "UFS quota", "UFS quota entries");

/*
 * Initialize the quota system.
 */
void
dqinit()
{
	dqhashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &dqhash);
	TAILQ_INIT(&dqfreelist);
}

void
dqreinit()
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, M_DQUOT, M_WAITOK, &mask);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
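	/*
	 * Rehash every cached entry from the old table into its
	 * slot in the new one.
	 */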
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	hashdone(oldhash, M_DQUOT);
}

/*
 * Free resources held by quota system.
 */
void
dqdone()
{
	hashdone(dqhashtbl, M_DQUOT);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	struct ufsmount *ump;
	int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Cache hit with no references. Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt == 0)
			TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		DQREF(dq);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (dqfreelist.tqh_first == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
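	/*
	 * Either allocate a fresh dquot (free list empty and still below
	 * the global limit, so the target was just raised) or recycle the
	 * least recently used entry from the front of the free list.
	 */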
	if (numdquot < desireddquot) {
		dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
		memset((char *)dq, 0, sizeof *dq);
		numdquot++;
	} else {
		if ((dq = dqfreelist.tqh_first) == NULL) {
			tablefull("dquot",
			    "increase kern.maxvnodes or NVNODE");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("free dquot isn't");
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		LIST_REMOVE(dq, dq_hash);
	}
	/*
	 * Initialize the contents of the dquot structure.
	 */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_ump = ump;
	dq->dq_type = type;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = (struct proc *)0;
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
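	/*
	 * A read that returns nothing (the id lies beyond the end of
	 * the quota file) is not an error: treat it as an all-zero
	 * entry, i.e. no usage and no limits recorded yet.
	 */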
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((caddr_t)&dq->dq_dqb, 0, sizeof(struct dqblk));
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags = 0;
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		LIST_REMOVE(dq, dq_hash);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time.tv_sec + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time.tv_sec + ump->um_itime[type];
	}
	*dqp = dq;
	return (0);
}

/*
 * Obtain a reference to a dquot.
 */
void
dqref(dq)
	struct dquot *dq;
{

	dq->dq_cnt++;
}

/*
 * Release a reference to a dquot.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync(vp, dq);
	if (--dq->dq_cnt > 0)
		return;
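	/*
	 * Last reference dropped. The entry stays on its hash chain so
	 * dqget() can still find it, but it goes to the tail of the free
	 * list to be reused when memory is needed.
	 */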
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}

/*
 * Update the disk quota in the quota file.
 */
int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct vnode *dqvp;
	struct mount *mp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dqsync: dquot");
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	vn_start_write(dqvp, &mp, V_WAIT | V_LOWER);
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
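	/*
	 * If another thread is already writing this entry, wait for it;
	 * it may flush the entry itself, in which case DQ_MOD is clear
	 * on wakeup and there is nothing left to do.
	 */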
	while (dq->dq_flags & DQ_LOCK) {
		dq->dq_flags |= DQ_WANT;
		(void) tsleep(dq, PINOD+2, "dqsync", 0);
		if ((dq->dq_flags & DQ_MOD) == 0) {
			if (vp != dqvp)
				VOP_UNLOCK(dqvp, 0);
			vn_finished_write(mp, V_LOWER);
			return (0);
		}
	}
	dq->dq_flags |= DQ_LOCK;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (caddr_t)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = (struct proc *)0;
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	if (dq->dq_flags & DQ_WANT)
		wakeup((caddr_t)dq);
	dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
	if (vp != dqvp)
		VOP_UNLOCK(dqvp, 0);
	vn_finished_write(mp, V_LOWER);
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
	struct vnode *vp;
{
	struct dquot *dq, *nextdq;
	struct dqhashhead *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
			nextdq = LIST_NEXT(dq, dq_hash);
			if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_ump = NULL;
		}
	}
}