1 /*-
2 * Copyright (c) 1982, 1986, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Robert Elz at The University of Melbourne.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include "opt_ffs.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/fcntl.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mount.h>
47 #include <sys/mutex.h>
48 #include <sys/namei.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/socket.h>
52 #include <sys/stat.h>
53 #include <sys/sysctl.h>
54 #include <sys/vnode.h>
55
56 #include <ufs/ufs/extattr.h>
57 #include <ufs/ufs/quota.h>
58 #include <ufs/ufs/inode.h>
59 #include <ufs/ufs/ufsmount.h>
60 #include <ufs/ufs/ufs_extern.h>
61
62 static int unprivileged_get_quota = 0;
63 SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_get_quota, CTLFLAG_RW,
64 &unprivileged_get_quota, 0,
65 "Unprivileged processes may retrieve quotas for other uids and gids");
66
67 static MALLOC_DEFINE(M_DQUOT, "ufs_quota", "UFS quota entries");
68
69 /*
70 * Quota name to error message mapping.
71 */
72 static char *quotatypes[] = INITQFNAMES;
73
74 static int chkdqchg(struct inode *, ufs2_daddr_t, struct ucred *, int, int *);
75 static int chkiqchg(struct inode *, int, struct ucred *, int, int *);
76 static int dqget(struct vnode *,
77 u_long, struct ufsmount *, int, struct dquot **);
78 static int dqsync(struct vnode *, struct dquot *);
79 static void dqflush(struct vnode *);
80 static int quotaoff1(struct thread *td, struct mount *mp, int type);
81 static int quotaoff_inchange(struct thread *td, struct mount *mp, int type);
82
83 #ifdef DIAGNOSTIC
84 static void dqref(struct dquot *);
85 static void chkdquot(struct inode *);
86 #endif
87
88 /*
89 * Set up the quotas for an inode.
90 *
91 * This routine completely defines the semantics of quotas.
92 * If other criterion want to be used to establish quotas, the
93 * MAXQUOTAS value in quotas.h should be increased, and the
94 * additional dquots set up here.
95 */
96 int
97 getinoquota(ip)
98 struct inode *ip;
99 {
100 struct ufsmount *ump;
101 struct vnode *vp;
102 int error;
103
104 vp = ITOV(ip);
105
106 /*
107 * Disk quotas must be turned off for system files. Currently
108 * snapshot and quota files.
109 */
110 if ((vp->v_vflag & VV_SYSTEM) != 0)
111 return (0);
112 /*
113 * XXX: Turn off quotas for files with a negative UID or GID.
114 * This prevents the creation of 100GB+ quota files.
115 */
116 if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
117 return (0);
118 ump = VFSTOUFS(vp->v_mount);
119 /*
120 * Set up the user quota based on file uid.
121 * EINVAL means that quotas are not enabled.
122 */
123 if ((error =
124 dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
125 error != EINVAL)
126 return (error);
127 /*
128 * Set up the group quota based on file gid.
129 * EINVAL means that quotas are not enabled.
130 */
131 if ((error =
132 dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
133 error != EINVAL)
134 return (error);
135 return (0);
136 }
137
138 /*
139 * Update disk usage, and take corrective action.
140 */
int
chkdq(ip, change, cred, flags)
	struct inode *ip;
	ufs2_daddr_t change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	ufs2_daddr_t ncurblocks;
	struct vnode *vp = ITOV(ip);
	int i, error, warn, do_check;

	/*
	 * Disk quotas must be turned off for system files. Currently
	 * snapshot and quota files.
	 */
	if ((vp->v_vflag & VV_SYSTEM) != 0)
		return (0);
	/*
	 * XXX: Turn off quotas for files with a negative UID or GID.
	 * This prevents the creation of 100GB+ quota files.
	 */
	if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
		return (0);
#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		/*
		 * Releasing blocks: credit every attached quota type,
		 * clamping the count at zero, and clear any pending
		 * "over block limit" (DQ_BLKS) condition.
		 */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			DQI_LOCK(dq);
			DQI_WAIT(dq, PINOD+1, "chkdq1");
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_BLKS;
			dq->dq_flags |= DQ_MOD;
			DQI_UNLOCK(dq);
		}
		return (0);
	}
	/*
	 * Limits are enforced only when the credential lacks
	 * PRIV_VFS_EXCEEDQUOTA and the caller did not pass FORCE.
	 */
	if ((flags & FORCE) == 0 &&
	    priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
		do_check = 1;
	else
		do_check = 0;
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		warn = 0;
		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "chkdq2");
		if (do_check) {
			/* chkdqchg() drops the dq interlock on failure. */
			error = chkdqchg(ip, change, cred, i, &warn);
			if (error) {
				/*
				 * Roll back user quota changes when
				 * group quota failed.
				 */
				while (i > 0) {
					--i;
					dq = ip->i_dquot[i];
					if (dq == NODQUOT)
						continue;
					DQI_LOCK(dq);
					DQI_WAIT(dq, PINOD+1, "chkdq3");
					ncurblocks = dq->dq_curblocks - change;
					if (ncurblocks >= 0)
						dq->dq_curblocks = ncurblocks;
					else
						dq->dq_curblocks = 0;
					dq->dq_flags &= ~DQ_BLKS;
					dq->dq_flags |= DQ_MOD;
					DQI_UNLOCK(dq);
				}
				return (error);
			}
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curblocks + change >= dq->dq_bsoftlimit &&
		    dq->dq_curblocks < dq->dq_bsoftlimit)
			dq->dq_btime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[i];
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
		DQI_UNLOCK(dq);
		/* Warn the user the first time they cross the soft limit. */
		if (warn)
			uprintf("\n%s: warning, %s %s\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[i], "disk quota exceeded");
	}
	return (0);
}
240
241 /*
242 * Check for a valid change to a users allocation.
243 * Issue an error message if appropriate.
244 */
static int
chkdqchg(ip, change, cred, type, warn)
	struct inode *ip;
	ufs2_daddr_t change;
	struct ucred *cred;
	int type;
	int *warn;
{
	struct dquot *dq = ip->i_dquot[type];
	ufs2_daddr_t ncurblocks = dq->dq_curblocks + change;

	/*
	 * Locking contract: called from chkdq() with the dq interlock
	 * held (DQI_LOCK).  On a zero return the interlock is still
	 * held; on every EDQUOT return the interlock has been dropped
	 * via DQI_UNLOCK before returning.  *warn is set when the file
	 * owner should receive a soft-limit warning message.
	 */
	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		/* Message only once (DQ_BLKS) and only to the owner. */
		if ((dq->dq_flags & DQ_BLKS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			dq->dq_flags |= DQ_BLKS;
			DQI_UNLOCK(dq);
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			return (EDQUOT);
		}
		DQI_UNLOCK(dq);
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		/* First crossing: start the grace timer, flag a warning. */
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
			if (ip->i_uid == cred->cr_uid)
				*warn = 1;
			return (0);
		}
		/* Grace period expired: enforce like the hard limit. */
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_BLKS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				dq->dq_flags |= DQ_BLKS;
				DQI_UNLOCK(dq);
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "disk quota exceeded for too long");
				return (EDQUOT);
			}
			DQI_UNLOCK(dq);
			return (EDQUOT);
		}
	}
	return (0);
}
301
302 /*
303 * Check the inode limit, applying corrective action.
304 */
int
chkiq(ip, change, cred, flags)
	struct inode *ip;
	int change;
	struct ucred *cred;
	int flags;
{
	struct dquot *dq;
	ino_t ncurinodes;
	int i, error, warn, do_check;

#ifdef DIAGNOSTIC
	if ((flags & CHOWN) == 0)
		chkdquot(ip);
#endif
	if (change == 0)
		return (0);
	if (change < 0) {
		/*
		 * Freeing inodes: credit every attached quota type and
		 * clear any pending "over inode limit" (DQ_INODS)
		 * condition.
		 */
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			DQI_LOCK(dq);
			DQI_WAIT(dq, PINOD+1, "chkiq1");
			ncurinodes = dq->dq_curinodes + change;
			/* XXX: ncurinodes is unsigned */
			if (dq->dq_curinodes != 0 && ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
			DQI_UNLOCK(dq);
		}
		return (0);
	}
	/*
	 * Limits are enforced only when the credential lacks
	 * PRIV_VFS_EXCEEDQUOTA and the caller did not pass FORCE.
	 */
	if ((flags & FORCE) == 0 &&
	    priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
		do_check = 1;
	else
		do_check = 0;
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		warn = 0;
		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "chkiq2");
		if (do_check) {
			/* chkiqchg() drops the dq interlock on failure. */
			error = chkiqchg(ip, change, cred, i, &warn);
			if (error) {
				/*
				 * Roll back user quota changes when
				 * group quota failed.
				 */
				while (i > 0) {
					--i;
					dq = ip->i_dquot[i];
					if (dq == NODQUOT)
						continue;
					DQI_LOCK(dq);
					DQI_WAIT(dq, PINOD+1, "chkiq3");
					ncurinodes = dq->dq_curinodes - change;
					/* XXX: ncurinodes is unsigned */
					if (dq->dq_curinodes != 0 &&
					    ncurinodes >= 0)
						dq->dq_curinodes = ncurinodes;
					else
						dq->dq_curinodes = 0;
					dq->dq_flags &= ~DQ_INODS;
					dq->dq_flags |= DQ_MOD;
					DQI_UNLOCK(dq);
				}
				return (error);
			}
		}
		/* Reset timer when crossing soft limit */
		if (dq->dq_curinodes + change >= dq->dq_isoftlimit &&
		    dq->dq_curinodes < dq->dq_isoftlimit)
			dq->dq_itime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[i];
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
		DQI_UNLOCK(dq);
		/* Warn the user the first time they cross the soft limit. */
		if (warn)
			uprintf("\n%s: warning, %s %s\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[i], "inode quota exceeded");
	}
	return (0);
}
394
395 /*
396 * Check for a valid change to a users allocation.
397 * Issue an error message if appropriate.
398 */
static int
chkiqchg(ip, change, cred, type, warn)
	struct inode *ip;
	int change;
	struct ucred *cred;
	int type;
	int *warn;
{
	struct dquot *dq = ip->i_dquot[type];
	ino_t ncurinodes = dq->dq_curinodes + change;

	/*
	 * Locking contract: called from chkiq() with the dq interlock
	 * held (DQI_LOCK).  On a zero return the interlock is still
	 * held; on every EDQUOT return the interlock has been dropped
	 * via DQI_UNLOCK before returning.  *warn is set when the file
	 * owner should receive a soft-limit warning message.
	 */
	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		/* Message only once (DQ_INODS) and only to the owner. */
		if ((dq->dq_flags & DQ_INODS) == 0 &&
		    ip->i_uid == cred->cr_uid) {
			dq->dq_flags |= DQ_INODS;
			DQI_UNLOCK(dq);
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    quotatypes[type]);
			return (EDQUOT);
		}
		DQI_UNLOCK(dq);
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation. Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		/* First crossing: start the grace timer, flag a warning. */
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime = time_second +
			    VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type];
			if (ip->i_uid == cred->cr_uid)
				*warn = 1;
			return (0);
		}
		/* Grace period expired: enforce like the hard limit. */
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_INODS) == 0 &&
			    ip->i_uid == cred->cr_uid) {
				dq->dq_flags |= DQ_INODS;
				DQI_UNLOCK(dq);
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    quotatypes[type],
				    "inode quota exceeded for too long");
				return (EDQUOT);
			}
			DQI_UNLOCK(dq);
			return (EDQUOT);
		}
	}
	return (0);
}
455
456 #ifdef DIAGNOSTIC
457 /*
458 * On filesystems with quotas enabled, it is an error for a file to change
459 * size and not to have a dquot structure associated with it.
460 */
461 static void
462 chkdquot(ip)
463 struct inode *ip;
464 {
465 struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount);
466 struct vnode *vp = ITOV(ip);
467 int i;
468
469 /*
470 * Disk quotas must be turned off for system files. Currently
471 * these are snapshots and quota files.
472 */
473 if ((vp->v_vflag & VV_SYSTEM) != 0)
474 return;
475 /*
476 * XXX: Turn off quotas for files with a negative UID or GID.
477 * This prevents the creation of 100GB+ quota files.
478 */
479 if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
480 return;
481
482 UFS_LOCK(ump);
483 for (i = 0; i < MAXQUOTAS; i++) {
484 if (ump->um_quotas[i] == NULLVP ||
485 (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
486 continue;
487 if (ip->i_dquot[i] == NODQUOT) {
488 UFS_UNLOCK(ump);
489 vprint("chkdquot: missing dquot", ITOV(ip));
490 panic("chkdquot: missing dquot");
491 }
492 }
493 UFS_UNLOCK(ump);
494 }
495 #endif
496
497 /*
498 * Code to process quotactl commands.
499 */
500
501 /*
502 * Q_QUOTAON - set up a quota file for a particular filesystem.
503 */
int
quotaon(td, mp, type, fname)
	struct thread *td;
	struct mount *mp;
	int type;
	void *fname;
{
	struct ufsmount *ump;
	struct vnode *vp, **vpp;
	struct vnode *mvp;
	struct dquot *dq;
	int error, flags, vfslocked;
	struct nameidata nd;

	error = priv_check(td, PRIV_UFS_QUOTAON);
	if (error)
		return (error);

	ump = VFSTOUFS(mp);
	dq = NODQUOT;

	/* Open the quota file named by the (userspace) path in fname. */
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_USERSPACE, fname, td);
	flags = FREAD | FWRITE;
	error = vn_open(&nd, &flags, 0, NULL);
	if (error)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0, td);
	/* The quota file must be a regular file. */
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (EACCES);
	}

	/*
	 * Mark this quota type as in transition; fail if another
	 * quotaon/quotaoff for it is already in progress.
	 */
	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
		UFS_UNLOCK(ump);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (EALREADY);
	}
	ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_QUOTA;
	MNT_IUNLOCK(mp);
	UFS_UNLOCK(ump);

	/* If a different quota file was already active, shut it down. */
	vpp = &ump->um_quotas[type];
	if (*vpp != vp)
		quotaoff1(td, mp, type);

	/* Mark the quota vnode VV_SYSTEM so chkdq()/chkiq() skip it. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	vp->v_vflag |= VV_SYSTEM;
	VOP_UNLOCK(vp, 0, td);
	*vpp = vp;
	VFS_UNLOCK_GIANT(vfslocked);
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	ump->um_cred[type] = crhold(td->td_ucred);
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	/* Id 0 in the quota file overrides the default grace times. */
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Allow the getdq from getinoquota below to read the quota
	 * from file.
	 */
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_CLOSING;
	UFS_UNLOCK(ump);
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	MNT_ILOCK(mp);
again:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		/* A failed vget means the vnode was recycled: rescan. */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto again;
		}
		if (vp->v_type == VNON || vp->v_writecount == 0) {
			VOP_UNLOCK(vp, 0, td);
			vrele(vp);
			MNT_ILOCK(mp);
			continue;
		}
		error = getinoquota(VTOI(vp));
		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
		if (error) {
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			break;
		}
	}
	MNT_IUNLOCK(mp);

	/* Undo everything if attaching dquots to some vnode failed. */
	if (error)
		quotaoff_inchange(td, mp, type);
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_OPENING;
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
	    ("quotaon: leaking flags"));
	UFS_UNLOCK(ump);

	return (error);
}
625
626 /*
627 * Main code to turn off disk quotas for a filesystem. Does not change
628 * flags.
629 */
static int
quotaoff1(td, mp, type)
	struct thread *td;
	struct mount *mp;
	int type;
{
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct ufsmount *ump;
	struct dquot *dq;
	struct inode *ip;
	struct ucred *cr;
	int vfslocked;
	int error;

	ump = VFSTOUFS(mp);

	/* Caller must already have set QTF_CLOSING for this type. */
	UFS_LOCK(ump);
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
	    ("quotaoff1: flags are invalid"));
	/* Nothing to do if this quota type was never turned on. */
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		UFS_UNLOCK(ump);
		return (0);
	}
	cr = ump->um_cred[type];
	UFS_UNLOCK(ump);

	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
	MNT_ILOCK(mp);
again:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		/* A failed vget means the vnode was recycled: rescan. */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);

	dqflush(qvp);
	/* Clear um_quotas before closing the quota vnode to prevent
	 * access to the closed vnode from dqget/dqsync
	 */
	UFS_LOCK(ump);
	ump->um_quotas[type] = NULLVP;
	ump->um_cred[type] = NOCRED;
	UFS_UNLOCK(ump);

	/* Drop the VV_SYSTEM marker and close the quota file. */
	vfslocked = VFS_LOCK_GIANT(qvp->v_mount);
	vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY, td);
	qvp->v_vflag &= ~VV_SYSTEM;
	VOP_UNLOCK(qvp, 0, td);
	error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	crfree(cr);

	return (error);
}
705
706 /*
707 * Turns off quotas, assumes that ump->um_qflags are already checked
708 * and QTF_CLOSING is set to indicate operation in progress. Fixes
709 * ump->um_qflags and mp->mnt_flag after.
710 */
711 int
712 quotaoff_inchange(td, mp, type)
713 struct thread *td;
714 struct mount *mp;
715 int type;
716 {
717 struct ufsmount *ump;
718 int i;
719 int error;
720
721 error = quotaoff1(td, mp, type);
722
723 ump = VFSTOUFS(mp);
724 UFS_LOCK(ump);
725 ump->um_qflags[type] &= ~QTF_CLOSING;
726 for (i = 0; i < MAXQUOTAS; i++)
727 if (ump->um_quotas[i] != NULLVP)
728 break;
729 if (i == MAXQUOTAS) {
730 MNT_ILOCK(mp);
731 mp->mnt_flag &= ~MNT_QUOTA;
732 MNT_IUNLOCK(mp);
733 }
734 UFS_UNLOCK(ump);
735 return (error);
736 }
737
738 /*
739 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
740 */
741 int
742 quotaoff(td, mp, type)
743 struct thread *td;
744 struct mount *mp;
745 int type;
746 {
747 struct ufsmount *ump;
748 int error;
749
750 error = priv_check(td, PRIV_UFS_QUOTAOFF);
751 if (error)
752 return (error);
753
754 ump = VFSTOUFS(mp);
755 UFS_LOCK(ump);
756 if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
757 UFS_UNLOCK(ump);
758 return (EALREADY);
759 }
760 ump->um_qflags[type] |= QTF_CLOSING;
761 UFS_UNLOCK(ump);
762
763 return (quotaoff_inchange(td, mp, type));
764 }
765
766 /*
767 * Q_GETQUOTA - return current values in a dqblk structure.
768 */
769 int
770 getquota(td, mp, id, type, addr)
771 struct thread *td;
772 struct mount *mp;
773 u_long id;
774 int type;
775 void *addr;
776 {
777 struct dquot *dq;
778 int error;
779
780 switch (type) {
781 case USRQUOTA:
782 if ((td->td_ucred->cr_uid != id) && !unprivileged_get_quota) {
783 error = priv_check(td, PRIV_VFS_GETQUOTA);
784 if (error)
785 return (error);
786 }
787 break;
788
789 case GRPQUOTA:
790 if (!groupmember(id, td->td_ucred) &&
791 !unprivileged_get_quota) {
792 error = priv_check(td, PRIV_VFS_GETQUOTA);
793 if (error)
794 return (error);
795 }
796 break;
797
798 default:
799 return (EINVAL);
800 }
801
802 dq = NODQUOT;
803 error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
804 if (error)
805 return (error);
806 error = copyout(&dq->dq_dqb, addr, sizeof (struct dqblk));
807 dqrele(NULLVP, dq);
808 return (error);
809 }
810
811 /*
812 * Q_SETQUOTA - assign an entire dqblk structure.
813 */
814 int
815 setquota(td, mp, id, type, addr)
816 struct thread *td;
817 struct mount *mp;
818 u_long id;
819 int type;
820 void *addr;
821 {
822 struct dquot *dq;
823 struct dquot *ndq;
824 struct ufsmount *ump;
825 struct dqblk newlim;
826 int error;
827
828 error = priv_check(td, PRIV_VFS_SETQUOTA);
829 if (error)
830 return (error);
831
832 ump = VFSTOUFS(mp);
833 error = copyin(addr, &newlim, sizeof (struct dqblk));
834 if (error)
835 return (error);
836
837 ndq = NODQUOT;
838 ump = VFSTOUFS(mp);
839
840 error = dqget(NULLVP, id, ump, type, &ndq);
841 if (error)
842 return (error);
843 dq = ndq;
844 DQI_LOCK(dq);
845 DQI_WAIT(dq, PINOD+1, "setqta");
846 /*
847 * Copy all but the current values.
848 * Reset time limit if previously had no soft limit or were
849 * under it, but now have a soft limit and are over it.
850 */
851 newlim.dqb_curblocks = dq->dq_curblocks;
852 newlim.dqb_curinodes = dq->dq_curinodes;
853 if (dq->dq_id != 0) {
854 newlim.dqb_btime = dq->dq_btime;
855 newlim.dqb_itime = dq->dq_itime;
856 }
857 if (newlim.dqb_bsoftlimit &&
858 dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
859 (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
860 newlim.dqb_btime = time_second + ump->um_btime[type];
861 if (newlim.dqb_isoftlimit &&
862 dq->dq_curinodes >= newlim.dqb_isoftlimit &&
863 (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
864 newlim.dqb_itime = time_second + ump->um_itime[type];
865 dq->dq_dqb = newlim;
866 if (dq->dq_curblocks < dq->dq_bsoftlimit)
867 dq->dq_flags &= ~DQ_BLKS;
868 if (dq->dq_curinodes < dq->dq_isoftlimit)
869 dq->dq_flags &= ~DQ_INODS;
870 if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
871 dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
872 dq->dq_flags |= DQ_FAKE;
873 else
874 dq->dq_flags &= ~DQ_FAKE;
875 dq->dq_flags |= DQ_MOD;
876 DQI_UNLOCK(dq);
877 dqrele(NULLVP, dq);
878 return (0);
879 }
880
881 /*
882 * Q_SETUSE - set current inode and block usage.
883 */
884 int
885 setuse(td, mp, id, type, addr)
886 struct thread *td;
887 struct mount *mp;
888 u_long id;
889 int type;
890 void *addr;
891 {
892 struct dquot *dq;
893 struct ufsmount *ump;
894 struct dquot *ndq;
895 struct dqblk usage;
896 int error;
897
898 error = priv_check(td, PRIV_UFS_SETUSE);
899 if (error)
900 return (error);
901
902 ump = VFSTOUFS(mp);
903 error = copyin(addr, &usage, sizeof (struct dqblk));
904 if (error)
905 return (error);
906
907 ump = VFSTOUFS(mp);
908 ndq = NODQUOT;
909
910 error = dqget(NULLVP, id, ump, type, &ndq);
911 if (error)
912 return (error);
913 dq = ndq;
914 DQI_LOCK(dq);
915 DQI_WAIT(dq, PINOD+1, "setuse");
916 /*
917 * Reset time limit if have a soft limit and were
918 * previously under it, but are now over it.
919 */
920 if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
921 usage.dqb_curblocks >= dq->dq_bsoftlimit)
922 dq->dq_btime = time_second + ump->um_btime[type];
923 if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
924 usage.dqb_curinodes >= dq->dq_isoftlimit)
925 dq->dq_itime = time_second + ump->um_itime[type];
926 dq->dq_curblocks = usage.dqb_curblocks;
927 dq->dq_curinodes = usage.dqb_curinodes;
928 if (dq->dq_curblocks < dq->dq_bsoftlimit)
929 dq->dq_flags &= ~DQ_BLKS;
930 if (dq->dq_curinodes < dq->dq_isoftlimit)
931 dq->dq_flags &= ~DQ_INODS;
932 dq->dq_flags |= DQ_MOD;
933 DQI_UNLOCK(dq);
934 dqrele(NULLVP, dq);
935 return (0);
936 }
937
938 /*
939 * Q_SYNC - sync quota files to disk.
940 */
int
qsync(mp)
	struct mount *mp;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct thread *td = curthread;		/* XXX */
	struct vnode *vp, *mvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	UFS_LOCK(ump);
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	UFS_UNLOCK(ump);
	if (i == MAXQUOTAS)
		return (0);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	MNT_ILOCK(mp);
again:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
		if (error) {
			MNT_ILOCK(mp);
			/* ENOENT means the vnode was recycled: rescan. */
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto again;
			}
			continue;
		}
		/* Write out any modified dquots attached to this inode. */
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq != NODQUOT)
				dqsync(vp, dq);
		}
		vput(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (0);
}
996
997 /*
998 * Code pertaining to management of the in-core dquot data structures.
999 */
1000 #define DQHASH(dqvp, id) \
1001 (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
1002 static LIST_HEAD(dqhash, dquot) *dqhashtbl;
1003 static u_long dqhash;
1004
1005 /*
1006 * Dquot free list.
1007 */
1008 #define DQUOTINC 5 /* minimum free dquots desired */
1009 static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
1010 static long numdquot, desireddquot = DQUOTINC;
1011
1012 /*
1013 * Lock to protect quota hash, dq free list and dq_cnt ref counters of
1014 * _all_ dqs.
1015 */
1016 struct mtx dqhlock;
1017
1018 #define DQH_LOCK() mtx_lock(&dqhlock)
1019 #define DQH_UNLOCK() mtx_unlock(&dqhlock)
1020
1021 static struct dquot *dqhashfind(struct dqhash *dqh, u_long id,
1022 struct vnode *dqvp);
1023
1024 /*
1025 * Initialize the quota system.
1026 */
1027 void
1028 dqinit()
1029 {
1030
1031 mtx_init(&dqhlock, "dqhlock", NULL, MTX_DEF);
1032 dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
1033 TAILQ_INIT(&dqfreelist);
1034 }
1035
1036 /*
1037 * Shut down the quota system.
1038 */
1039 void
1040 dquninit()
1041 {
1042 struct dquot *dq;
1043
1044 hashdestroy(dqhashtbl, M_DQUOT, dqhash);
1045 while ((dq = TAILQ_FIRST(&dqfreelist)) != NULL) {
1046 TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
1047 mtx_destroy(&dq->dq_lock);
1048 free(dq, M_DQUOT);
1049 }
1050 mtx_destroy(&dqhlock);
1051 }
1052
1053 static struct dquot *
1054 dqhashfind(dqh, id, dqvp)
1055 struct dqhash *dqh;
1056 u_long id;
1057 struct vnode *dqvp;
1058 {
1059 struct dquot *dq;
1060
1061 mtx_assert(&dqhlock, MA_OWNED);
1062 LIST_FOREACH(dq, dqh, dq_hash) {
1063 if (dq->dq_id != id ||
1064 dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
1065 continue;
1066 /*
1067 * Cache hit with no references. Take
1068 * the structure off the free list.
1069 */
1070 if (dq->dq_cnt == 0)
1071 TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
1072 DQREF(dq);
1073 return (dq);
1074 }
1075 return (NODQUOT);
1076 }
1077
1078 /*
1079 * Obtain a dquot structure for the specified identifier and quota file
1080 * reading the information from the file if necessary.
1081 */
static int
dqget(vp, id, ump, type, dqp)
	struct vnode *vp;
	u_long id;
	struct ufsmount *ump;
	int type;
	struct dquot **dqp;
{
	struct thread *td = curthread;		/* XXX */
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int vfslocked, dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	/* The inode already holds a dquot reference; nothing to do. */
	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/* XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	/* Quotas must be enabled (and not closing) for this type. */
	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
		/*
		 * Wait for any in-progress read of this dquot from the
		 * quota file; a NULL dq_ump afterwards means that read
		 * failed and the entry is dead.
		 */
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY, td);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = (struct dquot *)malloc(sizeof *dq, M_DQUOT,
		    M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		/* Reuse the oldest unreferenced dquot, if any. */
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			VFS_UNLOCK_GIANT(vfslocked);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/* Read the on-disk dqblk for this id from the quota file. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = &dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)id * sizeof (struct dqblk);
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	/* A read entirely past EOF simply means a zeroed (empty) entry. */
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		bzero(&dq->dq_dqb, sizeof(struct dqblk));
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	VFS_UNLOCK_GIANT(vfslocked);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	/* Wake any thread that blocked on the DQ_LOCK set above. */
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
1292
#ifdef DIAGNOSTIC
/*
 * Obtain a reference to a dquot by bumping its reference count.
 * NOTE(review): callers appear to invoke this (via DQREF) while
 * holding the dquot hash lock — confirm before adding new callers.
 */
static void
dqref(struct dquot *dq)
{

	dq->dq_cnt++;
}
#endif
1305
/*
 * Release a reference to a dquot.
 *
 * When the last reference is dropped the dquot is synced to the
 * quota file and appended to the tail of the free list; it stays on
 * its hash chain so dqget() can revive it cheaply until it is reused.
 */
void
dqrele(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{

	if (dq == NODQUOT)
		return;
	DQH_LOCK();
	/* Fast path: other references remain, just drop ours. */
	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;
		DQH_UNLOCK();
		return;
	}
	DQH_UNLOCK();
sync:
	/* Last reference: push any modified quota data to the quota file. */
	(void) dqsync(vp, dq);

	DQH_LOCK();
	/* Someone may have re-referenced the dquot while we were syncing. */
	if (--dq->dq_cnt > 0)
	{
		DQH_UNLOCK();
		return;
	}

	/*
	 * The dq may become dirty after it is synced but before it is
	 * put to the free list. Checking the DQ_MOD there without
	 * locking dq should be safe since no other references to the
	 * dq exist.
	 */
	if ((dq->dq_flags & DQ_MOD) != 0) {
		/* Re-dirtied: take a reference back and sync again. */
		dq->dq_cnt++;
		DQH_UNLOCK();
		goto sync;
	}
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	DQH_UNLOCK();
}
1348
/*
 * Update the disk quota in the quota file.
 *
 * vp is the vnode on whose behalf the sync is done (may be the quota
 * vnode itself, in which case the caller already holds its lock);
 * dq is the dquot to write back.  Returns 0 on success, or the error
 * from VOP_WRITE (EIO on a short write).
 */
static int
dqsync(vp, dq)
	struct vnode *vp;
	struct dquot *dq;
{
	struct thread *td = curthread;		/* XXX */
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int vfslocked, error;
	struct mount *mp;
	struct ufsmount *ump;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULL)
		ASSERT_VOP_ELOCKED(vp, "dqsync");
#endif

	mp = NULL;
	error = 0;
	if (dq == NODQUOT)
		panic("dqsync: dquot");
	/* A dquot detached from its mount (e.g. by dqflush()) needs no sync. */
	if ((ump = dq->dq_ump) == NULL)
		return (0);
	UFS_LOCK(ump);
	if ((dqvp = ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dqsync: file");
	/* Hold the quota vnode across the write. */
	vref(dqvp);
	UFS_UNLOCK(ump);

	vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
	DQI_LOCK(dq);
	/* Nothing modified: no write needed. */
	if ((dq->dq_flags & DQ_MOD) == 0) {
		DQI_UNLOCK(dq);
		vrele(dqvp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (0);
	}
	DQI_UNLOCK(dq);

	(void) vn_start_secondary_write(dqvp, &mp, V_WAIT);
	/* Lock the quota vnode unless the caller already holds its lock. */
	if (vp != dqvp)
		vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, td);

	VFS_UNLOCK_GIANT(vfslocked);
	DQI_LOCK(dq);
	/* Wait out any in-flight I/O on this dquot, then recheck DQ_MOD. */
	DQI_WAIT(dq, PINOD+2, "dqsync");
	if ((dq->dq_flags & DQ_MOD) == 0)
		goto out;
	/* Mark the dquot busy while its dqblk is written out. */
	dq->dq_flags |= DQ_LOCK;
	DQI_UNLOCK(dq);

	/* Write the in-core dqblk back to its slot in the quota file. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = &dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)dq->dq_id * sizeof (struct dqblk);
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = (struct thread *)0;
	vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	VFS_UNLOCK_GIANT(vfslocked);
	/* A short write with no error code is still an I/O failure. */
	if (auio.uio_resid && error == 0)
		error = EIO;

	DQI_LOCK(dq);
	/* Drop DQ_LOCK, wake waiters, and mark the dquot clean. */
	DQI_WAKEUP(dq);
	dq->dq_flags &= ~DQ_MOD;
out:	DQI_UNLOCK(dq);
	vfslocked = VFS_LOCK_GIANT(dqvp->v_mount);
	/* vput if we locked dqvp above, otherwise just drop our hold. */
	if (vp != dqvp)
		vput(dqvp);
	else
		vrele(dqvp);
	vn_finished_secondary_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
1432
1433 /*
1434 * Flush all entries from the cache for a particular vnode.
1435 */
1436 static void
1437 dqflush(vp)
1438 struct vnode *vp;
1439 {
1440 struct dquot *dq, *nextdq;
1441 struct dqhash *dqh;
1442
1443 /*
1444 * Move all dquot's that used to refer to this quota
1445 * file off their hash chains (they will eventually
1446 * fall off the head of the free list and be re-used).
1447 */
1448 DQH_LOCK();
1449 for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
1450 for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
1451 nextdq = LIST_NEXT(dq, dq_hash);
1452 if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
1453 continue;
1454 if (dq->dq_cnt)
1455 panic("dqflush: stray dquot");
1456 LIST_REMOVE(dq, dq_hash);
1457 dq->dq_ump = (struct ufsmount *)0;
1458 }
1459 }
1460 DQH_UNLOCK();
1461 }
/* Cache object: 9452bce49e762b445fcef126a2180e9c */