FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_subr.c
1 /*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
39 * $FreeBSD: releng/5.0/sys/kern/vfs_subr.c 108561 2003-01-02 19:56:45Z phk $
40 */
41
42 /*
43 * External virtual filesystem routines
44 */
45 #include "opt_ddb.h"
46 #include "opt_mac.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/conf.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
58 #include <sys/mac.h>
59 #include <sys/malloc.h>
60 #include <sys/mount.h>
61 #include <sys/namei.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/vmmeter.h>
66 #include <sys/vnode.h>
67
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_page.h>
74 #include <vm/uma.h>
75
76 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
77
78 static void addalias(struct vnode *vp, dev_t nvp_rdev);
79 static void insmntque(struct vnode *vp, struct mount *mp);
80 static void vclean(struct vnode *vp, int flags, struct thread *td);
81 static void vlruvp(struct vnode *vp);
82 static int flushbuflist(struct buf *blist, int flags, struct vnode *vp,
83 int slpflag, int slptimeo, int *errorp);
84 static int vcanrecycle(struct vnode *vp, struct mount **vnmpp);
85
86
87 /*
88 * Number of vnodes in existence. Increased whenever getnewvnode()
89 * allocates a new vnode, never decreased.
90 */
91 static unsigned long numvnodes;
92
93 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
94
95 /*
96 * Conversion tables for conversion from vnode types to inode formats
97 * and back.
98 */
99 enum vtype iftovt_tab[16] = {
100 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
101 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
102 };
103 int vttoif_tab[9] = {
104 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
105 S_IFSOCK, S_IFIFO, S_IFMT,
106 };
107
108 /*
109 * List of vnodes that are ready for recycling.
110 */
111 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
112
113 /*
114 * Minimum number of free vnodes. If there are fewer than this many free vnodes,
115 * getnewvnode() will return a newly allocated vnode.
116 */
117 static u_long wantfreevnodes = 25;
118 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
119 /* Number of vnodes in the free list. */
120 static u_long freevnodes;
121 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
122
123 /*
124 * Various variables used for debugging the new implementation of
125 * reassignbuf().
126 * XXX these are probably of (very) limited utility now.
127 */
128 static int reassignbufcalls;
129 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
130 static int nameileafonly;
131 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
132
133 #ifdef ENABLE_VFS_IOOPT
134 /* See NOTES for a description of this setting. */
135 int vfs_ioopt;
136 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
137 #endif
138
139 /*
140 * Cache for the mount type id assigned to NFS. This is used for
141 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
142 */
143 int nfs_mount_type = -1;
144
145 /* To keep more than one thread at a time from running vfs_getnewfsid */
146 static struct mtx mntid_mtx;
147
148 /*
149 * Lock for any access to the following:
150 * vnode_free_list
151 * numvnodes
152 * freevnodes
153 */
154 static struct mtx vnode_free_list_mtx;
155
156 /*
157 * For any iteration/modification of dev->si_hlist (linked through
158 * v_specnext)
159 */
160 static struct mtx spechash_mtx;
161
162 /* Publicly exported FS */
163 struct nfs_public nfs_pub;
164
165 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
166 static uma_zone_t vnode_zone;
167 static uma_zone_t vnodepoll_zone;
168
169 /* Set to 1 to print out reclaim of active vnodes */
170 int prtactive;
171
172 /*
173 * The workitem queue.
174 *
175 * It is useful to delay writes of file data and filesystem metadata
176 * for tens of seconds so that quickly created and deleted files need
177 * not waste disk bandwidth being created and removed. To realize this,
178 * we append vnodes to a "workitem" queue. When running with a soft
179 * updates implementation, most pending metadata dependencies should
180 * not wait for more than a few seconds. Thus, mounted on block devices
181 * are delayed only about a half the time that file data is delayed.
182 * Similarly, directory updates are more critical, so are only delayed
183 * about a third the time that file data is delayed. Thus, there are
184 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
185 * one each second (driven off the filesystem syncer process). The
186 * syncer_delayno variable indicates the next queue that is to be processed.
187 * Items that need to be processed soon are placed in this queue:
188 *
189 * syncer_workitem_pending[syncer_delayno]
190 *
191 * A delay of fifteen seconds is done by placing the request fifteen
192 * entries later in the queue:
193 *
194 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
195 *
196 */
197 static int syncer_delayno;
198 static long syncer_mask;
199 LIST_HEAD(synclist, vnode);
200 static struct synclist *syncer_workitem_pending;
201 /*
202 * The sync_mtx protects:
203 * vp->v_synclist
204 * syncer_delayno
205 * syncer_workitem_pending
206 * rushjob
207 */
208 static struct mtx sync_mtx;
209
210 #define SYNCER_MAXDELAY 32
211 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
212 static int syncdelay = 30; /* max time to delay syncing data */
213 static int filedelay = 30; /* time to delay syncing files */
214 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
215 static int dirdelay = 29; /* time to delay syncing directories */
216 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
217 static int metadelay = 28; /* time to delay syncing metadata */
218 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
219 static int rushjob; /* number of slots to run ASAP */
220 static int stat_rush_requests; /* number of times I/O speeded up */
221 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
222
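/*
 * Illustrative sketch: how a requested delay maps onto a slot in the ring
 * of workitem queues described above.  The helper name is hypothetical and
 * unused; the real computation is done in vn_syncer_add_to_worklist() below.
 */
#if 0
static int
example_syncer_slot(int delayno, int delay, long mask)
{
        /*
         * With SYNCER_MAXDELAY (32) slots the mask is 31.  A vnode queued
         * with delay 15 while delayno is 20 lands in slot (20 + 15) & 31
         * == 3, i.e. it is visited 15 seconds from now as the ring wraps.
         */
        return ((delayno + delay) & mask);
}
#endif
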
223 /*
224 * Number of vnodes we want to exist at any one time. This is mostly used
225 * to size hash tables in vnode-related code. It is normally not used in
226 * getnewvnode(), as wantfreevnodes is normally nonzero.
227 *
228 * XXX desiredvnodes is historical cruft and should not exist.
229 */
230 int desiredvnodes;
231 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
232 &desiredvnodes, 0, "Maximum number of vnodes");
233 static int minvnodes;
234 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
235 &minvnodes, 0, "Minimum number of vnodes");
236 static int vnlru_nowhere;
237 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
238 "Number of times the vnlru process ran without success");
239
240 /* Hook for calling soft updates */
241 int (*softdep_process_worklist_hook)(struct mount *);
242
243 /*
244 * This only exists to suppress warnings from unlocked specfs accesses. It is
245 * no longer ok to have an unlocked VFS.
246 */
247 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
248
249 /* Print lock violations */
250 int vfs_badlock_print = 1;
251
252 /* Panic on violation */
253 int vfs_badlock_panic = 1;
254
255 /* Check for interlock across VOPs */
256 int vfs_badlock_mutex = 1;
257
258 static void
259 vfs_badlock(char *msg, char *str, struct vnode *vp)
260 {
261 if (vfs_badlock_print)
262 printf("%s: %p %s\n", str, vp, msg);
263 if (vfs_badlock_panic)
264 Debugger("Lock violation.\n");
265 }
266
267 void
268 assert_vi_unlocked(struct vnode *vp, char *str)
269 {
270 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
271 vfs_badlock("interlock is locked but should not be", str, vp);
272 }
273
274 void
275 assert_vi_locked(struct vnode *vp, char *str)
276 {
277 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
278 vfs_badlock("interlock is not locked but should be", str, vp);
279 }
280
281 void
282 assert_vop_locked(struct vnode *vp, char *str)
283 {
284 if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
285 vfs_badlock("is not locked but should be", str, vp);
286 }
287
288 void
289 assert_vop_unlocked(struct vnode *vp, char *str)
290 {
291 if (vp && !IGNORE_LOCK(vp) &&
292 VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
293 vfs_badlock("is locked but should not be", str, vp);
294 }
295
296 void
297 assert_vop_elocked(struct vnode *vp, char *str)
298 {
299 if (vp && !IGNORE_LOCK(vp) &&
300 VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
301 vfs_badlock("is not exclusive locked but should be", str, vp);
302 }
303
304 void
305 assert_vop_elocked_other(struct vnode *vp, char *str)
306 {
307 if (vp && !IGNORE_LOCK(vp) &&
308 VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
309 vfs_badlock("is not exclusive locked by another thread",
310 str, vp);
311 }
312
313 void
314 assert_vop_slocked(struct vnode *vp, char *str)
315 {
316 if (vp && !IGNORE_LOCK(vp) &&
317 VOP_ISLOCKED(vp, curthread) != LK_SHARED)
318 vfs_badlock("is not locked shared but should be", str, vp);
319 }
320
321 void
322 vop_rename_pre(void *ap)
323 {
324 struct vop_rename_args *a = ap;
325
326 if (a->a_tvp)
327 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
328 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
329 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
330 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
331
332 /* Check the source (from) */
333 if (a->a_tdvp != a->a_fdvp)
334 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
335 if (a->a_tvp != a->a_fvp)
336 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");
337
338 /* Check the target */
339 if (a->a_tvp)
340 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");
341
342 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
343 }
344
345 void
346 vop_strategy_pre(void *ap)
347 {
348 struct vop_strategy_args *a = ap;
349 struct buf *bp;
350
351 bp = a->a_bp;
352
353 /*
354 * Cluster ops lock their component buffers but not the IO container.
355 */
356 if ((bp->b_flags & B_CLUSTER) != 0)
357 return;
358
359 if (BUF_REFCNT(bp) < 1) {
360 if (vfs_badlock_print)
361 printf("VOP_STRATEGY: bp is not locked but should be.\n");
362 if (vfs_badlock_panic)
363 Debugger("Lock violation.\n");
364 }
365 }
366
367 void
368 vop_lookup_pre(void *ap)
369 {
370 struct vop_lookup_args *a = ap;
371 struct vnode *dvp;
372
373 dvp = a->a_dvp;
374
375 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
376 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
377 }
378
379 void
380 vop_lookup_post(void *ap, int rc)
381 {
382 struct vop_lookup_args *a = ap;
383 struct componentname *cnp;
384 struct vnode *dvp;
385 struct vnode *vp;
386 int flags;
387
388 dvp = a->a_dvp;
389 cnp = a->a_cnp;
390 vp = *(a->a_vpp);
391 flags = cnp->cn_flags;
392
393
394 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
395 /*
396 * If this is the last path component for this lookup and LOCKPARENT
397 * is set, OR if there is an error, the directory has to be locked.
398 */
399 if ((flags & LOCKPARENT) && (flags & ISLASTCN))
400 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
401 else if (rc != 0)
402 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
403 else if (dvp != vp)
404 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
405
406 if (flags & PDIRUNLOCK)
407 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
408 }
409
410 void
411 vop_unlock_pre(void *ap)
412 {
413 struct vop_unlock_args *a = ap;
414
415 if (a->a_flags & LK_INTERLOCK)
416 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
417
418 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
419 }
420
421 void
422 vop_unlock_post(void *ap, int rc)
423 {
424 struct vop_unlock_args *a = ap;
425
426 if (a->a_flags & LK_INTERLOCK)
427 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
428 }
429
430 void
431 vop_lock_pre(void *ap)
432 {
433 struct vop_lock_args *a = ap;
434
435 if ((a->a_flags & LK_INTERLOCK) == 0)
436 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
437 else
438 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
439 }
440
441 void
442 vop_lock_post(void *ap, int rc)
443 {
444 struct vop_lock_args *a;
445
446 a = ap;
447
448 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
449 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
450 }
451
452 void
453 v_addpollinfo(struct vnode *vp)
454 {
455 vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
456 mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
457 }
458
459 /*
460 * Initialize the vnode management data structures.
461 */
462 static void
463 vntblinit(void *dummy __unused)
464 {
465
466 desiredvnodes = maxproc + cnt.v_page_count / 4;
467 minvnodes = desiredvnodes / 4;
468 mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
469 mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
470 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
471 mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
472 TAILQ_INIT(&vnode_free_list);
473 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
474 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
475 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
476 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
477 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
478 /*
479 * Initialize the filesystem syncer.
480 */
481 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
482 &syncer_mask);
483 syncer_maxdelay = syncer_mask + 1;
484 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
485 }
486 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
487
488
489 /*
490 * Mark a mount point as busy. Used to synchronize access and to delay
491 * unmounting. Interlock is not released on failure.
492 */
493 int
494 vfs_busy(mp, flags, interlkp, td)
495 struct mount *mp;
496 int flags;
497 struct mtx *interlkp;
498 struct thread *td;
499 {
500 int lkflags;
501
502 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
503 if (flags & LK_NOWAIT)
504 return (ENOENT);
505 mp->mnt_kern_flag |= MNTK_MWAIT;
506 /*
507 * Since all busy locks are shared except the exclusive
508 * lock granted when unmounting, the only place that a
509 * wakeup needs to be done is at the release of the
510 * exclusive lock at the end of dounmount.
511 */
512 msleep(mp, interlkp, PVFS, "vfs_busy", 0);
513 return (ENOENT);
514 }
515 lkflags = LK_SHARED | LK_NOPAUSE;
516 if (interlkp)
517 lkflags |= LK_INTERLOCK;
518 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
519 panic("vfs_busy: unexpected lock failure");
520 return (0);
521 }
522
523 /*
524 * Free a busy filesystem.
525 */
526 void
527 vfs_unbusy(mp, td)
528 struct mount *mp;
529 struct thread *td;
530 {
531
532 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
533 }
534
535 /*
536 * Lookup a mount point by filesystem identifier.
537 */
538 struct mount *
539 vfs_getvfs(fsid)
540 fsid_t *fsid;
541 {
542 register struct mount *mp;
543
544 mtx_lock(&mountlist_mtx);
545 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
546 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
547 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
548 mtx_unlock(&mountlist_mtx);
549 return (mp);
550 }
551 }
552 mtx_unlock(&mountlist_mtx);
553 return ((struct mount *) 0);
554 }
555
556 /*
557 * Get a new unique fsid. Try to make its val[0] unique, since this value
558 * will be used to create fake device numbers for stat(). Also try (but
559 * not so hard) make its val[0] unique mod 2^16, since some emulators only
560 * support 16-bit device numbers. We end up with unique val[0]'s for the
561 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
562 *
563 * Keep in mind that several mounts may be running in parallel. Starting
564 * the search one past where the previous search terminated is both a
565 * micro-optimization and a defense against returning the same fsid to
566 * different mounts.
567 */
568 void
569 vfs_getnewfsid(mp)
570 struct mount *mp;
571 {
572 static u_int16_t mntid_base;
573 fsid_t tfsid;
574 int mtype;
575
576 mtx_lock(&mntid_mtx);
577 mtype = mp->mnt_vfc->vfc_typenum;
578 tfsid.val[1] = mtype;
579 mtype = (mtype & 0xFF) << 24;
580 for (;;) {
581 tfsid.val[0] = makeudev(255,
582 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
583 mntid_base++;
584 if (vfs_getvfs(&tfsid) == NULL)
585 break;
586 }
587 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
588 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
589 mtx_unlock(&mntid_mtx);
590 }
591
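/*
 * Illustrative sketch of the bit layout vfs_getnewfsid() above passes to
 * makeudev(); the helper name and the values are hypothetical and unused.
 */
#if 0
static u_int32_t
example_fsid_minor(void)
{
        u_int16_t mntid_base = 0x1234;  /* hypothetical counter value */
        int mtype = 5;                  /* hypothetical vfc_typenum */

        /*
         * Type in bits 24-31, high byte of the counter in bits 16-23,
         * low byte in bits 0-7:
         * 0x05000000 | 0x00120000 | 0x00000034 == 0x05120034.
         */
        return (((mtype & 0xFF) << 24) |
            ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
}
#endif
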
592 /*
593 * Knob to control the precision of file timestamps:
594 *
595 * 0 = seconds only; nanoseconds zeroed.
596 * 1 = seconds and nanoseconds, accurate within 1/HZ.
597 * 2 = seconds and nanoseconds, truncated to microseconds.
598 * >=3 = seconds and nanoseconds, maximum precision.
599 */
600 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
601
602 static int timestamp_precision = TSP_SEC;
603 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
604 &timestamp_precision, 0, "");
605
606 /*
607 * Get a current timestamp.
608 */
609 void
610 vfs_timestamp(tsp)
611 struct timespec *tsp;
612 {
613 struct timeval tv;
614
615 switch (timestamp_precision) {
616 case TSP_SEC:
617 tsp->tv_sec = time_second;
618 tsp->tv_nsec = 0;
619 break;
620 case TSP_HZ:
621 getnanotime(tsp);
622 break;
623 case TSP_USEC:
624 microtime(&tv);
625 TIMEVAL_TO_TIMESPEC(&tv, tsp);
626 break;
627 case TSP_NSEC:
628 default:
629 nanotime(tsp);
630 break;
631 }
632 }
633
634 /*
635 * Set vnode attributes to VNOVAL
636 */
637 void
638 vattr_null(vap)
639 register struct vattr *vap;
640 {
641
642 vap->va_type = VNON;
643 vap->va_size = VNOVAL;
644 vap->va_bytes = VNOVAL;
645 vap->va_mode = VNOVAL;
646 vap->va_nlink = VNOVAL;
647 vap->va_uid = VNOVAL;
648 vap->va_gid = VNOVAL;
649 vap->va_fsid = VNOVAL;
650 vap->va_fileid = VNOVAL;
651 vap->va_blocksize = VNOVAL;
652 vap->va_rdev = VNOVAL;
653 vap->va_atime.tv_sec = VNOVAL;
654 vap->va_atime.tv_nsec = VNOVAL;
655 vap->va_mtime.tv_sec = VNOVAL;
656 vap->va_mtime.tv_nsec = VNOVAL;
657 vap->va_ctime.tv_sec = VNOVAL;
658 vap->va_ctime.tv_nsec = VNOVAL;
659 vap->va_birthtime.tv_sec = VNOVAL;
660 vap->va_birthtime.tv_nsec = VNOVAL;
661 vap->va_flags = VNOVAL;
662 vap->va_gen = VNOVAL;
663 vap->va_vaflags = 0;
664 }
665
666 /*
667 * This routine is called when we have too many vnodes. It attempts
668 * to free <count> vnodes and will potentially free vnodes that still
669 * have VM backing store (VM backing store is typically the cause
670 * of a vnode blowout so we want to do this). Therefore, this operation
671 * is not considered cheap.
672 *
673 * A number of conditions may prevent a vnode from being reclaimed.
674 * The buffer cache may have references on the vnode, a directory
675 * vnode may still have references due to the namei cache representing
676 * underlying files, or the vnode may be in active use. It is not
677 * desirable to reuse such vnodes. These conditions may cause the
678 * number of vnodes to reach some minimum value regardless of what
679 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
680 */
681 static int
682 vlrureclaim(struct mount *mp, int count)
683 {
684 struct vnode *vp;
685 int done;
686 int trigger;
687 int usevnodes;
688
689 /*
690 * Calculate the trigger point, don't allow user
691 * screwups to blow us up. This prevents us from
692 * recycling vnodes with lots of resident pages. We
693 * aren't trying to free memory, we are trying to
694 * free vnodes.
695 */
696 usevnodes = desiredvnodes;
697 if (usevnodes <= 0)
698 usevnodes = 1;
699 trigger = cnt.v_page_count * 2 / usevnodes;
700
701 done = 0;
702 mtx_lock(&mntvnode_mtx);
703 while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
704 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
705 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
706
707 if (vp->v_type != VNON &&
708 vp->v_type != VBAD &&
709 VI_TRYLOCK(vp)) {
710 if (VMIGHTFREE(vp) && /* critical path opt */
711 (vp->v_object == NULL ||
712 vp->v_object->resident_page_count < trigger)) {
713 mtx_unlock(&mntvnode_mtx);
714 vgonel(vp, curthread);
715 done++;
716 mtx_lock(&mntvnode_mtx);
717 } else
718 VI_UNLOCK(vp);
719 }
720 --count;
721 }
722 mtx_unlock(&mntvnode_mtx);
723 return done;
724 }
725
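/*
 * Illustrative sketch of the resident-page trigger computed by
 * vlrureclaim() above; the helper name and the numbers are hypothetical.
 */
#if 0
static int
example_vlru_trigger(void)
{
        int page_count = 262144;        /* e.g. cnt.v_page_count for 1GB of 4K pages */
        int usevnodes = 65536;          /* e.g. desiredvnodes */

        /*
         * 262144 * 2 / 65536 == 8: vnodes holding 8 or more resident
         * pages are skipped, since the goal is to recycle vnodes, not
         * to free memory.
         */
        return (page_count * 2 / usevnodes);
}
#endif
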
726 /*
727 * Attempt to recycle vnodes in a context that is always safe to block.
728 * Calling vlrureclaim() from the bowels of filesystem code has some
729 * interesting deadlock problems.
730 */
731 static struct proc *vnlruproc;
732 static int vnlruproc_sig;
733
734 static void
735 vnlru_proc(void)
736 {
737 struct mount *mp, *nmp;
738 int s;
739 int done, take;
740 struct proc *p = vnlruproc;
741 struct thread *td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
742
743 mtx_lock(&Giant);
744
745 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
746 SHUTDOWN_PRI_FIRST);
747
748 s = splbio();
749 for (;;) {
750 kthread_suspend_check(p);
751 mtx_lock(&vnode_free_list_mtx);
752 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
753 mtx_unlock(&vnode_free_list_mtx);
754 vnlruproc_sig = 0;
755 wakeup(&vnlruproc_sig);
756 tsleep(vnlruproc, PVFS, "vlruwt", hz);
757 continue;
758 }
759 mtx_unlock(&vnode_free_list_mtx);
760 done = 0;
761 mtx_lock(&mountlist_mtx);
762 take = 0;
763 TAILQ_FOREACH(mp, &mountlist, mnt_list)
764 take++;
765 take = desiredvnodes / (take * 10);
766 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
767 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
768 nmp = TAILQ_NEXT(mp, mnt_list);
769 continue;
770 }
771 done += vlrureclaim(mp, take);
772 mtx_lock(&mountlist_mtx);
773 nmp = TAILQ_NEXT(mp, mnt_list);
774 vfs_unbusy(mp, td);
775 }
776 mtx_unlock(&mountlist_mtx);
777 if (done == 0) {
778 #if 0
779 /* These messages are temporary debugging aids */
780 if (vnlru_nowhere < 5)
781 printf("vnlru process getting nowhere..\n");
782 else if (vnlru_nowhere == 5)
783 printf("vnlru process messages stopped.\n");
784 #endif
785 vnlru_nowhere++;
786 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
787 }
788 }
789 splx(s);
790 }
791
792 static struct kproc_desc vnlru_kp = {
793 "vnlru",
794 vnlru_proc,
795 &vnlruproc
796 };
797 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
798
799
800 /*
801 * Routines having to do with the management of the vnode table.
802 */
803
804 /*
805 * Check to see if a free vnode can be recycled. If it can,
806 * return it locked with the vn lock, but not interlock. Also
807 * get the vn_start_write lock. Otherwise indicate the error.
808 */
809 static int
810 vcanrecycle(struct vnode *vp, struct mount **vnmpp)
811 {
812 struct thread *td = curthread;
813 vm_object_t object;
814 int error;
815
816 /* Don't recycle if we can't get the interlock */
817 if (!VI_TRYLOCK(vp))
818 return (EWOULDBLOCK);
819
820 /* We should be able to immediately acquire this */
821 /* XXX This looks like it should panic if it fails */
822 if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
823 if (VOP_ISLOCKED(vp, td))
824 panic("vcanrecycle: locked vnode");
825 return (EWOULDBLOCK);
826 }
827
828 /*
829 * Don't recycle if its filesystem is being suspended.
830 */
831 if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
832 error = EBUSY;
833 goto done;
834 }
835
836 /*
837 * Don't recycle if we still have cached pages.
838 */
839 if (VOP_GETVOBJECT(vp, &object) == 0 &&
840 (object->resident_page_count ||
841 object->ref_count)) {
842 error = EBUSY;
843 goto done;
844 }
845 if (LIST_FIRST(&vp->v_cache_src)) {
846 /*
847 * note: nameileafonly sysctl is temporary,
848 * for debugging only, and will eventually be
849 * removed.
850 */
851 if (nameileafonly > 0) {
852 /*
853 * Do not reuse namei-cached directory
854 * vnodes that have cached
855 * subdirectories.
856 */
857 if (cache_leaf_test(vp) < 0) {
858 error = EISDIR;
859 goto done;
860 }
861 } else if (nameileafonly < 0 ||
862 vmiodirenable == 0) {
863 /*
864 * Do not reuse namei-cached directory
865 * vnodes if nameileafonly is -1 or
866 * if VMIO backing for directories is
867 * turned off (otherwise we reuse them
868 * too quickly).
869 */
870 error = EBUSY;
871 goto done;
872 }
873 }
874 return (0);
875 done:
876 VOP_UNLOCK(vp, 0, td);
877 return (error);
878 }
879
880 /*
881 * Return the next vnode from the free list.
882 */
883 int
884 getnewvnode(tag, mp, vops, vpp)
885 const char *tag;
886 struct mount *mp;
887 vop_t **vops;
888 struct vnode **vpp;
889 {
890 int s;
891 struct thread *td = curthread; /* XXX */
892 struct vnode *vp = NULL;
893 struct vpollinfo *pollinfo = NULL;
894 struct mount *vnmp;
895
896 s = splbio();
897 mtx_lock(&vnode_free_list_mtx);
898
899 /*
900 * Try to reuse vnodes if we hit the max. This situation only
901 * occurs in certain large-memory (2G+) situations. We cannot
902 * attempt to directly reclaim vnodes due to nasty recursion
903 * problems.
904 */
905 while (numvnodes - freevnodes > desiredvnodes) {
906 if (vnlruproc_sig == 0) {
907 vnlruproc_sig = 1; /* avoid unnecessary wakeups */
908 wakeup(vnlruproc);
909 }
910 mtx_unlock(&vnode_free_list_mtx);
911 tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
912 mtx_lock(&vnode_free_list_mtx);
913 }
914
915 /*
916 * Attempt to reuse a vnode already on the free list, allocating
917 * a new vnode if we can't find one or if we have not reached a
918 * good minimum for good LRU performance.
919 */
920
921 if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
922 int error;
923 int count;
924
925 for (count = 0; count < freevnodes; count++) {
926 vp = TAILQ_FIRST(&vnode_free_list);
927
928 KASSERT(vp->v_usecount == 0,
929 ("getnewvnode: free vnode isn't"));
930
931 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
932 /*
933 * We have to drop the free list mtx to avoid lock
934 * order reversals with interlock.
935 */
936 mtx_unlock(&vnode_free_list_mtx);
937 error = vcanrecycle(vp, &vnmp);
938 mtx_lock(&vnode_free_list_mtx);
939 if (error == 0)
940 break;
941 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
942 vp = NULL;
943 }
944 }
945 if (vp) {
946 freevnodes--;
947 mtx_unlock(&vnode_free_list_mtx);
948
949 cache_purge(vp);
950 VI_LOCK(vp);
951 vp->v_iflag |= VI_DOOMED;
952 vp->v_iflag &= ~VI_FREE;
953 if (vp->v_type != VBAD) {
954 VOP_UNLOCK(vp, 0, td);
955 vgonel(vp, td);
956 VI_LOCK(vp);
957 } else {
958 VOP_UNLOCK(vp, 0, td);
959 }
960 vn_finished_write(vnmp);
961
962 #ifdef INVARIANTS
963 {
964 if (vp->v_data)
965 panic("cleaned vnode isn't");
966 if (vp->v_numoutput)
967 panic("Clean vnode has pending I/O's");
968 if (vp->v_writecount != 0)
969 panic("Non-zero write count");
970 }
971 #endif
972 if ((pollinfo = vp->v_pollinfo) != NULL) {
973 /*
974 * To avoid lock order reversals, the call to
975 * uma_zfree() must be delayed until the vnode
976 * interlock is released.
977 */
978 vp->v_pollinfo = NULL;
979 }
980 #ifdef MAC
981 mac_destroy_vnode(vp);
982 #endif
983 vp->v_iflag = 0;
984 vp->v_vflag = 0;
985 vp->v_lastw = 0;
986 vp->v_lasta = 0;
987 vp->v_cstart = 0;
988 vp->v_clen = 0;
989 vp->v_socket = 0;
990 lockdestroy(vp->v_vnlock);
991 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
992 KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
993 KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
994 } else {
995 numvnodes++;
996 mtx_unlock(&vnode_free_list_mtx);
997
998 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
999 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
1000 VI_LOCK(vp);
1001 vp->v_dd = vp;
1002 vp->v_vnlock = &vp->v_lock;
1003 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
1004 cache_purge(vp);
1005 LIST_INIT(&vp->v_cache_src);
1006 TAILQ_INIT(&vp->v_cache_dst);
1007 }
1008
1009 TAILQ_INIT(&vp->v_cleanblkhd);
1010 TAILQ_INIT(&vp->v_dirtyblkhd);
1011 vp->v_type = VNON;
1012 vp->v_tag = tag;
1013 vp->v_op = vops;
1014 *vpp = vp;
1015 vp->v_usecount = 1;
1016 vp->v_data = 0;
1017 vp->v_cachedid = -1;
1018 VI_UNLOCK(vp);
1019 if (pollinfo != NULL) {
1020 mtx_destroy(&pollinfo->vpi_lock);
1021 uma_zfree(vnodepoll_zone, pollinfo);
1022 }
1023 #ifdef MAC
1024 mac_init_vnode(vp);
1025 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1026 mac_associate_vnode_singlelabel(mp, vp);
1027 #endif
1028 insmntque(vp, mp);
1029
1030 return (0);
1031 }
1032
1033 /*
1034 * Move a vnode from one mount queue to another.
1035 */
1036 static void
1037 insmntque(vp, mp)
1038 register struct vnode *vp;
1039 register struct mount *mp;
1040 {
1041
1042 mtx_lock(&mntvnode_mtx);
1043 /*
1044 * Delete from old mount point vnode list, if on one.
1045 */
1046 if (vp->v_mount != NULL)
1047 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
1048 /*
1049 * Insert into list of vnodes for the new mount point, if available.
1050 */
1051 if ((vp->v_mount = mp) == NULL) {
1052 mtx_unlock(&mntvnode_mtx);
1053 return;
1054 }
1055 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1056 mtx_unlock(&mntvnode_mtx);
1057 }
1058
1059 /*
1060 * Update outstanding I/O count and do wakeup if requested.
1061 */
1062 void
1063 vwakeup(bp)
1064 register struct buf *bp;
1065 {
1066 register struct vnode *vp;
1067
1068 bp->b_flags &= ~B_WRITEINPROG;
1069 if ((vp = bp->b_vp)) {
1070 VI_LOCK(vp);
1071 vp->v_numoutput--;
1072 if (vp->v_numoutput < 0)
1073 panic("vwakeup: neg numoutput");
1074 if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
1075 vp->v_iflag &= ~VI_BWAIT;
1076 wakeup(&vp->v_numoutput);
1077 }
1078 VI_UNLOCK(vp);
1079 }
1080 }
1081
1082 /*
1083 * Flush out and invalidate all buffers associated with a vnode.
1084 * Called with the underlying object locked.
1085 */
1086 int
1087 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
1088 struct vnode *vp;
1089 int flags;
1090 struct ucred *cred;
1091 struct thread *td;
1092 int slpflag, slptimeo;
1093 {
1094 struct buf *blist;
1095 int s, error;
1096 vm_object_t object;
1097
1098 GIANT_REQUIRED;
1099
1100 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1101
1102 VI_LOCK(vp);
1103 if (flags & V_SAVE) {
1104 s = splbio();
1105 while (vp->v_numoutput) {
1106 vp->v_iflag |= VI_BWAIT;
1107 error = msleep(&vp->v_numoutput, VI_MTX(vp),
1108 slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
1109 if (error) {
1110 VI_UNLOCK(vp);
1111 splx(s);
1112 return (error);
1113 }
1114 }
1115 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1116 splx(s);
1117 VI_UNLOCK(vp);
1118 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
1119 return (error);
1120 /*
1121 * XXX We could save a lock/unlock if this was only
1122 * enabled under INVARIANTS
1123 */
1124 VI_LOCK(vp);
1125 s = splbio();
1126 if (vp->v_numoutput > 0 ||
1127 !TAILQ_EMPTY(&vp->v_dirtyblkhd))
1128 panic("vinvalbuf: dirty bufs");
1129 }
1130 splx(s);
1131 }
1132 s = splbio();
1133 /*
1134 * If you alter this loop please notice that interlock is dropped and
1135 * reacquired in flushbuflist. Special care is needed to ensure that
1136 * no race conditions occur from this.
1137 */
1138 for (error = 0;;) {
1139 if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
1140 flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1141 if (error)
1142 break;
1143 continue;
1144 }
1145 if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
1146 flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1147 if (error)
1148 break;
1149 continue;
1150 }
1151 break;
1152 }
1153 if (error) {
1154 splx(s);
1155 VI_UNLOCK(vp);
1156 return (error);
1157 }
1158
1159 /*
1160 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1161 * have write I/O in-progress but if there is a VM object then the
1162 * VM object can also have read-I/O in-progress.
1163 */
1164 do {
1165 while (vp->v_numoutput > 0) {
1166 vp->v_iflag |= VI_BWAIT;
1167 msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1168 }
1169 VI_UNLOCK(vp);
1170 if (VOP_GETVOBJECT(vp, &object) == 0) {
1171 while (object->paging_in_progress)
1172 vm_object_pip_sleep(object, "vnvlbx");
1173 }
1174 VI_LOCK(vp);
1175 } while (vp->v_numoutput > 0);
1176 VI_UNLOCK(vp);
1177
1178 splx(s);
1179
1180 /*
1181 * Destroy the copy in the VM cache, too.
1182 */
1183 if (VOP_GETVOBJECT(vp, &object) == 0) {
1184 vm_object_page_remove(object, 0, 0,
1185 (flags & V_SAVE) ? TRUE : FALSE);
1186 }
1187
1188 #ifdef INVARIANTS
1189 VI_LOCK(vp);
1190 if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1191 (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1192 !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1193 panic("vinvalbuf: flush failed");
1194 VI_UNLOCK(vp);
1195 #endif
1196 return (0);
1197 }
1198
1199 /*
1200 * Flush out buffers on the specified list.
1201 *
1202 */
1203 static int
1204 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1205 struct buf *blist;
1206 int flags;
1207 struct vnode *vp;
1208 int slpflag, slptimeo;
1209 int *errorp;
1210 {
1211 struct buf *bp, *nbp;
1212 int found, error;
1213
1214 ASSERT_VI_LOCKED(vp, "flushbuflist");
1215
1216 for (found = 0, bp = blist; bp; bp = nbp) {
1217 nbp = TAILQ_NEXT(bp, b_vnbufs);
1218 VI_UNLOCK(vp);
1219 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1220 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1221 VI_LOCK(vp);
1222 continue;
1223 }
1224 found += 1;
1225 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1226 error = BUF_TIMELOCK(bp,
1227 LK_EXCLUSIVE | LK_SLEEPFAIL,
1228 "flushbuf", slpflag, slptimeo);
1229 if (error != ENOLCK)
1230 *errorp = error;
1231 goto done;
1232 }
1233 /*
1234 * XXX Since there are no node locks for NFS, I
1235 * believe there is a slight chance that a delayed
1236 * write will occur while sleeping just above, so
1237 * check for it. Note that vfs_bio_awrite expects
1238 * buffers to reside on a queue, while BUF_WRITE and
1239 * brelse do not.
1240 */
1241 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1242 (flags & V_SAVE)) {
1243
1244 if (bp->b_vp == vp) {
1245 if (bp->b_flags & B_CLUSTEROK) {
1246 BUF_UNLOCK(bp);
1247 vfs_bio_awrite(bp);
1248 } else {
1249 bremfree(bp);
1250 bp->b_flags |= B_ASYNC;
1251 BUF_WRITE(bp);
1252 }
1253 } else {
1254 bremfree(bp);
1255 (void) BUF_WRITE(bp);
1256 }
1257 goto done;
1258 }
1259 bremfree(bp);
1260 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1261 bp->b_flags &= ~B_ASYNC;
1262 brelse(bp);
1263 VI_LOCK(vp);
1264 }
1265 return (found);
1266 done:
1267 VI_LOCK(vp);
1268 return (found);
1269 }
1270
1271 /*
1272 * Truncate a file's buffer and pages to a specified length. This
1273 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1274 * sync activity.
1275 */
1276 int
1277 vtruncbuf(vp, cred, td, length, blksize)
1278 register struct vnode *vp;
1279 struct ucred *cred;
1280 struct thread *td;
1281 off_t length;
1282 int blksize;
1283 {
1284 register struct buf *bp;
1285 struct buf *nbp;
1286 int s, anyfreed;
1287 int trunclbn;
1288
1289 /*
1290 * Round up to the *next* lbn.
1291 */
1292 trunclbn = (length + blksize - 1) / blksize;
1293
1294 s = splbio();
1295 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1296 restart:
1297 VI_LOCK(vp);
1298 anyfreed = 1;
1299 for (;anyfreed;) {
1300 anyfreed = 0;
1301 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1302 nbp = TAILQ_NEXT(bp, b_vnbufs);
1303 VI_UNLOCK(vp);
1304 if (bp->b_lblkno >= trunclbn) {
1305 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1306 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1307 goto restart;
1308 } else {
1309 bremfree(bp);
1310 bp->b_flags |= (B_INVAL | B_RELBUF);
1311 bp->b_flags &= ~B_ASYNC;
1312 brelse(bp);
1313 anyfreed = 1;
1314 }
1315 if (nbp &&
1316 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1317 (nbp->b_vp != vp) ||
1318 (nbp->b_flags & B_DELWRI))) {
1319 goto restart;
1320 }
1321 }
1322 VI_LOCK(vp);
1323 }
1324
1325 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1326 nbp = TAILQ_NEXT(bp, b_vnbufs);
1327 VI_UNLOCK(vp);
1328 if (bp->b_lblkno >= trunclbn) {
1329 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1330 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1331 goto restart;
1332 } else {
1333 bremfree(bp);
1334 bp->b_flags |= (B_INVAL | B_RELBUF);
1335 bp->b_flags &= ~B_ASYNC;
1336 brelse(bp);
1337 anyfreed = 1;
1338 }
1339 if (nbp &&
1340 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1341 (nbp->b_vp != vp) ||
1342 (nbp->b_flags & B_DELWRI) == 0)) {
1343 goto restart;
1344 }
1345 }
1346 VI_LOCK(vp);
1347 }
1348 }
1349
1350 if (length > 0) {
1351 restartsync:
1352 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1353 nbp = TAILQ_NEXT(bp, b_vnbufs);
1354 VI_UNLOCK(vp);
1355 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
1356 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1357 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1358 goto restart;
1359 } else {
1360 bremfree(bp);
1361 if (bp->b_vp == vp) {
1362 bp->b_flags |= B_ASYNC;
1363 } else {
1364 bp->b_flags &= ~B_ASYNC;
1365 }
1366 BUF_WRITE(bp);
1367 }
1368 VI_LOCK(vp);
1369 goto restartsync;
1370 }
1371 VI_LOCK(vp);
1372 }
1373 }
1374
1375 while (vp->v_numoutput > 0) {
1376 vp->v_iflag |= VI_BWAIT;
1377 msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1378 }
1379 VI_UNLOCK(vp);
1380 splx(s);
1381
1382 vnode_pager_setsize(vp, length);
1383
1384 return (0);
1385 }
1386
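/*
 * Illustrative sketch of the round-up vtruncbuf() above uses to find the
 * first logical block past the new length; the numbers are hypothetical.
 */
#if 0
static int
example_trunclbn(void)
{
        off_t length = 10000;           /* hypothetical new file length */
        int blksize = 4096;             /* hypothetical filesystem block size */

        /* (10000 + 4096 - 1) / 4096 == 3; buffers with b_lblkno >= 3 go away. */
        return ((length + blksize - 1) / blksize);
}
#endif
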
1387 /*
1388 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1389 * a vnode.
1390 *
1391 * NOTE: We have to deal with the special case of a background bitmap
1392 * buffer, a situation where two buffers will have the same logical
1393 * block offset. We want (1) only the foreground buffer to be accessed
1394 * in a lookup and (2) to differentiate between the foreground and
1395 * background buffer in the splay tree algorithm because the splay
1396 * tree cannot normally handle multiple entities with the same 'index'.
1397 * We accomplish this by adding differentiating flags to the splay tree's
1398 * numerical domain.
1399 */
1400 static
1401 struct buf *
1402 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1403 {
1404 struct buf dummy;
1405 struct buf *lefttreemax, *righttreemin, *y;
1406
1407 if (root == NULL)
1408 return (NULL);
1409 lefttreemax = righttreemin = &dummy;
1410 for (;;) {
1411 if (lblkno < root->b_lblkno ||
1412 (lblkno == root->b_lblkno &&
1413 (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1414 if ((y = root->b_left) == NULL)
1415 break;
1416 if (lblkno < y->b_lblkno) {
1417 /* Rotate right. */
1418 root->b_left = y->b_right;
1419 y->b_right = root;
1420 root = y;
1421 if ((y = root->b_left) == NULL)
1422 break;
1423 }
1424 /* Link into the new root's right tree. */
1425 righttreemin->b_left = root;
1426 righttreemin = root;
1427 } else if (lblkno > root->b_lblkno ||
1428 (lblkno == root->b_lblkno &&
1429 (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1430 if ((y = root->b_right) == NULL)
1431 break;
1432 if (lblkno > y->b_lblkno) {
1433 /* Rotate left. */
1434 root->b_right = y->b_left;
1435 y->b_left = root;
1436 root = y;
1437 if ((y = root->b_right) == NULL)
1438 break;
1439 }
1440 /* Link into the new root's left tree. */
1441 lefttreemax->b_right = root;
1442 lefttreemax = root;
1443 } else {
1444 break;
1445 }
1446 root = y;
1447 }
1448 /* Assemble the new root. */
1449 lefttreemax->b_right = root->b_left;
1450 righttreemin->b_left = root->b_right;
1451 root->b_left = dummy.b_right;
1452 root->b_right = dummy.b_left;
1453 return (root);
1454 }
1455
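/*
 * Illustrative sketch: the effective key comparison buf_splay() performs,
 * written out as a standalone compare function with a hypothetical name.
 */
#if 0
static int
example_buf_key_cmp(daddr_t lblkno, b_xflags_t xflags, struct buf *bp)
{
        /* Primary key: the logical block number. */
        if (lblkno < bp->b_lblkno)
                return (-1);
        if (lblkno > bp->b_lblkno)
                return (1);
        /*
         * Tie-break on BX_BKGRDMARKER: a background bitmap buffer sorts
         * after the foreground buffer at the same lblkno, so a lookup
         * with xflags == 0 never lands on the shadow copy.
         */
        if ((xflags & BX_BKGRDMARKER) < (bp->b_xflags & BX_BKGRDMARKER))
                return (-1);
        if ((xflags & BX_BKGRDMARKER) > (bp->b_xflags & BX_BKGRDMARKER))
                return (1);
        return (0);
}
#endif
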
1456 static
1457 void
1458 buf_vlist_remove(struct buf *bp)
1459 {
1460 struct vnode *vp = bp->b_vp;
1461 struct buf *root;
1462
1463 ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1464 if (bp->b_xflags & BX_VNDIRTY) {
1465 if (bp != vp->v_dirtyblkroot) {
1466 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1467 KASSERT(root == bp, ("splay lookup failed during dirty remove"));
1468 }
1469 if (bp->b_left == NULL) {
1470 root = bp->b_right;
1471 } else {
1472 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1473 root->b_right = bp->b_right;
1474 }
1475 vp->v_dirtyblkroot = root;
1476 TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1477 } else {
1478 /* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1479 if (bp != vp->v_cleanblkroot) {
1480 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1481 KASSERT(root == bp, ("splay lookup failed during clean remove"));
1482 }
1483 if (bp->b_left == NULL) {
1484 root = bp->b_right;
1485 } else {
1486 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1487 root->b_right = bp->b_right;
1488 }
1489 vp->v_cleanblkroot = root;
1490 TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1491 }
1492 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1493 }
1494
1495 /*
1496 * Add the buffer to the sorted clean or dirty block list using a
1497 * splay tree algorithm.
1498 *
1499 * NOTE: xflags is passed as a constant, optimizing this inline function!
1500 */
1501 static
1502 void
1503 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1504 {
1505 struct buf *root;
1506
1507 ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1508 bp->b_xflags |= xflags;
1509 if (xflags & BX_VNDIRTY) {
1510 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1511 if (root == NULL) {
1512 bp->b_left = NULL;
1513 bp->b_right = NULL;
1514 TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1515 } else if (bp->b_lblkno < root->b_lblkno ||
1516 (bp->b_lblkno == root->b_lblkno &&
1517 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1518 bp->b_left = root->b_left;
1519 bp->b_right = root;
1520 root->b_left = NULL;
1521 TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1522 } else {
1523 bp->b_right = root->b_right;
1524 bp->b_left = root;
1525 root->b_right = NULL;
1526 TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1527 root, bp, b_vnbufs);
1528 }
1529 vp->v_dirtyblkroot = bp;
1530 } else {
1531 /* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1532 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1533 if (root == NULL) {
1534 bp->b_left = NULL;
1535 bp->b_right = NULL;
1536 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1537 } else if (bp->b_lblkno < root->b_lblkno ||
1538 (bp->b_lblkno == root->b_lblkno &&
1539 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1540 bp->b_left = root->b_left;
1541 bp->b_right = root;
1542 root->b_left = NULL;
1543 TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1544 } else {
1545 bp->b_right = root->b_right;
1546 bp->b_left = root;
1547 root->b_right = NULL;
1548 TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1549 root, bp, b_vnbufs);
1550 }
1551 vp->v_cleanblkroot = bp;
1552 }
1553 }
1554
1555 #ifndef USE_BUFHASH
1556
1557 /*
1558 * Lookup a buffer using the splay tree. Note that we specifically avoid
1559 * shadow buffers used in background bitmap writes.
1560 *
1561 * This code isn't quite as efficient as it could be because we are maintaining
1562 * two sorted lists and do not know which list the block resides in.
1563 */
1564 struct buf *
1565 gbincore(struct vnode *vp, daddr_t lblkno)
1566 {
1567 struct buf *bp;
1568
1569 GIANT_REQUIRED;
1570
1571 ASSERT_VI_LOCKED(vp, "gbincore");
1572 bp = vp->v_cleanblkroot = buf_splay(lblkno, 0, vp->v_cleanblkroot);
1573 if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1574 return(bp);
1575 bp = vp->v_dirtyblkroot = buf_splay(lblkno, 0, vp->v_dirtyblkroot);
1576 if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1577 return(bp);
1578 return(NULL);
1579 }
1580
1581 #endif
1582
1583 /*
1584 * Associate a buffer with a vnode.
1585 */
1586 void
1587 bgetvp(vp, bp)
1588 register struct vnode *vp;
1589 register struct buf *bp;
1590 {
1591 int s;
1592
1593 KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1594
1595 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1596 ("bgetvp: bp already attached! %p", bp));
1597
1598 VI_LOCK(vp);
1599 vholdl(vp);
1600 bp->b_vp = vp;
1601 bp->b_dev = vn_todev(vp);
1602 /*
1603 * Insert onto list for new vnode.
1604 */
1605 s = splbio();
1606 buf_vlist_add(bp, vp, BX_VNCLEAN);
1607 splx(s);
1608 VI_UNLOCK(vp);
1609 }
1610
1611 /*
1612 * Disassociate a buffer from a vnode.
1613 */
1614 void
1615 brelvp(bp)
1616 register struct buf *bp;
1617 {
1618 struct vnode *vp;
1619 int s;
1620
1621 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1622
1623 /*
1624 * Delete from old vnode list, if on one.
1625 */
1626 vp = bp->b_vp;
1627 s = splbio();
1628 VI_LOCK(vp);
1629 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1630 buf_vlist_remove(bp);
1631 if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1632 vp->v_iflag &= ~VI_ONWORKLST;
1633 mtx_lock(&sync_mtx);
1634 LIST_REMOVE(vp, v_synclist);
1635 mtx_unlock(&sync_mtx);
1636 }
1637 vdropl(vp);
1638 VI_UNLOCK(vp);
1639 bp->b_vp = (struct vnode *) 0;
1640 if (bp->b_object)
1641 bp->b_object = NULL;
1642 splx(s);
1643 }
1644
1645 /*
1646 * Add an item to the syncer work queue.
1647 */
1648 static void
1649 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1650 {
1651 int s, slot;
1652
1653 s = splbio();
1654 ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1655
1656 mtx_lock(&sync_mtx);
1657 if (vp->v_iflag & VI_ONWORKLST)
1658 LIST_REMOVE(vp, v_synclist);
1659 else
1660 vp->v_iflag |= VI_ONWORKLST;
1661
1662 if (delay > syncer_maxdelay - 2)
1663 delay = syncer_maxdelay - 2;
1664 slot = (syncer_delayno + delay) & syncer_mask;
1665
1666 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1667 mtx_unlock(&sync_mtx);
1668
1669 splx(s);
1670 }
1671
1672 struct proc *updateproc;
1673 static void sched_sync(void);
1674 static struct kproc_desc up_kp = {
1675 "syncer",
1676 sched_sync,
1677 &updateproc
1678 };
1679 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1680
1681 /*
1682 * System filesystem synchronizer daemon.
1683 */
1684 static void
1685 sched_sync(void)
1686 {
1687 struct synclist *slp;
1688 struct vnode *vp;
1689 struct mount *mp;
1690 long starttime;
1691 int s;
1692 struct thread *td = FIRST_THREAD_IN_PROC(updateproc); /* XXXKSE */
1693
1694 mtx_lock(&Giant);
1695
1696 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
1697 SHUTDOWN_PRI_LAST);
1698
1699 for (;;) {
1700 kthread_suspend_check(td->td_proc);
1701
1702 starttime = time_second;
1703
1704 /*
1705 * Push files whose dirty time has expired. Be careful
1706 * of interrupt race on slp queue.
1707 */
1708 s = splbio();
1709 mtx_lock(&sync_mtx);
1710 slp = &syncer_workitem_pending[syncer_delayno];
1711 syncer_delayno += 1;
1712 if (syncer_delayno == syncer_maxdelay)
1713 syncer_delayno = 0;
1714 splx(s);
1715
1716 while ((vp = LIST_FIRST(slp)) != NULL) {
1717 mtx_unlock(&sync_mtx);
1718 if (VOP_ISLOCKED(vp, NULL) == 0 &&
1719 vn_start_write(vp, &mp, V_NOWAIT) == 0) {
1720 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1721 (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1722 VOP_UNLOCK(vp, 0, td);
1723 vn_finished_write(mp);
1724 }
1725 s = splbio();
1726 mtx_lock(&sync_mtx);
1727 if (LIST_FIRST(slp) == vp) {
1728 mtx_unlock(&sync_mtx);
1729 /*
1730 * Note: VFS vnodes can remain on the
1731 * worklist too with no dirty blocks, but
1732 * since sync_fsync() moves it to a different
1733 * slot we are safe.
1734 */
1735 VI_LOCK(vp);
1736 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1737 !vn_isdisk(vp, NULL)) {
1738 panic("sched_sync: fsync failed "
1739 "vp %p tag %s", vp, vp->v_tag);
1740 }
1741 /*
1742 * Put us back on the worklist. The worklist
1743 * routine will remove us from our current
1744 * position and then add us back in at a later
1745 * position.
1746 */
1747 vn_syncer_add_to_worklist(vp, syncdelay);
1748 VI_UNLOCK(vp);
1749 mtx_lock(&sync_mtx);
1750 }
1751 splx(s);
1752 }
1753 mtx_unlock(&sync_mtx);
1754
1755 /*
1756 * Do soft update processing.
1757 */
1758 if (softdep_process_worklist_hook != NULL)
1759 (*softdep_process_worklist_hook)(NULL);
1760
1761 /*
1762 * The variable rushjob allows the kernel to speed up the
1763 * processing of the filesystem syncer process. A rushjob
1764 * value of N tells the filesystem syncer to process the next
1765 * N seconds worth of work on its queue ASAP. Currently rushjob
1766 * is used by the soft update code to speed up the filesystem
1767 * syncer process when the incore state is getting so far
1768 * ahead of the disk that the kernel memory pool is being
1769 * threatened with exhaustion.
1770 */
1771 mtx_lock(&sync_mtx);
1772 if (rushjob > 0) {
1773 rushjob -= 1;
1774 mtx_unlock(&sync_mtx);
1775 continue;
1776 }
1777 mtx_unlock(&sync_mtx);
1778 /*
1779 * If it has taken us less than a second to process the
1780 * current work, then wait. Otherwise start right over
1781 * again. We can still lose time if any single round
1782 * takes more than two seconds, but it does not really
1783 * matter as we are just trying to generally pace the
1784 * filesystem activity.
1785 */
1786 if (time_second == starttime)
1787 tsleep(&lbolt, PPAUSE, "syncer", 0);
1788 }
1789 }
1790
1791 /*
1792 * Request the syncer daemon to speed up its work.
1793 * We never push it to speed up more than half of its
1794 * normal turn time, otherwise it could take over the cpu.
1795 * XXXKSE only one update?
1796 */
1797 int
1798 speedup_syncer()
1799 {
1800 struct thread *td;
1801 int ret = 0;
1802
1803 td = FIRST_THREAD_IN_PROC(updateproc);
1804 mtx_lock_spin(&sched_lock);
1805 if (td->td_wchan == &lbolt) {
1806 unsleep(td);
1807 TD_CLR_SLEEPING(td);
1808 setrunnable(td);
1809 }
1810 mtx_unlock_spin(&sched_lock);
1811 mtx_lock(&sync_mtx);
1812 if (rushjob < syncdelay / 2) {
1813 rushjob += 1;
1814 stat_rush_requests += 1;
1815 ret = 1;
1816 }
1817 mtx_unlock(&sync_mtx);
1818 return (ret);
1819 }
1820
1821 /*
1822 * Associate a p-buffer with a vnode.
1823 *
1824 * Also sets the B_PAGING flag to indicate that the buffer is not fully
1825 * associated with the vnode, i.e. the bp has not been linked into the vnode or
1826 * ref-counted.
1827 */
1828 void
1829 pbgetvp(vp, bp)
1830 register struct vnode *vp;
1831 register struct buf *bp;
1832 {
1833
1834 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1835
1836 bp->b_vp = vp;
1837 bp->b_flags |= B_PAGING;
1838 bp->b_dev = vn_todev(vp);
1839 }
1840
1841 /*
1842 * Disassociate a p-buffer from a vnode.
1843 */
1844 void
1845 pbrelvp(bp)
1846 register struct buf *bp;
1847 {
1848
1849 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1850
1851 /* XXX REMOVE ME */
1852 VI_LOCK(bp->b_vp);
1853 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1854 panic(
1855 "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1856 bp,
1857 (int)bp->b_flags
1858 );
1859 }
1860 VI_UNLOCK(bp->b_vp);
1861 bp->b_vp = (struct vnode *) 0;
1862 bp->b_flags &= ~B_PAGING;
1863 }
1864
1865 /*
1866 * Reassign a buffer from one vnode to another.
1867 * Used to assign file specific control information
1868 * (indirect blocks) to the vnode to which they belong.
1869 */
1870 void
1871 reassignbuf(bp, newvp)
1872 register struct buf *bp;
1873 register struct vnode *newvp;
1874 {
1875 int delay;
1876 int s;
1877
1878 if (newvp == NULL) {
1879 printf("reassignbuf: NULL");
1880 return;
1881 }
1882 ++reassignbufcalls;
1883
1884 /*
1885 * B_PAGING flagged buffers cannot be reassigned because their vp
1886 * is not fully linked in.
1887 */
1888 if (bp->b_flags & B_PAGING)
1889 panic("cannot reassign paging buffer");
1890
1891 s = splbio();
1892 /*
1893 * Delete from old vnode list, if on one.
1894 */
1895 VI_LOCK(bp->b_vp);
1896 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1897 buf_vlist_remove(bp);
1898 if (bp->b_vp != newvp) {
1899 vdropl(bp->b_vp);
1900 bp->b_vp = NULL; /* for clarification */
1901 }
1902 }
1903 VI_UNLOCK(bp->b_vp);
1904 /*
1905 * If dirty, put on list of dirty buffers; otherwise insert onto list
1906 * of clean buffers.
1907 */
1908 VI_LOCK(newvp);
1909 if (bp->b_flags & B_DELWRI) {
1910 if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1911 switch (newvp->v_type) {
1912 case VDIR:
1913 delay = dirdelay;
1914 break;
1915 case VCHR:
1916 if (newvp->v_rdev->si_mountpoint != NULL) {
1917 delay = metadelay;
1918 break;
1919 }
1920 /* FALLTHROUGH */
1921 default:
1922 delay = filedelay;
1923 }
1924 vn_syncer_add_to_worklist(newvp, delay);
1925 }
1926 buf_vlist_add(bp, newvp, BX_VNDIRTY);
1927 } else {
1928 buf_vlist_add(bp, newvp, BX_VNCLEAN);
1929
1930 if ((newvp->v_iflag & VI_ONWORKLST) &&
1931 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1932 mtx_lock(&sync_mtx);
1933 LIST_REMOVE(newvp, v_synclist);
1934 mtx_unlock(&sync_mtx);
1935 newvp->v_iflag &= ~VI_ONWORKLST;
1936 }
1937 }
1938 if (bp->b_vp != newvp) {
1939 bp->b_vp = newvp;
1940 vholdl(bp->b_vp);
1941 }
1942 VI_UNLOCK(newvp);
1943 splx(s);
1944 }
1945
1946 /*
1947 * Create a vnode for a device.
1948 * Used for mounting the root filesystem.
1949 */
1950 int
1951 bdevvp(dev, vpp)
1952 dev_t dev;
1953 struct vnode **vpp;
1954 {
1955 register struct vnode *vp;
1956 struct vnode *nvp;
1957 int error;
1958
1959 if (dev == NODEV) {
1960 *vpp = NULLVP;
1961 return (ENXIO);
1962 }
1963 if (vfinddev(dev, VCHR, vpp))
1964 return (0);
1965 error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1966 if (error) {
1967 *vpp = NULLVP;
1968 return (error);
1969 }
1970 vp = nvp;
1971 vp->v_type = VCHR;
1972 addalias(vp, dev);
1973 *vpp = vp;
1974 return (0);
1975 }
1976
1977 static void
1978 v_incr_usecount(struct vnode *vp, int delta)
1979 {
1980 vp->v_usecount += delta;
1981 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1982 mtx_lock(&spechash_mtx);
1983 vp->v_rdev->si_usecount += delta;
1984 mtx_unlock(&spechash_mtx);
1985 }
1986 }
1987
1988 /*
1989 * Add vnode to the alias list hung off the dev_t.
1990 *
1991 * The reason for this gunk is that multiple vnodes can reference
1992 * the same physical device, so checking vp->v_usecount to see
1993 * how many users there are is inadequate; the v_usecounts of
1994 * all the aliased vnodes need to be accumulated. vcount() does that.
1995 */
1996 struct vnode *
1997 addaliasu(nvp, nvp_rdev)
1998 struct vnode *nvp;
1999 udev_t nvp_rdev;
2000 {
2001 struct vnode *ovp;
2002 vop_t **ops;
2003 dev_t dev;
2004
2005 if (nvp->v_type == VBLK)
2006 return (nvp);
2007 if (nvp->v_type != VCHR)
2008 panic("addaliasu on non-special vnode");
2009 dev = udev2dev(nvp_rdev, 0);
2010 /*
2011 * Check to see if we have a bdevvp vnode with no associated
2012 * filesystem. If so, we want to associate the filesystem of
2013 * the newly created vnode with the bdevvp vnode and
2014 * discard the newly created vnode rather than leaving the
2015 * bdevvp vnode lying around with no associated filesystem.
2016 */
2017 if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2018 addalias(nvp, dev);
2019 return (nvp);
2020 }
2021 /*
2022 * Discard unneeded vnode, but save its node specific data.
2023 * Note that if there is a lock, it is carried over in the
2024 * node specific data to the replacement vnode.
2025 */
2026 vref(ovp);
2027 ovp->v_data = nvp->v_data;
2028 ovp->v_tag = nvp->v_tag;
2029 nvp->v_data = NULL;
2030 lockdestroy(ovp->v_vnlock);
2031 lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2032 nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
2033 ops = ovp->v_op;
2034 ovp->v_op = nvp->v_op;
2035 if (VOP_ISLOCKED(nvp, curthread)) {
2036 VOP_UNLOCK(nvp, 0, curthread);
2037 vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2038 }
2039 nvp->v_op = ops;
2040 insmntque(ovp, nvp->v_mount);
2041 vrele(nvp);
2042 vgone(nvp);
2043 return (ovp);
2044 }
2045
2046 /* This is a local helper function that does the same as addaliasu, but for a
2047  * dev_t instead of a udev_t. */
2048 static void
2049 addalias(nvp, dev)
2050 struct vnode *nvp;
2051 dev_t dev;
2052 {
2053
2054 KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2055 nvp->v_rdev = dev;
2056 VI_LOCK(nvp);
2057 mtx_lock(&spechash_mtx);
2058 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2059 dev->si_usecount += nvp->v_usecount;
2060 mtx_unlock(&spechash_mtx);
2061 VI_UNLOCK(nvp);
2062 }
2063
2064 /*
2065 * Grab a particular vnode from the free list, increment its
2066 * reference count and lock it. The vnode lock bit is set if the
2067 * vnode is being eliminated in vgone. The process is awakened
2068 * when the transition is completed, and an error returned to
2069 * indicate that the vnode is no longer usable (possibly having
2070 * been changed to a new filesystem type).
2071 */
2072 int
2073 vget(vp, flags, td)
2074 register struct vnode *vp;
2075 int flags;
2076 struct thread *td;
2077 {
2078 int error;
2079
2080 /*
2081 * If the vnode is in the process of being cleaned out for
2082 * another use, we wait for the cleaning to finish and then
2083 * return failure. Cleaning is determined by checking that
2084 * the VI_XLOCK flag is set.
2085 */
2086 if ((flags & LK_INTERLOCK) == 0)
2087 VI_LOCK(vp);
2088 if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
2089 vp->v_iflag |= VI_XWANT;
2090 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2091 return (ENOENT);
2092 }
2093
2094 v_incr_usecount(vp, 1);
2095
2096 if (VSHOULDBUSY(vp))
2097 vbusy(vp);
2098 if (flags & LK_TYPE_MASK) {
2099 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2100 /*
2101 * must expand vrele here because we do not want
2102 * to call VOP_INACTIVE if the reference count
2103 * drops back to zero since it was never really
2104 * active. We must remove it from the free list
2105 * before sleeping so that multiple processes do
2106 * not try to recycle it.
2107 */
2108 VI_LOCK(vp);
2109 v_incr_usecount(vp, -1);
2110 if (VSHOULDFREE(vp))
2111 vfree(vp);
2112 else
2113 vlruvp(vp);
2114 VI_UNLOCK(vp);
2115 }
2116 return (error);
2117 }
2118 VI_UNLOCK(vp);
2119 return (0);
2120 }
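
/*
 * Usage sketch (illustration only): the common pattern is to busy and
 * lock a vnode with vget(), operate on it, then drop both the lock and
 * the reference with vput().
 */
static int
example_use_vnode(struct vnode *vp, struct thread *td)
{
	int error;

	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0)
		return (error);		/* e.g. ENOENT if being reclaimed */
	/* ... operate on the locked, referenced vnode ... */
	vput(vp);			/* unlock and release in one call */
	return (0);
}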
2121
2122 /*
2123 * Increase the reference count of a vnode.
2124 */
2125 void
2126 vref(struct vnode *vp)
2127 {
2128 VI_LOCK(vp);
2129 v_incr_usecount(vp, 1);
2130 VI_UNLOCK(vp);
2131 }
2132
2133 /*
2134 * Return reference count of a vnode.
2135 *
2136 * The results of this call are only guaranteed when some mechanism other
2137 * than the VI lock is used to stop other processes from gaining references
2138 * to the vnode. This may be the case if the caller holds the only reference.
2139 * This is also useful when stale data is acceptable as race conditions may
2140 * be accounted for by some other means.
2141 */
2142 int
2143 vrefcnt(struct vnode *vp)
2144 {
2145 int usecnt;
2146
2147 VI_LOCK(vp);
2148 usecnt = vp->v_usecount;
2149 VI_UNLOCK(vp);
2150
2151 return (usecnt);
2152 }
2153
2154
2155 /*
2156 * Vnode put/release.
2157 * If count drops to zero, call inactive routine and return to freelist.
2158 */
2159 void
2160 vrele(vp)
2161 struct vnode *vp;
2162 {
2163 struct thread *td = curthread; /* XXX */
2164
2165 KASSERT(vp != NULL, ("vrele: null vp"));
2166
2167 VI_LOCK(vp);
2168
2169 /* Skip this v_writecount check if we're going to panic below. */
2170 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2171 ("vrele: missed vn_close"));
2172
2173 if (vp->v_usecount > 1) {
2174
2175 v_incr_usecount(vp, -1);
2176 VI_UNLOCK(vp);
2177
2178 return;
2179 }
2180
2181 if (vp->v_usecount == 1) {
2182 v_incr_usecount(vp, -1);
2183 /*
2184 * We must call VOP_INACTIVE with the node locked.
2185 * If we are doing a vput, the node is already locked,
2186 * but, in the case of vrele, we must explicitly lock
2187 * the vnode before calling VOP_INACTIVE.
2188 */
2189 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
2190 VOP_INACTIVE(vp, td);
2191 VI_LOCK(vp);
2192 if (VSHOULDFREE(vp))
2193 vfree(vp);
2194 else
2195 vlruvp(vp);
2196 VI_UNLOCK(vp);
2197
2198 } else {
2199 #ifdef DIAGNOSTIC
2200 vprint("vrele: negative ref count", vp);
2201 #endif
2202 VI_UNLOCK(vp);
2203 panic("vrele: negative ref cnt");
2204 }
2205 }
2206
2207 /*
2208 * Release an already locked vnode.  This gives the same effect as
2209 * unlock+vrele(), but takes less time and avoids releasing and
2210 * re-acquiring the lock (as vrele() acquires the lock internally).
2211 */
2212 void
2213 vput(vp)
2214 struct vnode *vp;
2215 {
2216 struct thread *td = curthread; /* XXX */
2217
2218 GIANT_REQUIRED;
2219
2220 KASSERT(vp != NULL, ("vput: null vp"));
2221 VI_LOCK(vp);
2222 /* Skip this v_writecount check if we're going to panic below. */
2223 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2224 ("vput: missed vn_close"));
2225
2226 if (vp->v_usecount > 1) {
2227 v_incr_usecount(vp, -1);
2228 VOP_UNLOCK(vp, LK_INTERLOCK, td);
2229 return;
2230 }
2231
2232 if (vp->v_usecount == 1) {
2233 v_incr_usecount(vp, -1);
2234 /*
2235 * We must call VOP_INACTIVE with the node locked.
2236 * If we are doing a vput, the node is already locked,
2237 * so we just need to release the vnode mutex.
2238 */
2239 VI_UNLOCK(vp);
2240 VOP_INACTIVE(vp, td);
2241 VI_LOCK(vp);
2242 if (VSHOULDFREE(vp))
2243 vfree(vp);
2244 else
2245 vlruvp(vp);
2246 VI_UNLOCK(vp);
2247
2248 } else {
2249 #ifdef DIAGNOSTIC
2250 vprint("vput: negative ref count", vp);
2251 #endif
2252 panic("vput: negative ref cnt");
2253 }
2254 }
2255
2256 /*
2257 * Somebody doesn't want the vnode recycled.
2258 */
2259 void
2260 vhold(struct vnode *vp)
2261 {
2262 VI_LOCK(vp);
2263 vholdl(vp);
2264 VI_UNLOCK(vp);
2265 }
2266
2267 void
2268 vholdl(vp)
2269 register struct vnode *vp;
2270 {
2271 int s;
2272
2273 s = splbio();
2274 vp->v_holdcnt++;
2275 if (VSHOULDBUSY(vp))
2276 vbusy(vp);
2277 splx(s);
2278 }
2279
2280 /*
2281 * Note that there is one less holder who cares about this vnode.  vdrop() is
2282 * the opposite of vhold().
2283 */
2284 void
2285 vdrop(struct vnode *vp)
2286 {
2287 VI_LOCK(vp);
2288 vdropl(vp);
2289 VI_UNLOCK(vp);
2290 }
2291
2292 void
2293 vdropl(vp)
2294 register struct vnode *vp;
2295 {
2296 int s;
2297
2298 s = splbio();
2299 if (vp->v_holdcnt <= 0)
2300 panic("vdrop: holdcnt");
2301 vp->v_holdcnt--;
2302 if (VSHOULDFREE(vp))
2303 vfree(vp);
2304 else
2305 vlruvp(vp);
2306 splx(s);
2307 }
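
/*
 * Usage sketch (illustration only): vhold()/vdrop() keep a vnode from
 * being recycled across a blocking operation without taking a full
 * v_usecount reference.
 */
static void
example_hold_across_sleep(struct vnode *vp)
{

	vhold(vp);				/* pin the vnode */
	(void) tsleep(vp, PVFS, "exhold", hz);	/* some blocking operation */
	vdrop(vp);				/* allow recycling again */
}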
2308
2309 /*
2310 * Remove any vnodes in the vnode table belonging to mount point mp.
2311 *
2312 * If FORCECLOSE is not specified, there should not be any active ones,
2313 * return error if any are found (nb: this is a user error, not a
2314 * system error). If FORCECLOSE is specified, detach any active vnodes
2315 * that are found.
2316 *
2317 * If WRITECLOSE is set, only flush out regular file vnodes open for
2318 * writing.
2319 *
2320 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2321 *
2322 * `rootrefs' specifies the base reference count for the root vnode
2323 * of this filesystem. The root vnode is considered busy if its
2324 * v_usecount exceeds this value. On a successful return, vflush()
2325 * will call vrele() on the root vnode exactly rootrefs times.
2326 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2327 * be zero.
2328 */
2329 #ifdef DIAGNOSTIC
2330 static int busyprt = 0; /* print out busy vnodes */
2331 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2332 #endif
2333
2334 int
2335 vflush(mp, rootrefs, flags)
2336 struct mount *mp;
2337 int rootrefs;
2338 int flags;
2339 {
2340 struct thread *td = curthread; /* XXX */
2341 struct vnode *vp, *nvp, *rootvp = NULL;
2342 struct vattr vattr;
2343 int busy = 0, error;
2344
2345 if (rootrefs > 0) {
2346 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2347 ("vflush: bad args"));
2348 /*
2349 * Get the filesystem root vnode. We can vput() it
2350 * immediately, since with rootrefs > 0, it won't go away.
2351 */
2352 if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2353 return (error);
2354 vput(rootvp);
2355
2356 }
2357 mtx_lock(&mntvnode_mtx);
2358 loop:
2359 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2360 /*
2361 * Make sure this vnode wasn't reclaimed in getnewvnode().
2362 * Start over if it has (it won't be on the list anymore).
2363 */
2364 if (vp->v_mount != mp)
2365 goto loop;
2366 nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2367
2368 VI_LOCK(vp);
2369 mtx_unlock(&mntvnode_mtx);
2370 vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2371 /*
2372 * Skip over vnodes marked VV_SYSTEM.
2373 */
2374 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2375 VOP_UNLOCK(vp, 0, td);
2376 mtx_lock(&mntvnode_mtx);
2377 continue;
2378 }
2379 /*
2380 * If WRITECLOSE is set, flush out unlinked but still open
2381 * files (even if open only for reading) and regular file
2382 * vnodes open for writing.
2383 */
2384 if (flags & WRITECLOSE) {
2385 error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2386 VI_LOCK(vp);
2387
2388 if ((vp->v_type == VNON ||
2389 (error == 0 && vattr.va_nlink > 0)) &&
2390 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2391 VOP_UNLOCK(vp, LK_INTERLOCK, td);
2392 mtx_lock(&mntvnode_mtx);
2393 continue;
2394 }
2395 } else
2396 VI_LOCK(vp);
2397
2398 VOP_UNLOCK(vp, 0, td);
2399
2400 /*
2401 * With v_usecount == 0, all we need to do is clear out the
2402 * vnode data structures and we are done.
2403 */
2404 if (vp->v_usecount == 0) {
2405 vgonel(vp, td);
2406 mtx_lock(&mntvnode_mtx);
2407 continue;
2408 }
2409
2410 /*
2411 * If FORCECLOSE is set, forcibly close the vnode. For block
2412 * or character devices, revert to an anonymous device. For
2413 * all other files, just kill them.
2414 */
2415 if (flags & FORCECLOSE) {
2416 if (vp->v_type != VCHR) {
2417 vgonel(vp, td);
2418 } else {
2419 vclean(vp, 0, td);
2420 VI_UNLOCK(vp);
2421 vp->v_op = spec_vnodeop_p;
2422 insmntque(vp, (struct mount *) 0);
2423 }
2424 mtx_lock(&mntvnode_mtx);
2425 continue;
2426 }
2427 #ifdef DIAGNOSTIC
2428 if (busyprt)
2429 vprint("vflush: busy vnode", vp);
2430 #endif
2431 VI_UNLOCK(vp);
2432 mtx_lock(&mntvnode_mtx);
2433 busy++;
2434 }
2435 mtx_unlock(&mntvnode_mtx);
2436 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2437 /*
2438 * If just the root vnode is busy, and if its refcount
2439 * is equal to `rootrefs', then go ahead and kill it.
2440 */
2441 VI_LOCK(rootvp);
2442 KASSERT(busy > 0, ("vflush: not busy"));
2443 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2444 if (busy == 1 && rootvp->v_usecount == rootrefs) {
2445 vgonel(rootvp, td);
2446 busy = 0;
2447 } else
2448 VI_UNLOCK(rootvp);
2449 }
2450 if (busy)
2451 return (EBUSY);
2452 for (; rootrefs > 0; rootrefs--)
2453 vrele(rootvp);
2454 return (0);
2455 }
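
/*
 * Usage sketch (hypothetical example_unmount(), illustration only): an
 * unmount routine flushes the per-mount vnodes, forcibly if MNT_FORCE
 * was requested.  rootrefs is 0 here because the caller holds no extra
 * references on the root vnode.
 */
static int
example_unmount(struct mount *mp, int mntflags)
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, 0, flags));
}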
2456
2457 /*
2458 * This moves a now (likely recyclable) vnode to the end of the
2459 * mountlist. XXX However, it is temporarily disabled until we
2460 * can clean up ffs_sync() and friends, whose loop restart
2461 * conditions this code causes to operate in O(N^2) time.
2462 */
2463 static void
2464 vlruvp(struct vnode *vp)
2465 {
2466 #if 0
2467 struct mount *mp;
2468
2469 if ((mp = vp->v_mount) != NULL) {
2470 mtx_lock(&mntvnode_mtx);
2471 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2472 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2473 mtx_unlock(&mntvnode_mtx);
2474 }
2475 #endif
2476 }
2477
2478 /*
2479 * Disassociate the underlying filesystem from a vnode.
2480 */
2481 static void
2482 vclean(vp, flags, td)
2483 struct vnode *vp;
2484 int flags;
2485 struct thread *td;
2486 {
2487 int active;
2488
2489 ASSERT_VI_LOCKED(vp, "vclean");
2490 /*
2491 * Check to see if the vnode is in use. If so we have to reference it
2492 * before we clean it out so that its count cannot fall to zero and
2493 * generate a race against ourselves to recycle it.
2494 */
2495 if ((active = vp->v_usecount))
2496 v_incr_usecount(vp, 1);
2497
2498 /*
2499 * Prevent the vnode from being recycled or brought into use while we
2500 * clean it out.
2501 */
2502 if (vp->v_iflag & VI_XLOCK)
2503 panic("vclean: deadlock");
2504 vp->v_iflag |= VI_XLOCK;
2505 vp->v_vxproc = curthread;
2506 /*
2507 * Even if the count is zero, the VOP_INACTIVE routine may still
2508 * have the object locked while it cleans it out. The VOP_LOCK
2509 * ensures that the VOP_INACTIVE routine is done with its work.
2510 * For active vnodes, it ensures that no other activity can
2511 * occur while the underlying object is being cleaned out.
2512 */
2513 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2514
2515 /*
2516 * Clean out any buffers associated with the vnode.
2517 * If the flush fails, just toss the buffers.
2518 */
2519 if (flags & DOCLOSE) {
2520 struct buf *bp;
2521 VI_LOCK(vp);
2522 bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2523 VI_UNLOCK(vp);
2524 if (bp != NULL)
2525 (void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2526 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2527 vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2528 }
2529
2530 VOP_DESTROYVOBJECT(vp);
2531
2532 /*
2533 * Any other processes trying to obtain this lock must first
2534 * wait for VXLOCK to clear, then call the new lock operation.
2535 */
2536 VOP_UNLOCK(vp, 0, td);
2537
2538 /*
2539 * If purging an active vnode, it must be closed and
2540 * deactivated before being reclaimed. Note that the
2541 * VOP_INACTIVE will unlock the vnode.
2542 */
2543 if (active) {
2544 if (flags & DOCLOSE)
2545 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2546 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2547 panic("vclean: cannot relock.");
2548 VOP_INACTIVE(vp, td);
2549 }
2550
2551 /*
2552 * Reclaim the vnode.
2553 */
2554 if (VOP_RECLAIM(vp, td))
2555 panic("vclean: cannot reclaim");
2556
2557 if (active) {
2558 /*
2559 * Inline copy of vrele() since VOP_INACTIVE
2560 * has already been called.
2561 */
2562 VI_LOCK(vp);
2563 v_incr_usecount(vp, -1);
2564 if (vp->v_usecount <= 0) {
2565 #ifdef DIAGNOSTIC
2566 if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2567 vprint("vclean: bad ref count", vp);
2568 panic("vclean: ref cnt");
2569 }
2570 #endif
2571 vfree(vp);
2572 }
2573 VI_UNLOCK(vp);
2574 }
2575
2576 cache_purge(vp);
2577 VI_LOCK(vp);
2578 if (VSHOULDFREE(vp))
2579 vfree(vp);
2580
2581 /*
2582 * Done with purge, reset to the standard lock and
2583 * notify sleepers of the grim news.
2584 */
2585 vp->v_vnlock = &vp->v_lock;
2586 vp->v_op = dead_vnodeop_p;
2587 if (vp->v_pollinfo != NULL)
2588 vn_pollgone(vp);
2589 vp->v_tag = "none";
2590 vp->v_iflag &= ~VI_XLOCK;
2591 vp->v_vxproc = NULL;
2592 if (vp->v_iflag & VI_XWANT) {
2593 vp->v_iflag &= ~VI_XWANT;
2594 wakeup(vp);
2595 }
2596 }
2597
2598 /*
2599 * Eliminate all activity associated with the requested vnode
2600 * and with all vnodes aliased to the requested vnode.
2601 */
2602 int
2603 vop_revoke(ap)
2604 struct vop_revoke_args /* {
2605 struct vnode *a_vp;
2606 int a_flags;
2607 } */ *ap;
2608 {
2609 struct vnode *vp, *vq;
2610 dev_t dev;
2611
2612 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2613
2614 vp = ap->a_vp;
2615 VI_LOCK(vp);
2616 /*
2617 * If a vgone (or vclean) is already in progress,
2618 * wait until it is done and return.
2619 */
2620 if (vp->v_iflag & VI_XLOCK) {
2621 vp->v_iflag |= VI_XWANT;
2622 msleep(vp, VI_MTX(vp), PINOD | PDROP,
2623 "vop_revokeall", 0);
2624 return (0);
2625 }
2626 VI_UNLOCK(vp);
2627 dev = vp->v_rdev;
2628 for (;;) {
2629 mtx_lock(&spechash_mtx);
2630 vq = SLIST_FIRST(&dev->si_hlist);
2631 mtx_unlock(&spechash_mtx);
2632 if (!vq)
2633 break;
2634 vgone(vq);
2635 }
2636 return (0);
2637 }
2638
2639 /*
2640 * Recycle an unused vnode to the front of the free list.
2641 * Release the passed interlock if the vnode will be recycled.
2642 */
2643 int
2644 vrecycle(vp, inter_lkp, td)
2645 struct vnode *vp;
2646 struct mtx *inter_lkp;
2647 struct thread *td;
2648 {
2649
2650 VI_LOCK(vp);
2651 if (vp->v_usecount == 0) {
2652 if (inter_lkp) {
2653 mtx_unlock(inter_lkp);
2654 }
2655 vgonel(vp, td);
2656 return (1);
2657 }
2658 VI_UNLOCK(vp);
2659 return (0);
2660 }
2661
2662 /*
2663 * Eliminate all activity associated with a vnode
2664 * in preparation for reuse.
2665 */
2666 void
2667 vgone(vp)
2668 register struct vnode *vp;
2669 {
2670 struct thread *td = curthread; /* XXX */
2671
2672 VI_LOCK(vp);
2673 vgonel(vp, td);
2674 }
2675
2676 /*
2677 * vgone, with the vp interlock held.
2678 */
2679 void
2680 vgonel(vp, td)
2681 struct vnode *vp;
2682 struct thread *td;
2683 {
2684 int s;
2685
2686 /*
2687 * If a vgone (or vclean) is already in progress,
2688 * wait until it is done and return.
2689 */
2690 ASSERT_VI_LOCKED(vp, "vgonel");
2691 if (vp->v_iflag & VI_XLOCK) {
2692 vp->v_iflag |= VI_XWANT;
2693 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2694 return;
2695 }
2696
2697 /*
2698 * Clean out the filesystem specific data.
2699 */
2700 vclean(vp, DOCLOSE, td);
2701 VI_UNLOCK(vp);
2702
2703 /*
2704 * Delete from old mount point vnode list, if on one.
2705 */
2706 if (vp->v_mount != NULL)
2707 insmntque(vp, (struct mount *)0);
2708 /*
2709 * If special device, remove it from special device alias list
2710 * if it is on one.
2711 */
2712 if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2713 VI_LOCK(vp);
2714 mtx_lock(&spechash_mtx);
2715 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2716 vp->v_rdev->si_usecount -= vp->v_usecount;
2717 mtx_unlock(&spechash_mtx);
2718 VI_UNLOCK(vp);
2719 vp->v_rdev = NULL;
2720 }
2721
2722 /*
2723 * If it is on the freelist and not already at the head,
2724 * move it to the head of the list. The test of the
2725 * VDOOMED flag and the reference count of zero is because
2726 * it will be removed from the free list by getnewvnode,
2727 * but will not have its reference count incremented until
2728 * after calling vgone. If the reference count were
2729 * incremented first, vgone would (incorrectly) try to
2730 * close the previous instance of the underlying object.
2731 */
2732 VI_LOCK(vp);
2733 if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2734 s = splbio();
2735 mtx_lock(&vnode_free_list_mtx);
2736 if (vp->v_iflag & VI_FREE) {
2737 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2738 } else {
2739 vp->v_iflag |= VI_FREE;
2740 freevnodes++;
2741 }
2742 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2743 mtx_unlock(&vnode_free_list_mtx);
2744 splx(s);
2745 }
2746
2747 vp->v_type = VBAD;
2748 VI_UNLOCK(vp);
2749 }
2750
2751 /*
2752 * Lookup a vnode by device number.
2753 */
2754 int
2755 vfinddev(dev, type, vpp)
2756 dev_t dev;
2757 enum vtype type;
2758 struct vnode **vpp;
2759 {
2760 struct vnode *vp;
2761
2762 mtx_lock(&spechash_mtx);
2763 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2764 if (type == vp->v_type) {
2765 *vpp = vp;
2766 mtx_unlock(&spechash_mtx);
2767 return (1);
2768 }
2769 }
2770 mtx_unlock(&spechash_mtx);
2771 return (0);
2772 }
2773
2774 /*
2775 * Calculate the total number of references to a special device.
2776 */
2777 int
2778 vcount(vp)
2779 struct vnode *vp;
2780 {
2781 int count;
2782
2783 mtx_lock(&spechash_mtx);
2784 count = vp->v_rdev->si_usecount;
2785 mtx_unlock(&spechash_mtx);
2786 return (count);
2787 }
2788
2789 /*
2790 * Same as above, but using the dev_t as argument
2791 */
2792 int
2793 count_dev(dev)
2794 dev_t dev;
2795 {
2796 struct vnode *vp;
2797
2798 vp = SLIST_FIRST(&dev->si_hlist);
2799 if (vp == NULL)
2800 return (0);
2801 return(vcount(vp));
2802 }
2803
2804 /*
2805 * Print out a description of a vnode.
2806 */
2807 static char *typename[] =
2808 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2809
2810 void
2811 vprint(label, vp)
2812 char *label;
2813 struct vnode *vp;
2814 {
2815 char buf[96];
2816
2817 if (label != NULL)
2818 printf("%s: %p: ", label, (void *)vp);
2819 else
2820 printf("%p: ", (void *)vp);
2821 printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2822 vp->v_tag, typename[vp->v_type], vp->v_usecount,
2823 vp->v_writecount, vp->v_holdcnt);
2824 buf[0] = '\0';
2825 if (vp->v_vflag & VV_ROOT)
2826 strcat(buf, "|VV_ROOT");
2827 if (vp->v_vflag & VV_TEXT)
2828 strcat(buf, "|VV_TEXT");
2829 if (vp->v_vflag & VV_SYSTEM)
2830 strcat(buf, "|VV_SYSTEM");
2831 if (vp->v_iflag & VI_XLOCK)
2832 strcat(buf, "|VI_XLOCK");
2833 if (vp->v_iflag & VI_XWANT)
2834 strcat(buf, "|VI_XWANT");
2835 if (vp->v_iflag & VI_BWAIT)
2836 strcat(buf, "|VI_BWAIT");
2837 if (vp->v_iflag & VI_DOOMED)
2838 strcat(buf, "|VI_DOOMED");
2839 if (vp->v_iflag & VI_FREE)
2840 strcat(buf, "|VI_FREE");
2841 if (vp->v_vflag & VV_OBJBUF)
2842 strcat(buf, "|VV_OBJBUF");
2843 if (buf[0] != '\0')
2844 printf(" flags (%s),", &buf[1]);
2845 lockmgr_printinfo(vp->v_vnlock);
2846 printf("\n");
2847 if (vp->v_data != NULL) {
2848 printf("\t");
2849 VOP_PRINT(vp);
2850 }
2851 }
2852
2853 #ifdef DDB
2854 #include <ddb/ddb.h>
2855 /*
2856 * List all of the locked vnodes in the system.
2857 * Called when debugging the kernel.
2858 */
2859 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2860 {
2861 struct thread *td = curthread; /* XXX */
2862 struct mount *mp, *nmp;
2863 struct vnode *vp;
2864
2865 printf("Locked vnodes\n");
2866 mtx_lock(&mountlist_mtx);
2867 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2868 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
2869 nmp = TAILQ_NEXT(mp, mnt_list);
2870 continue;
2871 }
2872 mtx_lock(&mntvnode_mtx);
2873 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2874 if (VOP_ISLOCKED(vp, NULL))
2875 vprint((char *)0, vp);
2876 }
2877 mtx_unlock(&mntvnode_mtx);
2878 mtx_lock(&mountlist_mtx);
2879 nmp = TAILQ_NEXT(mp, mnt_list);
2880 vfs_unbusy(mp, td);
2881 }
2882 mtx_unlock(&mountlist_mtx);
2883 }
2884 #endif
2885
2886 /*
2887 * Fill in a struct xvfsconf based on a struct vfsconf.
2888 */
2889 static void
2890 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2891 {
2892
2893 strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2894 xvfsp->vfc_typenum = vfsp->vfc_typenum;
2895 xvfsp->vfc_refcount = vfsp->vfc_refcount;
2896 xvfsp->vfc_flags = vfsp->vfc_flags;
2897 /*
2898 * These are unused in userland; we keep them
2899 * so as not to break binary compatibility.
2900 */
2901 xvfsp->vfc_vfsops = NULL;
2902 xvfsp->vfc_next = NULL;
2903 }
2904
2905 static int
2906 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2907 {
2908 struct vfsconf *vfsp;
2909 struct xvfsconf *xvfsp;
2910 int cnt, error, i;
2911
2912 cnt = 0;
2913 for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2914 cnt++;
2915 xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2916 /*
2917 * Handle the race that will exist here once struct vfsconf
2918 * is locked down, by using both cnt and a check of vfc_next
2919 * against NULL to determine the end of the loop.  The race can
2920 * happen because we have to unlock before calling malloc().
2921 * We are protected by Giant for now.
2922 */
2923 i = 0;
2924 for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2925 vfsconf2x(vfsp, xvfsp + i);
2926 i++;
2927 }
2928 error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2929 free(xvfsp, M_TEMP);
2930 return (error);
2931 }
2932
2933 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2934 "S,xvfsconf", "List of all configured filesystems");
2935
2936 /*
2937 * Top level filesystem related information gathering.
2938 */
2939 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2940
2941 static int
2942 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2943 {
2944 int *name = (int *)arg1 - 1; /* XXX */
2945 u_int namelen = arg2 + 1; /* XXX */
2946 struct vfsconf *vfsp;
2947 struct xvfsconf xvfsp;
2948
2949 printf("WARNING: userland calling deprecated sysctl, "
2950 "please rebuild world\n");
2951
2952 #if 1 || defined(COMPAT_PRELITE2)
2953 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2954 if (namelen == 1)
2955 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2956 #endif
2957
2958 switch (name[1]) {
2959 case VFS_MAXTYPENUM:
2960 if (namelen != 2)
2961 return (ENOTDIR);
2962 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2963 case VFS_CONF:
2964 if (namelen != 3)
2965 return (ENOTDIR); /* overloaded */
2966 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2967 if (vfsp->vfc_typenum == name[2])
2968 break;
2969 if (vfsp == NULL)
2970 return (EOPNOTSUPP);
2971 vfsconf2x(vfsp, &xvfsp);
2972 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2973 }
2974 return (EOPNOTSUPP);
2975 }
2976
2977 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2978 "Generic filesystem");
2979
2980 #if 1 || defined(COMPAT_PRELITE2)
2981
2982 static int
2983 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2984 {
2985 int error;
2986 struct vfsconf *vfsp;
2987 struct ovfsconf ovfs;
2988
2989 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2990 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
2991 strcpy(ovfs.vfc_name, vfsp->vfc_name);
2992 ovfs.vfc_index = vfsp->vfc_typenum;
2993 ovfs.vfc_refcount = vfsp->vfc_refcount;
2994 ovfs.vfc_flags = vfsp->vfc_flags;
2995 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2996 if (error)
2997 return error;
2998 }
2999 return 0;
3000 }
3001
3002 #endif /* 1 || COMPAT_PRELITE2 */
3003
3004 #define KINFO_VNODESLOP 10
3005 /*
3006 * Dump vnode list (via sysctl).
3007 */
3008 /* ARGSUSED */
3009 static int
3010 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3011 {
3012 struct xvnode *xvn;
3013 struct thread *td = req->td;
3014 struct mount *mp;
3015 struct vnode *vp;
3016 int error, len, n;
3017
3018 /*
3019 * Stale numvnodes access is not fatal here.
3020 */
3021 req->lock = 0;
3022 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3023 if (!req->oldptr)
3024 /* Make an estimate */
3025 return (SYSCTL_OUT(req, 0, len));
3026
3027 sysctl_wire_old_buffer(req, 0);
3028 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3029 n = 0;
3030 mtx_lock(&mountlist_mtx);
3031 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3032 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3033 continue;
3034 mtx_lock(&mntvnode_mtx);
3035 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3036 if (n == len)
3037 break;
3038 vref(vp);
3039 xvn[n].xv_size = sizeof *xvn;
3040 xvn[n].xv_vnode = vp;
3041 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3042 XV_COPY(usecount);
3043 XV_COPY(writecount);
3044 XV_COPY(holdcnt);
3045 XV_COPY(id);
3046 XV_COPY(mount);
3047 XV_COPY(numoutput);
3048 XV_COPY(type);
3049 #undef XV_COPY
3050 xvn[n].xv_flag = vp->v_vflag;
3051
3052 switch (vp->v_type) {
3053 case VREG:
3054 case VDIR:
3055 case VLNK:
3056 xvn[n].xv_dev = vp->v_cachedfs;
3057 xvn[n].xv_ino = vp->v_cachedid;
3058 break;
3059 case VBLK:
3060 case VCHR:
3061 if (vp->v_rdev == NULL) {
3062 vrele(vp);
3063 continue;
3064 }
3065 xvn[n].xv_dev = dev2udev(vp->v_rdev);
3066 break;
3067 case VSOCK:
3068 xvn[n].xv_socket = vp->v_socket;
3069 break;
3070 case VFIFO:
3071 xvn[n].xv_fifo = vp->v_fifoinfo;
3072 break;
3073 case VNON:
3074 case VBAD:
3075 default:
3076 /* shouldn't happen? */
3077 vrele(vp);
3078 continue;
3079 }
3080 vrele(vp);
3081 ++n;
3082 }
3083 mtx_unlock(&mntvnode_mtx);
3084 mtx_lock(&mountlist_mtx);
3085 vfs_unbusy(mp, td);
3086 if (n == len)
3087 break;
3088 }
3089 mtx_unlock(&mountlist_mtx);
3090
3091 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3092 free(xvn, M_TEMP);
3093 return (error);
3094 }
3095
3096 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3097 0, 0, sysctl_vnode, "S,xvnode", "");
3098
3099 /*
3100 * Check to see if a filesystem is mounted on a block device.
3101 */
3102 int
3103 vfs_mountedon(vp)
3104 struct vnode *vp;
3105 {
3106
3107 if (vp->v_rdev->si_mountpoint != NULL)
3108 return (EBUSY);
3109 return (0);
3110 }
3111
3112 /*
3113 * Unmount all filesystems. The list is traversed in reverse order
3114 * of mounting to avoid dependencies.
3115 */
3116 void
3117 vfs_unmountall()
3118 {
3119 struct mount *mp;
3120 struct thread *td;
3121 int error;
3122
3123 if (curthread != NULL)
3124 td = curthread;
3125 else
3126 td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3127 /*
3128 * Since this only runs when rebooting, it is not interlocked.
3129 */
3130 while(!TAILQ_EMPTY(&mountlist)) {
3131 mp = TAILQ_LAST(&mountlist, mntlist);
3132 error = dounmount(mp, MNT_FORCE, td);
3133 if (error) {
3134 TAILQ_REMOVE(&mountlist, mp, mnt_list);
3135 printf("unmount of %s failed (",
3136 mp->mnt_stat.f_mntonname);
3137 if (error == EBUSY)
3138 printf("BUSY)\n");
3139 else
3140 printf("%d)\n", error);
3141 } else {
3142 /* The unmount has removed mp from the mountlist */
3143 }
3144 }
3145 }
3146
3147 /*
3148 * Perform msync on all vnodes under a mount point.
3149 * The mount point must be locked.
3150 */
3151 void
3152 vfs_msync(struct mount *mp, int flags)
3153 {
3154 struct vnode *vp, *nvp;
3155 struct vm_object *obj;
3156 int tries;
3157
3158 GIANT_REQUIRED;
3159
3160 tries = 5;
3161 mtx_lock(&mntvnode_mtx);
3162 loop:
3163 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3164 if (vp->v_mount != mp) {
3165 if (--tries > 0)
3166 goto loop;
3167 break;
3168 }
3169 nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3170
3171 VI_LOCK(vp);
3172 if (vp->v_iflag & VI_XLOCK) { /* XXX: what if MNT_WAIT? */
3173 VI_UNLOCK(vp);
3174 continue;
3175 }
3176
3177 if ((vp->v_iflag & VI_OBJDIRTY) &&
3178 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3179 mtx_unlock(&mntvnode_mtx);
3180 if (!vget(vp,
3181 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3182 curthread)) {
3183 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
3184 vput(vp);
3185 mtx_lock(&mntvnode_mtx);
3186 continue;
3187 }
3188
3189 if (VOP_GETVOBJECT(vp, &obj) == 0) {
3190 vm_object_page_clean(obj, 0, 0,
3191 flags == MNT_WAIT ?
3192 OBJPC_SYNC : OBJPC_NOSYNC);
3193 }
3194 vput(vp);
3195 }
3196 mtx_lock(&mntvnode_mtx);
3197 if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3198 if (--tries > 0)
3199 goto loop;
3200 break;
3201 }
3202 } else
3203 VI_UNLOCK(vp);
3204 }
3205 mtx_unlock(&mntvnode_mtx);
3206 }
3207
3208 /*
3209 * Create the VM object needed for VMIO and mmap support. This
3210 * is done for all VREG files in the system.  Some filesystems might
3211 * also take advantage of the additional metadata buffering capability
3212 * of the VMIO code by making the device node VMIO mode as well.
3213 *
3214 * vp must be locked when vfs_object_create is called.
3215 */
3216 int
3217 vfs_object_create(vp, td, cred)
3218 struct vnode *vp;
3219 struct thread *td;
3220 struct ucred *cred;
3221 {
3222 GIANT_REQUIRED;
3223 return (VOP_CREATEVOBJECT(vp, cred, td));
3224 }
3225
3226 /*
3227 * Mark a vnode as free, putting it up for recycling.
3228 */
3229 void
3230 vfree(vp)
3231 struct vnode *vp;
3232 {
3233 int s;
3234
3235 ASSERT_VI_LOCKED(vp, "vfree");
3236 s = splbio();
3237 mtx_lock(&vnode_free_list_mtx);
3238 KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3239 if (vp->v_iflag & VI_AGE) {
3240 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3241 } else {
3242 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3243 }
3244 freevnodes++;
3245 mtx_unlock(&vnode_free_list_mtx);
3246 vp->v_iflag &= ~VI_AGE;
3247 vp->v_iflag |= VI_FREE;
3248 splx(s);
3249 }
3250
3251 /*
3252 * Opposite of vfree() - mark a vnode as in use.
3253 */
3254 void
3255 vbusy(vp)
3256 struct vnode *vp;
3257 {
3258 int s;
3259
3260 s = splbio();
3261 ASSERT_VI_LOCKED(vp, "vbusy");
3262 KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3263
3264 mtx_lock(&vnode_free_list_mtx);
3265 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3266 freevnodes--;
3267 mtx_unlock(&vnode_free_list_mtx);
3268
3269 vp->v_iflag &= ~(VI_FREE|VI_AGE);
3270 splx(s);
3271 }
3272
3273 /*
3274 * Record a process's interest in events which might happen to
3275 * a vnode. Because poll uses the historic select-style interface
3276 * internally, this routine serves as both the ``check for any
3277 * pending events'' and the ``record my interest in future events''
3278 * functions. (These are done together, while the lock is held,
3279 * to avoid race conditions.)
3280 */
3281 int
3282 vn_pollrecord(vp, td, events)
3283 struct vnode *vp;
3284 struct thread *td;
3285 short events;
3286 {
3287
3288 if (vp->v_pollinfo == NULL)
3289 v_addpollinfo(vp);
3290 mtx_lock(&vp->v_pollinfo->vpi_lock);
3291 if (vp->v_pollinfo->vpi_revents & events) {
3292 /*
3293 * This leaves events we are not interested
3294 * in available for the other process, which
3295 * presumably had requested them
3296 * (otherwise they would never have been
3297 * recorded).
3298 */
3299 events &= vp->v_pollinfo->vpi_revents;
3300 vp->v_pollinfo->vpi_revents &= ~events;
3301
3302 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3303 return events;
3304 }
3305 vp->v_pollinfo->vpi_events |= events;
3306 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3307 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3308 return 0;
3309 }
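
/*
 * Usage sketch (illustration only): a filesystem's VOP_POLL
 * implementation can simply delegate to vn_pollrecord().
 */
static int
example_poll(struct vop_poll_args *ap)
{

	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}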
3310
3311 /*
3312 * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
3313 * it is possible for us to miss an event due to race conditions, but
3314 * that condition is expected to be rare, so for the moment it is the
3315 * preferred interface.
3316 */
3317 void
3318 vn_pollevent(vp, events)
3319 struct vnode *vp;
3320 short events;
3321 {
3322
3323 if (vp->v_pollinfo == NULL)
3324 v_addpollinfo(vp);
3325 mtx_lock(&vp->v_pollinfo->vpi_lock);
3326 if (vp->v_pollinfo->vpi_events & events) {
3327 /*
3328 * We clear vpi_events so that we don't
3329 * call selwakeup() twice if two events are
3330 * posted before the polling process(es) is
3331 * awakened. This also ensures that we take at
3332 * most one selwakeup() if the polling process
3333 * is no longer interested. However, it does
3334 * mean that only one event can be noticed at
3335 * a time. (Perhaps we should only clear those
3336 * event bits which we note?) XXX
3337 */
3338 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
3339 vp->v_pollinfo->vpi_revents |= events;
3340 selwakeup(&vp->v_pollinfo->vpi_selinfo);
3341 }
3342 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3343 }
3344
3345 /*
3346 * Wake up anyone polling on vp because it is being revoked.
3347 * This depends on dead_poll() returning POLLHUP for correct
3348 * behavior.
3349 */
3350 void
3351 vn_pollgone(vp)
3352 struct vnode *vp;
3353 {
3354
3355 mtx_lock(&vp->v_pollinfo->vpi_lock);
3356 VN_KNOTE(vp, NOTE_REVOKE);
3357 if (vp->v_pollinfo->vpi_events) {
3358 vp->v_pollinfo->vpi_events = 0;
3359 selwakeup(&vp->v_pollinfo->vpi_selinfo);
3360 }
3361 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3362 }
3363
3364
3365
3366 /*
3367 * Routine to create and manage a filesystem syncer vnode.
3368 */
3369 #define sync_close ((int (*)(struct vop_close_args *))nullop)
3370 static int sync_fsync(struct vop_fsync_args *);
3371 static int sync_inactive(struct vop_inactive_args *);
3372 static int sync_reclaim(struct vop_reclaim_args *);
3373 static int sync_print(struct vop_print_args *);
3374
3375 static vop_t **sync_vnodeop_p;
3376 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3377 { &vop_default_desc, (vop_t *) vop_eopnotsupp },
3378 { &vop_close_desc, (vop_t *) sync_close }, /* close */
3379 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */
3380 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */
3381 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */
3382 { &vop_lock_desc, (vop_t *) vop_stdlock }, /* lock */
3383 { &vop_unlock_desc, (vop_t *) vop_stdunlock }, /* unlock */
3384 { &vop_print_desc, (vop_t *) sync_print }, /* print */
3385 { &vop_islocked_desc, (vop_t *) vop_stdislocked }, /* islocked */
3386 { NULL, NULL }
3387 };
3388 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3389 { &sync_vnodeop_p, sync_vnodeop_entries };
3390
3391 VNODEOP_SET(sync_vnodeop_opv_desc);
3392
3393 /*
3394 * Create a new filesystem syncer vnode for the specified mount point.
3395 */
3396 int
3397 vfs_allocate_syncvnode(mp)
3398 struct mount *mp;
3399 {
3400 struct vnode *vp;
3401 static long start, incr, next;
3402 int error;
3403
3404 /* Allocate a new vnode */
3405 if ((error = getnewvnode("vfs", mp, sync_vnodeop_p, &vp)) != 0) {
3406 mp->mnt_syncer = NULL;
3407 return (error);
3408 }
3409 vp->v_type = VNON;
3410 /*
3411 * Place the vnode onto the syncer worklist. We attempt to
3412 * scatter them about on the list so that they will go off
3413 * at evenly distributed times even if all the filesystems
3414 * are mounted at once.
3415 */
3416 next += incr;
3417 if (next == 0 || next > syncer_maxdelay) {
3418 start /= 2;
3419 incr /= 2;
3420 if (start == 0) {
3421 start = syncer_maxdelay / 2;
3422 incr = syncer_maxdelay;
3423 }
3424 next = start;
3425 }
3426 VI_LOCK(vp);
3427 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3428 VI_UNLOCK(vp);
3429 mp->mnt_syncer = vp;
3430 return (0);
3431 }
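
/*
 * Usage sketch (illustration only): mount code creates a syncer vnode
 * for read-write mounts that do not yet have one.
 */
static int
example_enable_syncer(struct mount *mp)
{

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
		return (vfs_allocate_syncvnode(mp));
	return (0);
}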
3432
3433 /*
3434 * Do a lazy sync of the filesystem.
3435 */
3436 static int
3437 sync_fsync(ap)
3438 struct vop_fsync_args /* {
3439 struct vnode *a_vp;
3440 struct ucred *a_cred;
3441 int a_waitfor;
3442 struct thread *a_td;
3443 } */ *ap;
3444 {
3445 struct vnode *syncvp = ap->a_vp;
3446 struct mount *mp = syncvp->v_mount;
3447 struct thread *td = ap->a_td;
3448 int error, asyncflag;
3449
3450 /*
3451 * We only need to do something if this is a lazy evaluation.
3452 */
3453 if (ap->a_waitfor != MNT_LAZY)
3454 return (0);
3455
3456 /*
3457 * Move ourselves to the back of the sync list.
3458 */
3459 VI_LOCK(syncvp);
3460 vn_syncer_add_to_worklist(syncvp, syncdelay);
3461 VI_UNLOCK(syncvp);
3462
3463 /*
3464 * Walk the list of vnodes pushing all that are dirty and
3465 * not already on the sync list.
3466 */
3467 mtx_lock(&mountlist_mtx);
3468 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3469 mtx_unlock(&mountlist_mtx);
3470 return (0);
3471 }
3472 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3473 vfs_unbusy(mp, td);
3474 return (0);
3475 }
3476 asyncflag = mp->mnt_flag & MNT_ASYNC;
3477 mp->mnt_flag &= ~MNT_ASYNC;
3478 vfs_msync(mp, MNT_NOWAIT);
3479 error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3480 if (asyncflag)
3481 mp->mnt_flag |= MNT_ASYNC;
3482 vn_finished_write(mp);
3483 vfs_unbusy(mp, td);
3484 return (error);
3485 }
3486
3487 /*
3488 * The syncer vnode is no longer referenced.
3489 */
3490 static int
3491 sync_inactive(ap)
3492 struct vop_inactive_args /* {
3493 struct vnode *a_vp;
3494 struct thread *a_td;
3495 } */ *ap;
3496 {
3497
3498 VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3499 vgone(ap->a_vp);
3500 return (0);
3501 }
3502
3503 /*
3504 * The syncer vnode is no longer needed and is being decommissioned.
3505 *
3506 * Modifications to the worklist must be protected at splbio().
3507 */
3508 static int
3509 sync_reclaim(ap)
3510 struct vop_reclaim_args /* {
3511 struct vnode *a_vp;
3512 } */ *ap;
3513 {
3514 struct vnode *vp = ap->a_vp;
3515 int s;
3516
3517 s = splbio();
3518 vp->v_mount->mnt_syncer = NULL;
3519 VI_LOCK(vp);
3520 if (vp->v_iflag & VI_ONWORKLST) {
3521 mtx_lock(&sync_mtx);
3522 LIST_REMOVE(vp, v_synclist);
3523 mtx_unlock(&sync_mtx);
3524 vp->v_iflag &= ~VI_ONWORKLST;
3525 }
3526 VI_UNLOCK(vp);
3527 splx(s);
3528
3529 return (0);
3530 }
3531
3532 /*
3533 * Print out a syncer vnode.
3534 */
3535 static int
3536 sync_print(ap)
3537 struct vop_print_args /* {
3538 struct vnode *a_vp;
3539 } */ *ap;
3540 {
3541 struct vnode *vp = ap->a_vp;
3542
3543 printf("syncer vnode");
3544 if (vp->v_vnlock != NULL)
3545 lockmgr_printinfo(vp->v_vnlock);
3546 printf("\n");
3547 return (0);
3548 }
3549
3550 /*
3551 * Extract the dev_t from a VCHR vnode.
3552 */
3553 dev_t
3554 vn_todev(vp)
3555 struct vnode *vp;
3556 {
3557 if (vp->v_type != VCHR)
3558 return (NODEV);
3559 return (vp->v_rdev);
3560 }
3561
3562 /*
3563 * Check whether a vnode represents a disk device.
3564 */
3565 int
3566 vn_isdisk(vp, errp)
3567 struct vnode *vp;
3568 int *errp;
3569 {
3570 struct cdevsw *cdevsw;
3571
3572 if (vp->v_type != VCHR) {
3573 if (errp != NULL)
3574 *errp = ENOTBLK;
3575 return (0);
3576 }
3577 if (vp->v_rdev == NULL) {
3578 if (errp != NULL)
3579 *errp = ENXIO;
3580 return (0);
3581 }
3582 cdevsw = devsw(vp->v_rdev);
3583 if (cdevsw == NULL) {
3584 if (errp != NULL)
3585 *errp = ENXIO;
3586 return (0);
3587 }
3588 if (!(cdevsw->d_flags & D_DISK)) {
3589 if (errp != NULL)
3590 *errp = ENOTBLK;
3591 return (0);
3592 }
3593 if (errp != NULL)
3594 *errp = 0;
3595 return (1);
3596 }
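
/*
 * Usage sketch (illustration only): mount code typically rejects
 * device vnodes that are not disks before going any further.
 */
static int
example_check_disk(struct vnode *devvp)
{
	int error;

	if (!vn_isdisk(devvp, &error))
		return (error);		/* ENOTBLK or ENXIO */
	return (0);
}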
3597
3598 /*
3599 * Free data allocated by namei(); see namei(9) for details.
3600 */
3601 void
3602 NDFREE(ndp, flags)
3603 struct nameidata *ndp;
3604 const uint flags;
3605 {
3606 if (!(flags & NDF_NO_FREE_PNBUF) &&
3607 (ndp->ni_cnd.cn_flags & HASBUF)) {
3608 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3609 ndp->ni_cnd.cn_flags &= ~HASBUF;
3610 }
3611 if (!(flags & NDF_NO_DVP_UNLOCK) &&
3612 (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3613 ndp->ni_dvp != ndp->ni_vp)
3614 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3615 if (!(flags & NDF_NO_DVP_RELE) &&
3616 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3617 vrele(ndp->ni_dvp);
3618 ndp->ni_dvp = NULL;
3619 }
3620 if (!(flags & NDF_NO_VP_UNLOCK) &&
3621 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3622 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3623 if (!(flags & NDF_NO_VP_RELE) &&
3624 ndp->ni_vp) {
3625 vrele(ndp->ni_vp);
3626 ndp->ni_vp = NULL;
3627 }
3628 if (!(flags & NDF_NO_STARTDIR_RELE) &&
3629 (ndp->ni_cnd.cn_flags & SAVESTART)) {
3630 vrele(ndp->ni_startdir);
3631 ndp->ni_startdir = NULL;
3632 }
3633 }
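
/*
 * Usage sketch (illustration only): a typical namei() consumer frees
 * the pathname buffer with NDFREE() as soon as the lookup state is no
 * longer needed, then releases the vnode itself.
 */
static int
example_lookup(char *path, struct thread *td)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);	/* free only the path buffer */
	/* ... use the locked vnode nd.ni_vp ... */
	vput(nd.ni_vp);
	return (0);
}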
3634
3635 /*
3636 * Common filesystem object access control check routine. Accepts a
3637 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3638 * and optional call-by-reference privused argument allowing vaccess()
3639 * to indicate to the caller whether privilege was used to satisfy the
3640 * request (obsoleted). Returns 0 on success, or an errno on failure.
3641 */
3642 int
3643 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3644 enum vtype type;
3645 mode_t file_mode;
3646 uid_t file_uid;
3647 gid_t file_gid;
3648 mode_t acc_mode;
3649 struct ucred *cred;
3650 int *privused;
3651 {
3652 mode_t dac_granted;
3653 #ifdef CAPABILITIES
3654 mode_t cap_granted;
3655 #endif
3656
3657 /*
3658 * Look for a normal, non-privileged way to access the file/directory
3659 * as requested. If it exists, go with that.
3660 */
3661
3662 if (privused != NULL)
3663 *privused = 0;
3664
3665 dac_granted = 0;
3666
3667 /* Check the owner. */
3668 if (cred->cr_uid == file_uid) {
3669 dac_granted |= VADMIN;
3670 if (file_mode & S_IXUSR)
3671 dac_granted |= VEXEC;
3672 if (file_mode & S_IRUSR)
3673 dac_granted |= VREAD;
3674 if (file_mode & S_IWUSR)
3675 dac_granted |= (VWRITE | VAPPEND);
3676
3677 if ((acc_mode & dac_granted) == acc_mode)
3678 return (0);
3679
3680 goto privcheck;
3681 }
3682
3683 /* Otherwise, check the groups (first match) */
3684 if (groupmember(file_gid, cred)) {
3685 if (file_mode & S_IXGRP)
3686 dac_granted |= VEXEC;
3687 if (file_mode & S_IRGRP)
3688 dac_granted |= VREAD;
3689 if (file_mode & S_IWGRP)
3690 dac_granted |= (VWRITE | VAPPEND);
3691
3692 if ((acc_mode & dac_granted) == acc_mode)
3693 return (0);
3694
3695 goto privcheck;
3696 }
3697
3698 /* Otherwise, check everyone else. */
3699 if (file_mode & S_IXOTH)
3700 dac_granted |= VEXEC;
3701 if (file_mode & S_IROTH)
3702 dac_granted |= VREAD;
3703 if (file_mode & S_IWOTH)
3704 dac_granted |= (VWRITE | VAPPEND);
3705 if ((acc_mode & dac_granted) == acc_mode)
3706 return (0);
3707
3708 privcheck:
3709 if (!suser_cred(cred, PRISON_ROOT)) {
3710 /* XXX audit: privilege used */
3711 if (privused != NULL)
3712 *privused = 1;
3713 return (0);
3714 }
3715
3716 #ifdef CAPABILITIES
3717 /*
3718 * Build a capability mask to determine if the set of capabilities
3719 * satisfies the requirements when combined with the granted mask
3720 * from above.
3721 * For each capability, if the capability is required, bitwise
3722 * or the request type onto the cap_granted mask.
3723 */
3724 cap_granted = 0;
3725
3726 if (type == VDIR) {
3727 /*
3728 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3729 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3730 */
3731 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3732 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3733 cap_granted |= VEXEC;
3734 } else {
3735 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3736 !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3737 cap_granted |= VEXEC;
3738 }
3739
3740 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3741 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3742 cap_granted |= VREAD;
3743
3744 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3745 !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3746 cap_granted |= (VWRITE | VAPPEND);
3747
3748 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3749 !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3750 cap_granted |= VADMIN;
3751
3752 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3753 /* XXX audit: privilege used */
3754 if (privused != NULL)
3755 *privused = 1;
3756 return (0);
3757 }
3758 #endif
3759
3760 return ((acc_mode & VADMIN) ? EPERM : EACCES);
3761 }
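
/*
 * Usage sketch (hypothetical inode layout, illustration only): a
 * filesystem's VOP_ACCESS typically maps its own ownership and mode
 * metadata onto vaccess().
 */
struct example_inode {			/* hypothetical on-disk metadata */
	mode_t	i_mode;
	uid_t	i_uid;
	gid_t	i_gid;
};

static int
example_access(struct vnode *vp, mode_t acc_mode, struct ucred *cred)
{
	struct example_inode *ip = vp->v_data;

	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
	    acc_mode, cred, NULL));
}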
3762
3763 /*
3764 * Credential check based on process requesting service, and per-attribute
3765 * permissions.
3766 */
3767 int
3768 extattr_check_cred(struct vnode *vp, int attrnamespace,
3769 struct ucred *cred, struct thread *td, int access)
3770 {
3771
3772 /*
3773 * Kernel-invoked requests always succeed.
3774 */
3775 if (cred == NOCRED)
3776 return (0);
3777
3778 /*
3779 * Do not allow privileged processes in jail to directly
3780 * manipulate system attributes.
3781 *
3782 * XXX What capability should apply here?
3783 * Probably CAP_SYS_SETFFLAG.
3784 */
3785 switch (attrnamespace) {
3786 case EXTATTR_NAMESPACE_SYSTEM:
3787 /* Potentially should be: return (EPERM); */
3788 return (suser_cred(cred, 0));
3789 case EXTATTR_NAMESPACE_USER:
3790 return (VOP_ACCESS(vp, access, cred, td));
3791 default:
3792 return (EPERM);
3793 }
3794 }
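
/*
 * Usage sketch (illustration only): an extended attribute VOP handler
 * checks the caller's credentials before touching attribute data.
 */
static int
example_getextattr(struct vnode *vp, int attrnamespace, struct ucred *cred,
    struct thread *td)
{
	int error;

	error = extattr_check_cred(vp, attrnamespace, cred, td, VREAD);
	if (error)
		return (error);
	/* ... read the attribute data ... */
	return (0);
}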