FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_subr.c
1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
35 */
36
37 /*
38 * External virtual filesystem routines
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD: src/sys/kern/vfs_subr.c,v 1.522.2.6 2005/05/20 04:16:47 csjp Exp $");
43
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/event.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kdb.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/mac.h>
60 #include <sys/malloc.h>
61 #include <sys/mount.h>
62 #include <sys/namei.h>
63 #include <sys/reboot.h>
64 #include <sys/sleepqueue.h>
65 #include <sys/stat.h>
66 #include <sys/sysctl.h>
67 #include <sys/syslog.h>
68 #include <sys/vmmeter.h>
69 #include <sys/vnode.h>
70
71 #include <vm/vm.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_extern.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_kern.h>
78 #include <vm/uma.h>
79
80 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
81
82 static void addalias(struct vnode *vp, struct cdev *nvp_rdev);
83 static void delmntque(struct vnode *vp);
84 static void insmntque(struct vnode *vp, struct mount *mp);
85 static void vclean(struct vnode *vp, int flags, struct thread *td);
86 static void vlruvp(struct vnode *vp);
87 static int flushbuflist(struct buf *blist, int flags, struct vnode *vp,
88 int slpflag, int slptimeo, int *errorp);
89 static void syncer_shutdown(void *arg, int howto);
90 static int vtryrecycle(struct vnode *vp);
91 static void vx_lock(struct vnode *vp);
92 static void vx_unlock(struct vnode *vp);
93 static void vgonechrl(struct vnode *vp, struct thread *td);
94
95
96 /*
97 * Number of vnodes in existence. Increased whenever getnewvnode()
98 * allocates a new vnode, never decreased.
99 */
100 static unsigned long numvnodes;
101
102 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
103
104 /*
105 * Conversion tables for converting vnode types to inode formats
106 * and back.
107 */
108 enum vtype iftovt_tab[16] = {
109 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
110 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
111 };
112 int vttoif_tab[9] = {
113 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
114 S_IFSOCK, S_IFIFO, S_IFMT,
115 };
116
117 /*
118 * List of vnodes that are ready for recycling.
119 */
120 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
121
122 /*
123 * Minimum number of free vnodes. If there are fewer free vnodes than
124 * this, getnewvnode() will return a newly allocated vnode.
125 */
126 static u_long wantfreevnodes = 25;
127 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
128 /* Number of vnodes in the free list. */
129 static u_long freevnodes;
130 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
131
132 /*
133 * Various variables used for debugging the new implementation of
134 * reassignbuf().
135 * XXX these are probably of (very) limited utility now.
136 */
137 static int reassignbufcalls;
138 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
139 static int nameileafonly;
140 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
141
142 /*
143 * Cache for the mount type id assigned to NFS. This is used for
144 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
145 */
146 int nfs_mount_type = -1;
147
148 /* To keep more than one thread at a time from running vfs_getnewfsid */
149 static struct mtx mntid_mtx;
150
151 /*
152 * Lock for any access to the following:
153 * vnode_free_list
154 * numvnodes
155 * freevnodes
156 */
157 static struct mtx vnode_free_list_mtx;
158
159 /*
160 * For any iteration/modification of dev->si_hlist (linked through
161 * v_specnext)
162 */
163 static struct mtx spechash_mtx;
164
165 /* Publicly exported FS */
166 struct nfs_public nfs_pub;
167
168 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
169 static uma_zone_t vnode_zone;
170 static uma_zone_t vnodepoll_zone;
171
172 /* Set to 1 to print out reclaim of active vnodes */
173 int prtactive;
174
175 /*
176 * The workitem queue.
177 *
178 * It is useful to delay writes of file data and filesystem metadata
179 * for tens of seconds so that quickly created and deleted files need
180 * not waste disk bandwidth being created and removed. To realize this,
181 * we append vnodes to a "workitem" queue. When running with a soft
182 * updates implementation, most pending metadata dependencies should
183 * not wait for more than a few seconds. Thus, metadata updates are
184 * delayed only about half the time that file data is delayed.
185 * Similarly, directory updates are more critical, so they are delayed
186 * only about a third of the time that file data is delayed. Thus, there are
187 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
188 * one each second (driven off the filesystem syncer process). The
189 * syncer_delayno variable indicates the next queue that is to be processed.
190 * Items that need to be processed soon are placed in this queue:
191 *
192 * syncer_workitem_pending[syncer_delayno]
193 *
194 * A delay of fifteen seconds is done by placing the request fifteen
195 * entries later in the queue:
196 *
197 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
198 *
199 */
200 static int syncer_delayno;
201 static long syncer_mask;
202 LIST_HEAD(synclist, vnode);
203 static struct synclist *syncer_workitem_pending;
204 /*
205 * The sync_mtx protects:
206 * vp->v_synclist
207 * sync_vnode_count
208 * syncer_delayno
209 * syncer_state
210 * syncer_workitem_pending
211 * syncer_worklist_len
212 * rushjob
213 */
214 static struct mtx sync_mtx;
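
/*
 * Editor's sketch (not part of the original file): how a delay in
 * seconds maps to a workitem slot.  This mirrors the slot computation
 * in vn_syncer_add_to_worklist() below; the helper name is
 * hypothetical and the caller is assumed to hold sync_mtx.
 */
#if 0
static struct synclist *
syncer_slot_for_delay(int delay)
{

	mtx_assert(&sync_mtx, MA_OWNED);
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	return (&syncer_workitem_pending[(syncer_delayno + delay) &
	    syncer_mask]);
}
#endif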
215
216 #define SYNCER_MAXDELAY 32
217 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
218 static int syncdelay = 30; /* max time to delay syncing data */
219 static int filedelay = 30; /* time to delay syncing files */
220 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
221 static int dirdelay = 29; /* time to delay syncing directories */
222 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
223 static int metadelay = 28; /* time to delay syncing metadata */
224 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
225 static int rushjob; /* number of slots to run ASAP */
226 static int stat_rush_requests; /* number of times I/O speeded up */
227 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
228
229 /*
230 * When shutting down the syncer, run it at four times normal speed.
231 */
232 #define SYNCER_SHUTDOWN_SPEEDUP 4
233 static int sync_vnode_count;
234 static int syncer_worklist_len;
235 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
236 syncer_state;
237
238 /*
239 * Number of vnodes we want to exist at any one time. This is mostly used
240 * to size hash tables in vnode-related code. It is normally not used in
241 * getnewvnode(), as wantfreevnodes is normally nonzero.
242 *
243 * XXX desiredvnodes is historical cruft and should not exist.
244 */
245 int desiredvnodes;
246 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
247 &desiredvnodes, 0, "Maximum number of vnodes");
248 static int minvnodes;
249 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
250 &minvnodes, 0, "Minimum number of vnodes");
251 static int vnlru_nowhere;
252 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
253 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
254
255 /* Hook for calling soft updates. */
256 int (*softdep_process_worklist_hook)(struct mount *);
257
258 /*
259 * Initialize the vnode management data structures.
260 */
261 #ifndef MAXVNODES_MAX
262 #define MAXVNODES_MAX 100000
263 #endif
264 static void
265 vntblinit(void *dummy __unused)
266 {
267
268 /*
269 * Desiredvnodes is a function of the physical memory size and
270 * the kernel's heap size. Specifically, desiredvnodes scales
271 * in proportion to the physical memory size until two fifths
272 * of the kernel's heap size is consumed by vnodes and vm
273 * objects.
274 */
275 desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
276 (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
277 if (desiredvnodes > MAXVNODES_MAX) {
278 if (bootverbose)
279 printf("Reducing kern.maxvnodes %d -> %d\n",
280 desiredvnodes, MAXVNODES_MAX);
281 desiredvnodes = MAXVNODES_MAX;
282 }
283 minvnodes = desiredvnodes / 4;
284 mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
285 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
286 mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
287 TAILQ_INIT(&vnode_free_list);
288 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
289 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
290 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
291 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
292 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
293 /*
294 * Initialize the filesystem syncer.
295 */
296 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
297 &syncer_mask);
298 syncer_maxdelay = syncer_mask + 1;
299 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
300 }
301 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
302
303
304 /*
305 * Mark a mount point as busy. Used to synchronize access and to delay
306 * unmounting. Interlock is not released on failure.
307 */
308 int
309 vfs_busy(mp, flags, interlkp, td)
310 struct mount *mp;
311 int flags;
312 struct mtx *interlkp;
313 struct thread *td;
314 {
315 int lkflags;
316
317 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
318 if (flags & LK_NOWAIT)
319 return (ENOENT);
320 mp->mnt_kern_flag |= MNTK_MWAIT;
321 /*
322 * Since all busy locks are shared except the exclusive
323 * lock granted when unmounting, the only place that a
324 * wakeup needs to be done is at the release of the
325 * exclusive lock at the end of dounmount.
326 */
327 msleep(mp, interlkp, PVFS, "vfs_busy", 0);
328 return (ENOENT);
329 }
330 lkflags = LK_SHARED | LK_NOPAUSE;
331 if (interlkp)
332 lkflags |= LK_INTERLOCK;
333 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
334 panic("vfs_busy: unexpected lock failure");
335 return (0);
336 }
337
338 /*
339 * Free a busy filesystem.
340 */
341 void
342 vfs_unbusy(mp, td)
343 struct mount *mp;
344 struct thread *td;
345 {
346
347 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
348 }
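
/*
 * Editor's sketch (not part of the original file): the canonical
 * pattern for walking the mount list with vfs_busy()/vfs_unbusy(),
 * as used by vnlru_proc() and sched_sync() below.  A thread pointer
 * td is assumed to be in scope.
 */
#if 0
	struct mount *mp, *nmp;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			/* Failure leaves mountlist_mtx held. */
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... operate on the busied mount point, mp ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
#endif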
349
350 /*
351 * Lookup a mount point by filesystem identifier.
352 */
353 struct mount *
354 vfs_getvfs(fsid)
355 fsid_t *fsid;
356 {
357 register struct mount *mp;
358
359 mtx_lock(&mountlist_mtx);
360 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
361 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
362 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
363 mtx_unlock(&mountlist_mtx);
364 return (mp);
365 }
366 }
367 mtx_unlock(&mountlist_mtx);
368 return ((struct mount *) 0);
369 }
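
/*
 * Editor's sketch (not part of the original file): a typical
 * vfs_getvfs() caller maps the fsid from an NFS file handle back to
 * its mount point:
 *
 *	mp = vfs_getvfs(&fhp->fh_fsid);
 *	if (mp == NULL)
 *		return (ESTALE);
 */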
370
371 /*
372 * Check if a user can access privileged mount options.
373 */
374 int
375 vfs_suser(struct mount *mp, struct thread *td)
376 {
377 int error;
378
379 if ((mp->mnt_flag & MNT_USER) == 0 ||
380 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
381 if ((error = suser(td)) != 0)
382 return (error);
383 }
384 return (0);
385 }
386
387 /*
388 * Get a new unique fsid. Try to make its val[0] unique, since this value
389 * will be used to create fake device numbers for stat(). Also try (but
390 * not so hard) to make its val[0] unique mod 2^16, since some emulators
391 * only support 16-bit device numbers. We end up with unique val[0]'s for the
392 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
393 *
394 * Keep in mind that several mounts may be running in parallel. Starting
395 * the search one past where the previous search terminated is both a
396 * micro-optimization and a defense against returning the same fsid to
397 * different mounts.
398 */
399 void
400 vfs_getnewfsid(mp)
401 struct mount *mp;
402 {
403 static u_int16_t mntid_base;
404 fsid_t tfsid;
405 int mtype;
406
407 mtx_lock(&mntid_mtx);
408 mtype = mp->mnt_vfc->vfc_typenum;
409 tfsid.val[1] = mtype;
410 mtype = (mtype & 0xFF) << 24;
411 for (;;) {
412 tfsid.val[0] = makedev(255,
413 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
414 mntid_base++;
415 if (vfs_getvfs(&tfsid) == NULL)
416 break;
417 }
418 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
419 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
420 mtx_unlock(&mntid_mtx);
421 }
422
423 /*
424 * Knob to control the precision of file timestamps:
425 *
426 * 0 = seconds only; nanoseconds zeroed.
427 * 1 = seconds and nanoseconds, accurate within 1/HZ.
428 * 2 = seconds and nanoseconds, truncated to microseconds.
429 * >=3 = seconds and nanoseconds, maximum precision.
430 */
431 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
432
433 static int timestamp_precision = TSP_SEC;
434 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
435     &timestamp_precision, 0, "");
436
437 /*
438 * Get a current timestamp.
439 */
440 void
441 vfs_timestamp(tsp)
442 struct timespec *tsp;
443 {
444 struct timeval tv;
445
446 switch (timestamp_precision) {
447 case TSP_SEC:
448 tsp->tv_sec = time_second;
449 tsp->tv_nsec = 0;
450 break;
451 case TSP_HZ:
452 getnanotime(tsp);
453 break;
454 case TSP_USEC:
455 microtime(&tv);
456 TIMEVAL_TO_TIMESPEC(&tv, tsp);
457 break;
458 case TSP_NSEC:
459 default:
460 nanotime(tsp);
461 break;
462 }
463 }
464
465 /*
466 * Set vnode attributes to VNOVAL
467 */
468 void
469 vattr_null(vap)
470 register struct vattr *vap;
471 {
472
473 vap->va_type = VNON;
474 vap->va_size = VNOVAL;
475 vap->va_bytes = VNOVAL;
476 vap->va_mode = VNOVAL;
477 vap->va_nlink = VNOVAL;
478 vap->va_uid = VNOVAL;
479 vap->va_gid = VNOVAL;
480 vap->va_fsid = VNOVAL;
481 vap->va_fileid = VNOVAL;
482 vap->va_blocksize = VNOVAL;
483 vap->va_rdev = VNOVAL;
484 vap->va_atime.tv_sec = VNOVAL;
485 vap->va_atime.tv_nsec = VNOVAL;
486 vap->va_mtime.tv_sec = VNOVAL;
487 vap->va_mtime.tv_nsec = VNOVAL;
488 vap->va_ctime.tv_sec = VNOVAL;
489 vap->va_ctime.tv_nsec = VNOVAL;
490 vap->va_birthtime.tv_sec = VNOVAL;
491 vap->va_birthtime.tv_nsec = VNOVAL;
492 vap->va_flags = VNOVAL;
493 vap->va_gen = VNOVAL;
494 vap->va_vaflags = 0;
495 }
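
/*
 * Editor's sketch (not part of the original file): callers null out a
 * vattr and then set only the fields they mean to change, so the
 * filesystem can skip everything still marked VNOVAL.
 */
#if 0
	struct vattr va;

	vattr_null(&va);
	va.va_size = length;	/* change only the size */
	error = VOP_SETATTR(vp, &va, cred, td);
#endif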
496
497 /*
498 * This routine is called when we have too many vnodes. It attempts
499 * to free <count> vnodes and will potentially free vnodes that still
500 * have VM backing store (VM backing store is typically the cause
501 * of a vnode blowout so we want to do this). Therefore, this operation
502 * is not considered cheap.
503 *
504 * A number of conditions may prevent a vnode from being reclaimed.
505 * The buffer cache may have references on the vnode, a directory
506 * vnode may still have references due to the namei cache representing
507 * underlying files, or the vnode may be in active use. It is not
508 * desireable to reuse such vnodes. These conditions may cause the
509 * number of vnodes to reach some minimum value regardless of what
510 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
511 */
512 static int
513 vlrureclaim(struct mount *mp)
514 {
515 struct vnode *vp;
516 int done;
517 int trigger;
518 int usevnodes;
519 int count;
520
521 /*
522 * Calculate the trigger point, don't allow user
523 * screwups to blow us up. This prevents us from
524 * recycling vnodes with lots of resident pages. We
525 * aren't trying to free memory, we are trying to
526 * free vnodes.
527 */
528 usevnodes = desiredvnodes;
529 if (usevnodes <= 0)
530 usevnodes = 1;
531 trigger = cnt.v_page_count * 2 / usevnodes;
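	/*
	 * Editor's note (worked example): with about 131072 physical
	 * pages (512MB of 4K pages) and desiredvnodes at, say, 65536,
	 * trigger is 131072 * 2 / 65536 = 4, so only vnodes with fewer
	 * than 4 resident pages are recycled here.
	 */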
532
533 done = 0;
534 MNT_ILOCK(mp);
535 count = mp->mnt_nvnodelistsize / 10 + 1;
536 while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
537 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
538 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
539
540 if (vp->v_type != VNON &&
541 vp->v_type != VBAD &&
542 VI_TRYLOCK(vp)) {
543 if (VMIGHTFREE(vp) && /* critical path opt */
544 (vp->v_object == NULL ||
545 vp->v_object->resident_page_count < trigger)) {
546 MNT_IUNLOCK(mp);
547 vgonel(vp, curthread);
548 done++;
549 MNT_ILOCK(mp);
550 } else
551 VI_UNLOCK(vp);
552 }
553 --count;
554 }
555 MNT_IUNLOCK(mp);
556 return done;
557 }
558
559 /*
560 * Attempt to recycle vnodes in a context that is always safe to block.
561 * Calling vlrureclaim() from the bowels of filesystem code has some
562 * interesting deadlock problems.
563 */
564 static struct proc *vnlruproc;
565 static int vnlruproc_sig;
566
567 static void
568 vnlru_proc(void)
569 {
570 struct mount *mp, *nmp;
571 int done;
572 struct proc *p = vnlruproc;
573 struct thread *td = FIRST_THREAD_IN_PROC(p);
574
575 mtx_lock(&Giant);
576
577 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
578 SHUTDOWN_PRI_FIRST);
579
580 for (;;) {
581 kthread_suspend_check(p);
582 mtx_lock(&vnode_free_list_mtx);
583 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
584 mtx_unlock(&vnode_free_list_mtx);
585 vnlruproc_sig = 0;
586 wakeup(&vnlruproc_sig);
587 tsleep(vnlruproc, PVFS, "vlruwt", hz);
588 continue;
589 }
590 mtx_unlock(&vnode_free_list_mtx);
591 done = 0;
592 mtx_lock(&mountlist_mtx);
593 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
594 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
595 nmp = TAILQ_NEXT(mp, mnt_list);
596 continue;
597 }
598 done += vlrureclaim(mp);
599 mtx_lock(&mountlist_mtx);
600 nmp = TAILQ_NEXT(mp, mnt_list);
601 vfs_unbusy(mp, td);
602 }
603 mtx_unlock(&mountlist_mtx);
604 if (done == 0) {
605 #if 0
606 /* These messages are temporary debugging aids */
607 if (vnlru_nowhere < 5)
608 printf("vnlru process getting nowhere..\n");
609 else if (vnlru_nowhere == 5)
610 printf("vnlru process messages stopped.\n");
611 #endif
612 vnlru_nowhere++;
613 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
614 }
615 }
616 }
617
618 static struct kproc_desc vnlru_kp = {
619 "vnlru",
620 vnlru_proc,
621 &vnlruproc
622 };
623 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
624
625
626 /*
627 * Routines having to do with the management of the vnode table.
628 */
629
630 /*
631 * Check to see if a free vnode can be recycled. If it can,
632 * recycle it and return it with the vnode interlock held.
633 */
634 static int
635 vtryrecycle(struct vnode *vp)
636 {
637 struct thread *td = curthread;
638 vm_object_t object;
639 struct mount *vnmp;
640 int error;
641
642 /* Don't recycle if we can't get the interlock */
643 if (!VI_TRYLOCK(vp))
644 return (EWOULDBLOCK);
645 /*
646 * This vnode may be found and locked via some other list; if so, we
647 * can't recycle it yet.
648 */
649 if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
650 return (EWOULDBLOCK);
651 /*
652 * Don't recycle if its filesystem is being suspended.
653 */
654 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
655 VOP_UNLOCK(vp, 0, td);
656 return (EBUSY);
657 }
658
659 /*
660 * Don't recycle if we still have cached pages.
661 */
662 if (VOP_GETVOBJECT(vp, &object) == 0) {
663 VM_OBJECT_LOCK(object);
664 if (object->resident_page_count ||
665 object->ref_count) {
666 VM_OBJECT_UNLOCK(object);
667 error = EBUSY;
668 goto done;
669 }
670 VM_OBJECT_UNLOCK(object);
671 }
672 if (LIST_FIRST(&vp->v_cache_src)) {
673 /*
674 * note: nameileafonly sysctl is temporary,
675 * for debugging only, and will eventually be
676 * removed.
677 */
678 if (nameileafonly > 0) {
679 /*
680 * Do not reuse namei-cached directory
681 * vnodes that have cached
682 * subdirectories.
683 */
684 if (cache_leaf_test(vp) < 0) {
685 error = EISDIR;
686 goto done;
687 }
688 } else if (nameileafonly < 0 ||
689 vmiodirenable == 0) {
690 /*
691 * Do not reuse namei-cached directory
692 * vnodes if nameileafonly is -1 or
693 * if VMIO backing for directories is
694 * turned off (otherwise we reuse them
695 * too quickly).
696 */
697 error = EBUSY;
698 goto done;
699 }
700 }
701 /*
702 * If we got this far, we need to acquire the interlock and see if
703 * anyone picked up this vnode from another list. If not, we will
704 * mark it with XLOCK via vgonel() so that anyone who does find it
705 * will skip over it.
706 */
707 VI_LOCK(vp);
708 if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
709 VI_UNLOCK(vp);
710 error = EBUSY;
711 goto done;
712 }
713 mtx_lock(&vnode_free_list_mtx);
714 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
715 vp->v_iflag &= ~VI_FREE;
716 mtx_unlock(&vnode_free_list_mtx);
717 vp->v_iflag |= VI_DOOMED;
718 if (vp->v_type != VBAD) {
719 VOP_UNLOCK(vp, 0, td);
720 vgonel(vp, td);
721 VI_LOCK(vp);
722 } else
723 VOP_UNLOCK(vp, 0, td);
724 vn_finished_write(vnmp);
725 return (0);
726 done:
727 VOP_UNLOCK(vp, 0, td);
728 vn_finished_write(vnmp);
729 return (error);
730 }
731
732 /*
733 * Return the next vnode from the free list.
734 */
735 int
736 getnewvnode(tag, mp, vops, vpp)
737 const char *tag;
738 struct mount *mp;
739 vop_t **vops;
740 struct vnode **vpp;
741 {
742 struct vnode *vp = NULL;
743 struct vpollinfo *pollinfo = NULL;
744
745 mtx_lock(&vnode_free_list_mtx);
746
747 /*
748 * Try to reuse vnodes if we hit the max. This only occurs on
749 * certain large-memory (2G+) machines. We cannot
750 * attempt to directly reclaim vnodes due to nasty recursion
751 * problems.
752 */
753 while (numvnodes - freevnodes > desiredvnodes) {
754 if (vnlruproc_sig == 0) {
755 vnlruproc_sig = 1; /* avoid unnecessary wakeups */
756 wakeup(vnlruproc);
757 }
758 mtx_unlock(&vnode_free_list_mtx);
759 tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
760 mtx_lock(&vnode_free_list_mtx);
761 }
762
763 /*
764 * Attempt to reuse a vnode already on the free list, allocating
765 * a new vnode if we can't find one or if we have not reached a
766 * good minimum for good LRU performance.
767 */
768
769 if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
770 int error;
771 int count;
772
773 for (count = 0; count < freevnodes; count++) {
774 vp = TAILQ_FIRST(&vnode_free_list);
775
776 KASSERT(vp->v_usecount == 0 &&
777 (vp->v_iflag & VI_DOINGINACT) == 0,
778 ("getnewvnode: free vnode isn't"));
779
780 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
781 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
782 mtx_unlock(&vnode_free_list_mtx);
783 error = vtryrecycle(vp);
784 mtx_lock(&vnode_free_list_mtx);
785 if (error == 0)
786 break;
787 vp = NULL;
788 }
789 }
790 if (vp) {
791 freevnodes--;
792 mtx_unlock(&vnode_free_list_mtx);
793
794 #ifdef INVARIANTS
795 {
796 if (vp->v_data)
797 panic("cleaned vnode isn't");
798 if (vp->v_numoutput)
799 panic("Clean vnode has pending I/O's");
800 if (vp->v_writecount != 0)
801 panic("Non-zero write count");
802 }
803 #endif
804 if ((pollinfo = vp->v_pollinfo) != NULL) {
805 /*
806 * To avoid lock order reversals, the call to
807 * uma_zfree() must be delayed until the vnode
808 * interlock is released.
809 */
810 vp->v_pollinfo = NULL;
811 }
812 #ifdef MAC
813 mac_destroy_vnode(vp);
814 #endif
815 vp->v_iflag = 0;
816 vp->v_vflag = 0;
817 vp->v_lastw = 0;
818 vp->v_lasta = 0;
819 vp->v_cstart = 0;
820 vp->v_clen = 0;
821 vp->v_socket = 0;
822 lockdestroy(vp->v_vnlock);
823 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
824 KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
825 KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
826 KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
827 KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
828 } else {
829 numvnodes++;
830 mtx_unlock(&vnode_free_list_mtx);
831
832 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
833 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
834 VI_LOCK(vp);
835 vp->v_dd = vp;
836 vp->v_vnlock = &vp->v_lock;
837 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
838 cache_purge(vp); /* Sets up v_id. */
839 LIST_INIT(&vp->v_cache_src);
840 TAILQ_INIT(&vp->v_cache_dst);
841 }
842
843 TAILQ_INIT(&vp->v_cleanblkhd);
844 TAILQ_INIT(&vp->v_dirtyblkhd);
845 vp->v_type = VNON;
846 vp->v_tag = tag;
847 vp->v_op = vops;
848 *vpp = vp;
849 vp->v_usecount = 1;
850 vp->v_data = 0;
851 vp->v_cachedid = -1;
852 VI_UNLOCK(vp);
853 if (pollinfo != NULL) {
854 knlist_destroy(&pollinfo->vpi_selinfo.si_note);
855 mtx_destroy(&pollinfo->vpi_lock);
856 uma_zfree(vnodepoll_zone, pollinfo);
857 }
858 #ifdef MAC
859 mac_init_vnode(vp);
860 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
861 mac_associate_vnode_singlelabel(mp, vp);
862 #endif
863 delmntque(vp);
864 if (mp != NULL) {
865 insmntque(vp, mp);
866 vp->v_bsize = mp->mnt_stat.f_iosize;
867 }
868
869 return (0);
870 }
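
/*
 * Editor's sketch (not part of the original file): the usual shape of
 * a filesystem lookup/vget path that allocates a vnode.  The per-fs
 * names (myfs, myfs_vnodeop_p, ip) are hypothetical.
 */
#if 0
	struct vnode *vp;
	int error;

	error = getnewvnode("myfs", mp, myfs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_data = ip;	/* hang the per-fs node off v_data */
	vp->v_type = VREG;	/* or whatever the on-disk node says */
	*vpp = vp;
#endif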
871
872 /*
873 * Delete from old mount point vnode list, if on one.
874 */
875 static void
876 delmntque(struct vnode *vp)
877 {
878 struct mount *mp;
879
880 if (vp->v_mount == NULL)
881 return;
882 mp = vp->v_mount;
883 MNT_ILOCK(mp);
884 vp->v_mount = NULL;
885 KASSERT(mp->mnt_nvnodelistsize > 0,
886 ("bad mount point vnode list size"));
887 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
888 mp->mnt_nvnodelistsize--;
889 MNT_IUNLOCK(mp);
890 }
891
892 /*
893 * Insert into list of vnodes for the new mount point, if available.
894 */
895 static void
896 insmntque(struct vnode *vp, struct mount *mp)
897 {
898
899 vp->v_mount = mp;
900 KASSERT(mp != NULL, ("Don't call insmntque(foo, NULL)"));
901 MNT_ILOCK(vp->v_mount);
902 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
903 mp->mnt_nvnodelistsize++;
904 MNT_IUNLOCK(vp->v_mount);
905 }
906
907 /*
908 * Update outstanding I/O count and do wakeup if requested.
909 */
910 void
911 vwakeup(bp)
912 register struct buf *bp;
913 {
914 register struct vnode *vp;
915
916 bp->b_flags &= ~B_WRITEINPROG;
917 if ((vp = bp->b_vp)) {
918 VI_LOCK(vp);
919 vp->v_numoutput--;
920 if (vp->v_numoutput < 0)
921 panic("vwakeup: neg numoutput");
922 if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
923 vp->v_iflag &= ~VI_BWAIT;
924 wakeup(&vp->v_numoutput);
925 }
926 VI_UNLOCK(vp);
927 }
928 }
929
930 /*
931 * Flush out and invalidate all buffers associated with a vnode.
932 * Called with the underlying object locked.
933 */
934 int
935 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
936 struct vnode *vp;
937 int flags;
938 struct ucred *cred;
939 struct thread *td;
940 int slpflag, slptimeo;
941 {
942 struct buf *blist;
943 int error;
944 vm_object_t object;
945
946 GIANT_REQUIRED;
947
948 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
949
950 VI_LOCK(vp);
951 if (flags & V_SAVE) {
952 while (vp->v_numoutput) {
953 vp->v_iflag |= VI_BWAIT;
954 error = msleep(&vp->v_numoutput, VI_MTX(vp),
955 slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
956 if (error) {
957 VI_UNLOCK(vp);
958 return (error);
959 }
960 }
961 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
962 VI_UNLOCK(vp);
963 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
964 return (error);
965 /*
966 * XXX We could save a lock/unlock if this was only
967 * enabled under INVARIANTS
968 */
969 VI_LOCK(vp);
970 if (vp->v_numoutput > 0 ||
971 !TAILQ_EMPTY(&vp->v_dirtyblkhd))
972 panic("vinvalbuf: dirty bufs");
973 }
974 }
975 /*
976 * If you alter this loop please notice that interlock is dropped and
977 * reacquired in flushbuflist. Special care is needed to ensure that
978 * no race conditions occur from this.
979 */
980 for (error = 0;;) {
981 if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
982 flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
983 if (error)
984 break;
985 continue;
986 }
987 if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
988 flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
989 if (error)
990 break;
991 continue;
992 }
993 break;
994 }
995 if (error) {
996 VI_UNLOCK(vp);
997 return (error);
998 }
999
1000 /*
1001 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1002 * have write I/O in-progress but if there is a VM object then the
1003 * VM object can also have read-I/O in-progress.
1004 */
1005 do {
1006 while (vp->v_numoutput > 0) {
1007 vp->v_iflag |= VI_BWAIT;
1008 msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1009 }
1010 VI_UNLOCK(vp);
1011 if (VOP_GETVOBJECT(vp, &object) == 0) {
1012 VM_OBJECT_LOCK(object);
1013 vm_object_pip_wait(object, "vnvlbx");
1014 VM_OBJECT_UNLOCK(object);
1015 }
1016 VI_LOCK(vp);
1017 } while (vp->v_numoutput > 0);
1018 VI_UNLOCK(vp);
1019
1020 /*
1021 * Destroy the copy in the VM cache, too.
1022 */
1023 if (VOP_GETVOBJECT(vp, &object) == 0) {
1024 VM_OBJECT_LOCK(object);
1025 vm_object_page_remove(object, 0, 0,
1026 (flags & V_SAVE) ? TRUE : FALSE);
1027 VM_OBJECT_UNLOCK(object);
1028 }
1029
1030 #ifdef INVARIANTS
1031 VI_LOCK(vp);
1032 if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1033 (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1034 !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1035 panic("vinvalbuf: flush failed");
1036 VI_UNLOCK(vp);
1037 #endif
1038 return (0);
1039 }
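
/*
 * Editor's sketch (not part of the original file): a typical caller
 * flushes (V_SAVE) or discards all buffers while holding the vnode
 * lock, e.g. before reclaiming or revoking a vnode:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = vinvalbuf(vp, V_SAVE, cred, td, 0, 0);
 *	VOP_UNLOCK(vp, 0, td);
 */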
1040
1041 /*
1042 * Flush out buffers on the specified list.
1043 *
1044 */
1045 static int
1046 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1047 struct buf *blist;
1048 int flags;
1049 struct vnode *vp;
1050 int slpflag, slptimeo;
1051 int *errorp;
1052 {
1053 struct buf *bp, *nbp;
1054 int found, error;
1055
1056 ASSERT_VI_LOCKED(vp, "flushbuflist");
1057
1058 for (found = 0, bp = blist; bp; bp = nbp) {
1059 nbp = TAILQ_NEXT(bp, b_vnbufs);
1060 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1061 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1062 continue;
1063 }
1064 found += 1;
1065 error = BUF_TIMELOCK(bp,
1066 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1067 "flushbuf", slpflag, slptimeo);
1068 if (error) {
1069 if (error != ENOLCK)
1070 *errorp = error;
1071 goto done;
1072 }
1073 /*
1074 * XXX Since there are no node locks for NFS, I
1075 * believe there is a slight chance that a delayed
1076 * write will occur while sleeping just above, so
1077 * check for it. Note that vfs_bio_awrite expects
1078 * buffers to reside on a queue, while bwrite and
1079 * brelse do not.
1080 */
1081 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1082 (flags & V_SAVE)) {
1083
1084 if (bp->b_vp == vp) {
1085 if (bp->b_flags & B_CLUSTEROK) {
1086 vfs_bio_awrite(bp);
1087 } else {
1088 bremfree(bp);
1089 bp->b_flags |= B_ASYNC;
1090 bwrite(bp);
1091 }
1092 } else {
1093 bremfree(bp);
1094 (void) bwrite(bp);
1095 }
1096 goto done;
1097 }
1098 bremfree(bp);
1099 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1100 bp->b_flags &= ~B_ASYNC;
1101 brelse(bp);
1102 VI_LOCK(vp);
1103 }
1104 return (found);
1105 done:
1106 VI_LOCK(vp);
1107 return (found);
1108 }
1109
1110 /*
1111 * Truncate a file's buffer and pages to a specified length. This
1112 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1113 * sync activity.
1114 */
1115 int
1116 vtruncbuf(vp, cred, td, length, blksize)
1117 register struct vnode *vp;
1118 struct ucred *cred;
1119 struct thread *td;
1120 off_t length;
1121 int blksize;
1122 {
1123 register struct buf *bp;
1124 struct buf *nbp;
1125 int anyfreed;
1126 int trunclbn;
1127
1128 /*
1129 * Round up to the *next* lbn.
1130 */
1131 trunclbn = (length + blksize - 1) / blksize;
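	/*
	 * Editor's note (worked example): with blksize 8192, truncating
	 * to length 12288 gives trunclbn (12288 + 8191) / 8192 = 2, so
	 * logical blocks 0 and 1 are kept and every buffer with
	 * b_lblkno >= 2 is invalidated below.
	 */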
1132
1133 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1134 restart:
1135 VI_LOCK(vp);
1136 anyfreed = 1;
1137 for (;anyfreed;) {
1138 anyfreed = 0;
1139 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1140 nbp = TAILQ_NEXT(bp, b_vnbufs);
1141 if (bp->b_lblkno >= trunclbn) {
1142 if (BUF_LOCK(bp,
1143 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1144 VI_MTX(vp)) == ENOLCK)
1145 goto restart;
1146
1147 bremfree(bp);
1148 bp->b_flags |= (B_INVAL | B_RELBUF);
1149 bp->b_flags &= ~B_ASYNC;
1150 brelse(bp);
1151 anyfreed = 1;
1152
1153 if (nbp &&
1154 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1155 (nbp->b_vp != vp) ||
1156 (nbp->b_flags & B_DELWRI))) {
1157 goto restart;
1158 }
1159 VI_LOCK(vp);
1160 }
1161 }
1162
1163 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1164 nbp = TAILQ_NEXT(bp, b_vnbufs);
1165 if (bp->b_lblkno >= trunclbn) {
1166 if (BUF_LOCK(bp,
1167 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1168 VI_MTX(vp)) == ENOLCK)
1169 goto restart;
1170 bremfree(bp);
1171 bp->b_flags |= (B_INVAL | B_RELBUF);
1172 bp->b_flags &= ~B_ASYNC;
1173 brelse(bp);
1174 anyfreed = 1;
1175 if (nbp &&
1176 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1177 (nbp->b_vp != vp) ||
1178 (nbp->b_flags & B_DELWRI) == 0)) {
1179 goto restart;
1180 }
1181 VI_LOCK(vp);
1182 }
1183 }
1184 }
1185
1186 if (length > 0) {
1187 restartsync:
1188 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1189 nbp = TAILQ_NEXT(bp, b_vnbufs);
1190 if (bp->b_lblkno > 0)
1191 continue;
1192 /*
1193 * Since we hold the vnode lock this should only
1194 * fail if we're racing with the buf daemon.
1195 */
1196 if (BUF_LOCK(bp,
1197 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1198 VI_MTX(vp)) == ENOLCK) {
1199 goto restart;
1200 }
1201 KASSERT((bp->b_flags & B_DELWRI),
1202 ("buf(%p) on dirty queue without DELWRI", bp));
1203
1204 bremfree(bp);
1205 bawrite(bp);
1206 VI_LOCK(vp);
1207 goto restartsync;
1208 }
1209 }
1210
1211 while (vp->v_numoutput > 0) {
1212 vp->v_iflag |= VI_BWAIT;
1213 msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1214 }
1215 VI_UNLOCK(vp);
1216 vnode_pager_setsize(vp, length);
1217
1218 return (0);
1219 }
1220
1221 /*
1222 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1223 * a vnode.
1224 *
1225 * NOTE: We have to deal with the special case of a background bitmap
1226 * buffer, a situation where two buffers will have the same logical
1227 * block offset. We want (1) only the foreground buffer to be accessed
1228 * in a lookup and (2) must differentiate between the foreground and
1229 * background buffer in the splay tree algorithm because the splay
1230 * tree cannot normally handle multiple entities with the same 'index'.
1231 * We accomplish this by adding differentiating flags to the splay tree's
1232 * numerical domain.
1233 */
1234 static
1235 struct buf *
1236 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1237 {
1238 struct buf dummy;
1239 struct buf *lefttreemax, *righttreemin, *y;
1240
1241 if (root == NULL)
1242 return (NULL);
1243 lefttreemax = righttreemin = &dummy;
1244 for (;;) {
1245 if (lblkno < root->b_lblkno ||
1246 (lblkno == root->b_lblkno &&
1247 (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1248 if ((y = root->b_left) == NULL)
1249 break;
1250 if (lblkno < y->b_lblkno) {
1251 /* Rotate right. */
1252 root->b_left = y->b_right;
1253 y->b_right = root;
1254 root = y;
1255 if ((y = root->b_left) == NULL)
1256 break;
1257 }
1258 /* Link into the new root's right tree. */
1259 righttreemin->b_left = root;
1260 righttreemin = root;
1261 } else if (lblkno > root->b_lblkno ||
1262 (lblkno == root->b_lblkno &&
1263 (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1264 if ((y = root->b_right) == NULL)
1265 break;
1266 if (lblkno > y->b_lblkno) {
1267 /* Rotate left. */
1268 root->b_right = y->b_left;
1269 y->b_left = root;
1270 root = y;
1271 if ((y = root->b_right) == NULL)
1272 break;
1273 }
1274 /* Link into the new root's left tree. */
1275 lefttreemax->b_right = root;
1276 lefttreemax = root;
1277 } else {
1278 break;
1279 }
1280 root = y;
1281 }
1282 /* Assemble the new root. */
1283 lefttreemax->b_right = root->b_left;
1284 righttreemin->b_left = root->b_right;
1285 root->b_left = dummy.b_right;
1286 root->b_right = dummy.b_left;
1287 return (root);
1288 }
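
/*
 * Editor's sketch (not part of the original file): the composite
 * ordering buf_splay() applies, written out as a comparison helper.
 * The BX_BKGRDMARKER bit extends the key domain so that a background
 * buffer sorts after its foreground twin at the same logical block.
 */
#if 0
static __inline int
buf_cmp(daddr_t lblkno, b_xflags_t xflags, struct buf *bp)
{

	if (lblkno < bp->b_lblkno)
		return (-1);
	if (lblkno > bp->b_lblkno)
		return (1);
	if ((xflags & BX_BKGRDMARKER) < (bp->b_xflags & BX_BKGRDMARKER))
		return (-1);
	if ((xflags & BX_BKGRDMARKER) > (bp->b_xflags & BX_BKGRDMARKER))
		return (1);
	return (0);
}
#endif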
1289
1290 static
1291 void
1292 buf_vlist_remove(struct buf *bp)
1293 {
1294 struct vnode *vp = bp->b_vp;
1295 struct buf *root;
1296
1297 ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1298 if (bp->b_xflags & BX_VNDIRTY) {
1299 if (bp != vp->v_dirtyblkroot) {
1300 root = buf_splay(bp->b_lblkno, bp->b_xflags,
1301 vp->v_dirtyblkroot);
1302 KASSERT(root == bp,
1303 ("splay lookup failed during dirty remove"));
1304 }
1305 if (bp->b_left == NULL) {
1306 root = bp->b_right;
1307 } else {
1308 root = buf_splay(bp->b_lblkno, bp->b_xflags,
1309 bp->b_left);
1310 root->b_right = bp->b_right;
1311 }
1312 vp->v_dirtyblkroot = root;
1313 TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1314 vp->v_dirtybufcnt--;
1315 } else {
1316 /* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1317 if (bp != vp->v_cleanblkroot) {
1318 root = buf_splay(bp->b_lblkno, bp->b_xflags,
1319 vp->v_cleanblkroot);
1320 KASSERT(root == bp,
1321 ("splay lookup failed during clean remove"));
1322 }
1323 if (bp->b_left == NULL) {
1324 root = bp->b_right;
1325 } else {
1326 root = buf_splay(bp->b_lblkno, bp->b_xflags,
1327 bp->b_left);
1328 root->b_right = bp->b_right;
1329 }
1330 vp->v_cleanblkroot = root;
1331 TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1332 vp->v_cleanbufcnt--;
1333 }
1334 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1335 }
1336
1337 /*
1338 * Add the buffer to the sorted clean or dirty block list using a
1339 * splay tree algorithm.
1340 *
1341 * NOTE: xflags is passed as a constant, optimizing this inline function!
1342 */
1343 static
1344 void
1345 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1346 {
1347 struct buf *root;
1348
1349 ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1350 bp->b_xflags |= xflags;
1351 if (xflags & BX_VNDIRTY) {
1352 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1353 if (root == NULL) {
1354 bp->b_left = NULL;
1355 bp->b_right = NULL;
1356 TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1357 } else if (bp->b_lblkno < root->b_lblkno ||
1358 (bp->b_lblkno == root->b_lblkno &&
1359 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1360 bp->b_left = root->b_left;
1361 bp->b_right = root;
1362 root->b_left = NULL;
1363 TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1364 } else {
1365 bp->b_right = root->b_right;
1366 bp->b_left = root;
1367 root->b_right = NULL;
1368 TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1369 root, bp, b_vnbufs);
1370 }
1371 vp->v_dirtybufcnt++;
1372 vp->v_dirtyblkroot = bp;
1373 } else {
1374 /* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1375 root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1376 if (root == NULL) {
1377 bp->b_left = NULL;
1378 bp->b_right = NULL;
1379 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1380 } else if (bp->b_lblkno < root->b_lblkno ||
1381 (bp->b_lblkno == root->b_lblkno &&
1382 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1383 bp->b_left = root->b_left;
1384 bp->b_right = root;
1385 root->b_left = NULL;
1386 TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1387 } else {
1388 bp->b_right = root->b_right;
1389 bp->b_left = root;
1390 root->b_right = NULL;
1391 TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1392 root, bp, b_vnbufs);
1393 }
1394 vp->v_cleanbufcnt++;
1395 vp->v_cleanblkroot = bp;
1396 }
1397 }
1398
1399 /*
1400 * Lookup a buffer using the splay tree. Note that we specifically avoid
1401 * shadow buffers used in background bitmap writes.
1402 *
1403 * This code isn't quite as efficient as it could be because we are
1404 * maintaining two sorted lists and do not know which list the block resides in.
1405 *
1406 * During a "make buildworld" the desired buffer is found at one of
1407 * the roots more than 60% of the time. Thus, checking both roots
1408 * before performing either splay eliminates unnecessary splays on the
1409 * first tree splayed.
1410 */
1411 struct buf *
1412 gbincore(struct vnode *vp, daddr_t lblkno)
1413 {
1414 struct buf *bp;
1415
1416 GIANT_REQUIRED;
1417
1418 ASSERT_VI_LOCKED(vp, "gbincore");
1419 if ((bp = vp->v_cleanblkroot) != NULL &&
1420 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1421 return (bp);
1422 if ((bp = vp->v_dirtyblkroot) != NULL &&
1423 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1424 return (bp);
1425 if ((bp = vp->v_cleanblkroot) != NULL) {
1426 vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
1427 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1428 return (bp);
1429 }
1430 if ((bp = vp->v_dirtyblkroot) != NULL) {
1431 vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
1432 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1433 return (bp);
1434 }
1435 return (NULL);
1436 }
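
/*
 * Editor's sketch (not part of the original file): gbincore() must be
 * called with the vnode interlock held, as its callers do:
 *
 *	VI_LOCK(vp);
 *	bp = gbincore(vp, blkno);
 *	VI_UNLOCK(vp);
 */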
1437
1438 /*
1439 * Associate a buffer with a vnode.
1440 */
1441 void
1442 bgetvp(vp, bp)
1443 register struct vnode *vp;
1444 register struct buf *bp;
1445 {
1446
1447 KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1448
1449 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1450 ("bgetvp: bp already attached! %p", bp));
1451
1452 ASSERT_VI_LOCKED(vp, "bgetvp");
1453 vholdl(vp);
1454 bp->b_vp = vp;
1455 bp->b_dev = vn_todev(vp);
1456 /*
1457 * Insert onto list for new vnode.
1458 */
1459 buf_vlist_add(bp, vp, BX_VNCLEAN);
1460 }
1461
1462 /*
1463 * Disassociate a buffer from a vnode.
1464 */
1465 void
1466 brelvp(bp)
1467 register struct buf *bp;
1468 {
1469 struct vnode *vp;
1470
1471 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1472
1473 /*
1474 * Delete from old vnode list, if on one.
1475 */
1476 vp = bp->b_vp;
1477 VI_LOCK(vp);
1478 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1479 buf_vlist_remove(bp);
1480 if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1481 vp->v_iflag &= ~VI_ONWORKLST;
1482 mtx_lock(&sync_mtx);
1483 LIST_REMOVE(vp, v_synclist);
1484 syncer_worklist_len--;
1485 mtx_unlock(&sync_mtx);
1486 }
1487 vdropl(vp);
1488 bp->b_vp = (struct vnode *) 0;
1489 if (bp->b_object)
1490 bp->b_object = NULL;
1491 VI_UNLOCK(vp);
1492 }
1493
1494 /*
1495 * Add an item to the syncer work queue.
1496 */
1497 static void
1498 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1499 {
1500 int slot;
1501
1502 ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1503
1504 mtx_lock(&sync_mtx);
1505 if (vp->v_iflag & VI_ONWORKLST)
1506 LIST_REMOVE(vp, v_synclist);
1507 else {
1508 vp->v_iflag |= VI_ONWORKLST;
1509 syncer_worklist_len++;
1510 }
1511
1512 if (delay > syncer_maxdelay - 2)
1513 delay = syncer_maxdelay - 2;
1514 slot = (syncer_delayno + delay) & syncer_mask;
1515
1516 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1517 mtx_unlock(&sync_mtx);
1518 }
1519
1520 static int
1521 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1522 {
1523 int error, len;
1524
1525 mtx_lock(&sync_mtx);
1526 len = syncer_worklist_len - sync_vnode_count;
1527 mtx_unlock(&sync_mtx);
1528 error = SYSCTL_OUT(req, &len, sizeof(len));
1529 return (error);
1530 }
1531
1532 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1533 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1534
1535 struct proc *updateproc;
1536 static void sched_sync(void);
1537 static struct kproc_desc up_kp = {
1538 "syncer",
1539 sched_sync,
1540 &updateproc
1541 };
1542 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1543
1544 /*
1545 * System filesystem synchronizer daemon.
1546 */
1547 static void
1548 sched_sync(void)
1549 {
1550 struct synclist *next;
1551 struct synclist *slp;
1552 struct vnode *vp;
1553 struct mount *mp;
1554 long starttime;
1555 struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1556 static int dummychan;
1557 int last_work_seen;
1558 int net_worklist_len;
1559 int syncer_final_iter;
1560 int first_printf;
1561
1562 mtx_lock(&Giant);
1563 last_work_seen = 0;
1564 syncer_final_iter = 0;
1565 first_printf = 1;
1566 syncer_state = SYNCER_RUNNING;
1567 starttime = time_second;
1568
1569 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1570 SHUTDOWN_PRI_LAST);
1571
1572 for (;;) {
1573 mtx_lock(&sync_mtx);
1574 if (syncer_state == SYNCER_FINAL_DELAY &&
1575 syncer_final_iter == 0) {
1576 mtx_unlock(&sync_mtx);
1577 kthread_suspend_check(td->td_proc);
1578 mtx_lock(&sync_mtx);
1579 }
1580 net_worklist_len = syncer_worklist_len - sync_vnode_count;
1581 if (syncer_state != SYNCER_RUNNING &&
1582 starttime != time_second) {
1583 if (first_printf) {
1584 printf("\nSyncing disks, vnodes remaining...");
1585 first_printf = 0;
1586 }
1587 printf("%d ", net_worklist_len);
1588 }
1589 starttime = time_second;
1590
1591 /*
1592 * Push files whose dirty time has expired. Be careful
1593 * of interrupt race on slp queue.
1594 *
1595 * Skip over empty worklist slots when shutting down.
1596 */
1597 do {
1598 slp = &syncer_workitem_pending[syncer_delayno];
1599 syncer_delayno += 1;
1600 if (syncer_delayno == syncer_maxdelay)
1601 syncer_delayno = 0;
1602 next = &syncer_workitem_pending[syncer_delayno];
1603 /*
1604 * If the worklist has wrapped since the time
1605 * it was emptied of all but syncer vnodes,
1606 * switch to the FINAL_DELAY state and run
1607 * for one more second.
1608 */
1609 if (syncer_state == SYNCER_SHUTTING_DOWN &&
1610 net_worklist_len == 0 &&
1611 last_work_seen == syncer_delayno) {
1612 syncer_state = SYNCER_FINAL_DELAY;
1613 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1614 }
1615 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1616 syncer_worklist_len > 0);
1617
1618 /*
1619 * Keep track of the last time there was anything
1620 * on the worklist other than syncer vnodes.
1621 * Return to the SHUTTING_DOWN state if any
1622 * new work appears.
1623 */
1624 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1625 last_work_seen = syncer_delayno;
1626 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1627 syncer_state = SYNCER_SHUTTING_DOWN;
1628 while ((vp = LIST_FIRST(slp)) != NULL) {
1629 if (VOP_ISLOCKED(vp, NULL) != 0 ||
1630 vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1631 LIST_REMOVE(vp, v_synclist);
1632 LIST_INSERT_HEAD(next, vp, v_synclist);
1633 continue;
1634 }
1635 if (VI_TRYLOCK(vp) == 0) {
1636 LIST_REMOVE(vp, v_synclist);
1637 LIST_INSERT_HEAD(next, vp, v_synclist);
1638 vn_finished_write(mp);
1639 continue;
1640 }
1641 /*
1642 * We use vhold in case the vnode does not
1643 * successfully sync. vhold prevents the vnode from
1644 * going away when we unlock the sync_mtx so that
1645 * we can acquire the vnode interlock.
1646 */
1647 vholdl(vp);
1648 mtx_unlock(&sync_mtx);
1649 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
1650 (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1651 VOP_UNLOCK(vp, 0, td);
1652 vn_finished_write(mp);
1653 VI_LOCK(vp);
1654 if ((vp->v_iflag & VI_ONWORKLST) != 0) {
1655 /*
1656 * Put us back on the worklist. The worklist
1657 * routine will remove us from our current
1658 * position and then add us back in at a later
1659 * position.
1660 */
1661 vn_syncer_add_to_worklist(vp, syncdelay);
1662 }
1663 vdropl(vp);
1664 VI_UNLOCK(vp);
1665 mtx_lock(&sync_mtx);
1666 }
1667 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1668 syncer_final_iter--;
1669 mtx_unlock(&sync_mtx);
1670
1671 /*
1672 * Do soft update processing.
1673 */
1674 if (softdep_process_worklist_hook != NULL)
1675 (*softdep_process_worklist_hook)(NULL);
1676
1677 /*
1678 * The variable rushjob allows the kernel to speed up the
1679 * processing of the filesystem syncer process. A rushjob
1680 * value of N tells the filesystem syncer to process the next
1681 * N seconds worth of work on its queue ASAP. Currently rushjob
1682 * is used by the soft update code to speed up the filesystem
1683 * syncer process when the incore state is getting so far
1684 * ahead of the disk that the kernel memory pool is being
1685 * threatened with exhaustion.
1686 */
1687 mtx_lock(&sync_mtx);
1688 if (rushjob > 0) {
1689 rushjob -= 1;
1690 mtx_unlock(&sync_mtx);
1691 continue;
1692 }
1693 mtx_unlock(&sync_mtx);
1694 /*
1695 * Just sleep for a short period of time between
1696 * iterations when shutting down to allow some I/O
1697 * to happen.
1698 *
1699 * If it has taken us less than a second to process the
1700 * current work, then wait. Otherwise start right over
1701 * again. We can still lose time if any single round
1702 * takes more than two seconds, but it does not really
1703 * matter as we are just trying to generally pace the
1704 * filesystem activity.
1705 */
1706 if (syncer_state != SYNCER_RUNNING)
1707 tsleep(&dummychan, PPAUSE, "syncfnl",
1708 hz / SYNCER_SHUTDOWN_SPEEDUP);
1709 else if (time_second == starttime)
1710 tsleep(&lbolt, PPAUSE, "syncer", 0);
1711 }
1712 }
1713
1714 /*
1715 * Request the syncer daemon to speed up its work.
1716 * We never push it to speed up more than half of its
1717 * normal turn time, otherwise it could take over the cpu.
1718 */
1719 int
1720 speedup_syncer()
1721 {
1722 struct thread *td;
1723 int ret = 0;
1724
1725 td = FIRST_THREAD_IN_PROC(updateproc);
1726 sleepq_remove(td, &lbolt);
1727 mtx_lock(&sync_mtx);
1728 if (rushjob < syncdelay / 2) {
1729 rushjob += 1;
1730 stat_rush_requests += 1;
1731 ret = 1;
1732 }
1733 mtx_unlock(&sync_mtx);
1734 return (ret);
1735 }
1736
1737 /*
1738 * Tell the syncer to speed up its work and run through its work
1739 * list several times, then tell it to shut down.
1740 */
1741 static void
1742 syncer_shutdown(void *arg, int howto)
1743 {
1744 struct thread *td;
1745
1746 if (howto & RB_NOSYNC)
1747 return;
1748 td = FIRST_THREAD_IN_PROC(updateproc);
1749 sleepq_remove(td, &lbolt);
1750 mtx_lock(&sync_mtx);
1751 syncer_state = SYNCER_SHUTTING_DOWN;
1752 rushjob = 0;
1753 mtx_unlock(&sync_mtx);
1754 kproc_shutdown(arg, howto);
1755 }
1756
1757 /*
1758 * Associate a p-buffer with a vnode.
1759 *
1760 * Also sets B_PAGING flag to indicate that vnode is not fully associated
1761 * with the buffer, i.e., the bp has not been linked into the vnode or
1762 * ref-counted.
1763 */
1764 void
1765 pbgetvp(vp, bp)
1766 register struct vnode *vp;
1767 register struct buf *bp;
1768 {
1769
1770 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1771
1772 bp->b_vp = vp;
1773 bp->b_object = vp->v_object;
1774 bp->b_flags |= B_PAGING;
1775 bp->b_dev = vn_todev(vp);
1776 }
1777
1778 /*
1779 * Disassociate a p-buffer from a vnode.
1780 */
1781 void
1782 pbrelvp(bp)
1783 register struct buf *bp;
1784 {
1785
1786 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1787
1788 /* XXX REMOVE ME */
1789 VI_LOCK(bp->b_vp);
1790 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1791 panic(
1792 "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1793 bp,
1794 (int)bp->b_flags
1795 );
1796 }
1797 VI_UNLOCK(bp->b_vp);
1798 bp->b_vp = (struct vnode *) 0;
1799 bp->b_object = NULL;
1800 bp->b_flags &= ~B_PAGING;
1801 }
1802
1803 /*
1804 * Reassign a buffer from one vnode to another.
1805 * Used to assign file specific control information
1806 * (indirect blocks) to the vnode to which they belong.
1807 */
1808 void
1809 reassignbuf(struct buf *bp)
1810 {
1811 struct vnode *vp;
1812 int delay;
1813
1814 vp = bp->b_vp;
1815 ++reassignbufcalls;
1816
1817 /*
1818 * B_PAGING flagged buffers cannot be reassigned because their vp
1819 * is not fully linked in.
1820 */
1821 if (bp->b_flags & B_PAGING)
1822 panic("cannot reassign paging buffer");
1823
1824 /*
1825 * Delete from old vnode list, if on one.
1826 */
1827 VI_LOCK(vp);
1828 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1829 buf_vlist_remove(bp);
1830 /*
1831 * If dirty, put on list of dirty buffers; otherwise insert onto list
1832 * of clean buffers.
1833 */
1834 if (bp->b_flags & B_DELWRI) {
1835 if ((vp->v_iflag & VI_ONWORKLST) == 0) {
1836 switch (vp->v_type) {
1837 case VDIR:
1838 delay = dirdelay;
1839 break;
1840 case VCHR:
1841 delay = metadelay;
1842 break;
1843 default:
1844 delay = filedelay;
1845 }
1846 vn_syncer_add_to_worklist(vp, delay);
1847 }
1848 buf_vlist_add(bp, vp, BX_VNDIRTY);
1849 } else {
1850 buf_vlist_add(bp, vp, BX_VNCLEAN);
1851
1852 if ((vp->v_iflag & VI_ONWORKLST) &&
1853 TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1854 mtx_lock(&sync_mtx);
1855 LIST_REMOVE(vp, v_synclist);
1856 syncer_worklist_len--;
1857 mtx_unlock(&sync_mtx);
1858 vp->v_iflag &= ~VI_ONWORKLST;
1859 }
1860 }
1861 VI_UNLOCK(vp);
1862 }
1863
1864 /*
1865 * Create a vnode for a device.
1866 * Used for mounting the root filesystem.
1867 */
1868 int
1869 bdevvp(dev, vpp)
1870 struct cdev *dev;
1871 struct vnode **vpp;
1872 {
1873 register struct vnode *vp;
1874 struct vnode *nvp;
1875 int error;
1876
1877 if (dev == NULL) {
1878 *vpp = NULLVP;
1879 return (ENXIO);
1880 }
1881 if (vfinddev(dev, vpp))
1882 return (0);
1883
1884 error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1885 if (error) {
1886 *vpp = NULLVP;
1887 return (error);
1888 }
1889 vp = nvp;
1890 vp->v_type = VCHR;
1891 vp->v_bsize = DEV_BSIZE;
1892 addalias(vp, dev);
1893 *vpp = vp;
1894 return (0);
1895 }
1896
1897 static void
1898 v_incr_usecount(struct vnode *vp, int delta)
1899 {
1900
1901 vp->v_usecount += delta;
1902 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1903 mtx_lock(&spechash_mtx);
1904 vp->v_rdev->si_usecount += delta;
1905 mtx_unlock(&spechash_mtx);
1906 }
1907 }
1908
1909 /*
1910 * Add vnode to the alias list hung off the struct cdev *.
1911 *
1912 * The reason for this gunk is that multiple vnodes can reference
1913 * the same physical device, so checking vp->v_usecount to see
1914 * how many users there are is inadequate; the v_usecount for
1915 * the vnodes need to be accumulated. vcount() does that.
1916 */
1917 struct vnode *
1918 addaliasu(nvp, nvp_rdev)
1919 struct vnode *nvp;
1920 dev_t nvp_rdev;
1921 {
1922 struct vnode *ovp;
1923 vop_t **ops;
1924 struct cdev *dev;
1925
1926 if (nvp->v_type == VBLK)
1927 return (nvp);
1928 if (nvp->v_type != VCHR)
1929 panic("addaliasu on non-special vnode");
1930 dev = findcdev(nvp_rdev);
1931 if (dev == NULL)
1932 return (nvp);
1933 /*
1934 * Check to see if we have a bdevvp vnode with no associated
1935 * filesystem. If so, we want to associate the filesystem of
1936 * the newly instigated vnode with the bdevvp vnode and
1937 * discard the newly created vnode rather than leaving the
1938 * bdevvp vnode lying around with no associated filesystem.
1939 */
1940 if (vfinddev(dev, &ovp) == 0 || ovp->v_data != NULL) {
1941 addalias(nvp, dev);
1942 return (nvp);
1943 }
1944 /*
1945 * Discard unneeded vnode, but save its node specific data.
1946 * Note that if there is a lock, it is carried over in the
1947 * node specific data to the replacement vnode.
1948 */
1949 vref(ovp);
1950 ovp->v_data = nvp->v_data;
1951 ovp->v_tag = nvp->v_tag;
1952 nvp->v_data = NULL;
1953 lockdestroy(ovp->v_vnlock);
1954 lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
1955 nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
1956 ops = ovp->v_op;
1957 ovp->v_op = nvp->v_op;
1958 if (VOP_ISLOCKED(nvp, curthread)) {
1959 VOP_UNLOCK(nvp, 0, curthread);
1960 vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
1961 }
1962 nvp->v_op = ops;
1963 delmntque(ovp);
1964 insmntque(ovp, nvp->v_mount);
1965 vrele(nvp);
1966 vgone(nvp);
1967 return (ovp);
1968 }
1969
1970 /* This is a local helper function that does the same as addaliasu,
1971  * but for a struct cdev * instead of a dev_t. */
1972 static void
1973 addalias(nvp, dev)
1974 struct vnode *nvp;
1975 struct cdev *dev;
1976 {
1977
1978 KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1979 dev_ref(dev);
1980 nvp->v_rdev = dev;
1981 VI_LOCK(nvp);
1982 mtx_lock(&spechash_mtx);
1983 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1984 dev->si_usecount += nvp->v_usecount;
1985 mtx_unlock(&spechash_mtx);
1986 VI_UNLOCK(nvp);
1987 }
1988
1989 /*
1990 * Grab a particular vnode from the free list, increment its
1991 * reference count and lock it. The vnode lock bit is set if the
1992 * vnode is being eliminated in vgone. The process is awakened
1993 * when the transition is completed, and an error returned to
1994 * indicate that the vnode is no longer usable (possibly having
1995 * been changed to a new filesystem type).
1996 */
1997 int
1998 vget(vp, flags, td)
1999 register struct vnode *vp;
2000 int flags;
2001 struct thread *td;
2002 {
2003 int error;
2004
2005 /*
2006 * If the vnode is in the process of being cleaned out for
2007 * another use, we wait for the cleaning to finish and then
2008 * return failure. Cleaning is determined by checking that
2009 * the VI_XLOCK flag is set.
2010 */
2011 if ((flags & LK_INTERLOCK) == 0)
2012 VI_LOCK(vp);
2013 if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
2014 if ((flags & LK_NOWAIT) == 0) {
2015 vp->v_iflag |= VI_XWANT;
2016 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2017 return (ENOENT);
2018 }
2019 VI_UNLOCK(vp);
2020 return (EBUSY);
2021 }
2022
2023 v_incr_usecount(vp, 1);
2024
2025 if (VSHOULDBUSY(vp))
2026 vbusy(vp);
2027 if (flags & LK_TYPE_MASK) {
2028 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2029 /*
2030 * must expand vrele here because we do not want
2031 * to call VOP_INACTIVE if the reference count
2032 * drops back to zero since it was never really
2033 * active. We must remove it from the free list
2034 * before sleeping so that multiple processes do
2035 * not try to recycle it.
2036 */
2037 VI_LOCK(vp);
2038 v_incr_usecount(vp, -1);
2039 if (VSHOULDFREE(vp))
2040 vfree(vp);
2041 else
2042 vlruvp(vp);
2043 VI_UNLOCK(vp);
2044 }
2045 return (error);
2046 }
2047 VI_UNLOCK(vp);
2048 return (0);
2049 }
2050
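/*
 * Illustrative sketch (not part of the original file): the canonical
 * pattern for safely using a vnode spotted on some list.  vget() takes
 * the reference and, because a lock type is requested, the vnode lock
 * as well; vput() (defined later in this file) undoes both at once.
 * The caller name and context are assumptions.
 */
static int
example_use_vnode(struct vnode *vp, struct thread *td)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE, td);
	if (error != 0)
		return (error);	/* being recycled or could not be locked */
	/* ... operate on the locked, referenced vnode here ... */
	vput(vp);
	return (0);
}
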
2051 /*
2052 * Increase the reference count of a vnode.
2053 */
2054 void
2055 vref(struct vnode *vp)
2056 {
2057
2058 VI_LOCK(vp);
2059 v_incr_usecount(vp, 1);
2060 VI_UNLOCK(vp);
2061 }
2062
2063 /*
2064 * Return reference count of a vnode.
2065 *
2066 * The results of this call are only guaranteed when some mechanism other
2067 * than the VI lock is used to stop other processes from gaining references
2068 * to the vnode. This may be the case if the caller holds the only reference.
2069 * This is also useful when stale data is acceptable as race conditions may
2070 * be accounted for by some other means.
2071 */
2072 int
2073 vrefcnt(struct vnode *vp)
2074 {
2075 int usecnt;
2076
2077 VI_LOCK(vp);
2078 usecnt = vp->v_usecount;
2079 VI_UNLOCK(vp);
2080
2081 return (usecnt);
2082 }
2083
2084
2085 /*
2086 * Vnode put/release.
2087 * If count drops to zero, call inactive routine and return to freelist.
2088 */
2089 void
2090 vrele(vp)
2091 struct vnode *vp;
2092 {
2093 struct thread *td = curthread; /* XXX */
2094
2095 GIANT_REQUIRED;
2096
2097 KASSERT(vp != NULL, ("vrele: null vp"));
2098
2099 VI_LOCK(vp);
2100
2101 /* Skip this v_writecount check if we're going to panic below. */
2102 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2103 ("vrele: missed vn_close"));
2104
2105 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2106 vp->v_usecount == 1)) {
2107 v_incr_usecount(vp, -1);
2108 VI_UNLOCK(vp);
2109
2110 return;
2111 }
2112
2113 if (vp->v_usecount == 1) {
2114 v_incr_usecount(vp, -1);
2115 /*
2116 * We must call VOP_INACTIVE with the node locked. Mark
2117 * as VI_DOINGINACT to avoid recursion.
2118 */
2119 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2120 VI_LOCK(vp);
2121 vp->v_iflag |= VI_DOINGINACT;
2122 VI_UNLOCK(vp);
2123 VOP_INACTIVE(vp, td);
2124 VI_LOCK(vp);
2125 KASSERT(vp->v_iflag & VI_DOINGINACT,
2126 ("vrele: lost VI_DOINGINACT"));
2127 vp->v_iflag &= ~VI_DOINGINACT;
2128 } else
2129 VI_LOCK(vp);
2130 if (VSHOULDFREE(vp))
2131 vfree(vp);
2132 else
2133 vlruvp(vp);
2134 VI_UNLOCK(vp);
2135
2136 } else {
2137 #ifdef DIAGNOSTIC
2138 vprint("vrele: negative ref count", vp);
2139 #endif
2140 VI_UNLOCK(vp);
2141 panic("vrele: negative ref cnt");
2142 }
2143 }
2144
2145 /*
2146  * Release an already locked vnode.  This gives the same effect as
2147  * unlock+vrele(), but takes less time and avoids releasing and
2148  * re-acquiring the lock (as vrele() acquires the lock internally).
2149 */
2150 void
2151 vput(vp)
2152 struct vnode *vp;
2153 {
2154 struct thread *td = curthread; /* XXX */
2155
2156 GIANT_REQUIRED;
2157
2158 KASSERT(vp != NULL, ("vput: null vp"));
2159 VI_LOCK(vp);
2160 /* Skip this v_writecount check if we're going to panic below. */
2161 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2162 ("vput: missed vn_close"));
2163
2164 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2165 vp->v_usecount == 1)) {
2166 v_incr_usecount(vp, -1);
2167 VOP_UNLOCK(vp, LK_INTERLOCK, td);
2168 return;
2169 }
2170
2171 if (vp->v_usecount == 1) {
2172 v_incr_usecount(vp, -1);
2173 /*
2174 * We must call VOP_INACTIVE with the node locked, so
2175 		 * we just need to release the vnode mutex.  Mark
2176 * as VI_DOINGINACT to avoid recursion.
2177 */
2178 vp->v_iflag |= VI_DOINGINACT;
2179 VI_UNLOCK(vp);
2180 VOP_INACTIVE(vp, td);
2181 VI_LOCK(vp);
2182 KASSERT(vp->v_iflag & VI_DOINGINACT,
2183 ("vput: lost VI_DOINGINACT"));
2184 vp->v_iflag &= ~VI_DOINGINACT;
2185 if (VSHOULDFREE(vp))
2186 vfree(vp);
2187 else
2188 vlruvp(vp);
2189 VI_UNLOCK(vp);
2190
2191 } else {
2192 #ifdef DIAGNOSTIC
2193 vprint("vput: negative ref count", vp);
2194 #endif
2195 panic("vput: negative ref cnt");
2196 }
2197 }
2198
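/*
 * Illustrative sketch (not part of the original file): the two
 * equivalent release sequences the comment above contrasts.  vput()
 * is preferred when the vnode is already locked, since vrele() would
 * otherwise have to re-acquire the vnode lock internally.
 */
static void
example_release_locked(struct vnode *vp, struct thread *td __unused)
{
	vput(vp);		/* preferred: unlock and release in one call */
	/*
	 * Equivalent but slower:
	 *	VOP_UNLOCK(vp, 0, td);
	 *	vrele(vp);
	 */
}
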
2199 /*
2200 * Somebody doesn't want the vnode recycled.
2201 */
2202 void
2203 vhold(struct vnode *vp)
2204 {
2205
2206 VI_LOCK(vp);
2207 vholdl(vp);
2208 VI_UNLOCK(vp);
2209 }
2210
2211 void
2212 vholdl(vp)
2213 register struct vnode *vp;
2214 {
2215
2216 vp->v_holdcnt++;
2217 if (VSHOULDBUSY(vp))
2218 vbusy(vp);
2219 }
2220
2221 /*
2222 * Note that there is one less who cares about this vnode. vdrop() is the
2223 * opposite of vhold().
2224 */
2225 void
2226 vdrop(struct vnode *vp)
2227 {
2228
2229 VI_LOCK(vp);
2230 vdropl(vp);
2231 VI_UNLOCK(vp);
2232 }
2233
2234 void
2235 vdropl(vp)
2236 register struct vnode *vp;
2237 {
2238
2239 if (vp->v_holdcnt <= 0)
2240 panic("vdrop: holdcnt");
2241 vp->v_holdcnt--;
2242 if (VSHOULDFREE(vp))
2243 vfree(vp);
2244 else
2245 vlruvp(vp);
2246 }
2247
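/*
 * Illustrative sketch (not part of the original file): the hold count
 * protects a vnode from being recycled while it sits on some private
 * list, independently of the usecount managed by vref()/vrele() above.
 */
static void
example_keep_on_private_list(struct vnode *vp)
{
	vhold(vp);
	/* ... vp may be kept on a list without a usecount reference ... */
	vdrop(vp);
}
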
2248 /*
2249 * Remove any vnodes in the vnode table belonging to mount point mp.
2250 *
2251 * If FORCECLOSE is not specified, there should not be any active ones,
2252 * return error if any are found (nb: this is a user error, not a
2253 * system error). If FORCECLOSE is specified, detach any active vnodes
2254 * that are found.
2255 *
2256 * If WRITECLOSE is set, only flush out regular file vnodes open for
2257 * writing.
2258 *
2259 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2260 *
2261 * `rootrefs' specifies the base reference count for the root vnode
2262 * of this filesystem. The root vnode is considered busy if its
2263 * v_usecount exceeds this value. On a successful return, vflush(, td)
2264 * will call vrele() on the root vnode exactly rootrefs times.
2265 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2266 * be zero.
2267 */
2268 #ifdef DIAGNOSTIC
2269 static int busyprt = 0; /* print out busy vnodes */
2270 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2271 #endif
2272
2273 int
2274 vflush(mp, rootrefs, flags, td)
2275 struct mount *mp;
2276 int rootrefs;
2277 int flags;
2278 struct thread *td;
2279 {
2280 struct vnode *vp, *nvp, *rootvp = NULL;
2281 struct vattr vattr;
2282 int busy = 0, error;
2283
2284 if (rootrefs > 0) {
2285 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2286 ("vflush: bad args"));
2287 /*
2288 * Get the filesystem root vnode. We can vput() it
2289 * immediately, since with rootrefs > 0, it won't go away.
2290 */
2291 if ((error = VFS_ROOT(mp, &rootvp, td)) != 0)
2292 return (error);
2293 vput(rootvp);
2294
2295 }
2296 MNT_ILOCK(mp);
2297 loop:
2298 MNT_VNODE_FOREACH(vp, mp, nvp) {
2299
2300 VI_LOCK(vp);
2301 MNT_IUNLOCK(mp);
2302 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2303 if (error) {
2304 MNT_ILOCK(mp);
2305 goto loop;
2306 }
2307 /*
2308 * Skip over a vnodes marked VV_SYSTEM.
2309 */
2310 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2311 VOP_UNLOCK(vp, 0, td);
2312 MNT_ILOCK(mp);
2313 continue;
2314 }
2315 /*
2316 * If WRITECLOSE is set, flush out unlinked but still open
2317 * files (even if open only for reading) and regular file
2318 * vnodes open for writing.
2319 */
2320 if (flags & WRITECLOSE) {
2321 error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2322 VI_LOCK(vp);
2323
2324 if ((vp->v_type == VNON ||
2325 (error == 0 && vattr.va_nlink > 0)) &&
2326 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2327 VOP_UNLOCK(vp, LK_INTERLOCK, td);
2328 MNT_ILOCK(mp);
2329 continue;
2330 }
2331 } else
2332 VI_LOCK(vp);
2333
2334 VOP_UNLOCK(vp, 0, td);
2335
2336 /*
2337 * With v_usecount == 0, all we need to do is clear out the
2338 * vnode data structures and we are done.
2339 */
2340 if (vp->v_usecount == 0) {
2341 vgonel(vp, td);
2342 MNT_ILOCK(mp);
2343 continue;
2344 }
2345
2346 /*
2347 * If FORCECLOSE is set, forcibly close the vnode. For block
2348 * or character devices, revert to an anonymous device. For
2349 * all other files, just kill them.
2350 */
2351 if (flags & FORCECLOSE) {
2352 if (vp->v_type != VCHR)
2353 vgonel(vp, td);
2354 else
2355 vgonechrl(vp, td);
2356 MNT_ILOCK(mp);
2357 continue;
2358 }
2359 #ifdef DIAGNOSTIC
2360 if (busyprt)
2361 vprint("vflush: busy vnode", vp);
2362 #endif
2363 VI_UNLOCK(vp);
2364 MNT_ILOCK(mp);
2365 busy++;
2366 }
2367 MNT_IUNLOCK(mp);
2368 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2369 /*
2370 * If just the root vnode is busy, and if its refcount
2371 * is equal to `rootrefs', then go ahead and kill it.
2372 */
2373 VI_LOCK(rootvp);
2374 KASSERT(busy > 0, ("vflush: not busy"));
2375 KASSERT(rootvp->v_usecount >= rootrefs,
2376 ("vflush: usecount %d < rootrefs %d",
2377 rootvp->v_usecount, rootrefs));
2378 if (busy == 1 && rootvp->v_usecount == rootrefs) {
2379 vgonel(rootvp, td);
2380 busy = 0;
2381 } else
2382 VI_UNLOCK(rootvp);
2383 }
2384 if (busy)
2385 return (EBUSY);
2386 for (; rootrefs > 0; rootrefs--)
2387 vrele(rootvp);
2388 return (0);
2389 }
2390
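/*
 * Illustrative sketch (not part of the original file): how a
 * filesystem's unmount routine typically drives vflush().  Passing
 * rootrefs as 1 assumes the mount itself holds exactly one reference
 * on the root vnode; the function name is hypothetical.
 */
static int
example_unmount_flush(struct mount *mp, int mntflags, struct thread *td)
{
	int flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, 1, flags, td));
}
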
2391 /*
2392 * This moves a now (likely recyclable) vnode to the end of the
2393 * mountlist. XXX However, it is temporarily disabled until we
2394 * can clean up ffs_sync() and friends, which have loop restart
2395  * conditions which this code causes to operate in O(N^2) time.
2396 */
2397 static void
2398 vlruvp(struct vnode *vp)
2399 {
2400 #if 0
2401 struct mount *mp;
2402
2403 if ((mp = vp->v_mount) != NULL) {
2404 MNT_ILOCK(mp);
2405 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2406 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2407 MNT_IUNLOCK(mp);
2408 }
2409 #endif
2410 }
2411
2412 static void
2413 vx_lock(struct vnode *vp)
2414 {
2415
2416 ASSERT_VI_LOCKED(vp, "vx_lock");
2417
2418 /*
2419 * Prevent the vnode from being recycled or brought into use while we
2420 * clean it out.
2421 */
2422 if (vp->v_iflag & VI_XLOCK)
2423 panic("vclean: deadlock");
2424 vp->v_iflag |= VI_XLOCK;
2425 vp->v_vxthread = curthread;
2426 }
2427
2428 static void
2429 vx_unlock(struct vnode *vp)
2430 {
2431 ASSERT_VI_LOCKED(vp, "vx_unlock");
2432 vp->v_iflag &= ~VI_XLOCK;
2433 vp->v_vxthread = NULL;
2434 if (vp->v_iflag & VI_XWANT) {
2435 vp->v_iflag &= ~VI_XWANT;
2436 wakeup(vp);
2437 }
2438 }
2439
2440 /*
2441 * Disassociate the underlying filesystem from a vnode.
2442 */
2443 static void
2444 vclean(vp, flags, td)
2445 struct vnode *vp;
2446 int flags;
2447 struct thread *td;
2448 {
2449 int active;
2450
2451 ASSERT_VI_LOCKED(vp, "vclean");
2452 /*
2453 * Check to see if the vnode is in use. If so we have to reference it
2454 * before we clean it out so that its count cannot fall to zero and
2455 * generate a race against ourselves to recycle it.
2456 */
2457 if ((active = vp->v_usecount))
2458 v_incr_usecount(vp, 1);
2459
2460 /*
2461 * Even if the count is zero, the VOP_INACTIVE routine may still
2462 * have the object locked while it cleans it out. The VOP_LOCK
2463 * ensures that the VOP_INACTIVE routine is done with its work.
2464 * For active vnodes, it ensures that no other activity can
2465 * occur while the underlying object is being cleaned out.
2466 */
2467 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2468
2469 /*
2470 * Clean out any buffers associated with the vnode.
2471 * If the flush fails, just toss the buffers.
2472 */
2473 if (flags & DOCLOSE) {
2474 struct buf *bp;
2475 bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2476 if (bp != NULL)
2477 (void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2478 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2479 vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2480 }
2481
2482 VOP_DESTROYVOBJECT(vp);
2483
2484 /*
2485 * Any other processes trying to obtain this lock must first
2486 * wait for VXLOCK to clear, then call the new lock operation.
2487 */
2488 VOP_UNLOCK(vp, 0, td);
2489
2490 /*
2491 * If purging an active vnode, it must be closed and
2492 * deactivated before being reclaimed. Note that the
2493 * VOP_INACTIVE will unlock the vnode.
2494 */
2495 if (active) {
2496 if (flags & DOCLOSE)
2497 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2498 VI_LOCK(vp);
2499 if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2500 vp->v_iflag |= VI_DOINGINACT;
2501 VI_UNLOCK(vp);
2502 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2503 panic("vclean: cannot relock.");
2504 VOP_INACTIVE(vp, td);
2505 VI_LOCK(vp);
2506 KASSERT(vp->v_iflag & VI_DOINGINACT,
2507 ("vclean: lost VI_DOINGINACT"));
2508 vp->v_iflag &= ~VI_DOINGINACT;
2509 }
2510 VI_UNLOCK(vp);
2511 }
2512 /*
2513 * Reclaim the vnode.
2514 */
2515 if (VOP_RECLAIM(vp, td))
2516 panic("vclean: cannot reclaim");
2517
2518 if (active) {
2519 /*
2520 * Inline copy of vrele() since VOP_INACTIVE
2521 * has already been called.
2522 */
2523 VI_LOCK(vp);
2524 v_incr_usecount(vp, -1);
2525 if (vp->v_usecount <= 0) {
2526 #ifdef INVARIANTS
2527 if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2528 vprint("vclean: bad ref count", vp);
2529 panic("vclean: ref cnt");
2530 }
2531 #endif
2532 if (VSHOULDFREE(vp))
2533 vfree(vp);
2534 }
2535 VI_UNLOCK(vp);
2536 }
2537 /*
2538 * Delete from old mount point vnode list.
2539 */
2540 delmntque(vp);
2541 cache_purge(vp);
2542 VI_LOCK(vp);
2543 if (VSHOULDFREE(vp))
2544 vfree(vp);
2545
2546 /*
2547 * Done with purge, reset to the standard lock and
2548 * notify sleepers of the grim news.
2549 */
2550 vp->v_vnlock = &vp->v_lock;
2551 vp->v_op = dead_vnodeop_p;
2552 if (vp->v_pollinfo != NULL)
2553 vn_pollgone(vp);
2554 vp->v_tag = "none";
2555 }
2556
2557 /*
2558 * Eliminate all activity associated with the requested vnode
2559 * and with all vnodes aliased to the requested vnode.
2560 */
2561 int
2562 vop_revoke(ap)
2563 struct vop_revoke_args /* {
2564 struct vnode *a_vp;
2565 int a_flags;
2566 } */ *ap;
2567 {
2568 struct vnode *vp, *vq;
2569 struct cdev *dev;
2570
2571 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2572 vp = ap->a_vp;
2573 KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2574
2575 VI_LOCK(vp);
2576 /*
2577 * If a vgone (or vclean) is already in progress,
2578 * wait until it is done and return.
2579 */
2580 if (vp->v_iflag & VI_XLOCK) {
2581 vp->v_iflag |= VI_XWANT;
2582 msleep(vp, VI_MTX(vp), PINOD | PDROP,
2583 "vop_revokeall", 0);
2584 return (0);
2585 }
2586 VI_UNLOCK(vp);
2587 dev = vp->v_rdev;
2588 for (;;) {
2589 mtx_lock(&spechash_mtx);
2590 vq = SLIST_FIRST(&dev->si_hlist);
2591 mtx_unlock(&spechash_mtx);
2592 if (vq == NULL)
2593 break;
2594 vgone(vq);
2595 }
2596 return (0);
2597 }
2598
2599 /*
2600 * Recycle an unused vnode to the front of the free list.
2601 * Release the passed interlock if the vnode will be recycled.
2602 */
2603 int
2604 vrecycle(struct vnode *vp, void *dummyarg, struct thread *td)
2605 {
2606
2607 KASSERT(dummyarg == NULL,
2608 ("vrecycle with non-dummy arg %p", dummyarg));
2609 VI_LOCK(vp);
2610 if (vp->v_usecount == 0) {
2611 vgonel(vp, td);
2612 return (1);
2613 }
2614 VI_UNLOCK(vp);
2615 return (0);
2616 }
2617
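/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_INACTIVE routine may request immediate recycling once it knows
 * the backing object is gone; "object_is_gone" is a placeholder for
 * filesystem-specific state.
 */
static void
example_inactive_tail(struct vnode *vp, struct thread *td, int object_is_gone)
{
	if (object_is_gone)
		(void) vrecycle(vp, NULL, td);
}
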
2618 /*
2619 * Eliminate all activity associated with a vnode
2620 * in preparation for reuse.
2621 */
2622 void
2623 vgone(vp)
2624 register struct vnode *vp;
2625 {
2626 struct thread *td = curthread; /* XXX */
2627
2628 VI_LOCK(vp);
2629 vgonel(vp, td);
2630 }
2631
2632 /*
2633  * Disassociate a character device from its underlying filesystem and
2634 * attach it to spec. This is for use when the chr device is still active
2635 * and the filesystem is going away.
2636 */
2637 static void
2638 vgonechrl(struct vnode *vp, struct thread *td)
2639 {
2640 ASSERT_VI_LOCKED(vp, "vgonechrl");
2641 vx_lock(vp);
2642 /*
2643 	 * This is a custom version of vclean() which does not tear down
2644 * the bufs or vm objects held by this vnode. This allows filesystems
2645 * to continue using devices which were discovered via another
2646 * filesystem that has been unmounted.
2647 */
2648 if (vp->v_usecount != 0) {
2649 v_incr_usecount(vp, 1);
2650 /*
2651 * Ensure that no other activity can occur while the
2652 * underlying object is being cleaned out.
2653 */
2654 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2655 /*
2656 * Any other processes trying to obtain this lock must first
2657 * wait for VXLOCK to clear, then call the new lock operation.
2658 */
2659 VOP_UNLOCK(vp, 0, td);
2660 vp->v_vnlock = &vp->v_lock;
2661 vp->v_tag = "orphanchr";
2662 vp->v_op = spec_vnodeop_p;
2663 delmntque(vp);
2664 cache_purge(vp);
2665 vrele(vp);
2666 VI_LOCK(vp);
2667 } else
2668 vclean(vp, 0, td);
2669 vp->v_op = spec_vnodeop_p;
2670 vx_unlock(vp);
2671 VI_UNLOCK(vp);
2672 }
2673
2674 /*
2675 * vgone, with the vp interlock held.
2676 */
2677 void
2678 vgonel(vp, td)
2679 struct vnode *vp;
2680 struct thread *td;
2681 {
2682 /*
2683 * If a vgone (or vclean) is already in progress,
2684 * wait until it is done and return.
2685 */
2686 ASSERT_VI_LOCKED(vp, "vgonel");
2687 if (vp->v_iflag & VI_XLOCK) {
2688 vp->v_iflag |= VI_XWANT;
2689 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2690 return;
2691 }
2692 vx_lock(vp);
2693
2694 /*
2695 * Clean out the filesystem specific data.
2696 */
2697 vclean(vp, DOCLOSE, td);
2698 VI_UNLOCK(vp);
2699
2700 /*
2701 * If special device, remove it from special device alias list
2702 * if it is on one.
2703 */
2704 VI_LOCK(vp);
2705 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2706 mtx_lock(&spechash_mtx);
2707 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2708 vp->v_rdev->si_usecount -= vp->v_usecount;
2709 mtx_unlock(&spechash_mtx);
2710 dev_rel(vp->v_rdev);
2711 vp->v_rdev = NULL;
2712 }
2713
2714 /*
2715 * If it is on the freelist and not already at the head,
2716 * move it to the head of the list. The test of the
2717 	 * VI_DOOMED flag and the reference count of zero is because
2718 * it will be removed from the free list by getnewvnode,
2719 * but will not have its reference count incremented until
2720 * after calling vgone. If the reference count were
2721 * incremented first, vgone would (incorrectly) try to
2722 * close the previous instance of the underlying object.
2723 */
2724 if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2725 mtx_lock(&vnode_free_list_mtx);
2726 if (vp->v_iflag & VI_FREE) {
2727 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2728 } else {
2729 vp->v_iflag |= VI_FREE;
2730 freevnodes++;
2731 }
2732 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2733 mtx_unlock(&vnode_free_list_mtx);
2734 }
2735
2736 vp->v_type = VBAD;
2737 vx_unlock(vp);
2738 VI_UNLOCK(vp);
2739 }
2740
2741 /*
2742 * Lookup a vnode by device number.
2743 */
2744 int
2745 vfinddev(dev, vpp)
2746 struct cdev *dev;
2747 struct vnode **vpp;
2748 {
2749 struct vnode *vp;
2750
2751 mtx_lock(&spechash_mtx);
2752 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2753 *vpp = vp;
2754 mtx_unlock(&spechash_mtx);
2755 return (1);
2756 }
2757 mtx_unlock(&spechash_mtx);
2758 return (0);
2759 }
2760
2761 /*
2762 * Calculate the total number of references to a special device.
2763 */
2764 int
2765 vcount(vp)
2766 struct vnode *vp;
2767 {
2768 int count;
2769
2770 mtx_lock(&spechash_mtx);
2771 count = vp->v_rdev->si_usecount;
2772 mtx_unlock(&spechash_mtx);
2773 return (count);
2774 }
2775
2776 /*
2777  * Same as above, but using the struct cdev * as the argument.
2778 */
2779 int
2780 count_dev(dev)
2781 struct cdev *dev;
2782 {
2783 int count;
2784
2785 mtx_lock(&spechash_mtx);
2786 count = dev->si_usecount;
2787 mtx_unlock(&spechash_mtx);
2788 return(count);
2789 }
2790
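/*
 * Illustrative sketch (not part of the original file): a device close
 * routine can use vcount() to decide whether this is the last close
 * across every vnode aliased to the device; the helper name is
 * hypothetical.
 */
static int
example_is_last_close(struct vnode *vp)
{
	return (vcount(vp) <= 1);
}
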
2791 /*
2792 * Print out a description of a vnode.
2793 */
2794 static char *typename[] =
2795 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2796
2797 void
2798 vprint(label, vp)
2799 char *label;
2800 struct vnode *vp;
2801 {
2802 char buf[96];
2803
2804 if (label != NULL)
2805 printf("%s: %p: ", label, (void *)vp);
2806 else
2807 printf("%p: ", (void *)vp);
2808 printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2809 vp->v_tag, typename[vp->v_type], vp->v_usecount,
2810 vp->v_writecount, vp->v_holdcnt);
2811 buf[0] = '\0';
2812 if (vp->v_vflag & VV_ROOT)
2813 strcat(buf, "|VV_ROOT");
2814 if (vp->v_vflag & VV_TEXT)
2815 strcat(buf, "|VV_TEXT");
2816 if (vp->v_vflag & VV_SYSTEM)
2817 strcat(buf, "|VV_SYSTEM");
2818 if (vp->v_iflag & VI_XLOCK)
2819 strcat(buf, "|VI_XLOCK");
2820 if (vp->v_iflag & VI_XWANT)
2821 strcat(buf, "|VI_XWANT");
2822 if (vp->v_iflag & VI_BWAIT)
2823 strcat(buf, "|VI_BWAIT");
2824 if (vp->v_iflag & VI_DOOMED)
2825 strcat(buf, "|VI_DOOMED");
2826 if (vp->v_iflag & VI_FREE)
2827 strcat(buf, "|VI_FREE");
2828 if (vp->v_vflag & VV_OBJBUF)
2829 strcat(buf, "|VV_OBJBUF");
2830 if (buf[0] != '\0')
2831 printf(" flags (%s),", &buf[1]);
2832 lockmgr_printinfo(vp->v_vnlock);
2833 printf("\n");
2834 if (vp->v_data != NULL)
2835 VOP_PRINT(vp);
2836 }
2837
2838 #ifdef DDB
2839 #include <ddb/ddb.h>
2840 /*
2841 * List all of the locked vnodes in the system.
2842 * Called when debugging the kernel.
2843 */
2844 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2845 {
2846 struct mount *mp, *nmp;
2847 struct vnode *vp;
2848
2849 /*
2850 * Note: because this is DDB, we can't obey the locking semantics
2851 * for these structures, which means we could catch an inconsistent
2852 * state and dereference a nasty pointer. Not much to be done
2853 * about that.
2854 */
2855 printf("Locked vnodes\n");
2856 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2857 nmp = TAILQ_NEXT(mp, mnt_list);
2858 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2859 if (VOP_ISLOCKED(vp, NULL))
2860 vprint(NULL, vp);
2861 }
2863 }
2864 }
2865 #endif
2866
2867 /*
2868 * Fill in a struct xvfsconf based on a struct vfsconf.
2869 */
2870 static void
2871 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2872 {
2873
2874 strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2875 xvfsp->vfc_typenum = vfsp->vfc_typenum;
2876 xvfsp->vfc_refcount = vfsp->vfc_refcount;
2877 xvfsp->vfc_flags = vfsp->vfc_flags;
2878 /*
2879 	 * These are unused in userland; we keep them
2880 	 * so as not to break binary compatibility.
2881 */
2882 xvfsp->vfc_vfsops = NULL;
2883 xvfsp->vfc_next = NULL;
2884 }
2885
2886 /*
2887 * Top level filesystem related information gathering.
2888 */
2889 static int
2890 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2891 {
2892 struct vfsconf *vfsp;
2893 struct xvfsconf xvfsp;
2894 int error;
2895
2896 error = 0;
2897 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2898 bzero(&xvfsp, sizeof(xvfsp));
2899 vfsconf2x(vfsp, &xvfsp);
2900 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2901 if (error)
2902 break;
2903 }
2904 return (error);
2905 }
2906
2907 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2908 "S,xvfsconf", "List of all configured filesystems");
2909
2910 #ifndef BURN_BRIDGES
2911 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2912
2913 static int
2914 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2915 {
2916 int *name = (int *)arg1 - 1; /* XXX */
2917 u_int namelen = arg2 + 1; /* XXX */
2918 struct vfsconf *vfsp;
2919 struct xvfsconf xvfsp;
2920
2921 printf("WARNING: userland calling deprecated sysctl, "
2922 "please rebuild world\n");
2923
2924 #if 1 || defined(COMPAT_PRELITE2)
2925 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2926 if (namelen == 1)
2927 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2928 #endif
2929
2930 switch (name[1]) {
2931 case VFS_MAXTYPENUM:
2932 if (namelen != 2)
2933 return (ENOTDIR);
2934 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2935 case VFS_CONF:
2936 if (namelen != 3)
2937 return (ENOTDIR); /* overloaded */
2938 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2939 if (vfsp->vfc_typenum == name[2])
2940 break;
2941 if (vfsp == NULL)
2942 return (EOPNOTSUPP);
2943 bzero(&xvfsp, sizeof(xvfsp));
2944 vfsconf2x(vfsp, &xvfsp);
2945 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2946 }
2947 return (EOPNOTSUPP);
2948 }
2949
2950 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2951 "Generic filesystem");
2952
2953 #if 1 || defined(COMPAT_PRELITE2)
2954
2955 static int
2956 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2957 {
2958 int error;
2959 struct vfsconf *vfsp;
2960 struct ovfsconf ovfs;
2961
2962 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2963 bzero(&ovfs, sizeof(ovfs));
2964 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
2965 strcpy(ovfs.vfc_name, vfsp->vfc_name);
2966 ovfs.vfc_index = vfsp->vfc_typenum;
2967 ovfs.vfc_refcount = vfsp->vfc_refcount;
2968 ovfs.vfc_flags = vfsp->vfc_flags;
2969 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2970 if (error)
2971 return error;
2972 }
2973 return 0;
2974 }
2975
2976 #endif /* 1 || COMPAT_PRELITE2 */
2977 #endif /* !BURN_BRIDGES */
2978
2979 #define KINFO_VNODESLOP 10
2980 #ifdef notyet
2981 /*
2982 * Dump vnode list (via sysctl).
2983 */
2984 /* ARGSUSED */
2985 static int
2986 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2987 {
2988 struct xvnode *xvn;
2989 struct thread *td = req->td;
2990 struct mount *mp;
2991 struct vnode *vp;
2992 int error, len, n;
2993
2994 /*
2995 * Stale numvnodes access is not fatal here.
2996 */
2997 req->lock = 0;
2998 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2999 if (!req->oldptr)
3000 /* Make an estimate */
3001 return (SYSCTL_OUT(req, 0, len));
3002
3003 error = sysctl_wire_old_buffer(req, 0);
3004 if (error != 0)
3005 return (error);
3006 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3007 n = 0;
3008 mtx_lock(&mountlist_mtx);
3009 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3010 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3011 continue;
3012 MNT_ILOCK(mp);
3013 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3014 if (n == len)
3015 break;
3016 vref(vp);
3017 xvn[n].xv_size = sizeof *xvn;
3018 xvn[n].xv_vnode = vp;
3019 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3020 XV_COPY(usecount);
3021 XV_COPY(writecount);
3022 XV_COPY(holdcnt);
3023 XV_COPY(id);
3024 XV_COPY(mount);
3025 XV_COPY(numoutput);
3026 XV_COPY(type);
3027 #undef XV_COPY
3028 xvn[n].xv_flag = vp->v_vflag;
3029
3030 switch (vp->v_type) {
3031 case VREG:
3032 case VDIR:
3033 case VLNK:
3034 xvn[n].xv_dev = vp->v_cachedfs;
3035 xvn[n].xv_ino = vp->v_cachedid;
3036 break;
3037 case VBLK:
3038 case VCHR:
3039 if (vp->v_rdev == NULL) {
3040 vrele(vp);
3041 continue;
3042 }
3043 xvn[n].xv_dev = dev2udev(vp->v_rdev);
3044 break;
3045 case VSOCK:
3046 xvn[n].xv_socket = vp->v_socket;
3047 break;
3048 case VFIFO:
3049 xvn[n].xv_fifo = vp->v_fifoinfo;
3050 break;
3051 case VNON:
3052 case VBAD:
3053 default:
3054 /* shouldn't happen? */
3055 vrele(vp);
3056 continue;
3057 }
3058 vrele(vp);
3059 ++n;
3060 }
3061 MNT_IUNLOCK(mp);
3062 mtx_lock(&mountlist_mtx);
3063 vfs_unbusy(mp, td);
3064 if (n == len)
3065 break;
3066 }
3067 mtx_unlock(&mountlist_mtx);
3068
3069 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3070 free(xvn, M_TEMP);
3071 return (error);
3072 }
3073
3074 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3075 0, 0, sysctl_vnode, "S,xvnode", "");
3076 #endif
3077
3078 /*
3079 * Check to see if a filesystem is mounted on a block device.
3080 */
3081 int
3082 vfs_mountedon(vp)
3083 struct vnode *vp;
3084 {
3085
3086 if (vp->v_rdev->si_mountpoint != NULL)
3087 return (EBUSY);
3088 return (0);
3089 }
3090
3091 /*
3092 * Unmount all filesystems. The list is traversed in reverse order
3093 * of mounting to avoid dependencies.
3094 */
3095 void
3096 vfs_unmountall()
3097 {
3098 struct mount *mp;
3099 struct thread *td;
3100 int error;
3101
3102 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
3103 td = curthread;
3104 /*
3105 * Since this only runs when rebooting, it is not interlocked.
3106 */
3107 	while (!TAILQ_EMPTY(&mountlist)) {
3108 mp = TAILQ_LAST(&mountlist, mntlist);
3109 error = dounmount(mp, MNT_FORCE, td);
3110 if (error) {
3111 TAILQ_REMOVE(&mountlist, mp, mnt_list);
3112 printf("unmount of %s failed (",
3113 mp->mnt_stat.f_mntonname);
3114 if (error == EBUSY)
3115 printf("BUSY)\n");
3116 else
3117 printf("%d)\n", error);
3118 } else {
3119 /* The unmount has removed mp from the mountlist */
3120 }
3121 }
3122 }
3123
3124 /*
3125  * Perform msync on all vnodes under a mount point.
3126  * The mount point must be locked.
3127 */
3128 void
3129 vfs_msync(struct mount *mp, int flags)
3130 {
3131 struct vnode *vp, *nvp;
3132 struct vm_object *obj;
3133 int tries;
3134
3135 GIANT_REQUIRED;
3136
3137 tries = 5;
3138 MNT_ILOCK(mp);
3139 loop:
3140 TAILQ_FOREACH_SAFE(vp, &mp->mnt_nvnodelist, v_nmntvnodes, nvp) {
3141 if (vp->v_mount != mp) {
3142 if (--tries > 0)
3143 goto loop;
3144 break;
3145 }
3146
3147 VI_LOCK(vp);
3148 if (vp->v_iflag & VI_XLOCK) {
3149 VI_UNLOCK(vp);
3150 continue;
3151 }
3152
3153 if ((vp->v_iflag & VI_OBJDIRTY) &&
3154 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3155 MNT_IUNLOCK(mp);
3156 if (!vget(vp,
3157 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3158 curthread)) {
3159 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
3160 vput(vp);
3161 MNT_ILOCK(mp);
3162 continue;
3163 }
3164
3165 if (VOP_GETVOBJECT(vp, &obj) == 0) {
3166 VM_OBJECT_LOCK(obj);
3167 vm_object_page_clean(obj, 0, 0,
3168 flags == MNT_WAIT ?
3169 OBJPC_SYNC : OBJPC_NOSYNC);
3170 VM_OBJECT_UNLOCK(obj);
3171 }
3172 vput(vp);
3173 }
3174 MNT_ILOCK(mp);
3175 if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3176 if (--tries > 0)
3177 goto loop;
3178 break;
3179 }
3180 } else
3181 VI_UNLOCK(vp);
3182 }
3183 MNT_IUNLOCK(mp);
3184 }
3185
3186 /*
3187 * Create the VM object needed for VMIO and mmap support. This
3188 * is done for all VREG files in the system. Some filesystems might
3189  * avail themselves of the additional metadata buffering capability
3190  * of the VMIO code by making the device node VMIO-backed as well.
3191 *
3192 * vp must be locked when vfs_object_create is called.
3193 */
3194 int
3195 vfs_object_create(vp, td, cred)
3196 struct vnode *vp;
3197 struct thread *td;
3198 struct ucred *cred;
3199 {
3200
3201 GIANT_REQUIRED;
3202 return (VOP_CREATEVOBJECT(vp, cred, td));
3203 }
3204
3205 /*
3206 * Mark a vnode as free, putting it up for recycling.
3207 */
3208 void
3209 vfree(vp)
3210 struct vnode *vp;
3211 {
3212
3213 ASSERT_VI_LOCKED(vp, "vfree");
3214 mtx_lock(&vnode_free_list_mtx);
3215 KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3216 if (vp->v_iflag & VI_AGE) {
3217 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3218 } else {
3219 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3220 }
3221 freevnodes++;
3222 mtx_unlock(&vnode_free_list_mtx);
3223 vp->v_iflag &= ~VI_AGE;
3224 vp->v_iflag |= VI_FREE;
3225 }
3226
3227 /*
3228 * Opposite of vfree() - mark a vnode as in use.
3229 */
3230 void
3231 vbusy(vp)
3232 struct vnode *vp;
3233 {
3234
3235 ASSERT_VI_LOCKED(vp, "vbusy");
3236 KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3237
3238 mtx_lock(&vnode_free_list_mtx);
3239 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3240 freevnodes--;
3241 mtx_unlock(&vnode_free_list_mtx);
3242
3243 vp->v_iflag &= ~(VI_FREE|VI_AGE);
3244 }
3245
3246 /*
3247  * Initialize per-vnode helper structure to hold poll-related state.
3248 */
3249 void
3250 v_addpollinfo(struct vnode *vp)
3251 {
3252 struct vpollinfo *vi;
3253
3254 vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3255 if (vp->v_pollinfo != NULL) {
3256 uma_zfree(vnodepoll_zone, vi);
3257 return;
3258 }
3259 vp->v_pollinfo = vi;
3260 mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3261 knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note,
3262 &vp->v_pollinfo->vpi_lock);
3263 }
3264
3265 /*
3266 * Record a process's interest in events which might happen to
3267 * a vnode. Because poll uses the historic select-style interface
3268 * internally, this routine serves as both the ``check for any
3269 * pending events'' and the ``record my interest in future events''
3270 * functions. (These are done together, while the lock is held,
3271 * to avoid race conditions.)
3272 */
3273 int
3274 vn_pollrecord(vp, td, events)
3275 struct vnode *vp;
3276 struct thread *td;
3277 short events;
3278 {
3279
3280 if (vp->v_pollinfo == NULL)
3281 v_addpollinfo(vp);
3282 mtx_lock(&vp->v_pollinfo->vpi_lock);
3283 if (vp->v_pollinfo->vpi_revents & events) {
3284 /*
3285 * This leaves events we are not interested
3286 		 * in available for the other process which
3287 		 * presumably had requested them
3288 * (otherwise they would never have been
3289 * recorded).
3290 */
3291 events &= vp->v_pollinfo->vpi_revents;
3292 vp->v_pollinfo->vpi_revents &= ~events;
3293
3294 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3295 return events;
3296 }
3297 vp->v_pollinfo->vpi_events |= events;
3298 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3299 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3300 return 0;
3301 }
3302
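/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_POLL implementation commonly reduces to a single vn_pollrecord()
 * call, which both reports pending events and records interest in
 * future ones; the wrapper name is hypothetical.
 */
static int
example_vop_poll(struct vnode *vp, struct thread *td, short events)
{
	return (vn_pollrecord(vp, td, events));
}
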
3303 /*
3304 * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
3305 * it is possible for us to miss an event due to race conditions, but
3306 * that condition is expected to be rare, so for the moment it is the
3307 * preferred interface.
3308 */
3309 void
3310 vn_pollevent(vp, events)
3311 struct vnode *vp;
3312 short events;
3313 {
3314
3315 if (vp->v_pollinfo == NULL)
3316 v_addpollinfo(vp);
3317 mtx_lock(&vp->v_pollinfo->vpi_lock);
3318 if (vp->v_pollinfo->vpi_events & events) {
3319 /*
3320 * We clear vpi_events so that we don't
3321 * call selwakeup() twice if two events are
3322 * posted before the polling process(es) is
3323 * awakened. This also ensures that we take at
3324 * most one selwakeup() if the polling process
3325 * is no longer interested. However, it does
3326 * mean that only one event can be noticed at
3327 * a time. (Perhaps we should only clear those
3328 * event bits which we note?) XXX
3329 */
3330 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
3331 vp->v_pollinfo->vpi_revents |= events;
3332 selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3333 }
3334 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3335 }
3336
3337 /*
3338 * Wake up anyone polling on vp because it is being revoked.
3339 * This depends on dead_poll() returning POLLHUP for correct
3340 * behavior.
3341 */
3342 void
3343 vn_pollgone(vp)
3344 struct vnode *vp;
3345 {
3346
3347 mtx_lock(&vp->v_pollinfo->vpi_lock);
3348 VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
3349 if (vp->v_pollinfo->vpi_events) {
3350 vp->v_pollinfo->vpi_events = 0;
3351 selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3352 }
3353 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3354 }
3355
3356
3357
3358 /*
3359 * Routine to create and manage a filesystem syncer vnode.
3360 */
3361 #define sync_close ((int (*)(struct vop_close_args *))nullop)
3362 static int sync_fsync(struct vop_fsync_args *);
3363 static int sync_inactive(struct vop_inactive_args *);
3364 static int sync_reclaim(struct vop_reclaim_args *);
3365
3366 static vop_t **sync_vnodeop_p;
3367 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3368 { &vop_default_desc, (vop_t *) vop_eopnotsupp },
3369 { &vop_close_desc, (vop_t *) sync_close }, /* close */
3370 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */
3371 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */
3372 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */
3373 { &vop_lock_desc, (vop_t *) vop_stdlock }, /* lock */
3374 { &vop_unlock_desc, (vop_t *) vop_stdunlock }, /* unlock */
3375 { &vop_islocked_desc, (vop_t *) vop_stdislocked }, /* islocked */
3376 { NULL, NULL }
3377 };
3378 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3379 { &sync_vnodeop_p, sync_vnodeop_entries };
3380
3381 VNODEOP_SET(sync_vnodeop_opv_desc);
3382
3383 /*
3384 * Create a new filesystem syncer vnode for the specified mount point.
3385 */
3386 int
3387 vfs_allocate_syncvnode(mp)
3388 struct mount *mp;
3389 {
3390 struct vnode *vp;
3391 static long start, incr, next;
3392 int error;
3393
3394 /* Allocate a new vnode */
3395 if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3396 mp->mnt_syncer = NULL;
3397 return (error);
3398 }
3399 vp->v_type = VNON;
3400 /*
3401 * Place the vnode onto the syncer worklist. We attempt to
3402 * scatter them about on the list so that they will go off
3403 * at evenly distributed times even if all the filesystems
3404 * are mounted at once.
3405 */
3406 next += incr;
3407 if (next == 0 || next > syncer_maxdelay) {
3408 start /= 2;
3409 incr /= 2;
3410 if (start == 0) {
3411 start = syncer_maxdelay / 2;
3412 incr = syncer_maxdelay;
3413 }
3414 next = start;
3415 }
3416 VI_LOCK(vp);
3417 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3418 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3419 mtx_lock(&sync_mtx);
3420 sync_vnode_count++;
3421 mtx_unlock(&sync_mtx);
3422 VI_UNLOCK(vp);
3423 mp->mnt_syncer = vp;
3424 return (0);
3425 }
3426
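/*
 * Illustrative worked example (not part of the original file):
 * assuming syncer_maxdelay == 32 and the static state starting at
 * zero, successive mounts are assigned slots 16, 8, 24, 4, 12, 20,
 * 28, 2, 6, ...; each halving of "incr" fills in midpoints between
 * slots already taken, spreading syncer activity evenly over the
 * delay wheel.
 */
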
3427 /*
3428 * Do a lazy sync of the filesystem.
3429 */
3430 static int
3431 sync_fsync(ap)
3432 struct vop_fsync_args /* {
3433 struct vnode *a_vp;
3434 struct ucred *a_cred;
3435 int a_waitfor;
3436 struct thread *a_td;
3437 } */ *ap;
3438 {
3439 struct vnode *syncvp = ap->a_vp;
3440 struct mount *mp = syncvp->v_mount;
3441 struct thread *td = ap->a_td;
3442 int error, asyncflag;
3443
3444 /*
3445 * We only need to do something if this is a lazy evaluation.
3446 */
3447 if (ap->a_waitfor != MNT_LAZY)
3448 return (0);
3449
3450 /*
3451 * Move ourselves to the back of the sync list.
3452 */
3453 VI_LOCK(syncvp);
3454 vn_syncer_add_to_worklist(syncvp, syncdelay);
3455 VI_UNLOCK(syncvp);
3456
3457 /*
3458 * Walk the list of vnodes pushing all that are dirty and
3459 * not already on the sync list.
3460 */
3461 mtx_lock(&mountlist_mtx);
3462 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3463 mtx_unlock(&mountlist_mtx);
3464 return (0);
3465 }
3466 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3467 vfs_unbusy(mp, td);
3468 return (0);
3469 }
3470 asyncflag = mp->mnt_flag & MNT_ASYNC;
3471 mp->mnt_flag &= ~MNT_ASYNC;
3472 vfs_msync(mp, MNT_NOWAIT);
3473 error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3474 if (asyncflag)
3475 mp->mnt_flag |= MNT_ASYNC;
3476 vn_finished_write(mp);
3477 vfs_unbusy(mp, td);
3478 return (error);
3479 }
3480
3481 /*
3482  * The syncer vnode is no longer referenced.
3483 */
3484 static int
3485 sync_inactive(ap)
3486 struct vop_inactive_args /* {
3487 struct vnode *a_vp;
3488 struct thread *a_td;
3489 } */ *ap;
3490 {
3491
3492 VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3493 vgone(ap->a_vp);
3494 return (0);
3495 }
3496
3497 /*
3498 * The syncer vnode is no longer needed and is being decommissioned.
3499 *
3500 * Modifications to the worklist must be protected by sync_mtx.
3501 */
3502 static int
3503 sync_reclaim(ap)
3504 struct vop_reclaim_args /* {
3505 struct vnode *a_vp;
3506 } */ *ap;
3507 {
3508 struct vnode *vp = ap->a_vp;
3509
3510 VI_LOCK(vp);
3511 vp->v_mount->mnt_syncer = NULL;
3512 if (vp->v_iflag & VI_ONWORKLST) {
3513 mtx_lock(&sync_mtx);
3514 LIST_REMOVE(vp, v_synclist);
3515 syncer_worklist_len--;
3516 sync_vnode_count--;
3517 mtx_unlock(&sync_mtx);
3518 vp->v_iflag &= ~VI_ONWORKLST;
3519 }
3520 VI_UNLOCK(vp);
3521
3522 return (0);
3523 }
3524
3525 /*
3526  * Extract the struct cdev * from a VCHR vnode.
3527 */
3528 struct cdev *
3529 vn_todev(vp)
3530 struct vnode *vp;
3531 {
3532
3533 if (vp->v_type != VCHR)
3534 return (NULL);
3535 return (vp->v_rdev);
3536 }
3537
3538 /*
3539 * Check if vnode represents a disk device
3540 */
3541 int
3542 vn_isdisk(vp, errp)
3543 struct vnode *vp;
3544 int *errp;
3545 {
3546 int error;
3547
3548 error = 0;
3549 if (vp->v_type != VCHR)
3550 error = ENOTBLK;
3551 else if (vp->v_rdev == NULL)
3552 error = ENXIO;
3553 else if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
3554 error = ENOTBLK;
3555 if (errp != NULL)
3556 *errp = error;
3557 return (error == 0);
3558 }
3559
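/*
 * Illustrative sketch (not part of the original file): callers that
 * need a disk-backed vnode typically use vn_isdisk() like this,
 * propagating the detailed errno it reports.
 */
static int
example_require_disk(struct vnode *vp)
{
	int error;

	if (!vn_isdisk(vp, &error))
		return (error);		/* ENOTBLK or ENXIO */
	return (0);
}
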
3560 /*
3561 * Free data allocated by namei(); see namei(9) for details.
3562 */
3563 void
3564 NDFREE(ndp, flags)
3565 struct nameidata *ndp;
3566 const u_int flags;
3567 {
3568
3569 if (!(flags & NDF_NO_FREE_PNBUF) &&
3570 (ndp->ni_cnd.cn_flags & HASBUF)) {
3571 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3572 ndp->ni_cnd.cn_flags &= ~HASBUF;
3573 }
3574 if (!(flags & NDF_NO_DVP_UNLOCK) &&
3575 (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3576 ndp->ni_dvp != ndp->ni_vp)
3577 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3578 if (!(flags & NDF_NO_DVP_RELE) &&
3579 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3580 vrele(ndp->ni_dvp);
3581 ndp->ni_dvp = NULL;
3582 }
3583 if (!(flags & NDF_NO_VP_UNLOCK) &&
3584 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3585 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3586 if (!(flags & NDF_NO_VP_RELE) &&
3587 ndp->ni_vp) {
3588 vrele(ndp->ni_vp);
3589 ndp->ni_vp = NULL;
3590 }
3591 if (!(flags & NDF_NO_STARTDIR_RELE) &&
3592 (ndp->ni_cnd.cn_flags & SAVESTART)) {
3593 vrele(ndp->ni_startdir);
3594 ndp->ni_startdir = NULL;
3595 }
3596 }
3597
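/*
 * Illustrative sketch (not part of the original file): the common
 * namei()/NDFREE() pairing.  The pathname and NDINIT flags are
 * assumptions; NDF_ONLY_PNBUF releases just the pathname buffer,
 * leaving the caller to vput()/vrele() the vnodes itself.
 */
static int
example_lookup(const char *path, struct thread *td)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* ... use nd.ni_vp, which is locked and referenced ... */
	vput(nd.ni_vp);
	return (0);
}
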
3598 /*
3599 * Common filesystem object access control check routine. Accepts a
3600 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3601 * and optional call-by-reference privused argument allowing vaccess()
3602 * to indicate to the caller whether privilege was used to satisfy the
3603 * request (obsoleted). Returns 0 on success, or an errno on failure.
3604 */
3605 int
3606 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3607 enum vtype type;
3608 mode_t file_mode;
3609 uid_t file_uid;
3610 gid_t file_gid;
3611 mode_t acc_mode;
3612 struct ucred *cred;
3613 int *privused;
3614 {
3615 mode_t dac_granted;
3616 #ifdef CAPABILITIES
3617 mode_t cap_granted;
3618 #endif
3619
3620 /*
3621 * Look for a normal, non-privileged way to access the file/directory
3622 * as requested. If it exists, go with that.
3623 */
3624
3625 if (privused != NULL)
3626 *privused = 0;
3627
3628 dac_granted = 0;
3629
3630 /* Check the owner. */
3631 if (cred->cr_uid == file_uid) {
3632 dac_granted |= VADMIN;
3633 if (file_mode & S_IXUSR)
3634 dac_granted |= VEXEC;
3635 if (file_mode & S_IRUSR)
3636 dac_granted |= VREAD;
3637 if (file_mode & S_IWUSR)
3638 dac_granted |= (VWRITE | VAPPEND);
3639
3640 if ((acc_mode & dac_granted) == acc_mode)
3641 return (0);
3642
3643 goto privcheck;
3644 }
3645
3646 /* Otherwise, check the groups (first match) */
3647 if (groupmember(file_gid, cred)) {
3648 if (file_mode & S_IXGRP)
3649 dac_granted |= VEXEC;
3650 if (file_mode & S_IRGRP)
3651 dac_granted |= VREAD;
3652 if (file_mode & S_IWGRP)
3653 dac_granted |= (VWRITE | VAPPEND);
3654
3655 if ((acc_mode & dac_granted) == acc_mode)
3656 return (0);
3657
3658 goto privcheck;
3659 }
3660
3661 /* Otherwise, check everyone else. */
3662 if (file_mode & S_IXOTH)
3663 dac_granted |= VEXEC;
3664 if (file_mode & S_IROTH)
3665 dac_granted |= VREAD;
3666 if (file_mode & S_IWOTH)
3667 dac_granted |= (VWRITE | VAPPEND);
3668 if ((acc_mode & dac_granted) == acc_mode)
3669 return (0);
3670
3671 privcheck:
3672 if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
3673 /* XXX audit: privilege used */
3674 if (privused != NULL)
3675 *privused = 1;
3676 return (0);
3677 }
3678
3679 #ifdef CAPABILITIES
3680 /*
3681 * Build a capability mask to determine if the set of capabilities
3682 * satisfies the requirements when combined with the granted mask
3683 * from above.
3684 * For each capability, if the capability is required, bitwise
3685 * or the request type onto the cap_granted mask.
3686 */
3687 cap_granted = 0;
3688
3689 if (type == VDIR) {
3690 /*
3691 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3692 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3693 */
3694 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3695 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3696 cap_granted |= VEXEC;
3697 } else {
3698 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3699 !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
3700 cap_granted |= VEXEC;
3701 }
3702
3703 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3704 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3705 cap_granted |= VREAD;
3706
3707 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3708 !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
3709 cap_granted |= (VWRITE | VAPPEND);
3710
3711 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3712 !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
3713 cap_granted |= VADMIN;
3714
3715 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3716 /* XXX audit: privilege used */
3717 if (privused != NULL)
3718 *privused = 1;
3719 return (0);
3720 }
3721 #endif
3722
3723 return ((acc_mode & VADMIN) ? EPERM : EACCES);
3724 }
3725
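/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * access routine usually ends by delegating to vaccess() with the
 * attributes it looked up itself.  As a worked example, for a mode
 * 0644 file a non-owner requesting VWRITE gains at most VREAD from
 * the group and other clauses, so vaccess() falls through to the
 * privilege check and returns EACCES to unprivileged credentials.
 */
static int
example_vop_access(enum vtype type, mode_t mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	return (vaccess(type, mode, uid, gid, acc_mode, cred, NULL));
}
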
3726 /*
3727 * Credential check based on process requesting service, and per-attribute
3728 * permissions.
3729 */
3730 int
3731 extattr_check_cred(struct vnode *vp, int attrnamespace,
3732 struct ucred *cred, struct thread *td, int access)
3733 {
3734
3735 /*
3736 * Kernel-invoked always succeeds.
3737 */
3738 if (cred == NOCRED)
3739 return (0);
3740
3741 /*
3742 * Do not allow privileged processes in jail to directly
3743 * manipulate system attributes.
3744 *
3745 * XXX What capability should apply here?
3746 * Probably CAP_SYS_SETFFLAG.
3747 */
3748 switch (attrnamespace) {
3749 case EXTATTR_NAMESPACE_SYSTEM:
3750 /* Potentially should be: return (EPERM); */
3751 return (suser_cred(cred, 0));
3752 case EXTATTR_NAMESPACE_USER:
3753 return (VOP_ACCESS(vp, access, cred, td));
3754 default:
3755 return (EPERM);
3756 }
3757 }
3758
3759 #ifdef DEBUG_VFS_LOCKS
3760 /*
3761  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3762 * no longer ok to have an unlocked VFS.
3763 */
3764 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3765
3766 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
3767 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3768
3769 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */
3770 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3771
3772 int vfs_badlock_print = 1; /* Print lock violations. */
3773 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3774
3775 #ifdef KDB
3776 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */
3777 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3778 #endif
3779
3780 static void
3781 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3782 {
3783
3784 #ifdef KDB
3785 if (vfs_badlock_backtrace)
3786 kdb_backtrace();
3787 #endif
3788 if (vfs_badlock_print)
3789 printf("%s: %p %s\n", str, (void *)vp, msg);
3790 if (vfs_badlock_ddb)
3791 kdb_enter("lock violation");
3792 }
3793
3794 void
3795 assert_vi_locked(struct vnode *vp, const char *str)
3796 {
3797
3798 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3799 vfs_badlock("interlock is not locked but should be", str, vp);
3800 }
3801
3802 void
3803 assert_vi_unlocked(struct vnode *vp, const char *str)
3804 {
3805
3806 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3807 vfs_badlock("interlock is locked but should not be", str, vp);
3808 }
3809
3810 void
3811 assert_vop_locked(struct vnode *vp, const char *str)
3812 {
3813
3814 if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3815 vfs_badlock("is not locked but should be", str, vp);
3816 }
3817
3818 void
3819 assert_vop_unlocked(struct vnode *vp, const char *str)
3820 {
3821
3822 if (vp && !IGNORE_LOCK(vp) &&
3823 VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3824 vfs_badlock("is locked but should not be", str, vp);
3825 }
3826
3827 #if 0
3828 void
3829 assert_vop_elocked(struct vnode *vp, const char *str)
3830 {
3831
3832 if (vp && !IGNORE_LOCK(vp) &&
3833 VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3834 vfs_badlock("is not exclusive locked but should be", str, vp);
3835 }
3836
3837 void
3838 assert_vop_elocked_other(struct vnode *vp, const char *str)
3839 {
3840
3841 if (vp && !IGNORE_LOCK(vp) &&
3842 VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3843 vfs_badlock("is not exclusive locked by another thread",
3844 str, vp);
3845 }
3846
3847 void
3848 assert_vop_slocked(struct vnode *vp, const char *str)
3849 {
3850
3851 if (vp && !IGNORE_LOCK(vp) &&
3852 VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3853 vfs_badlock("is not locked shared but should be", str, vp);
3854 }
3855 #endif /* 0 */
3856
3857 void
3858 vop_rename_pre(void *ap)
3859 {
3860 struct vop_rename_args *a = ap;
3861
3862 if (a->a_tvp)
3863 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3864 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3865 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3866 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3867
3868 /* Check the source (from). */
3869 if (a->a_tdvp != a->a_fdvp)
3870 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3871 if (a->a_tvp != a->a_fvp)
3872 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3873
3874 /* Check the target. */
3875 if (a->a_tvp)
3876 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3877 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3878 }
3879
3880 void
3881 vop_strategy_pre(void *ap)
3882 {
3883 struct vop_strategy_args *a;
3884 struct buf *bp;
3885
3886 a = ap;
3887 bp = a->a_bp;
3888
3889 /*
3890 * Cluster ops lock their component buffers but not the IO container.
3891 */
3892 if ((bp->b_flags & B_CLUSTER) != 0)
3893 return;
3894
3895 if (BUF_REFCNT(bp) < 1) {
3896 if (vfs_badlock_print)
3897 printf(
3898 "VOP_STRATEGY: bp is not locked but should be\n");
3899 if (vfs_badlock_ddb)
3900 kdb_enter("lock violation");
3901 }
3902 }
3903
3904 void
3905 vop_lookup_pre(void *ap)
3906 {
3907 struct vop_lookup_args *a;
3908 struct vnode *dvp;
3909
3910 a = ap;
3911 dvp = a->a_dvp;
3912 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3913 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3914 }
3915
3916 void
3917 vop_lookup_post(void *ap, int rc)
3918 {
3919 struct vop_lookup_args *a;
3920 struct componentname *cnp;
3921 struct vnode *dvp;
3922 struct vnode *vp;
3923 int flags;
3924
3925 a = ap;
3926 dvp = a->a_dvp;
3927 cnp = a->a_cnp;
3928 vp = *(a->a_vpp);
3929 flags = cnp->cn_flags;
3930
3931 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3932
3933 /*
3934 	 * If this is the last path component for this lookup and LOCKPARENT
3935 	 * is set, or if there is an error, the directory has to be locked.
3936 */
3937 if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3938 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3939 else if (rc != 0)
3940 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3941 else if (dvp != vp)
3942 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3943 if (flags & PDIRUNLOCK)
3944 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3945 }
3946
3947 void
3948 vop_lock_pre(void *ap)
3949 {
3950 struct vop_lock_args *a = ap;
3951
3952 if ((a->a_flags & LK_INTERLOCK) == 0)
3953 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3954 else
3955 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3956 }
3957
3958 void
3959 vop_lock_post(void *ap, int rc)
3960 {
3961 struct vop_lock_args *a = ap;
3962
3963 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3964 if (rc == 0)
3965 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3966 }
3967
3968 void
3969 vop_unlock_pre(void *ap)
3970 {
3971 struct vop_unlock_args *a = ap;
3972
3973 if (a->a_flags & LK_INTERLOCK)
3974 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3975 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3976 }
3977
3978 void
3979 vop_unlock_post(void *ap, int rc)
3980 {
3981 struct vop_unlock_args *a = ap;
3982
3983 if (a->a_flags & LK_INTERLOCK)
3984 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3985 }
3986 #endif /* DEBUG_VFS_LOCKS */
3987
3988 static struct knlist fs_knlist;
3989
3990 static void
3991 vfs_event_init(void *arg)
3992 {
3993 knlist_init(&fs_knlist, NULL);
3994 }
3995 /* XXX - correct order? */
3996 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3997
3998 void
3999 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
4000 {
4001
4002 KNOTE_UNLOCKED(&fs_knlist, event);
4003 }
4004
4005 static int filt_fsattach(struct knote *kn);
4006 static void filt_fsdetach(struct knote *kn);
4007 static int filt_fsevent(struct knote *kn, long hint);
4008
4009 struct filterops fs_filtops =
4010 { 0, filt_fsattach, filt_fsdetach, filt_fsevent };
4011
4012 static int
4013 filt_fsattach(struct knote *kn)
4014 {
4015
4016 kn->kn_flags |= EV_CLEAR;
4017 knlist_add(&fs_knlist, kn, 0);
4018 return (0);
4019 }
4020
4021 static void
4022 filt_fsdetach(struct knote *kn)
4023 {
4024
4025 knlist_remove(&fs_knlist, kn, 0);
4026 }
4027
4028 static int
4029 filt_fsevent(struct knote *kn, long hint)
4030 {
4031
4032 kn->kn_fflags |= hint;
4033 return (kn->kn_fflags != 0);
4034 }
4035
4036 static int
4037 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4038 {
4039 struct vfsidctl vc;
4040 int error;
4041 struct mount *mp;
4042
4043 error = SYSCTL_IN(req, &vc, sizeof(vc));
4044 if (error)
4045 return (error);
4046 if (vc.vc_vers != VFS_CTL_VERS1)
4047 return (EINVAL);
4048 mp = vfs_getvfs(&vc.vc_fsid);
4049 if (mp == NULL)
4050 return (ENOENT);
4051 /* ensure that a specific sysctl goes to the right filesystem. */
4052 if (strcmp(vc.vc_fstypename, "*") != 0 &&
4053 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4054 return (EINVAL);
4055 }
4056 VCTLTOREQ(&vc, req);
4057 return (VFS_SYSCTL(mp, vc.vc_op, req));
4058 }
4059
4060 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
4061 NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");